 drivers/net/ethernet/intel/i40e/i40e_main.c | 84
 drivers/net/ethernet/intel/i40e/i40e_txrx.c | 19
 drivers/net/ethernet/intel/i40e/i40e_txrx.h |  8
 3 files changed, 58 insertions(+), 53 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a090815a6dd9..c74ac585c639 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2516,7 +2516,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
 {
 	struct i40e_q_vector *q_vector = data;
 
-	if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+	if (!q_vector->tx.ring && !q_vector->rx.ring)
 		return IRQ_HANDLED;
 
 	napi_schedule(&q_vector->napi);
@@ -2533,7 +2533,7 @@ static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
 {
 	struct i40e_q_vector *q_vector = data;
 
-	if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+	if (!q_vector->tx.ring && !q_vector->rx.ring)
 		return IRQ_HANDLED;
 
 	pr_info("fdir ring cleaning needed\n");
@@ -2560,14 +2560,14 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 	for (vector = 0; vector < q_vectors; vector++) {
 		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
 
-		if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
+		if (q_vector->tx.ring && q_vector->rx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
 			tx_int_idx++;
-		} else if (q_vector->rx.ring[0]) {
+		} else if (q_vector->rx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
-		} else if (q_vector->tx.ring[0]) {
+		} else if (q_vector->tx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
 		} else {
@@ -2778,40 +2778,26 @@ static irqreturn_t i40e_intr(int irq, void *data)
 }
 
 /**
- * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector
+ * i40e_map_vector_to_qp - Assigns the queue pair to the vector
  * @vsi: the VSI being configured
  * @v_idx: vector index
- * @r_idx: rx queue index
+ * @qp_idx: queue pair index
  **/
-static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
+static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
 {
 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
-	struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
-
-	rx_ring->q_vector = q_vector;
-	q_vector->rx.ring[q_vector->rx.count] = rx_ring;
-	q_vector->rx.count++;
-	q_vector->rx.latency_range = I40E_LOW_LATENCY;
-	q_vector->vsi = vsi;
-}
-
-/**
- * i40e_map_vector_to_txq - Assigns the Tx queue to the vector
- * @vsi: the VSI being configured
- * @v_idx: vector index
- * @t_idx: tx queue index
- **/
-static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
-{
-	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
-	struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
+	struct i40e_ring *tx_ring = &(vsi->tx_rings[qp_idx]);
+	struct i40e_ring *rx_ring = &(vsi->rx_rings[qp_idx]);
 
 	tx_ring->q_vector = q_vector;
-	q_vector->tx.ring[q_vector->tx.count] = tx_ring;
+	tx_ring->next = q_vector->tx.ring;
+	q_vector->tx.ring = tx_ring;
 	q_vector->tx.count++;
-	q_vector->tx.latency_range = I40E_LOW_LATENCY;
-	q_vector->num_ringpairs++;
-	q_vector->vsi = vsi;
+
+	rx_ring->q_vector = q_vector;
+	rx_ring->next = q_vector->rx.ring;
+	q_vector->rx.ring = rx_ring;
+	q_vector->rx.count++;
 }
 
 /**
@@ -2827,7 +2813,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
 {
 	int qp_remaining = vsi->num_queue_pairs;
 	int q_vectors = vsi->num_q_vectors;
-	int qp_per_vector;
+	int num_ringpairs;
 	int v_start = 0;
 	int qp_idx = 0;
 
@@ -2835,11 +2821,21 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
 	 * group them so there are multiple queues per vector.
 	 */
 	for (; v_start < q_vectors && qp_remaining; v_start++) {
-		qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
-		for (; qp_per_vector;
-		     qp_per_vector--, qp_idx++, qp_remaining--) {
-			map_vector_to_rxq(vsi, v_start, qp_idx);
-			map_vector_to_txq(vsi, v_start, qp_idx);
+		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
+
+		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+
+		q_vector->num_ringpairs = num_ringpairs;
+
+		q_vector->rx.count = 0;
+		q_vector->tx.count = 0;
+		q_vector->rx.ring = NULL;
+		q_vector->tx.ring = NULL;
+
+		while (num_ringpairs--) {
+			map_vector_to_qp(vsi, v_start, qp_idx);
+			qp_idx++;
+			qp_remaining--;
 		}
 	}
 }
@@ -3179,16 +3175,17 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
 {
 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
-	int r_idx;
+	struct i40e_ring *ring;
 
 	if (!q_vector)
 		return;
 
 	/* disassociate q_vector from rings */
-	for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
-		q_vector->tx.ring[r_idx]->q_vector = NULL;
-	for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
-		q_vector->rx.ring[r_idx]->q_vector = NULL;
+	i40e_for_each_ring(ring, q_vector->tx)
+		ring->q_vector = NULL;
+
+	i40e_for_each_ring(ring, q_vector->rx)
+		ring->q_vector = NULL;
 
 	/* only VSI w/ an associated netdev is set up w/ NAPI */
 	if (vsi->netdev)
@@ -5312,6 +5309,9 @@ static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 		netif_napi_add(vsi->netdev, &q_vector->napi,
 			       i40e_napi_poll, vsi->work_limit);
 
+	q_vector->rx.latency_range = I40E_LOW_LATENCY;
+	q_vector->tx.latency_range = I40E_LOW_LATENCY;
+
 	/* tie q_vector and vsi together */
 	vsi->q_vectors[v_idx] = q_vector;
 
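The core of the i40e_main.c change is the new map_vector_to_qp(): instead of writing each ring into a slot of a fixed-size ring[] array, it prepends the ring to a singly linked list anchored in the ring container, which removes the old eight-ring-pairs-per-vector cap. A minimal, self-contained sketch of that prepend-and-walk pattern (userspace C with illustrative names, not driver code):

#include <stdio.h>

struct ring {
	int idx;
	struct ring *next;	/* mirrors i40e_ring::next */
};

struct container {
	struct ring *head;	/* mirrors i40e_ring_container::ring */
	int count;
};

static void push_ring(struct container *c, struct ring *r)
{
	r->next = c->head;	/* O(1) head insert, no fixed-size array */
	c->head = r;
	c->count++;
}

int main(void)
{
	struct ring rings[3] = { { .idx = 0 }, { .idx = 1 }, { .idx = 2 } };
	struct container c = { NULL, 0 };
	struct ring *r;

	for (int i = 0; i < 3; i++)
		push_ring(&c, &rings[i]);

	/* walks 2, 1, 0 -- reverse of assignment order */
	for (r = c.head; r; r = r->next)
		printf("ring %d\n", r->idx);
	return 0;
}

Prepending stores the rings in reverse qp_idx order, which is harmless here because every consumer in the patch walks the whole list rather than indexing into it.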
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 3e73bc093737..f153f3770346 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1100,27 +1100,28 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 	struct i40e_q_vector *q_vector =
 			       container_of(napi, struct i40e_q_vector, napi);
 	struct i40e_vsi *vsi = q_vector->vsi;
+	struct i40e_ring *ring;
 	bool clean_complete = true;
 	int budget_per_ring;
-	int i;
 
 	if (test_bit(__I40E_DOWN, &vsi->state)) {
 		napi_complete(napi);
 		return 0;
 	}
 
+	/* Since the actual Tx work is minimal, we can give the Tx a larger
+	 * budget and be more aggressive about cleaning up the Tx descriptors.
+	 */
+	i40e_for_each_ring(ring, q_vector->tx)
+		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
 	/* We attempt to distribute budget to each Rx queue fairly, but don't
 	 * allow the budget to go below 1 because that would exit polling early.
-	 * Since the actual Tx work is minimal, we can give the Tx a larger
-	 * budget and be more aggressive about cleaning up the Tx descriptors.
 	 */
 	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
-	for (i = 0; i < q_vector->num_ringpairs; i++) {
-		clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
-						    vsi->work_limit);
-		clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
-						    budget_per_ring);
-	}
+
+	i40e_for_each_ring(ring, q_vector->rx)
+		clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
 
 	/* If work not completed, return budget and polling will return */
 	if (!clean_complete)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 7f3f7e3e4238..c2a6746a5ec9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -180,6 +180,7 @@ enum i40e_ring_state_t {
 
 /* struct that defines a descriptor ring, associated with a VSI */
 struct i40e_ring {
+	struct i40e_ring *next;		/* pointer to next ring in q_vector */
 	void *desc;			/* Descriptor ring memory */
 	struct device *dev;		/* Used for DMA mapping */
 	struct net_device *netdev;	/* netdev ring maps to */
@@ -236,9 +237,8 @@ enum i40e_latency_range {
 };
 
 struct i40e_ring_container {
-#define I40E_MAX_RINGPAIR_PER_VECTOR 8
 	/* array of pointers to rings */
-	struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
+	struct i40e_ring *ring;
 	unsigned int total_bytes;	/* total bytes processed this int */
 	unsigned int total_packets;	/* total packets processed this int */
 	u16 count;
@@ -246,6 +246,10 @@ struct i40e_ring_container {
 	u16 itr;
 };
 
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+	for (pos = (head).ring; pos != NULL; pos = pos->next)
+
 void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
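The i40e_for_each_ring() macro expands to a plain for loop over the NULL-terminated list; note that head is expanded as (head).ring, so callers pass the container itself (for example q_vector->tx), not a pointer to it. A hypothetical caller, shown only to illustrate usage (count_tx_rings is not part of the patch):

/* Hypothetical helper, for illustration only: walk a vector's Tx list
 * with the new iterator and count its entries.
 */
static int count_tx_rings(struct i40e_q_vector *q_vector)
{
	struct i40e_ring *ring;
	int n = 0;

	i40e_for_each_ring(ring, q_vector->tx)
		n++;

	return n;	/* matches q_vector->tx.count after mapping */
}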
