author		Brett Creeley <brett.creeley@intel.com>	2019-02-08 15:50:59 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2019-03-19 20:24:03 -0400
commit		ad71b256ba4e6e469d60e3f7b9973fd195b04bee (patch)
tree		dbf7f86e7058939ad11fb9caa1c9a13551ead638
parent		544f63d307b103d0b1e2bc25f1830d48df177031 (diff)
ice: Determine descriptor count and ring size based on PAGE_SIZE
Currently we set the number of Tx and Rx descriptors to 128 by default. For Rx this amounts to a full page (assuming 4K pages) because each Rx descriptor is 32 bytes, but for Tx it only amounts to half a page because each Tx descriptor is 16 bytes.

Instead of assuming 4K pages, determine the ring size and the number of descriptors for Tx and Rx based on a calculation using PAGE_SIZE, ICE_MAX_NUM_DESC, and ICE_REQ_DESC_MULTIPLE. This change is being made to improve the performance of the driver when using the default settings.

Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
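For illustration only (not part of the patch), the following standalone sketch mirrors the new default-descriptor calculation so the effect of PAGE_SIZE can be checked. ALIGN_UP and MIN are local stand-ins for the kernel's ALIGN() and min_t(), and the descriptor sizes (32-byte Rx, 16-byte Tx) are those named in the commit message.

/* Standalone sketch of the new default descriptor-count calculation. */
#include <stdio.h>

#define ICE_REQ_DESC_MULTIPLE	32
#define ICE_MAX_NUM_DESC	8160
/* round x up to the next multiple of a (stand-in for the kernel's ALIGN()) */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))
#define MIN(a, b)		((a) < (b) ? (a) : (b))

static unsigned int dflt_desc(unsigned int page_size, unsigned int desc_size)
{
	/* descriptors needed to fill one page, rounded up to the required
	 * multiple, capped at the hardware maximum
	 */
	return MIN(ICE_MAX_NUM_DESC,
		   ALIGN_UP(page_size / desc_size, ICE_REQ_DESC_MULTIPLE));
}

int main(void)
{
	unsigned int pages[] = { 4096, 16384, 65536 };

	for (unsigned int i = 0; i < sizeof(pages) / sizeof(pages[0]); i++)
		printf("PAGE_SIZE %6u: Rx default %4u, Tx default %4u\n",
		       pages[i], dflt_desc(pages[i], 32), dflt_desc(pages[i], 16));
	return 0;
}

With 4K pages this keeps the Rx default at 128 descriptors and raises the Tx default to 256, so both rings now fill one page; with 64K pages the defaults become 2048 and 4096 respectively, still well under ICE_MAX_NUM_DESC.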
-rw-r--r--	drivers/net/ethernet/intel/ice/ice.h		16
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_lib.c	28
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_txrx.c	10
3 files changed, 43 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 89440775aea1..38a5aafb8840 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -42,10 +42,21 @@
 
 extern const char ice_drv_ver[];
 #define ICE_BAR0		0
-#define ICE_DFLT_NUM_DESC	128
 #define ICE_REQ_DESC_MULTIPLE	32
 #define ICE_MIN_NUM_DESC	ICE_REQ_DESC_MULTIPLE
 #define ICE_MAX_NUM_DESC	8160
+/* set default number of Rx/Tx descriptors to the minimum between
+ * ICE_MAX_NUM_DESC and the number of descriptors to fill up an entire page
+ */
+#define ICE_DFLT_NUM_RX_DESC	min_t(u16, ICE_MAX_NUM_DESC, \
+				      ALIGN(PAGE_SIZE / \
+					    sizeof(union ice_32byte_rx_desc), \
+					    ICE_REQ_DESC_MULTIPLE))
+#define ICE_DFLT_NUM_TX_DESC	min_t(u16, ICE_MAX_NUM_DESC, \
+				      ALIGN(PAGE_SIZE / \
+					    sizeof(struct ice_tx_desc), \
+					    ICE_REQ_DESC_MULTIPLE))
+
 #define ICE_DFLT_TRAFFIC_CLASS	BIT(0)
 #define ICE_INT_NAME_STR_LEN	(IFNAMSIZ + 16)
 #define ICE_ETHTOOL_FWVER_LEN	32
@@ -257,7 +268,8 @@ struct ice_vsi {
 	u16 num_txq;		/* Used Tx queues */
 	u16 alloc_rxq;		/* Allocated Rx queues */
 	u16 num_rxq;		/* Used Rx queues */
-	u16 num_desc;
+	u16 num_rx_desc;
+	u16 num_tx_desc;
 	struct ice_tc_cfg tc_cfg;
 } ____cacheline_internodealigned_in_smp;
 
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index c6572f6fb488..d3061f243877 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -276,7 +276,26 @@ err_txrings:
 }
 
 /**
- * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
+ * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
+ * @vsi: the VSI being configured
+ */
+static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
+{
+	switch (vsi->type) {
+	case ICE_VSI_PF:
+		vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
+		vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
+		break;
+	default:
+		dev_dbg(&vsi->back->pdev->dev,
+			"Not setting number of Tx/Rx descriptors for VSI type %d\n",
+			vsi->type);
+		break;
+	}
+}
+
+/**
+ * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
  * @vsi: the VSI being configured
  *
  * Return 0 on success and a negative value on error
@@ -289,7 +308,6 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
 	case ICE_VSI_PF:
 		vsi->alloc_txq = pf->num_lan_tx;
 		vsi->alloc_rxq = pf->num_lan_rx;
-		vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
 		vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
 		break;
 	case ICE_VSI_VF:
@@ -307,6 +325,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
 			 vsi->type);
 		break;
 	}
+
+	ice_vsi_set_num_desc(vsi);
 }
 
 /**
@@ -1212,7 +1232,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 		ring->ring_active = false;
 		ring->vsi = vsi;
 		ring->dev = &pf->pdev->dev;
-		ring->count = vsi->num_desc;
+		ring->count = vsi->num_tx_desc;
 		vsi->tx_rings[i] = ring;
 	}
 
@@ -1231,7 +1251,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 		ring->vsi = vsi;
 		ring->netdev = vsi->netdev;
 		ring->dev = &pf->pdev->dev;
-		ring->count = vsi->num_desc;
+		ring->count = vsi->num_rx_desc;
 		vsi->rx_rings[i] = ring;
 	}
 
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index c289d97f477d..fad308c936b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -236,9 +236,9 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
 	if (!tx_ring->tx_buf)
 		return -ENOMEM;
 
-	/* round up to nearest 4K */
+	/* round up to nearest page */
 	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
-			      4096);
+			      PAGE_SIZE);
 	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
 					    GFP_KERNEL);
 	if (!tx_ring->desc) {
@@ -339,9 +339,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
 	if (!rx_ring->rx_buf)
 		return -ENOMEM;
 
-	/* round up to nearest 4K */
-	rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
-	rx_ring->size = ALIGN(rx_ring->size, 4096);
+	/* round up to nearest page */
+	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+			      PAGE_SIZE);
 	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
 					    GFP_KERNEL);
 	if (!rx_ring->desc) {