about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/intel/ice/ice_txrx.c
diff options
context:
space:
mode:
author	Brett Creeley <brett.creeley@intel.com>	2019-02-08 15:50:59 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2019-03-19 20:24:03 -0400
commit	ad71b256ba4e6e469d60e3f7b9973fd195b04bee (patch)
tree	dbf7f86e7058939ad11fb9caa1c9a13551ead638 /drivers/net/ethernet/intel/ice/ice_txrx.c
parent	544f63d307b103d0b1e2bc25f1830d48df177031 (diff)
ice: Determine descriptor count and ring size based on PAGE_SIZE
Currently we set the default number of Tx and Rx descriptors to 128 by default. For Rx this amounts to a full page (assuming 4K pages) because each Rx descriptor is 32 Bytes, but for Tx it only amounts to a half page because each Tx descriptor is 16 Bytes (assuming 4K pages). Instead of assuming 4K pages, determine the ring size and the number of descriptors for Tx and Rx based on a calculation using the PAGE_SIZE, ICE_MAX_NUM_DESC, and ICE_REQ_DESC_MULTIPLE. This change is being made to improve the performance of the driver when using the default settings. Signed-off-by: Brett Creeley <brett.creeley@intel.com> Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com> Tested-by: Andrew Bowers <andrewx.bowers@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c')
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_txrx.c	| 10
1 file changed, 5 insertions, 5 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index c289d97f477d..fad308c936b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -236,9 +236,9 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
 	if (!tx_ring->tx_buf)
 		return -ENOMEM;
 
-	/* round up to nearest 4K */
+	/* round up to nearest page */
 	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
-		      4096);
+		      PAGE_SIZE);
 	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
 					    GFP_KERNEL);
 	if (!tx_ring->desc) {
@@ -339,9 +339,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
 	if (!rx_ring->rx_buf)
 		return -ENOMEM;
 
-	/* round up to nearest 4K */
-	rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
-	rx_ring->size = ALIGN(rx_ring->size, 4096);
+	/* round up to nearest page */
+	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+			      PAGE_SIZE);
 	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
 					    GFP_KERNEL);
 	if (!rx_ring->desc) {