author     Ben Hutchings <bhutchings@solarflare.com>  2013-01-29 18:33:14 -0500
committer  Ben Hutchings <bhutchings@solarflare.com>  2013-03-07 15:22:06 -0500
commit     272baeeb6a98f5f746c2eeab4973c2df89e9d7ea (patch)
tree       c894c86c4331a376e728fdffb5cdbcd301158628 /drivers/net/ethernet/sfc
parent     80c2e716d555912168f93853f96a24d0de75897b (diff)
sfc: Properly distinguish RX buffer and DMA lengths
Replace efx_nic::rx_buffer_len with efx_nic::rx_dma_len, the maximum
RX DMA length.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
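[Editor's note, not part of the patch: a minimal userspace sketch of the new sizing
arithmetic performed in efx_start_datapath() below. All numeric constants here are
illustrative stand-ins; the real values come from the NIC type (efx->type), the page
size, and the configured MTU.]

/* Illustrative sketch of the new RX sizing in efx_start_datapath().
 * Every constant below is an assumed stand-in, not a driver value.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define EFX_PAGE_IP_ALIGN	2	/* assumed start-of-buffer alignment offset */
#define RX_BUFFER_HASH_SIZE	4	/* assumed RX hash prefix length */
#define RX_BUFFER_PADDING	16	/* assumed overrun padding */
#define MAX_FRAME_LEN		1522	/* assumed EFX_MAX_FRAME_LEN for MTU 1500 */
#define RX_PAGE_STATE_SIZE	16	/* assumed sizeof(struct efx_rx_page_state) */

/* get_order(): smallest order such that (PAGE_SIZE << order) >= size */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	/* rx_dma_len now covers only what the NIC may actually DMA:
	 * hash prefix + maximum frame + padding.  The IP alignment
	 * offset and the per-page state are added separately when the
	 * page allocation order is chosen.
	 */
	unsigned long rx_dma_len = RX_BUFFER_HASH_SIZE + MAX_FRAME_LEN +
				   RX_BUFFER_PADDING;
	unsigned int rx_buffer_order =
		get_order(RX_PAGE_STATE_SIZE + EFX_PAGE_IP_ALIGN + rx_dma_len);

	printf("rx_dma_len = %lu, rx_buffer_order = %u\n",
	       rx_dma_len, rx_buffer_order);
	return 0;
}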
Diffstat (limited to 'drivers/net/ethernet/sfc')
-rw-r--r--  drivers/net/ethernet/sfc/efx.c          11
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h    5
-rw-r--r--  drivers/net/ethernet/sfc/rx.c           19
3 files changed, 15 insertions, 20 deletions
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 5e1ddc559b4f..34b56ec87fba 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -639,12 +639,11 @@ static void efx_start_datapath(struct efx_nic *efx)
 	 * support the current MTU, including padding for header
 	 * alignment and overruns.
 	 */
-	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
-			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
-			      efx->type->rx_buffer_hash_size +
-			      efx->type->rx_buffer_padding);
-	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
-					 sizeof(struct efx_rx_page_state));
+	efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
+			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+			   efx->type->rx_buffer_padding);
+	efx->rx_buffer_order = get_order(sizeof(struct efx_rx_page_state) +
+					 EFX_PAGE_IP_ALIGN + efx->rx_dma_len);
 
 	/* We must keep at least one descriptor in a TX ring empty.
 	 * We could avoid this when the queue size does not exactly
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f74411fc000c..fc6770e07d5a 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -669,8 +669,7 @@ struct vfdi_status;
  * @n_channels: Number of channels in use
  * @n_rx_channels: Number of channels used for RX (= number of RX queues)
  * @n_tx_channels: Number of channels used for TX
- * @rx_buffer_len: RX buffer length, including start alignment but excluding
- *	any metadata
+ * @rx_dma_len: Current maximum RX DMA length
  * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
  * @rx_hash_key: Toeplitz hash key for RSS
  * @rx_indir_table: Indirection table for RSS
@@ -786,7 +785,7 @@ struct efx_nic {
 	unsigned rss_spread;
 	unsigned tx_channel_offset;
 	unsigned n_tx_channels;
-	unsigned int rx_buffer_len;
+	unsigned int rx_dma_len;
 	unsigned int rx_buffer_order;
 	u8 rx_hash_key[40];
 	u32 rx_indir_table[128];
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index e7aa28eb9327..31361db28f91 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -27,8 +27,9 @@
 /* Number of RX descriptors pushed at once. */
 #define EFX_RX_BATCH  8
 
-/* Maximum size of a buffer sharing a page */
-#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
+/* Maximum length for an RX descriptor sharing a page */
+#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state) \
+			  - EFX_PAGE_IP_ALIGN)
 
 /* Size of buffer allocated for skb header area. */
 #define EFX_SKB_HEADERS  64u
@@ -52,10 +53,6 @@ static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
 {
 	return buf->page_offset + efx->type->rx_buffer_hash_size;
 }
-static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
-{
-	return PAGE_SIZE << efx->rx_buffer_order;
-}
 
 static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
 {
@@ -105,7 +102,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
 		if (unlikely(page == NULL))
 			return -ENOMEM;
 		dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
-					efx_rx_buf_size(efx),
+					PAGE_SIZE << efx->rx_buffer_order,
 					DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
 			__free_pages(page, efx->rx_buffer_order);
@@ -124,12 +121,12 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
 		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
 		rx_buf->page = page;
 		rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
-		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+		rx_buf->len = efx->rx_dma_len;
 		rx_buf->flags = 0;
 		++rx_queue->added_count;
 		++state->refcnt;
 
-		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
+		if ((~count & 1) && (efx->rx_dma_len <= EFX_RX_HALF_PAGE)) {
 			/* Use the second half of the page */
 			get_page(page);
 			dma_addr += (PAGE_SIZE >> 1);
@@ -153,7 +150,7 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 	if (--state->refcnt == 0) {
 		dma_unmap_page(&efx->pci_dev->dev,
 			       state->dma_addr,
-			       efx_rx_buf_size(efx),
+			       PAGE_SIZE << efx->rx_buffer_order,
 			       DMA_FROM_DEVICE);
 	} else if (used_len) {
 		dma_sync_single_for_cpu(&efx->pci_dev->dev,
@@ -221,7 +218,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
 
 	rx_buf->flags = 0;
 
-	if (efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+	if (efx->rx_dma_len <= EFX_RX_HALF_PAGE &&
 	    page_count(rx_buf->page) == 1)
 		efx_resurrect_rx_buffer(rx_queue, rx_buf);
 
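[Editor's note, not part of the patch: a small sketch of the page-sharing check that the
renamed field feeds in rx.c above. Two RX buffers can be carved from one page only if the
DMA length fits in half a page once the per-page state and the IP alignment offset are
set aside; the sizes below are assumed stand-ins, not kernel values.]

/* Illustrative check mirroring the rx_dma_len <= EFX_RX_HALF_PAGE test
 * in rx.c above.  Sizes are assumed stand-ins, not kernel values.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define EFX_PAGE_IP_ALIGN	2	/* assumed alignment offset */
#define RX_PAGE_STATE_SIZE	16	/* assumed sizeof(struct efx_rx_page_state) */

/* Mirrors the new EFX_RX_HALF_PAGE definition */
#define EFX_RX_HALF_PAGE \
	((PAGE_SIZE >> 1) - RX_PAGE_STATE_SIZE - EFX_PAGE_IP_ALIGN)

static bool rx_buffers_share_page(unsigned long rx_dma_len)
{
	return rx_dma_len <= EFX_RX_HALF_PAGE;
}

int main(void)
{
	/* A standard-MTU frame fits twice per page; a jumbo frame does not. */
	printf("dma len 1542: share = %d\n", rx_buffers_share_page(1542));
	printf("dma len 9042: share = %d\n", rx_buffers_share_page(9042));
	return 0;
}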