path: root/drivers/net/ethernet/sfc/rx.c
author	Ben Hutchings <bhutchings@solarflare.com>	2013-01-10 18:51:54 -0500
committer	Ben Hutchings <bhutchings@solarflare.com>	2013-02-26 09:57:16 -0500
commit	b590ace09d51cd39744e0f7662c5e4a0d1b5d952 (patch)
tree	c83ba26d5cc2e970c271f46d64b47e9767bce908 /drivers/net/ethernet/sfc/rx.c
parent	3a68f19d7afb80f548d016effbc6ed52643a8085 (diff)
sfc: Fix efx_rx_buf_offset() in the presence of swiotlb
We assume that the mapping between DMA and virtual addresses is done on whole pages, so we can find the page offset of an RX buffer using the lower bits of the DMA address. However, swiotlb maps in units of 2K, breaking this assumption.

Add an explicit page_offset field to struct efx_rx_buffer.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
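The assumption being removed is easiest to see in isolation: the old helper recovered the page offset from the low bits of the DMA address, which only holds when the DMA mapping preserves page alignment. Below is a minimal user-space sketch of the failure mode; the addresses are hypothetical and 2048 stands in for swiotlb's 2 KiB mapping unit, it is not code from the driver.

#include <stdio.h>

#define PAGE_SIZE	4096u
#define IO_TLB_SLAB	2048u	/* swiotlb maps in 2 KiB units */

int main(void)
{
	/* The RX buffer really starts 3072 bytes into its page. */
	unsigned int real_page_offset = 3072;

	/* Direct mapping: the DMA address keeps the page offset, so
	 * masking with (PAGE_SIZE - 1) recovers it correctly. */
	unsigned long long direct_dma = 0x10000000ULL + real_page_offset;

	/* swiotlb bounce buffer: the copy lands in a 2 KiB slab that need
	 * not share the original buffer's page alignment, so the low bits
	 * of the DMA address no longer give the offset within the
	 * original page. */
	unsigned long long bounced_dma =
		0x20000000ULL + (real_page_offset & (IO_TLB_SLAB - 1));

	printf("direct mapping: %llu (expected %u)\n",
	       direct_dma & (PAGE_SIZE - 1), real_page_offset);
	printf("swiotlb bounce: %llu (expected %u)\n",
	       bounced_dma & (PAGE_SIZE - 1), real_page_offset);
	return 0;
}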
Diffstat (limited to 'drivers/net/ethernet/sfc/rx.c')
-rw-r--r--	drivers/net/ethernet/sfc/rx.c | 10
1 file changed, 5 insertions, 5 deletions
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index a5754916478e..879ff5849bbd 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
 static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
					      struct efx_rx_buffer *buf)
 {
-	/* Offset is always within one page, so we don't need to consider
-	 * the page order.
-	 */
-	return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
-		efx->type->rx_buffer_hash_size;
+	return buf->page_offset + efx->type->rx_buffer_hash_size;
 }
 static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 {
@@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 	struct efx_rx_buffer *rx_buf;
 	struct page *page;
+	unsigned int page_offset;
 	struct efx_rx_page_state *state;
 	dma_addr_t dma_addr;
 	unsigned index, count;
@@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 		state->dma_addr = dma_addr;
 
 		dma_addr += sizeof(struct efx_rx_page_state);
+		page_offset = sizeof(struct efx_rx_page_state);
 
 	split:
 		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
 		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
 		rx_buf->u.page = page;
+		rx_buf->page_offset = page_offset;
 		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
 		rx_buf->flags = EFX_RX_BUF_PAGE;
 		++rx_queue->added_count;
@@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 		/* Use the second half of the page */
 		get_page(page);
 		dma_addr += (PAGE_SIZE >> 1);
+		page_offset += (PAGE_SIZE >> 1);
 		++count;
 		goto split;
 	}
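As a usage note, the refill path above carves each page into two RX buffers: the first begins right after the struct efx_rx_page_state header, and the second starts in the upper half of the page, with page_offset now tracking both steps explicitly instead of being rederived from dma_addr. A tiny standalone sketch of that arithmetic follows; the 16-byte state size and 4 KiB page size are illustrative assumptions, not values taken from the driver.

#include <stdio.h>

#define PAGE_SIZE	4096u
#define STATE_SIZE	16u	/* stand-in for sizeof(struct efx_rx_page_state) */

int main(void)
{
	/* First buffer on the page: just past the per-page state header,
	 * mirroring "page_offset = sizeof(struct efx_rx_page_state)". */
	unsigned int page_offset = STATE_SIZE;
	printf("first buffer:  page_offset = %u\n", page_offset);

	/* Second buffer: the upper half of the same page, mirroring
	 * "page_offset += (PAGE_SIZE >> 1)" alongside the dma_addr bump. */
	page_offset += PAGE_SIZE >> 1;
	printf("second buffer: page_offset = %u\n", page_offset);

	return 0;
}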