about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/sfc
diff options
context:
space:
mode:
author: Ben Hutchings <bhutchings@solarflare.com> 2013-07-04 18:48:46 -0400
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2013-07-28 19:29:53 -0400
commit: 1c6d3d1d21e522c9722cd6bdfd57afb171786028 (patch)
tree: bd6e918aa0918f5f3d24c1da24b5791396052035 /drivers/net/ethernet/sfc
parent: c3a5491228f90fd82e67b7ccd2fcbaf984d44c07 (diff)
sfc: Fix memory leak when discarding scattered packets
[ Upstream commit 734d4e159b283a4ae4d007b7e7a91d84398ccb92 ]

Commit 2768935a4660 ('sfc: reuse pages to avoid DMA mapping/unmapping costs')
did not fully take account of DMA scattering which was introduced
immediately before. If a received packet is invalid and must be
discarded, we only drop a reference to the first buffer's page, but we
need to drop a reference for each buffer the packet used.

I think this bug was missed partly because efx_recycle_rx_buffers() was
not renamed and so no longer does what its name says. It does not
change the state of buffers, but only prepares the underlying pages for
recycling. Rename it accordingly.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/net/ethernet/sfc')
-rw-r--r--drivers/net/ethernet/sfc/rx.c27
1 file changed, 20 insertions, 7 deletions
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index a7dfe36cabf4..5173eaac5bca 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -282,9 +282,9 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
282} 282}
283 283
284/* Recycle the pages that are used by buffers that have just been received. */ 284/* Recycle the pages that are used by buffers that have just been received. */
285static void efx_recycle_rx_buffers(struct efx_channel *channel, 285static void efx_recycle_rx_pages(struct efx_channel *channel,
286 struct efx_rx_buffer *rx_buf, 286 struct efx_rx_buffer *rx_buf,
287 unsigned int n_frags) 287 unsigned int n_frags)
288{ 288{
289 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); 289 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
290 290
@@ -294,6 +294,20 @@ static void efx_recycle_rx_buffers(struct efx_channel *channel,
294 } while (--n_frags); 294 } while (--n_frags);
295} 295}
296 296
297static void efx_discard_rx_packet(struct efx_channel *channel,
298 struct efx_rx_buffer *rx_buf,
299 unsigned int n_frags)
300{
301 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
302
303 efx_recycle_rx_pages(channel, rx_buf, n_frags);
304
305 do {
306 efx_free_rx_buffer(rx_buf);
307 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
308 } while (--n_frags);
309}
310
297/** 311/**
298 * efx_fast_push_rx_descriptors - push new RX descriptors quickly 312 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
299 * @rx_queue: RX descriptor queue 313 * @rx_queue: RX descriptor queue
@@ -533,8 +547,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
533 */ 547 */
534 if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { 548 if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
535 efx_rx_flush_packet(channel); 549 efx_rx_flush_packet(channel);
536 put_page(rx_buf->page); 550 efx_discard_rx_packet(channel, rx_buf, n_frags);
537 efx_recycle_rx_buffers(channel, rx_buf, n_frags);
538 return; 551 return;
539 } 552 }
540 553
@@ -570,9 +583,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
570 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); 583 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
571 } 584 }
572 585
573 /* All fragments have been DMA-synced, so recycle buffers and pages. */ 586 /* All fragments have been DMA-synced, so recycle pages. */
574 rx_buf = efx_rx_buffer(rx_queue, index); 587 rx_buf = efx_rx_buffer(rx_queue, index);
575 efx_recycle_rx_buffers(channel, rx_buf, n_frags); 588 efx_recycle_rx_pages(channel, rx_buf, n_frags);
576 589
577 /* Pipeline receives so that we give time for packet headers to be 590 /* Pipeline receives so that we give time for packet headers to be
578 * prefetched into cache. 591 * prefetched into cache.