| author | Jon Mason <jon.mason@intel.com> | 2013-02-01 17:25:37 -0500 |
|---|---|---|
| committer | Jon Mason <jon.mason@intel.com> | 2013-05-15 13:57:45 -0400 |
| commit | b77b2637b39ecc380bb08992380d7d48452b0872 (patch) | |
| tree | aa19d879e5a13dbda6817c439fd9154e5f934366 /drivers/ntb | |
| parent | 113fc505b83b2d16e820ca74fa07f99a34877b1d (diff) | |
NTB: Link toggle memory leak
Each link-up allocates a new NTB receive buffer when the NTB properties
are negotiated with the remote system. These allocations did not check
for existing buffers, so the old buffers were never freed. The driver
now checks for an existing buffer and frees it if it is not of the
correct size, before allocating a new one.
Signed-off-by: Jon Mason <jon.mason@intel.com>
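
For context before reading the diff: ntb_set_mw() now returns early when the requested, page-aligned size matches the existing buffer, and frees the old buffer before allocating a differently sized one; ntb_free_mw() is also moved above ntb_set_mw() so the new call site needs no forward declaration. Below is a minimal userspace sketch of the same resize-or-reuse pattern, with malloc()/free() standing in for dma_alloc_coherent()/dma_free_coherent(); the mw_buf, mw_set, and mw_free names are illustrative stand-ins, not part of the driver.

```c
/*
 * Userspace sketch of the resize-or-reuse pattern this patch
 * introduces. malloc()/free() stand in for the kernel's
 * dma_alloc_coherent()/dma_free_coherent(); all names here are
 * illustrative, not taken from the NTB driver.
 */
#include <stdio.h>
#include <stdlib.h>

#define MW_ALIGN 4096
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct mw_buf {
	void *virt_addr;
	size_t size;
};

static void mw_free(struct mw_buf *mw)
{
	if (!mw->virt_addr)
		return;

	free(mw->virt_addr);
	mw->virt_addr = NULL;
}

static int mw_set(struct mw_buf *mw, size_t size)
{
	/* Same aligned size as the current buffer: keep it. */
	if (mw->size == ALIGN_UP(size, MW_ALIGN))
		return 0;

	/* A differently sized buffer exists: free it first (the leak fix). */
	if (mw->size != 0)
		mw_free(mw);

	mw->size = ALIGN_UP(size, MW_ALIGN);
	mw->virt_addr = malloc(mw->size);
	if (!mw->virt_addr) {
		mw->size = 0;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct mw_buf mw = { NULL, 0 };

	/* Repeated "link-up" negotiations no longer leak. */
	mw_set(&mw, 16000);
	mw_set(&mw, 16000);	/* reused: same aligned size */
	mw_set(&mw, 32000);	/* old buffer freed, new one allocated */
	printf("final size: %zu\n", mw.size);
	mw_free(&mw);
	return 0;
}
```

Skipping the re-allocation when the negotiated size is unchanged also avoids churning coherent DMA memory on every link toggle.
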
Diffstat (limited to 'drivers/ntb')
-rw-r--r-- | drivers/ntb/ntb_transport.c | 32 ++++++++++++++++++++------------ |
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 79a3203eccd9..be416d6850f0 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -507,17 +507,37 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
 	qp->tx_pkts = 0;
 }
 
+static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
+{
+	struct ntb_transport_mw *mw = &nt->mw[num_mw];
+	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+	if (!mw->virt_addr)
+		return;
+
+	dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
+	mw->virt_addr = NULL;
+}
+
 static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
 {
 	struct ntb_transport_mw *mw = &nt->mw[num_mw];
 	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
 
+	/* No need to re-setup */
+	if (mw->size == ALIGN(size, 4096))
+		return 0;
+
+	if (mw->size != 0)
+		ntb_free_mw(nt, num_mw);
+
 	/* Alloc memory for receiving data. Must be 4k aligned */
 	mw->size = ALIGN(size, 4096);
 
 	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
 					   GFP_KERNEL);
 	if (!mw->virt_addr) {
+		mw->size = 0;
 		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
 			(int) mw->size);
 		return -ENOMEM;
@@ -529,18 +549,6 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
 	return 0;
 }
 
-static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
-{
-	struct ntb_transport_mw *mw = &nt->mw[num_mw];
-	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
-
-	if (!mw->virt_addr)
-		return;
-
-	dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
-	mw->virt_addr = NULL;
-}
-
 static void ntb_qp_link_cleanup(struct work_struct *work)
 {
 	struct ntb_transport_qp *qp = container_of(work,