diff options
author | Hiroshi DOYU <hdoyu@nvidia.com> | 2012-07-02 07:26:38 -0400 |
---|---|---|
committer | Joerg Roedel <joerg.roedel@amd.com> | 2012-07-17 05:29:58 -0400 |
commit | 9e971a03af736acc6f96c200c2626d3bcb3d6927 (patch) | |
tree | f26c2333cf659ca9c8854173251e0182bba9ca5d /drivers | |
parent | 0bdbf4ccef4f87016e2fa0c0b34f7a025f364c3d (diff) |
iommu/tegra: smmu: Fix unsleepable memory allocation at alloc_pdir()
alloc_pdir() is called from smmu_iommu_domain_init() with a spinlock
held, so the memory allocations in alloc_pdir() had to be atomic. Instead
of converting them into atomic allocations, this patch releases the lock,
performs the allocations, re-acquires the lock, and then checks whether it
raced with another caller — avoiding the need to introduce a mutex or
preallocation.
Signed-off-by: Hiroshi DOYU <hdoyu@nvidia.com>
Reported-by: Chris Wright <chrisw@sous-sol.org>
Cc: Chris Wright <chrisw@sous-sol.org>
Acked-by: Stephen Warren <swarren@wwwdotorg.org>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/iommu/tegra-smmu.c | 77 |
1 files changed, 45 insertions, 32 deletions
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 68441fcca261..68a15a0d5b8a 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c | |||
@@ -555,28 +555,39 @@ static inline void put_signature(struct smmu_as *as, | |||
555 | /* | 555 | /* |
556 | * Caller must lock/unlock as | 556 | * Caller must lock/unlock as |
557 | */ | 557 | */ |
558 | static int alloc_pdir(struct smmu_as *as) | 558 | static int alloc_pdir(struct smmu_as *as, unsigned long *flags) |
559 | { | 559 | { |
560 | unsigned long *pdir; | 560 | unsigned long *pdir; |
561 | int pdn; | 561 | int pdn, err = 0; |
562 | u32 val; | 562 | u32 val; |
563 | struct smmu_device *smmu = as->smmu; | 563 | struct smmu_device *smmu = as->smmu; |
564 | struct page *page; | ||
565 | unsigned int *cnt; | ||
564 | 566 | ||
565 | as->pte_count = devm_kzalloc(smmu->dev, | 567 | /* |
566 | sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL); | 568 | * do the allocation outside the as->lock |
567 | if (!as->pte_count) { | 569 | */ |
568 | dev_err(smmu->dev, | 570 | spin_unlock_irqrestore(&as->lock, *flags); |
569 | "failed to allocate smmu_device PTE cunters\n"); | 571 | cnt = devm_kzalloc(smmu->dev, |
570 | return -ENOMEM; | 572 | sizeof(cnt[0]) * SMMU_PDIR_COUNT, GFP_KERNEL); |
573 | page = alloc_page(GFP_KERNEL | __GFP_DMA); | ||
574 | spin_lock_irqsave(&as->lock, *flags); | ||
575 | |||
576 | if (as->pdir_page) { | ||
577 | /* We raced, free the redundant */ | ||
578 | err = -EAGAIN; | ||
579 | goto err_out; | ||
571 | } | 580 | } |
572 | as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA); | 581 | |
573 | if (!as->pdir_page) { | 582 | if (!page || !cnt) { |
574 | dev_err(smmu->dev, | 583 | dev_err(smmu->dev, "failed to allocate at %s\n", __func__); |
575 | "failed to allocate smmu_device page directory\n"); | 584 | err = -ENOMEM; |
576 | devm_kfree(smmu->dev, as->pte_count); | 585 | goto err_out; |
577 | as->pte_count = NULL; | ||
578 | return -ENOMEM; | ||
579 | } | 586 | } |
587 | |||
588 | as->pdir_page = page; | ||
589 | as->pte_count = cnt; | ||
590 | |||
580 | SetPageReserved(as->pdir_page); | 591 | SetPageReserved(as->pdir_page); |
581 | pdir = page_address(as->pdir_page); | 592 | pdir = page_address(as->pdir_page); |
582 | 593 | ||
@@ -593,6 +604,12 @@ static int alloc_pdir(struct smmu_as *as) | |||
593 | FLUSH_SMMU_REGS(as->smmu); | 604 | FLUSH_SMMU_REGS(as->smmu); |
594 | 605 | ||
595 | return 0; | 606 | return 0; |
607 | |||
608 | err_out: | ||
609 | devm_kfree(smmu->dev, cnt); | ||
610 | if (page) | ||
611 | __free_page(page); | ||
612 | return err; | ||
596 | } | 613 | } |
597 | 614 | ||
598 | static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova) | 615 | static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova) |
@@ -784,29 +801,29 @@ out: | |||
784 | 801 | ||
785 | static int smmu_iommu_domain_init(struct iommu_domain *domain) | 802 | static int smmu_iommu_domain_init(struct iommu_domain *domain) |
786 | { | 803 | { |
787 | int i; | 804 | int i, err = -ENODEV; |
788 | unsigned long flags; | 805 | unsigned long flags; |
789 | struct smmu_as *as; | 806 | struct smmu_as *as; |
790 | struct smmu_device *smmu = smmu_handle; | 807 | struct smmu_device *smmu = smmu_handle; |
791 | 808 | ||
792 | /* Look for a free AS with lock held */ | 809 | /* Look for a free AS with lock held */ |
793 | for (i = 0; i < smmu->num_as; i++) { | 810 | for (i = 0; i < smmu->num_as; i++) { |
794 | struct smmu_as *tmp = &smmu->as[i]; | 811 | as = &smmu->as[i]; |
795 | 812 | spin_lock_irqsave(&as->lock, flags); | |
796 | spin_lock_irqsave(&tmp->lock, flags); | 813 | if (!as->pdir_page) { |
797 | if (!tmp->pdir_page) { | 814 | err = alloc_pdir(as, &flags); |
798 | as = tmp; | 815 | if (!err) |
799 | goto found; | 816 | goto found; |
800 | } | 817 | } |
801 | spin_unlock_irqrestore(&tmp->lock, flags); | 818 | spin_unlock_irqrestore(&as->lock, flags); |
819 | if (err != -EAGAIN) | ||
820 | break; | ||
802 | } | 821 | } |
803 | dev_err(smmu->dev, "no free AS\n"); | 822 | if (i == smmu->num_as) |
804 | return -ENODEV; | 823 | dev_err(smmu->dev, "no free AS\n"); |
824 | return err; | ||
805 | 825 | ||
806 | found: | 826 | found: |
807 | if (alloc_pdir(as) < 0) | ||
808 | goto err_alloc_pdir; | ||
809 | |||
810 | spin_lock(&smmu->lock); | 827 | spin_lock(&smmu->lock); |
811 | 828 | ||
812 | /* Update PDIR register */ | 829 | /* Update PDIR register */ |
@@ -822,10 +839,6 @@ found: | |||
822 | 839 | ||
823 | dev_dbg(smmu->dev, "smmu_as@%p\n", as); | 840 | dev_dbg(smmu->dev, "smmu_as@%p\n", as); |
824 | return 0; | 841 | return 0; |
825 | |||
826 | err_alloc_pdir: | ||
827 | spin_unlock_irqrestore(&as->lock, flags); | ||
828 | return -ENODEV; | ||
829 | } | 842 | } |
830 | 843 | ||
831 | static void smmu_iommu_domain_destroy(struct iommu_domain *domain) | 844 | static void smmu_iommu_domain_destroy(struct iommu_domain *domain) |