aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGeert Uytterhoeven <geert+renesas@glider.be>2018-07-20 12:16:59 -0400
committerJoerg Roedel <jroedel@suse.de>2018-07-27 03:41:24 -0400
commit46583e8c48c5a094ba28060615b3a7c8c576690f (patch)
tree6d21abb0e49100538d36c40895e3d43bd9c904ac
parent2ae86955703e9e6a119af4bbe27f6b6dd7a43131 (diff)
iommu/ipmmu-vmsa: Fix allocation in atomic context
When attaching a device to an IOMMU group with CONFIG_DEBUG_ATOMIC_SLEEP=y:

    BUG: sleeping function called from invalid context at mm/slab.h:421
    in_atomic(): 1, irqs_disabled(): 128, pid: 61, name: kworker/1:1
    ...
    Call trace:
     ...
     arm_lpae_alloc_pgtable+0x114/0x184
     arm_64_lpae_alloc_pgtable_s1+0x2c/0x128
     arm_32_lpae_alloc_pgtable_s1+0x40/0x6c
     alloc_io_pgtable_ops+0x60/0x88
     ipmmu_attach_device+0x140/0x334

ipmmu_attach_device() takes a spinlock, while arm_lpae_alloc_pgtable()
allocates memory using GFP_KERNEL.  Originally, the ipmmu-vmsa driver had
its own custom page table allocation implementation using GFP_ATOMIC,
hence the spinlock was fine.

Fix this by replacing the spinlock by a mutex, like the arm-smmu driver
does.

Fixes: f20ed39f53145e45 ("iommu/ipmmu-vmsa: Use the ARM LPAE page table allocator")
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--drivers/iommu/ipmmu-vmsa.c9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 6cbd2bdb92ce..41eee3401f05 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -74,7 +74,7 @@ struct ipmmu_vmsa_domain {
 	struct io_pgtable_ops *iop;
 
 	unsigned int context_id;
-	spinlock_t lock;			/* Protects mappings */
+	struct mutex mutex;			/* Protects mappings */
 };
 
 static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
@@ -600,7 +600,7 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
 	if (!domain)
 		return NULL;
 
-	spin_lock_init(&domain->lock);
+	mutex_init(&domain->mutex);
 
 	return &domain->io_domain;
 }
@@ -646,7 +646,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
 	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
 	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
-	unsigned long flags;
 	unsigned int i;
 	int ret = 0;
 
@@ -655,7 +654,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
 		return -ENXIO;
 	}
 
-	spin_lock_irqsave(&domain->lock, flags);
+	mutex_lock(&domain->mutex);
 
 	if (!domain->mmu) {
 		/* The domain hasn't been used yet, initialize it. */
@@ -679,7 +678,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
 	} else
 		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
 
-	spin_unlock_irqrestore(&domain->lock, flags);
+	mutex_unlock(&domain->mutex);
 
 	if (ret < 0)
 		return ret;