author	Alexey Kardashevskiy <aik@ozlabs.ru>	2019-04-03 00:12:33 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2019-04-17 07:36:51 -0400
commit	7a3a4d763837d3aa654cd1059030950410c04d77 (patch)
tree	7aac3da0f61a6d20c362b9dda93bdcb1d0be833f
parent	eb9d7a62c38628ab0ba6e59d22d7cb7930e415d1 (diff)
powerpc/mm_iommu: Allow pinning large regions
When called with vmas_arg==NULL, get_user_pages_longterm() allocates an
array of nr_pages*8 bytes which can easily exceed the maximum order
allocation; for example, registering memory for a 256GB guest does this
and fails in __alloc_pages_nodemask().

This adds a loop over chunks of entries to fit the max order limit.

Fixes: 678e174c4c16 ("powerpc/mm/iommu: allow migration of cma allocated pages during mm_iommu_do_alloc", 2019-03-05)
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
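To put numbers on the failure mode, the sketch below is an illustration only (userspace, not part of the patch): the 64K page size, MAX_ORDER of 9 and 256GB region are assumed values chosen to resemble a common ppc64 configuration, not taken from the commit. It works out the size of the pointer array a single get_user_pages_longterm() call would need for the whole region, the largest single page allocation, and the per-call chunk the patch derives from it.

/*
 * Illustrative arithmetic only, not kernel code.  Page size, MAX_ORDER
 * and the region size are assumptions resembling a ppc64 configuration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 16;			/* assumed 64K pages */
	unsigned long max_order = 9;			/* assumed MAX_ORDER */
	unsigned long region = 256UL << 30;		/* 256GB registration */

	unsigned long entries = region >> page_shift;	/* pages to pin */
	unsigned long array = entries * sizeof(void *);	/* vmas array size */

	/* largest single allocation the page allocator can satisfy */
	unsigned long max_alloc = 1UL << (page_shift + max_order - 1);

	/* entries per get_user_pages_longterm() call, as in the patch */
	unsigned long chunk = max_alloc / sizeof(void *);

	printf("entries = %lu\n", entries);			/* 4194304 */
	printf("array   = %lu bytes, limit %lu\n", array, max_alloc);
	printf("chunk   = %lu entries per call\n", chunk);

	return 0;
}

With those assumed values the old single call needs a 32MB temporary array against a 16MB allocation limit, while the chunked loop makes two calls of 2M entries each.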
-rw-r--r--	arch/powerpc/mm/mmu_context_iommu.c	24
 1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index 9d9be850f8c2..8330f135294f 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -98,6 +98,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	struct mm_iommu_table_group_mem_t *mem, *mem2;
 	long i, ret, locked_entries = 0, pinned = 0;
 	unsigned int pageshift;
+	unsigned long entry, chunk;
 
 	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
 		ret = mm_iommu_adjust_locked_vm(mm, entries, true);
@@ -134,11 +135,26 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	}
 
 	down_read(&mm->mmap_sem);
-	ret = get_user_pages_longterm(ua, entries, FOLL_WRITE, mem->hpages, NULL);
+	chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
+			sizeof(struct vm_area_struct *);
+	chunk = min(chunk, entries);
+	for (entry = 0; entry < entries; entry += chunk) {
+		unsigned long n = min(entries - entry, chunk);
+
+		ret = get_user_pages_longterm(ua + (entry << PAGE_SHIFT), n,
+				FOLL_WRITE, mem->hpages + entry, NULL);
+		if (ret == n) {
+			pinned += n;
+			continue;
+		}
+		if (ret > 0)
+			pinned += ret;
+		break;
+	}
 	up_read(&mm->mmap_sem);
-	pinned = ret > 0 ? ret : 0;
-	if (ret != entries) {
-		ret = -EFAULT;
+	if (pinned != entries) {
+		if (!ret)
+			ret = -EFAULT;
 		goto free_exit;
 	}
 
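One detail worth noting in the reworked exit path: pinned now accumulates across the chunked get_user_pages_longterm() calls, and the loop stops at the first short or failed call while keeping that call's return value; -EFAULT is substituted only when that last call returned 0, so a genuine error code still reaches free_exit and the caller.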