aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNicholas Piggin <npiggin@gmail.com>2017-11-09 12:27:36 -0500
committerMichael Ellerman <mpe@ellerman.id.au>2017-11-13 07:34:19 -0500
commit6a72dc038b615229a1b285829d6c8378d15c2347 (patch)
tree3258a738aae36b3dc5aab119ead8e11706191751
parent7ece370996b694ae263025e056ad785afc1be5ab (diff)
powerpc/64s/hash: Fix 128TB-512TB virtual address boundary case allocation
When allocating VA space with a hint that crosses 128TB, the SLB addr_limit
variable is not expanded if addr is not > 128TB, but the slice allocation
looks at task_size, which is 512TB. This results in slice_check_fit()
incorrectly succeeding because the slice_count truncates off bit 128 of the
requested mask, so the comparison to the available mask succeeds.

Fix this by using mm->context.addr_limit instead of mm->task_size for
testing allocation limits. This causes such allocations to fail.

Fixes: f4ea6dcb08ea ("powerpc/mm: Enable mappings above 128TB")
Cc: stable@vger.kernel.org # v4.12+
Reported-by: Florian Weimer <fweimer@redhat.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--arch/powerpc/mm/slice.c50
1 files changed, 24 insertions, 26 deletions
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 48a5312103a1..3889201b560c 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -96,7 +96,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 {
 	struct vm_area_struct *vma;
 
-	if ((mm->task_size - len) < addr)
+	if ((mm->context.addr_limit - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
 	return (!vma || (addr + len) <= vm_start_gap(vma));
@@ -133,7 +133,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
 		if (!slice_low_has_vma(mm, i))
 			ret->low_slices |= 1u << i;
 
-	if (mm->task_size <= SLICE_LOW_TOP)
+	if (mm->context.addr_limit <= SLICE_LOW_TOP)
 		return;
 
 	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
@@ -412,25 +412,31 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	struct slice_mask compat_mask;
 	int fixed = (flags & MAP_FIXED);
 	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
+	unsigned long page_size = 1UL << pshift;
 	struct mm_struct *mm = current->mm;
 	unsigned long newaddr;
 	unsigned long high_limit;
 
-	/*
-	 * Check if we need to expland slice area.
-	 */
-	if (unlikely(addr >= mm->context.addr_limit &&
-		     mm->context.addr_limit != TASK_SIZE)) {
-		mm->context.addr_limit = TASK_SIZE;
+	high_limit = DEFAULT_MAP_WINDOW;
+	if (addr >= high_limit)
+		high_limit = TASK_SIZE;
+
+	if (len > high_limit)
+		return -ENOMEM;
+	if (len & (page_size - 1))
+		return -EINVAL;
+	if (fixed) {
+		if (addr & (page_size - 1))
+			return -EINVAL;
+		if (addr > high_limit - len)
+			return -ENOMEM;
+	}
+
+	if (high_limit > mm->context.addr_limit) {
+		mm->context.addr_limit = high_limit;
 		on_each_cpu(slice_flush_segments, mm, 1);
 	}
-	/*
-	 * This mmap request can allocate upt to 512TB
-	 */
-	if (addr >= DEFAULT_MAP_WINDOW)
-		high_limit = mm->context.addr_limit;
-	else
-		high_limit = DEFAULT_MAP_WINDOW;
+
 	/*
 	 * init different masks
 	 */
@@ -446,27 +452,19 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 
 	/* Sanity checks */
 	BUG_ON(mm->task_size == 0);
+	BUG_ON(mm->context.addr_limit == 0);
 	VM_BUG_ON(radix_enabled());
 
 	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
 	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
 		  addr, len, flags, topdown);
 
-	if (len > mm->task_size)
-		return -ENOMEM;
-	if (len & ((1ul << pshift) - 1))
-		return -EINVAL;
-	if (fixed && (addr & ((1ul << pshift) - 1)))
-		return -EINVAL;
-	if (fixed && addr > (mm->task_size - len))
-		return -ENOMEM;
-
 	/* If hint, make sure it matches our alignment restrictions */
 	if (!fixed && addr) {
-		addr = _ALIGN_UP(addr, 1ul << pshift);
+		addr = _ALIGN_UP(addr, page_size);
 		slice_dbg(" aligned addr=%lx\n", addr);
 		/* Ignore hint if it's too large or overlaps a VMA */
-		if (addr > mm->task_size - len ||
+		if (addr > high_limit - len ||
 		    !slice_area_is_free(mm, addr, len))
 			addr = 0;
 	}