author	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-24 19:10:23 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-24 19:10:23 -0400
commit	9c9fa97a8edbc3668dfc7a25de516e80c146e86f (patch)
tree	2dc0e90203796a4b346ce190f9521c3294104058 /mm/util.c
parent	5184d449600f501a8688069f35c138c6b3bf8b94 (diff)
parent	2b38d01b4de8b1bbda7f5f7e91252609557635fc (diff)
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:

 - a few hot fixes

 - ocfs2 updates

 - almost all of -mm (slab-generic, slab, slub, kmemleak, kasan, cleanups,
   debug, pagecache, memcg, gup, pagemap, memory-hotplug, sparsemem,
   vmalloc, initialization, z3fold, compaction, mempolicy, oom-kill,
   hugetlb, migration, thp, mmap, madvise, shmem, zswap, zsmalloc)

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (132 commits)
  mm/zsmalloc.c: fix a -Wunused-function warning
  zswap: do not map same object twice
  zswap: use movable memory if zpool support allocate movable memory
  zpool: add malloc_support_movable to zpool_driver
  shmem: fix obsolete comment in shmem_getpage_gfp()
  mm/madvise: reduce code duplication in error handling paths
  mm: mmap: increase sockets maximum memory size pgoff for 32bits
  mm/mmap.c: refine find_vma_prev() with rb_last()
  riscv: make mmap allocation top-down by default
  mips: use generic mmap top-down layout and brk randomization
  mips: replace arch specific way to determine 32bit task with generic version
  mips: adjust brk randomization offset to fit generic version
  mips: use STACK_TOP when computing mmap base address
  mips: properly account for stack randomization and stack guard gap
  arm: use generic mmap top-down layout and brk randomization
  arm: use STACK_TOP when computing mmap base address
  arm: properly account for stack randomization and stack guard gap
  arm64, mm: make randomization selected by generic topdown mmap layout
  arm64, mm: move generic mmap layout functions to mm
  arm64: consider stack randomization for mmap base only when necessary
  ...
Diffstat (limited to 'mm/util.c')
-rw-r--r--	mm/util.c	122
1 file changed, 120 insertions, 2 deletions
diff --git a/mm/util.c b/mm/util.c
index e6351a80f248..3ad6db9a722e 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -16,6 +16,13 @@
 #include <linux/hugetlb.h>
 #include <linux/vmalloc.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/elf.h>
+#include <linux/elf-randomize.h>
+#include <linux/personality.h>
+#include <linux/random.h>
+#include <linux/processor.h>
+#include <linux/sizes.h>
+#include <linux/compat.h>
 
 #include <linux/uaccess.h>
 
@@ -293,7 +300,105 @@ int vma_is_stack_for_current(struct vm_area_struct *vma)
 	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
 }
 
-#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
+#ifndef STACK_RND_MASK
+#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
+#endif
+
+unsigned long randomize_stack_top(unsigned long stack_top)
+{
+	unsigned long random_variable = 0;
+
+	if (current->flags & PF_RANDOMIZE) {
+		random_variable = get_random_long();
+		random_variable &= STACK_RND_MASK;
+		random_variable <<= PAGE_SHIFT;
+	}
+#ifdef CONFIG_STACK_GROWSUP
+	return PAGE_ALIGN(stack_top) + random_variable;
+#else
+	return PAGE_ALIGN(stack_top) - random_variable;
+#endif
+}
+
+#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+	/* Is the current task 32bit ? */
+	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
+		return randomize_page(mm->brk, SZ_32M);
+
+	return randomize_page(mm->brk, SZ_1G);
+}
+
+unsigned long arch_mmap_rnd(void)
+{
+	unsigned long rnd;
+
+#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
+	if (is_compat_task())
+		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
+	else
+#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
+		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
+
+	return rnd << PAGE_SHIFT;
+}
+
+static int mmap_is_legacy(struct rlimit *rlim_stack)
+{
+	if (current->personality & ADDR_COMPAT_LAYOUT)
+		return 1;
+
+	if (rlim_stack->rlim_cur == RLIM_INFINITY)
+		return 1;
+
+	return sysctl_legacy_va_layout;
+}
+
+/*
+ * Leave enough space between the mmap area and the stack to honour ulimit in
+ * the face of randomisation.
+ */
+#define MIN_GAP		(SZ_128M)
+#define MAX_GAP		(STACK_TOP / 6 * 5)
+
+static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+{
+	unsigned long gap = rlim_stack->rlim_cur;
+	unsigned long pad = stack_guard_gap;
+
+	/* Account for stack randomization if necessary */
+	if (current->flags & PF_RANDOMIZE)
+		pad += (STACK_RND_MASK << PAGE_SHIFT);
+
+	/* Values close to RLIM_INFINITY can overflow. */
+	if (gap + pad > gap)
+		gap += pad;
+
+	if (gap < MIN_GAP)
+		gap = MIN_GAP;
+	else if (gap > MAX_GAP)
+		gap = MAX_GAP;
+
+	return PAGE_ALIGN(STACK_TOP - gap - rnd);
+}
+
+void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+{
+	unsigned long random_factor = 0UL;
+
+	if (current->flags & PF_RANDOMIZE)
+		random_factor = arch_mmap_rnd();
+
+	if (mmap_is_legacy(rlim_stack)) {
+		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+	} else {
+		mm->mmap_base = mmap_base(random_factor, rlim_stack);
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+	}
+}
+#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 {
 	mm->mmap_base = TASK_UNMAPPED_BASE;
@@ -521,7 +626,7 @@ bool page_mapped(struct page *page)
 		return true;
 	if (PageHuge(page))
 		return false;
-	for (i = 0; i < (1 << compound_order(page)); i++) {
+	for (i = 0; i < compound_nr(page); i++) {
 		if (atomic_read(&page[i]._mapcount) >= 0)
 			return true;
 	}
@@ -783,3 +888,16 @@ out_mm:
 out:
 	return res;
 }
+
+int memcmp_pages(struct page *page1, struct page *page2)
+{
+	char *addr1, *addr2;
+	int ret;
+
+	addr1 = kmap_atomic(page1);
+	addr2 = kmap_atomic(page2);
+	ret = memcmp(addr1, addr2, PAGE_SIZE);
+	kunmap_atomic(addr2);
+	kunmap_atomic(addr1);
+	return ret;
+}
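
Illustrative note (not part of the commit): the sketch below reimplements, in
plain user-space C, the gap clamping performed by the mmap_base() helper added
above, so the arithmetic can be run standalone. The STACK_TOP value and the
stack rlimit and guard-gap values passed in main() are assumptions chosen only
for the example; the mask and MIN_GAP/MAX_GAP mirror the defaults in the diff.

/*
 * User-space sketch of the generic mmap_base() gap computation.
 * Assumed values: 4K pages, a 47-bit user VA top, an 8MB stack rlimit
 * and a 1MB stack guard gap.
 */
#include <stdio.h>

#define PAGE_SHIFT	12UL
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define SZ_128M		(128UL << 20)
#define STACK_TOP	0x7ffffffff000UL		/* assumed 47-bit VA top */
#define STACK_RND_MASK	(0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA, as in the diff */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base_sketch(unsigned long rnd, unsigned long rlim_cur,
				      unsigned long stack_guard_gap)
{
	unsigned long gap = rlim_cur;
	/*
	 * Pad with the guard gap plus worst-case stack randomization; the
	 * kernel only adds the randomization part when PF_RANDOMIZE is set,
	 * here it is always included for simplicity.
	 */
	unsigned long pad = stack_guard_gap + (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY would wrap, so check before adding. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

int main(void)
{
	/* 8MB stack rlimit, 1MB guard gap, no mmap randomization */
	printf("mmap_base = %#lx\n",
	       mmap_base_sketch(0, 8UL << 20, 1UL << 20));
	return 0;
}

With these numbers the padded gap (8MB + 1MB + 8MB) is still below MIN_GAP, so
the base ends up 128MB below STACK_TOP; an unlimited stack rlimit would instead
be clamped to MAX_GAP, which is why the overflow check above matters.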