Diffstat (limited to 'mm')
-rw-r--r--   mm/backing-dev.c |  4
-rw-r--r--   mm/filemap_xip.c |  2
-rw-r--r--   mm/mmap.c        | 15
-rw-r--r--   mm/nommu.c       |  3
-rw-r--r--   mm/slab.c        |  1
-rw-r--r--   mm/slob.c        |  1
-rw-r--r--   mm/slub.c        |  6
7 files changed, 26 insertions, 6 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index b0ceb29da4c7..e8644b1e5527 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -7,7 +7,7 @@
 
 int bdi_init(struct backing_dev_info *bdi)
 {
-        int i, j;
+        int i;
         int err;
 
         for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
@@ -21,7 +21,7 @@ int bdi_init(struct backing_dev_info *bdi)
 
         if (err) {
 err:
-                for (j = 0; j < i; j++)
+                while (i--)
                         percpu_counter_destroy(&bdi->bdi_stat[i]);
         }
 
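The backing-dev.c hunk fixes the error path of bdi_init(): the old loop counted j from 0 to i but always destroyed bdi_stat[i], so the counters that had actually been initialised were never torn down. Replacing it with while (i--) walks back over exactly those counters. Below is a minimal userspace sketch of that unwind idiom; init_item(), destroy_item() and NR_ITEMS are made-up names used only for illustration, not kernel APIs.

/* Sketch of the error-unwind idiom: undo exactly the items that were
 * initialised before the failure.  All names here are hypothetical. */
#include <stdio.h>

#define NR_ITEMS 4

static int init_item(int idx)
{
        printf("init %d\n", idx);
        return idx == 2 ? -1 : 0;       /* simulate a failure on item 2 */
}

static void destroy_item(int idx)
{
        printf("destroy %d\n", idx);
}

static int init_all(void)
{
        int i, err = 0;

        for (i = 0; i < NR_ITEMS; i++) {
                err = init_item(i);
                if (err)
                        break;
        }

        if (err) {
                /* i indexes the item that failed; unwind the earlier ones,
                 * last-initialised first, just like the while (i--) above */
                while (i--)
                        destroy_item(i);
        }
        return err;
}

int main(void)
{
        return init_all() ? 1 : 0;
}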
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 32132f3cd641..e233fff61b4b 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -314,7 +314,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
                 fault_in_pages_readable(buf, bytes);
                 kaddr = kmap_atomic(page, KM_USER0);
                 copied = bytes -
-                        __copy_from_user_inatomic_nocache(kaddr, buf, bytes);
+                        __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
                 kunmap_atomic(kaddr, KM_USER0);
                 flush_dcache_page(page);
 
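The filemap_xip.c change points the atomic copy at kaddr + offset rather than the start of the page, so a write that begins part-way into a page lands in the right place. A small userspace sketch of why the in-page offset matters; the buffer names and sizes are illustrative only.

/* Sketch of a partial-page copy: the destination must be base + offset,
 * otherwise the data overwrites the start of the page. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

int main(void)
{
        char page[PAGE_SIZE];
        const char *buf = "new data";
        size_t offset = 100;            /* write begins 100 bytes into the page */
        size_t bytes = strlen(buf);

        memset(page, 'A', sizeof(page));        /* pre-existing page contents */

        /* honour the in-page offset, as the fixed line does */
        memcpy(page + offset, buf, bytes);

        /* copying to 'page' (without + offset) would clobber bytes 0..bytes-1
         * instead of the range the caller asked to write */
        printf("%.*s\n", (int)bytes, page + offset);
        return 0;
}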
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -912,6 +912,9 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
         if (!len)
                 return -EINVAL;
 
+        if (!(flags & MAP_FIXED))
+                addr = round_hint_to_min(addr);
+
         error = arch_mmap_check(addr, len, flags);
         if (error)
                 return error;
@@ -1615,6 +1618,12 @@ static inline int expand_downwards(struct vm_area_struct *vma,
          */
         if (unlikely(anon_vma_prepare(vma)))
                 return -ENOMEM;
+
+        address &= PAGE_MASK;
+        error = security_file_mmap(0, 0, 0, 0, address, 1);
+        if (error)
+                return error;
+
         anon_vma_lock(vma);
 
         /*
@@ -1622,8 +1631,6 @@ static inline int expand_downwards(struct vm_area_struct *vma,
          * is required to hold the mmap_sem in read mode. We need the
          * anon_vma lock to serialize against concurrent expand_stacks.
          */
-        address &= PAGE_MASK;
-        error = 0;
 
         /* Somebody else might have raced and expanded it already */
         if (address < vma->vm_start) {
@@ -1934,6 +1941,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
         if (is_hugepage_only_range(mm, addr, len))
                 return -EINVAL;
 
+        error = security_file_mmap(0, 0, 0, 0, addr, 1);
+        if (error)
+                return error;
+
         flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
         error = arch_mmap_check(addr, len, flags);
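The do_mmap_pgoff() hunk above (and the matching one in mm/nommu.c below) relies on round_hint_to_min() to keep non-MAP_FIXED mappings out of the low pages, while the expand_downwards() and do_brk() hunks add explicit security_file_mmap() checks for the same class of address. The sketch below shows the kind of hint clamping round_hint_to_min() is expected to provide; the helper body, the PAGE_SIZE macros and the mmap_min_addr value are illustrative assumptions, not the kernel's exact implementation.

/* Sketch: a non-NULL mapping hint below the minimum mappable address is
 * rounded up to that minimum; NULL and high hints pass through unchanged. */
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

static unsigned long mmap_min_addr = 65536;     /* e.g. vm.mmap_min_addr */

static unsigned long round_hint_to_min(unsigned long hint)
{
        hint &= PAGE_MASK;
        if (hint != 0 && hint < mmap_min_addr)
                return PAGE_ALIGN(mmap_min_addr);
        return hint;
}

int main(void)
{
        printf("0x%lx\n", round_hint_to_min(0x1234));   /* clamped to 0x10000 */
        printf("0x%lx\n", round_hint_to_min(0));        /* NULL hint untouched */
        printf("0x%lx\n", round_hint_to_min(0x200000)); /* high hint untouched */
        return 0;
}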
diff --git a/mm/nommu.c b/mm/nommu.c
index 35622c590925..b989cb928a7c 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -829,6 +829,9 @@ unsigned long do_mmap_pgoff(struct file *file,
         void *result;
         int ret;
 
+        if (!(flags & MAP_FIXED))
+                addr = round_hint_to_min(addr);
+
         /* decide whether we should attempt the mapping, and if so what sort of
          * mapping */
         ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4475,3 +4475,4 @@ size_t ksize(const void *objp)
 
         return obj_size(virt_to_cache(objp));
 }
+EXPORT_SYMBOL(ksize);
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -495,6 +495,7 @@ size_t ksize(const void *block)
         else
                 return sp->page.private;
 }
+EXPORT_SYMBOL(ksize);
 
 struct kmem_cache {
         unsigned int size, align;
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2558,8 +2558,12 @@ size_t ksize(const void *object)
         if (unlikely(object == ZERO_SIZE_PTR))
                 return 0;
 
-        page = get_object_page(object);
+        page = virt_to_head_page(object);
         BUG_ON(!page);
+
+        if (unlikely(!PageSlab(page)))
+                return PAGE_SIZE << compound_order(page);
+
         s = page->slab;
         BUG_ON(!s);
 
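The slub.c hunk switches ksize() to virt_to_head_page() and adds a !PageSlab() branch, so an allocation that SLUB satisfied with plain compound pages rather than a slab reports the whole page block, PAGE_SIZE << compound_order(page). The sketch below mocks that decision with a fake page structure; struct fake_page and sketch_ksize() are stand-ins for illustration, not kernel types or functions.

/* Sketch of the fixed ksize() decision: a non-slab (compound-page) backed
 * allocation reports the full page block, a slab-backed object reports the
 * cache's object size. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct fake_page {
        int is_slab;                    /* stand-in for PageSlab(page) */
        unsigned int order;             /* stand-in for compound_order(page) */
        unsigned long slab_objsize;     /* stand-in for the kmem_cache size */
};

static unsigned long sketch_ksize(const struct fake_page *page)
{
        if (!page->is_slab)
                return PAGE_SIZE << page->order;
        return page->slab_objsize;
}

int main(void)
{
        struct fake_page large = { .is_slab = 0, .order = 3 };
        struct fake_page small = { .is_slab = 1, .slab_objsize = 192 };

        printf("%lu\n", sketch_ksize(&large));  /* 32768: whole compound page */
        printf("%lu\n", sketch_ksize(&small));  /* 192: slab object size */
        return 0;
}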