Diffstat (limited to 'mm')

 -rw-r--r--  mm/Kconfig              |  8
 -rw-r--r--  mm/fremap.c             | 17
 -rw-r--r--  mm/hugetlb.c            | 20
 -rw-r--r--  mm/memory.c             | 48
 -rw-r--r--  mm/memory_hotplug.c     |  8
 -rw-r--r--  mm/mlock.c              | 11
 -rw-r--r--  mm/mmap.c               |  6
 -rw-r--r--  mm/nommu.c              |  2
 -rw-r--r--  mm/process_vm_access.c  |  8
 -rw-r--r--  mm/vmscan.c             |  2

 10 files changed, 93 insertions, 37 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index ae55c1e04d10..3bea74f1ccfe 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -286,8 +286,12 @@ config NR_QUICK
 	default "1"
 
 config VIRT_TO_BUS
-	def_bool y
-	depends on HAVE_VIRT_TO_BUS
+	bool
+	help
+	  An architecture should select this if it implements the
+	  deprecated interface virt_to_bus(). All new architectures
+	  should probably not select this.
+
 
 config MMU_NOTIFIER
 	bool
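With VIRT_TO_BUS demoted from "def_bool y" to a plain bool, an architecture that still implements the deprecated virt_to_bus() must now opt in from its own Kconfig. A minimal sketch of what such an opt-in looks like (the architecture entry shown here is illustrative, not part of this change):

# arch/<arch>/Kconfig -- an arch that keeps virt_to_bus() now selects
# the symbol explicitly instead of inheriting it via HAVE_VIRT_TO_BUS:
config M68K
	bool
	default y
	select VIRT_TO_BUS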
diff --git a/mm/fremap.c b/mm/fremap.c
index 0cd4c11488ed..87da3590c61e 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -129,7 +129,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	struct vm_area_struct *vma;
 	int err = -EINVAL;
 	int has_write_lock = 0;
-	vm_flags_t vm_flags;
+	vm_flags_t vm_flags = 0;
 
 	if (prot)
 		return err;
@@ -204,10 +204,8 @@ get_write_lock:
 			unsigned long addr;
 			struct file *file = get_file(vma->vm_file);
 
-			vm_flags = vma->vm_flags;
-			if (!(flags & MAP_NONBLOCK))
-				vm_flags |= VM_POPULATE;
-			addr = mmap_region(file, start, size, vm_flags, pgoff);
+			addr = mmap_region(file, start, size,
+					vma->vm_flags, pgoff);
 			fput(file);
 			if (IS_ERR_VALUE(addr)) {
 				err = addr;
@@ -226,12 +224,6 @@ get_write_lock:
 			mutex_unlock(&mapping->i_mmap_mutex);
 		}
 
-		if (!(flags & MAP_NONBLOCK) && !(vma->vm_flags & VM_POPULATE)) {
-			if (!has_write_lock)
-				goto get_write_lock;
-			vma->vm_flags |= VM_POPULATE;
-		}
-
 		if (vma->vm_flags & VM_LOCKED) {
 			/*
 			 * drop PG_Mlocked flag for over-mapped range
@@ -254,7 +246,8 @@ get_write_lock:
 	 */
 
 out:
-	vm_flags = vma->vm_flags;
+	if (vma)
+		vm_flags = vma->vm_flags;
 	if (likely(!has_write_lock))
 		up_read(&mm->mmap_sem);
 	else
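The "vm_flags = 0" initialization and the new "if (vma)" guard belong together: the vma lookup can fail, leaving vma NULL at the out: label. Further down the syscall, outside this hunk, vm_flags feeds the populate decision; roughly, per the 3.9-era tail of remap_file_pages() (a sketch from context, not part of this diff):

	/* sketch: final lines of SYSCALL_DEFINE5(remap_file_pages, ...) */
	if (!err && ((vm_flags & VM_LOCKED) || !(flags & MAP_NONBLOCK)))
		mm_populate(start, size);

With vm_flags pre-zeroed, a failed lookup no longer reads uninitialized stack when that check runs.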
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0a0be33bb199..1a12f5b9a0ab 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2124,8 +2124,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
 unsigned long hugetlb_total_pages(void)
 {
-	struct hstate *h = &default_hstate;
-	return h->nr_huge_pages * pages_per_huge_page(h);
+	struct hstate *h;
+	unsigned long nr_total_pages = 0;
+
+	for_each_hstate(h)
+		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
+	return nr_total_pages;
 }
 
 static int hugetlb_acct_memory(struct hstate *h, long delta)
@@ -2957,7 +2961,17 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			break;
 		}
 
-		if (absent ||
+		/*
+		 * We need to call hugetlb_fault for both hugepages under
+		 * migration (in which case hugetlb_fault waits for the
+		 * migration) and hwpoisoned hugepages (in which case we need
+		 * to prevent the caller from accessing them). To do this, we
+		 * use is_swap_pte here instead of is_hugetlb_entry_migration
+		 * and is_hugetlb_entry_hwpoisoned, because it simply covers
+		 * both cases, and because we can't follow correct pages
+		 * directly from any kind of swap entries.
+		 */
+		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
 		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
 			int ret;
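The new test relies on is_swap_pte() covering every non-none, non-present entry, which is what lets a single check stand in for both is_hugetlb_entry_migration() and is_hugetlb_entry_hwpoisoned(). For reference, its definition in include/linux/swapops.h was, at the time, approximately the following (hedged; the pte_file() clause predates the later removal of file ptes):

static inline int is_swap_pte(pte_t pte)
{
	/* anything set in the pte that isn't a present mapping or a file pte */
	return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
}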
diff --git a/mm/memory.c b/mm/memory.c
index 494526ae024a..ba94dec5b259 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -216,6 +216,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
 	tlb->mm = mm;
 
 	tlb->fullmm = fullmm;
+	tlb->need_flush_all = 0;
 	tlb->start = -1UL;
 	tlb->end = 0;
 	tlb->need_flush = 0;
@@ -2392,6 +2393,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(remap_pfn_range);
 
+/**
+ * vm_iomap_memory - remap memory to userspace
+ * @vma: user vma to map to
+ * @start: start of area
+ * @len: size of area
+ *
+ * This is a simplified io_remap_pfn_range() for common driver use. The
+ * driver just needs to give us the physical memory range to be mapped,
+ * we'll figure out the rest from the vma information.
+ *
+ * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
+ * whatever write-combining details or similar.
+ */
+int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
+{
+	unsigned long vm_len, pfn, pages;
+
+	/* Check that the physical memory area passed in looks valid */
+	if (start + len < start)
+		return -EINVAL;
+	/*
+	 * You *really* shouldn't map things that aren't page-aligned,
+	 * but we've historically allowed it because IO memory might
+	 * just have smaller alignment.
+	 */
+	len += start & ~PAGE_MASK;
+	pfn = start >> PAGE_SHIFT;
+	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
+	if (pfn + pages < pfn)
+		return -EINVAL;
+
+	/* We start the mapping 'vm_pgoff' pages into the area */
+	if (vma->vm_pgoff > pages)
+		return -EINVAL;
+	pfn += vma->vm_pgoff;
+	pages -= vma->vm_pgoff;
+
+	/* Can we fit all of the mapping? */
+	vm_len = vma->vm_end - vma->vm_start;
+	if (vm_len >> PAGE_SHIFT > pages)
+		return -EINVAL;
+
+	/* Ok, let it rip */
+	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_iomap_memory);
+
 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 			      unsigned long addr, unsigned long end,
 			      pte_fn_t fn, void *data)
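The obvious consumer of vm_iomap_memory() is a driver's mmap file operation. A minimal sketch under assumed names — foo_dev, mmio_start and mmio_len are hypothetical, only vm_iomap_memory() itself comes from this diff:

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_dev *dev = file->private_data;	/* hypothetical driver state */

	/* Per the NOTE in the kerneldoc: tweak caching before mapping MMIO. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* The helper validates alignment, vm_pgoff and the vma size itself. */
	return vm_iomap_memory(vma, dev->mmio_start, dev->mmio_len);
}

This replaces the open-coded pattern of computing the pfn, clamping against vm_pgoff, and calling io_remap_pfn_range() directly, which several drivers previously got subtly wrong.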
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b81a367b9f39..ee3765760818 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1779,7 +1779,11 @@ void try_offline_node(int nid)
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		struct zone *zone = pgdat->node_zones + i;
 
-		if (zone->wait_table)
+		/*
+		 * wait_table may be allocated from boot memory;
+		 * only free it here if it was allocated by vmalloc.
+		 */
+		if (is_vmalloc_addr(zone->wait_table))
 			vfree(zone->wait_table);
 	}
 
@@ -1801,7 +1805,7 @@ int __ref remove_memory(int nid, u64 start, u64 size)
 	int retry = 1;
 
 	start_pfn = PFN_DOWN(start);
-	end_pfn = start_pfn + PFN_DOWN(size);
+	end_pfn = PFN_UP(start + size - 1);
 
 	/*
 	 * When CONFIG_MEMCG is on, one memory block may be used by other
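The end_pfn change matters when start + size does not land on a page boundary: PFN_DOWN() silently dropped the partial tail page. A worked example with illustrative numbers:

/*
 * Assuming 4 KiB pages (PAGE_SHIFT == 12), so one page spans 0x1000 bytes.
 *
 * start = 0x1000, size = 0x1800 -- the range [0x1000, 0x2800) covers
 * pfn 1 fully and pfn 2 partially:
 *
 *   old: end_pfn = PFN_DOWN(0x1000) + PFN_DOWN(0x1800) = 1 + 1 = 2
 *        -> pfn 2, which the range partially covers, is left out
 *   new: end_pfn = PFN_UP(0x1000 + 0x1800 - 1) = PFN_UP(0x27ff) = 3
 *        -> pfn 2 is included, so the whole block is handled
 */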
diff --git a/mm/mlock.c b/mm/mlock.c
index 1c5e33fce639..79b7cf7d1bca 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -358,7 +358,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
 
 		newflags = vma->vm_flags & ~VM_LOCKED;
 		if (on)
-			newflags |= VM_LOCKED | VM_POPULATE;
+			newflags |= VM_LOCKED;
 
 		tmp = vma->vm_end;
 		if (tmp > end)
@@ -418,8 +418,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 		 * range with the first VMA. Also, skip undesirable VMA types.
 		 */
 		nend = min(end, vma->vm_end);
-		if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) !=
-				VM_POPULATE)
+		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 			continue;
 		if (nstart < vma->vm_start)
 			nstart = vma->vm_start;
@@ -492,9 +491,9 @@ static int do_mlockall(int flags)
 	struct vm_area_struct * vma, * prev = NULL;
 
 	if (flags & MCL_FUTURE)
-		current->mm->def_flags |= VM_LOCKED | VM_POPULATE;
+		current->mm->def_flags |= VM_LOCKED;
 	else
-		current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE);
+		current->mm->def_flags &= ~VM_LOCKED;
 	if (flags == MCL_FUTURE)
 		goto out;
 
@@ -503,7 +502,7 @@ static int do_mlockall(int flags)
 
 		newflags = vma->vm_flags & ~VM_LOCKED;
 		if (flags & MCL_CURRENT)
-			newflags |= VM_LOCKED | VM_POPULATE;
+			newflags |= VM_LOCKED;
 
 		/* Ignore errors */
 		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
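Stripping VM_POPULATE out of do_mlock() and do_mlockall() does not change the userspace contract: locked ranges are still faulted in eagerly, the kernel simply keys population off VM_LOCKED again. A minimal sketch of the unchanged API:

#include <sys/mman.h>

/* Lock, and therefore populate, everything mapped now and in the future. */
int lock_all(void)
{
	return mlockall(MCL_CURRENT | MCL_FUTURE);
}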
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1306,7 +1306,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	}
 
 	addr = mmap_region(file, addr, len, vm_flags, pgoff);
-	if (!IS_ERR_VALUE(addr) && (vm_flags & VM_POPULATE))
+	if (!IS_ERR_VALUE(addr) &&
+	    ((vm_flags & VM_LOCKED) ||
+	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
 		*populate = len;
 	return addr;
 }
@@ -1938,7 +1940,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 
 	/* Check the cache first. */
 	/* (Cache hit rate is typically around 35%.) */
-	vma = mm->mmap_cache;
+	vma = ACCESS_ONCE(mm->mmap_cache);
 	if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
 		struct rb_node *rb_node;
 
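In do_mmap_pgoff() the populate decision is now computed directly from VM_LOCKED and the caller's MAP_POPULATE/MAP_NONBLOCK bits rather than stashed in a persistent vma flag. From userspace the observable behaviour is the familiar one; a minimal sketch:

#define _GNU_SOURCE
#include <sys/mman.h>

/* Fault all pages in up front; adding MAP_NONBLOCK would suppress this. */
void *map_populated(int fd, size_t len)
{
	return mmap(NULL, len, PROT_READ, MAP_PRIVATE | MAP_POPULATE, fd, 0);
}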
diff --git a/mm/nommu.c b/mm/nommu.c
index e19328087534..2f3ea749c318 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -821,7 +821,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 	struct vm_area_struct *vma;
 
 	/* check the cache first */
-	vma = mm->mmap_cache;
+	vma = ACCESS_ONCE(mm->mmap_cache);
 	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
 		return vma;
 
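Both find_vma() hunks wrap the mmap_cache read in ACCESS_ONCE() so the compiler performs exactly one load: without it, the pointer could be re-read between the NULL check and the dereference, and a concurrent update could make the second read observe a different value. The kernel's definition at the time, from include/linux/compiler.h:

/* Force a single volatile access to x; no merging or re-reading. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))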
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 926b46649749..fd26d0433509 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -429,12 +429,6 @@ compat_process_vm_rw(compat_pid_t pid,
 	if (flags != 0)
 		return -EINVAL;
 
-	if (!access_ok(VERIFY_READ, lvec, liovcnt * sizeof(*lvec)))
-		goto out;
-
-	if (!access_ok(VERIFY_READ, rvec, riovcnt * sizeof(*rvec)))
-		goto out;
-
 	if (vm_write)
 		rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
 						  UIO_FASTIOV, iovstack_l,
@@ -459,8 +453,6 @@ free_iovecs:
 		kfree(iov_r);
 	if (iov_l != iovstack_l)
 		kfree(iov_l);
-
-out:
 	return rc;
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 88c5fed8b9a4..669fba39be1a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3188,9 +3188,9 @@ int kswapd_run(int nid)
 	if (IS_ERR(pgdat->kswapd)) {
 		/* failure at boot is fatal */
 		BUG_ON(system_state == SYSTEM_BOOTING);
-		pgdat->kswapd = NULL;
 		pr_err("Failed to start kswapd on node %d\n", nid);
 		ret = PTR_ERR(pgdat->kswapd);
+		pgdat->kswapd = NULL;
 	}
 	return ret;
 }
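The vmscan.c reorder fixes a lost error code: the old sequence cleared pgdat->kswapd before reading it, and PTR_ERR(NULL) evaluates to 0, so kswapd_run() reported success on failure. Side by side:

	/* old order: the error is discarded */
	pgdat->kswapd = NULL;
	ret = PTR_ERR(pgdat->kswapd);	/* PTR_ERR(NULL) == 0 */

	/* new order: capture the error, then clear the stale pointer */
	ret = PTR_ERR(pgdat->kswapd);
	pgdat->kswapd = NULL;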
