Diffstat (limited to 'mm')

 mm/hugetlb.c    |  5
 mm/memblock.c   |  7
 mm/memcontrol.c | 30
 mm/memory.c     | 28
 mm/mempolicy.c  | 11
 mm/migrate.c    | 16
 mm/mmap.c       | 77
 mm/nobootmem.c  | 13
 mm/nommu.c      | 41
 mm/page_alloc.c |  8
 mm/percpu.c     | 22
 mm/slub.c       |  2
 mm/swap_state.c |  2
 mm/vmscan.c     | 18
 mm/vmstat.c     |  4
 15 files changed, 191 insertions(+), 93 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b8ce6f450956..ae8f708e3d75 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -532,7 +532,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 				struct vm_area_struct *vma,
 				unsigned long address, int avoid_reserve)
 {
-	struct page *page;
+	struct page *page = NULL;
 	struct mempolicy *mpol;
 	nodemask_t *nodemask;
 	struct zonelist *zonelist;
@@ -2498,7 +2498,6 @@ retry_avoidcopy:
 	if (outside_reserve) {
 		BUG_ON(huge_pte_none(pte));
 		if (unmap_ref_private(mm, vma, old_page, address)) {
-			BUG_ON(page_count(old_page) != 1);
 			BUG_ON(huge_pte_none(pte));
 			spin_lock(&mm->page_table_lock);
 			ptep = huge_pte_offset(mm, address & huge_page_mask(h));
@@ -2791,6 +2790,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * so no worry about deadlock.
 	 */
 	page = pte_page(entry);
+	get_page(page);
 	if (page != pagecache_page)
 		lock_page(page);

@@ -2822,6 +2822,7 @@ out_page_table_lock:
 	}
 	if (page != pagecache_page)
 		unlock_page(page);
+	put_page(page);

 out_mutex:
 	mutex_unlock(&hugetlb_instantiation_mutex);
diff --git a/mm/memblock.c b/mm/memblock.c
index 99f285599501..a44eab3157f8 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -330,6 +330,9 @@ static int __init_memblock memblock_add_region(struct memblock_type *type,
 	phys_addr_t end = base + memblock_cap_size(base, &size);
 	int i, nr_new;

+	if (!size)
+		return 0;
+
 	/* special case for empty array */
 	if (type->regions[0].size == 0) {
 		WARN_ON(type->cnt != 1 || type->total_size);
@@ -430,6 +433,9 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,

 	*start_rgn = *end_rgn = 0;

+	if (!size)
+		return 0;
+
 	/* we'll create at most two more regions */
 	while (type->cnt + 2 > type->max)
 		if (memblock_double_array(type) < 0)
@@ -514,7 +520,6 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
 		     (unsigned long long)base,
 		     (unsigned long long)base + size,
 		     (void *)_RET_IP_);
-	BUG_ON(0 == size);

 	return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 901bb03f2ae7..f342778a0c0a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2165,7 +2165,7 @@ static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
 	if (action == CPU_ONLINE)
 		return NOTIFY_OK;

-	if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN)
+	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
 		return NOTIFY_OK;

 	for_each_mem_cgroup(iter)
@@ -2476,10 +2476,10 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 				       struct page *page,
 				       unsigned int nr_pages,
-				       struct page_cgroup *pc,
 				       enum charge_type ctype,
 				       bool lrucare)
 {
+	struct page_cgroup *pc = lookup_page_cgroup(page);
 	struct zone *uninitialized_var(zone);
 	bool was_on_lru = false;
 	bool anon;
@@ -2716,7 +2716,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 {
 	struct mem_cgroup *memcg = NULL;
 	unsigned int nr_pages = 1;
-	struct page_cgroup *pc;
 	bool oom = true;
 	int ret;

@@ -2730,11 +2729,10 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 		oom = false;
 	}

-	pc = lookup_page_cgroup(page);
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 	if (ret == -ENOMEM)
 		return ret;
-	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
+	__mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
 	return 0;
 }

@@ -2831,16 +2829,13 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 				  enum charge_type ctype)
 {
-	struct page_cgroup *pc;
-
 	if (mem_cgroup_disabled())
 		return;
 	if (!memcg)
 		return;
 	cgroup_exclude_rmdir(&memcg->css);

-	pc = lookup_page_cgroup(page);
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
+	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
@@ -3298,14 +3293,13 @@ int mem_cgroup_prepare_migration(struct page *page,
 	 * page. In the case new page is migrated but not remapped, new page's
 	 * mapcount will be finally 0 and we call uncharge in end_migration().
 	 */
-	pc = lookup_page_cgroup(newpage);
 	if (PageAnon(page))
 		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
 	else if (page_is_file_cache(page))
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
 	return ret;
 }

@@ -3392,7 +3386,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
 	 * LRU while we overwrite pc->mem_cgroup.
 	 */
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
 }

 #ifdef CONFIG_DEBUG_VM
@@ -3763,7 +3757,7 @@ move_account:
 			goto try_to_free;
 		cond_resched();
 	/* "ret" should also be checked to ensure all lists are empty. */
-	} while (memcg->res.usage > 0 || ret);
+	} while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0 || ret);
 out:
 	css_put(&memcg->css);
 	return ret;
@@ -3778,7 +3772,7 @@ try_to_free:
 	lru_add_drain_all();
 	/* try to free all pages in this cgroup */
 	shrink = 1;
-	while (nr_retries && memcg->res.usage > 0) {
+	while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
 		int progress;

 		if (signal_pending(current)) {
@@ -4529,6 +4523,12 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 swap_buffers:
 	/* Swap primary and spare array */
 	thresholds->spare = thresholds->primary;
+	/* If all events are unregistered, free the spare array */
+	if (!new) {
+		kfree(thresholds->spare);
+		thresholds->spare = NULL;
+	}
+
 	rcu_assign_pointer(thresholds->primary, new);

 	/* To be sure that nobody uses thresholds */
@@ -5469,7 +5469,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 	 * part of thp split is not executed yet.
 	 */
 	if (pmd_trans_huge_lock(pmd, vma) == 1) {
-		if (!mc.precharge) {
+		if (mc.precharge < HPAGE_PMD_NR) {
 			spin_unlock(&vma->vm_mm->page_table_lock);
 			return 0;
 		}
diff --git a/mm/memory.c b/mm/memory.c
index 6105f475fa86..1e77da6d82c1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1295,7 +1295,7 @@ static void unmap_page_range(struct mmu_gather *tlb,

 static void unmap_single_vma(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
+		unsigned long end_addr,
 		struct zap_details *details)
 {
 	unsigned long start = max(vma->vm_start, start_addr);
@@ -1307,9 +1307,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 	if (end <= vma->vm_start)
 		return;

-	if (vma->vm_flags & VM_ACCOUNT)
-		*nr_accounted += (end - start) >> PAGE_SHIFT;
-
 	if (unlikely(is_pfn_mapping(vma)))
 		untrack_pfn_vma(vma, 0, 0);

@@ -1339,8 +1336,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
- * @details: details of nonlinear truncation or shared cache invalidation
  *
  * Unmap all pages in the vma list.
  *
@@ -1355,15 +1350,13 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
-		struct zap_details *details)
+		unsigned long end_addr)
 {
 	struct mm_struct *mm = vma->vm_mm;

 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
-		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
-				 details);
+		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }

@@ -1376,19 +1369,21 @@ void unmap_vmas(struct mmu_gather *tlb,
  *
  * Caller must protect the VMA list
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 		unsigned long size, struct zap_details *details)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
-	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;
+	unsigned long end = start + size;

 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
-	tlb_finish_mmu(&tlb, address, end);
+	mmu_notifier_invalidate_range_start(mm, start, end);
+	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
+		unmap_single_vma(&tlb, vma, start, end, details);
+	mmu_notifier_invalidate_range_end(mm, start, end);
+	tlb_finish_mmu(&tlb, start, end);
 }

 /**
@@ -1406,13 +1401,12 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
 	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;

 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
-	unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+	unmap_single_vma(&tlb, vma, address, end, details);
 	mmu_notifier_invalidate_range_end(mm, address, end);
 	tlb_finish_mmu(&tlb, address, end);
 }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index cfb6c8678754..b19569137529 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1361,11 +1361,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,

 	mm = get_task_mm(task);
 	put_task_struct(task);
-	if (mm)
-		err = do_migrate_pages(mm, old, new,
-			capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
-	else
+
+	if (!mm) {
 		err = -EINVAL;
+		goto out;
+	}
+
+	err = do_migrate_pages(mm, old, new,
+		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

 	mmput(mm);
 out:
diff --git a/mm/migrate.c b/mm/migrate.c
index 51c08a0c6f68..11072383ae12 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1388,14 +1388,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
 	mm = get_task_mm(task);
 	put_task_struct(task);

-	if (mm) {
-		if (nodes)
-			err = do_pages_move(mm, task_nodes, nr_pages, pages,
-					    nodes, status, flags);
-		else
-			err = do_pages_stat(mm, nr_pages, pages, status);
-	} else
-		err = -EINVAL;
+	if (!mm)
+		return -EINVAL;
+
+	if (nodes)
+		err = do_pages_move(mm, task_nodes, nr_pages, pages,
+				    nodes, status, flags);
+	else
+		err = do_pages_stat(mm, nr_pages, pages, status);

 	mmput(mm);
 	return err;
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -240,6 +240,8 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 	return next;
 }

+static unsigned long do_brk(unsigned long addr, unsigned long len);
+
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
 	unsigned long rlim, retval;
@@ -951,7 +953,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
  * The caller must hold down_write(&current->mm->mmap_sem).
  */

-unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
 			unsigned long flags, unsigned long pgoff)
 {
@@ -1087,7 +1089,32 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,

 	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
-EXPORT_SYMBOL(do_mmap_pgoff);
+
+unsigned long do_mmap(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flag, unsigned long offset)
+{
+	if (unlikely(offset + PAGE_ALIGN(len) < offset))
+		return -EINVAL;
+	if (unlikely(offset & ~PAGE_MASK))
+		return -EINVAL;
+	return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(do_mmap);
+
+unsigned long vm_mmap(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flag, unsigned long offset)
+{
+	unsigned long ret;
+	struct mm_struct *mm = current->mm;
+
+	down_write(&mm->mmap_sem);
+	ret = do_mmap(file, addr, len, prot, flag, offset);
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+EXPORT_SYMBOL(vm_mmap);

 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		unsigned long, prot, unsigned long, flags,
@@ -1862,15 +1889,20 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
  */
 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+	unsigned long nr_accounted = 0;
+
 	/* Update high watermark before we lower total_vm */
 	update_hiwater_vm(mm);
 	do {
 		long nrpages = vma_pages(vma);

+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += nrpages;
 		mm->total_vm -= nrpages;
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
 		vma = remove_vma(vma);
 	} while (vma);
+	vm_unacct_memory(nr_accounted);
 	validate_mm(mm);
 }

@@ -1885,13 +1917,11 @@ static void unmap_region(struct mm_struct *mm,
 {
 	struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
 	struct mmu_gather tlb;
-	unsigned long nr_accounted = 0;

 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 				 next ? next->vm_start : 0);
 	tlb_finish_mmu(&tlb, start, end);
@@ -2105,21 +2135,25 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)

 	return 0;
 }
-
 EXPORT_SYMBOL(do_munmap);

-SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+int vm_munmap(unsigned long start, size_t len)
 {
 	int ret;
 	struct mm_struct *mm = current->mm;

-	profile_munmap(addr);
-
 	down_write(&mm->mmap_sem);
-	ret = do_munmap(mm, addr, len);
+	ret = do_munmap(mm, start, len);
 	up_write(&mm->mmap_sem);
 	return ret;
 }
+EXPORT_SYMBOL(vm_munmap);
+
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+{
+	profile_munmap(addr);
+	return vm_munmap(addr, len);
+}

 static inline void verify_mm_writelocked(struct mm_struct *mm)
 {
@@ -2136,7 +2170,7 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
  * anonymous maps. eventually we may be able to do some
  * brk-specific accounting here.
  */
-unsigned long do_brk(unsigned long addr, unsigned long len)
+static unsigned long do_brk(unsigned long addr, unsigned long len)
 {
 	struct mm_struct * mm = current->mm;
 	struct vm_area_struct * vma, * prev;
@@ -2232,7 +2266,17 @@ out:
 	return addr;
 }

-EXPORT_SYMBOL(do_brk);
+unsigned long vm_brk(unsigned long addr, unsigned long len)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long ret;
+
+	down_write(&mm->mmap_sem);
+	ret = do_brk(addr, len);
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+EXPORT_SYMBOL(vm_brk);

 /* Release all mmaps. */
 void exit_mmap(struct mm_struct *mm)
@@ -2264,8 +2308,7 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, 0, -1);

 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(&tlb, 0, -1);
@@ -2274,8 +2317,12 @@ void exit_mmap(struct mm_struct *mm)
 	 * Walk the list again, actually closing and freeing it,
 	 * with preemption enabled, without holding any MM locks.
 	 */
-	while (vma)
+	while (vma) {
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += vma_pages(vma);
 		vma = remove_vma(vma);
+	}
+	vm_unacct_memory(nr_accounted);

 	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 24f0fc1a56d6..1983fb1c7026 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -82,8 +82,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)

 static void __init __free_pages_memory(unsigned long start, unsigned long end)
 {
-	int i;
-	unsigned long start_aligned, end_aligned;
+	unsigned long i, start_aligned, end_aligned;
 	int order = ilog2(BITS_PER_LONG);

 	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
@@ -298,13 +297,19 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

+again:
 	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
 					goal, -1ULL);
 	if (ptr)
 		return ptr;

-	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
+	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
 					goal, -1ULL);
+	if (!ptr && goal) {
+		goal = 0;
+		goto again;
+	}
+	return ptr;
 }

 void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
diff --git a/mm/nommu.c b/mm/nommu.c
index f59e170fceb4..bb8f4f004a82 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1233,7 +1233,7 @@ enomem:
 /*
  * handle mapping creation for uClinux
  */
-unsigned long do_mmap_pgoff(struct file *file,
+static unsigned long do_mmap_pgoff(struct file *file,
 			    unsigned long addr,
 			    unsigned long len,
 			    unsigned long prot,
@@ -1470,7 +1470,32 @@ error_getting_region:
 	show_free_areas(0);
 	return -ENOMEM;
 }
-EXPORT_SYMBOL(do_mmap_pgoff);
+
+unsigned long do_mmap(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flag, unsigned long offset)
+{
+	if (unlikely(offset + PAGE_ALIGN(len) < offset))
+		return -EINVAL;
+	if (unlikely(offset & ~PAGE_MASK))
+		return -EINVAL;
+	return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(do_mmap);
+
+unsigned long vm_mmap(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flag, unsigned long offset)
+{
+	unsigned long ret;
+	struct mm_struct *mm = current->mm;
+
+	down_write(&mm->mmap_sem);
+	ret = do_mmap(file, addr, len, prot, flag, offset);
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+EXPORT_SYMBOL(vm_mmap);

 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		unsigned long, prot, unsigned long, flags,
@@ -1709,16 +1734,22 @@ erase_whole_vma:
 }
 EXPORT_SYMBOL(do_munmap);

-SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+int vm_munmap(unsigned long addr, size_t len)
 {
-	int ret;
 	struct mm_struct *mm = current->mm;
+	int ret;

 	down_write(&mm->mmap_sem);
 	ret = do_munmap(mm, addr, len);
 	up_write(&mm->mmap_sem);
 	return ret;
 }
+EXPORT_SYMBOL(vm_munmap);
+
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+{
+	return vm_munmap(addr, len);
+}

 /*
  * release all the mappings made in a process's VM space
@@ -1744,7 +1775,7 @@ void exit_mmap(struct mm_struct *mm)
 	kleave("");
 }

-unsigned long do_brk(unsigned long addr, unsigned long len)
+unsigned long vm_brk(unsigned long addr, unsigned long len)
 {
 	return -ENOMEM;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a712fb9e04ce..9f389e50ed18 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4763,12 +4763,12 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		if (i == ZONE_MOVABLE)
 			continue;
-		printk(" %-8s ", zone_names[i]);
+		printk(KERN_CONT " %-8s ", zone_names[i]);
 		if (arch_zone_lowest_possible_pfn[i] ==
 				arch_zone_highest_possible_pfn[i])
-			printk("empty\n");
+			printk(KERN_CONT "empty\n");
 		else
-			printk("%0#10lx -> %0#10lx\n",
+			printk(KERN_CONT "%0#10lx -> %0#10lx\n",
 				arch_zone_lowest_possible_pfn[i],
 				arch_zone_highest_possible_pfn[i]);
 	}
@@ -5203,7 +5203,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 	int ret;

 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
-	if (!write || (ret == -EINVAL))
+	if (!write || (ret < 0))
 		return ret;
 	for_each_populated_zone(zone) {
 		for_each_possible_cpu(cpu) {
diff --git a/mm/percpu.c b/mm/percpu.c
index f47af9123af7..bb4be7435ce3 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1132,20 +1132,20 @@ static void pcpu_dump_alloc_info(const char *lvl,
 		for (alloc_end += gi->nr_units / upa;
 		     alloc < alloc_end; alloc++) {
 			if (!(alloc % apl)) {
-				printk("\n");
+				printk(KERN_CONT "\n");
 				printk("%spcpu-alloc: ", lvl);
 			}
-			printk("[%0*d] ", group_width, group);
+			printk(KERN_CONT "[%0*d] ", group_width, group);

 			for (unit_end += upa; unit < unit_end; unit++)
 				if (gi->cpu_map[unit] != NR_CPUS)
-					printk("%0*d ", cpu_width,
+					printk(KERN_CONT "%0*d ", cpu_width,
 						gi->cpu_map[unit]);
 				else
-					printk("%s ", empty_str);
+					printk(KERN_CONT "%s ", empty_str);
 		}
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
 }

 /**
@@ -1650,6 +1650,16 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 		areas[group] = ptr;

 		base = min(ptr, base);
+	}
+
+	/*
+	 * Copy data and free unused parts. This should happen after all
+	 * allocations are complete; otherwise, we may end up with
+	 * overlapping groups.
+	 */
+	for (group = 0; group < ai->nr_groups; group++) {
+		struct pcpu_group_info *gi = &ai->groups[group];
+		void *ptr = areas[group];

 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
 			if (gi->cpu_map[i] == NR_CPUS) {
@@ -1885,6 +1895,8 @@ void __init setup_per_cpu_areas(void)
 	fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 	if (!ai || !fc)
 		panic("Failed to allocate memory for percpu areas.");
+	/* kmemleak tracks the percpu allocations separately */
+	kmemleak_free(fc);

 	ai->dyn_size = unit_size;
 	ai->unit_size = unit_size;
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2040,7 +2040,7 @@ static bool has_cpu_slab(int cpu, void *info)
 	struct kmem_cache *s = info;
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);

-	return !!(c->page);
+	return c->page || c->partial;
 }

 static void flush_all(struct kmem_cache *s)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 9d3dd3763cf7..4c5ff7f284d9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -26,7 +26,7 @@
  */
 static const struct address_space_operations swap_aops = {
 	.writepage	= swap_writepage,
-	.set_page_dirty	= __set_page_dirty_nobuffers,
+	.set_page_dirty	= __set_page_dirty_no_writeback,
 	.migratepage	= migrate_page,
 };

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 33c332bbab73..33dc256033b5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1568,9 +1568,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 	reclaim_stat->recent_scanned[0] += nr_anon;
 	reclaim_stat->recent_scanned[1] += nr_file;

-	if (current_is_kswapd())
-		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
-	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
+	if (global_reclaim(sc)) {
+		if (current_is_kswapd())
+			__count_zone_vm_events(PGSTEAL_KSWAPD, zone,
+					       nr_reclaimed);
+		else
+			__count_zone_vm_events(PGSTEAL_DIRECT, zone,
+					       nr_reclaimed);
+	}

 	putback_inactive_pages(mz, &page_list);

@@ -2107,12 +2112,7 @@ restart:
 		 * with multiple processes reclaiming pages, the total
 		 * freeing target can get unreasonably large.
 		 */
-		if (nr_reclaimed >= nr_to_reclaim)
-			nr_to_reclaim = 0;
-		else
-			nr_to_reclaim -= nr_reclaimed;
-
-		if (!nr_to_reclaim && priority < DEF_PRIORITY)
+		if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
 			break;
 	}
 	blk_finish_plug(&plug);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f600557a7659..7db1b9bab492 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -738,7 +738,8 @@ const char * const vmstat_text[] = {
 	"pgmajfault",

 	TEXTS_FOR_ZONES("pgrefill")
-	TEXTS_FOR_ZONES("pgsteal")
+	TEXTS_FOR_ZONES("pgsteal_kswapd")
+	TEXTS_FOR_ZONES("pgsteal_direct")
 	TEXTS_FOR_ZONES("pgscan_kswapd")
 	TEXTS_FOR_ZONES("pgscan_direct")

@@ -747,7 +748,6 @@ const char * const vmstat_text[] = {
 #endif
 	"pginodesteal",
 	"slabs_scanned",
-	"kswapd_steal",
 	"kswapd_inodesteal",
 	"kswapd_low_wmark_hit_quickly",
 	"kswapd_high_wmark_hit_quickly",