Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c    |   9
-rw-r--r--  mm/memcontrol.c |   2
-rw-r--r--  mm/memory.c     |  37
-rw-r--r--  mm/page_alloc.c |  20
-rw-r--r--  mm/shmem.c      |  53
-rw-r--r--  mm/vmscan.c     | 124
6 files changed, 137 insertions(+), 108 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ea8c3a4cd2ae..5f34bd8dda34 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2508,6 +2508,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
         struct hstate *h = hstate_vma(vma);
         int ret = VM_FAULT_SIGBUS;
+        int anon_rmap = 0;
         pgoff_t idx;
         unsigned long size;
         struct page *page;
@@ -2562,14 +2563,13 @@ retry:
                         spin_lock(&inode->i_lock);
                         inode->i_blocks += blocks_per_huge_page(h);
                         spin_unlock(&inode->i_lock);
-                        page_dup_rmap(page);
                 } else {
                         lock_page(page);
                         if (unlikely(anon_vma_prepare(vma))) {
                                 ret = VM_FAULT_OOM;
                                 goto backout_unlocked;
                         }
-                        hugepage_add_new_anon_rmap(page, vma, address);
+                        anon_rmap = 1;
                 }
         } else {
                 /*
@@ -2582,7 +2582,6 @@ retry:
                                 VM_FAULT_SET_HINDEX(h - hstates);
                         goto backout_unlocked;
                 }
-                page_dup_rmap(page);
         }
 
         /*
@@ -2606,6 +2605,10 @@ retry:
         if (!huge_pte_none(huge_ptep_get(ptep)))
                 goto backout;
 
+        if (anon_rmap)
+                hugepage_add_new_anon_rmap(page, vma, address);
+        else
+                page_dup_rmap(page);
         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                 && (vma->vm_flags & VM_SHARED)));
         set_huge_pte_at(mm, address, ptep, new_pte);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3dbff4dcde35..4baddbae94cc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3247,7 +3247,7 @@ int mem_cgroup_prepare_migration(struct page *page,
                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
         else
                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-        __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
+        __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
         return ret;
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index 5e30583c2605..fa2f04e0337c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -878,15 +878,24 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         }
                         if (likely(!non_swap_entry(entry)))
                                 rss[MM_SWAPENTS]++;
-                        else if (is_write_migration_entry(entry) &&
-                                        is_cow_mapping(vm_flags)) {
-                                /*
-                                 * COW mappings require pages in both parent
-                                 * and child to be set to read.
-                                 */
-                                make_migration_entry_read(&entry);
-                                pte = swp_entry_to_pte(entry);
-                                set_pte_at(src_mm, addr, src_pte, pte);
+                        else if (is_migration_entry(entry)) {
+                                page = migration_entry_to_page(entry);
+
+                                if (PageAnon(page))
+                                        rss[MM_ANONPAGES]++;
+                                else
+                                        rss[MM_FILEPAGES]++;
+
+                                if (is_write_migration_entry(entry) &&
+                                                is_cow_mapping(vm_flags)) {
+                                        /*
+                                         * COW mappings require pages in both
+                                         * parent and child to be set to read.
+                                         */
+                                        make_migration_entry_read(&entry);
+                                        pte = swp_entry_to_pte(entry);
+                                        set_pte_at(src_mm, addr, src_pte, pte);
+                                }
                         }
                 }
                 goto out_set_pte;
@@ -1191,6 +1200,16 @@ again:
 
                         if (!non_swap_entry(entry))
                                 rss[MM_SWAPENTS]--;
+                        else if (is_migration_entry(entry)) {
+                                struct page *page;
+
+                                page = migration_entry_to_page(entry);
+
+                                if (PageAnon(page))
+                                        rss[MM_ANONPAGES]--;
+                                else
+                                        rss[MM_FILEPAGES]--;
+                        }
                         if (unlikely(!free_swap_and_cache(entry)))
                                 print_bad_pte(vma, addr, ptent, NULL);
                 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0027d8f4a1bb..d2186ecb36f7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5413,7 +5413,25 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
 
 bool is_pageblock_removable_nolock(struct page *page)
 {
-        struct zone *zone = page_zone(page);
+        struct zone *zone;
+        unsigned long pfn;
+
+        /*
+         * We have to be careful here because we are iterating over memory
+         * sections which are not zone aware so we might end up outside of
+         * the zone but still within the section.
+         * We have to take care about the node as well. If the node is offline
+         * its NODE_DATA will be NULL - see page_zone.
+         */
+        if (!node_online(page_to_nid(page)))
+                return false;
+
+        zone = page_zone(page);
+        pfn = page_to_pfn(page);
+        if (zone->zone_start_pfn > pfn ||
+                        zone->zone_start_pfn + zone->spanned_pages <= pfn)
+                return false;
+
         return __count_immobile_pages(zone, page, 0);
 }
 
diff --git a/mm/shmem.c b/mm/shmem.c
index feead1943d92..269d049294ab 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -379,7 +379,7 @@ static int shmem_free_swap(struct address_space *mapping,
 /*
  * Pagevec may contain swap entries, so shuffle up pages before releasing.
  */
-static void shmem_pagevec_release(struct pagevec *pvec)
+static void shmem_deswap_pagevec(struct pagevec *pvec)
 {
         int i, j;
 
@@ -389,7 +389,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
                         pvec->pages[j++] = page;
         }
         pvec->nr = j;
-        pagevec_release(pvec);
+}
+
+/*
+ * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
+ */
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+        struct pagevec pvec;
+        pgoff_t indices[PAGEVEC_SIZE];
+        pgoff_t index = 0;
+
+        pagevec_init(&pvec, 0);
+        /*
+         * Minor point, but we might as well stop if someone else SHM_LOCKs it.
+         */
+        while (!mapping_unevictable(mapping)) {
+                /*
+                 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
+                 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
+                 */
+                pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+                                        PAGEVEC_SIZE, pvec.pages, indices);
+                if (!pvec.nr)
+                        break;
+                index = indices[pvec.nr - 1] + 1;
+                shmem_deswap_pagevec(&pvec);
+                check_move_unevictable_pages(pvec.pages, pvec.nr);
+                pagevec_release(&pvec);
+                cond_resched();
+        }
 }
 
 /*
@@ -440,7 +469,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                         }
                         unlock_page(page);
                 }
-                shmem_pagevec_release(&pvec);
+                shmem_deswap_pagevec(&pvec);
+                pagevec_release(&pvec);
                 mem_cgroup_uncharge_end();
                 cond_resched();
                 index++;
@@ -470,7 +500,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                         continue;
                 }
                 if (index == start && indices[0] > end) {
-                        shmem_pagevec_release(&pvec);
+                        shmem_deswap_pagevec(&pvec);
+                        pagevec_release(&pvec);
                         break;
                 }
                 mem_cgroup_uncharge_start();
@@ -494,7 +525,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                         }
                         unlock_page(page);
                 }
-                shmem_pagevec_release(&pvec);
+                shmem_deswap_pagevec(&pvec);
+                pagevec_release(&pvec);
                 mem_cgroup_uncharge_end();
                 index++;
         }
@@ -1068,13 +1100,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
                 user_shm_unlock(inode->i_size, user);
                 info->flags &= ~VM_LOCKED;
                 mapping_clear_unevictable(file->f_mapping);
-                /*
-                 * Ensure that a racing putback_lru_page() can see
-                 * the pages of this mapping are evictable when we
-                 * skip them due to !PageLRU during the scan.
-                 */
-                smp_mb__after_clear_bit();
-                scan_mapping_unevictable_pages(file->f_mapping);
         }
         retval = 0;
 
@@ -2445,6 +2470,10 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
         return 0;
 }
 
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+}
+
 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
         truncate_inode_pages_range(inode->i_mapping, lstart, lend);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2880396f7953..c52b23552659 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -26,7 +26,6 @@
 #include <linux/buffer_head.h>  /* for try_to_release_page(),
                                         buffer_heads_over_limit */
 #include <linux/mm_inline.h>
-#include <linux/pagevec.h>
 #include <linux/backing-dev.h>
 #include <linux/rmap.h>
 #include <linux/topology.h>
@@ -661,7 +660,7 @@ redo:
          * When racing with an mlock or AS_UNEVICTABLE clearing
          * (page is unlocked) make sure that if the other thread
          * does not observe our setting of PG_lru and fails
-         * isolation/check_move_unevictable_page,
+         * isolation/check_move_unevictable_pages,
          * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
          * the page back to the evictable list.
          *
@@ -3499,100 +3498,61 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
         return 1;
 }
 
+#ifdef CONFIG_SHMEM
 /**
- * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
- * @page: page to check evictability and move to appropriate lru list
- * @zone: zone page is in
+ * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
+ * @pages: array of pages to check
+ * @nr_pages: number of pages to check
  *
- * Checks a page for evictability and moves the page to the appropriate
- * zone lru list.
+ * Checks pages for evictability and moves them to the appropriate lru list.
  *
- * Restrictions: zone->lru_lock must be held, page must be on LRU and must
- * have PageUnevictable set.
+ * This function is only used for SysV IPC SHM_UNLOCK.
  */
-static void check_move_unevictable_page(struct page *page, struct zone *zone)
+void check_move_unevictable_pages(struct page **pages, int nr_pages)
 {
         struct lruvec *lruvec;
+        struct zone *zone = NULL;
+        int pgscanned = 0;
+        int pgrescued = 0;
+        int i;
 
-        VM_BUG_ON(PageActive(page));
-retry:
-        ClearPageUnevictable(page);
-        if (page_evictable(page, NULL)) {
-                enum lru_list l = page_lru_base_type(page);
-
-                __dec_zone_state(zone, NR_UNEVICTABLE);
-                lruvec = mem_cgroup_lru_move_lists(zone, page,
-                                                   LRU_UNEVICTABLE, l);
-                list_move(&page->lru, &lruvec->lists[l]);
-                __inc_zone_state(zone, NR_INACTIVE_ANON + l);
-                __count_vm_event(UNEVICTABLE_PGRESCUED);
-        } else {
-                /*
-                 * rotate unevictable list
-                 */
-                SetPageUnevictable(page);
-                lruvec = mem_cgroup_lru_move_lists(zone, page, LRU_UNEVICTABLE,
-                                                   LRU_UNEVICTABLE);
-                list_move(&page->lru, &lruvec->lists[LRU_UNEVICTABLE]);
-                if (page_evictable(page, NULL))
-                        goto retry;
-        }
-}
-
-/**
- * scan_mapping_unevictable_pages - scan an address space for evictable pages
- * @mapping: struct address_space to scan for evictable pages
- *
- * Scan all pages in mapping.  Check unevictable pages for
- * evictability and move them to the appropriate zone lru list.
- */
-void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-        pgoff_t next = 0;
-        pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
-                        PAGE_CACHE_SHIFT;
-        struct zone *zone;
-        struct pagevec pvec;
-
-        if (mapping->nrpages == 0)
-                return;
-
-        pagevec_init(&pvec, 0);
-        while (next < end &&
-                pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
-                int i;
-                int pg_scanned = 0;
-
-                zone = NULL;
-
-                for (i = 0; i < pagevec_count(&pvec); i++) {
-                        struct page *page = pvec.pages[i];
-                        pgoff_t page_index = page->index;
-                        struct zone *pagezone = page_zone(page);
+        for (i = 0; i < nr_pages; i++) {
+                struct page *page = pages[i];
+                struct zone *pagezone;
 
-                        pg_scanned++;
-                        if (page_index > next)
-                                next = page_index;
-                        next++;
+                pgscanned++;
+                pagezone = page_zone(page);
+                if (pagezone != zone) {
+                        if (zone)
+                                spin_unlock_irq(&zone->lru_lock);
+                        zone = pagezone;
+                        spin_lock_irq(&zone->lru_lock);
+                }
 
-                        if (pagezone != zone) {
-                                if (zone)
-                                        spin_unlock_irq(&zone->lru_lock);
-                                zone = pagezone;
-                                spin_lock_irq(&zone->lru_lock);
-                        }
+                if (!PageLRU(page) || !PageUnevictable(page))
+                        continue;
 
-                        if (PageLRU(page) && PageUnevictable(page))
-                                check_move_unevictable_page(page, zone);
+                if (page_evictable(page, NULL)) {
+                        enum lru_list lru = page_lru_base_type(page);
+
+                        VM_BUG_ON(PageActive(page));
+                        ClearPageUnevictable(page);
+                        __dec_zone_state(zone, NR_UNEVICTABLE);
+                        lruvec = mem_cgroup_lru_move_lists(zone, page,
+                                                LRU_UNEVICTABLE, lru);
+                        list_move(&page->lru, &lruvec->lists[lru]);
+                        __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+                        pgrescued++;
                 }
-                if (zone)
-                        spin_unlock_irq(&zone->lru_lock);
-                pagevec_release(&pvec);
-
-                count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
         }
 
+        if (zone) {
+                __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
+                __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
+                spin_unlock_irq(&zone->lru_lock);
+        }
 }
+#endif /* CONFIG_SHMEM */
 
 static void warn_scan_unevictable_pages(void)
 {