Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c        5
-rw-r--r--  mm/mlock.c         18
-rw-r--r--  mm/mmap.c           2
-rw-r--r--  mm/page_alloc.c     4
-rw-r--r--  mm/page_cgroup.c    4
-rw-r--r--  mm/vmscan.c        35
6 files changed, 17 insertions, 51 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d143ab67be44..6058b53dcb89 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1796,6 +1796,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                 struct page *page, unsigned long address)
 {
+        struct hstate *h = hstate_vma(vma);
         struct vm_area_struct *iter_vma;
         struct address_space *mapping;
         struct prio_tree_iter iter;
@@ -1805,7 +1806,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
          * from page cache lookup which is in HPAGE_SIZE units.
          */
-        address = address & huge_page_mask(hstate_vma(vma));
+        address = address & huge_page_mask(h);
         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
                 + (vma->vm_pgoff >> PAGE_SHIFT);
         mapping = (struct address_space *)page_private(page);
@@ -1824,7 +1825,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                  */
                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
                         unmap_hugepage_range(iter_vma,
-                                address, address + HPAGE_SIZE,
+                                address, address + huge_page_size(h),
                                 page);
         }
 
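The hugetlb.c hunks cache the VMA's hstate once and derive both the alignment mask and the unmap length from it, so mappings whose huge page size differs from the default HPAGE_SIZE are handled with their own size. The following standalone sketch only illustrates that per-hstate arithmetic with made-up values; it is not the kernel implementation:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Illustrative stand-in for the kernel's per-size huge page descriptor. */
    struct hstate { unsigned int order; };

    static unsigned long huge_page_size(const struct hstate *h)
    {
        return PAGE_SIZE << h->order;           /* bytes per huge page */
    }

    static unsigned long huge_page_mask(const struct hstate *h)
    {
        return ~(huge_page_size(h) - 1);        /* aligns an address downward */
    }

    int main(void)
    {
        struct hstate h2m = { .order = 9 };     /* 2 MB huge pages */
        struct hstate h1g = { .order = 18 };    /* 1 GB huge pages */
        unsigned long addr = 0x40323456UL;

        printf("2M: size=%lu aligned=%#lx\n",
               huge_page_size(&h2m), addr & huge_page_mask(&h2m));
        printf("1G: size=%lu aligned=%#lx\n",
               huge_page_size(&h1g), addr & huge_page_mask(&h1g));
        return 0;
    }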
diff --git a/mm/mlock.c b/mm/mlock.c
index 008ea70b7afa..1ada366570cb 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -66,14 +66,10 @@ void __clear_page_mlock(struct page *page)
                 putback_lru_page(page);
         } else {
                 /*
-                 * Page not on the LRU yet. Flush all pagevecs and retry.
+                 * We lost the race. the page already moved to evictable list.
                  */
-                lru_add_drain_all();
-                if (!isolate_lru_page(page))
-                        putback_lru_page(page);
-                else if (PageUnevictable(page))
+                if (PageUnevictable(page))
                         count_vm_event(UNEVICTABLE_PGSTRANDED);
-
         }
 }
 
@@ -166,7 +162,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
         unsigned long addr = start;
         struct page *pages[16]; /* 16 gives a reasonable batch */
         int nr_pages = (end - start) / PAGE_SIZE;
-        int ret;
+        int ret = 0;
         int gup_flags = 0;
 
         VM_BUG_ON(start & ~PAGE_MASK);
@@ -187,8 +183,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
         if (vma->vm_flags & VM_WRITE)
                 gup_flags |= GUP_FLAGS_WRITE;
 
-        lru_add_drain_all();    /* push cached pages to LRU */
-
         while (nr_pages > 0) {
                 int i;
 
@@ -251,8 +245,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                 ret = 0;
         }
 
-        lru_add_drain_all();    /* to update stats */
-
         return ret;     /* count entire vma as locked_vm */
 }
 
@@ -546,6 +538,8 @@ asmlinkage long sys_mlock(unsigned long start, size_t len)
         if (!can_do_mlock())
                 return -EPERM;
 
+        lru_add_drain_all();    /* flush pagevec */
+
         down_write(&current->mm->mmap_sem);
         len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
         start &= PAGE_MASK;
@@ -612,6 +606,8 @@ asmlinkage long sys_mlockall(int flags)
         if (!can_do_mlock())
                 goto out;
 
+        lru_add_drain_all();    /* flush pagevec */
+
         down_write(&current->mm->mmap_sem);
 
         lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
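The mlock.c hunks move lru_add_drain_all() out of __mlock_vma_pages_range() and up to the sys_mlock()/sys_mlockall() entry points, ahead of taking mmap_sem, and they also initialize ret so that an empty range cannot return an uninitialized value when the batching loop never runs. A minimal, hypothetical illustration of that second bug class (not the kernel function):

    #include <stdio.h>

    /* Hypothetical stand-in, not the kernel function: shows why "int ret = 0;"
     * matters when the batching loop can run zero times. */
    static long process_range(unsigned long start, unsigned long end)
    {
        int nr_pages = (int)((end - start) / 4096);
        int ret = 0;            /* without "= 0", an empty range returns garbage */

        while (nr_pages > 0) {
            ret = 1;            /* stand-in for the real per-batch work */
            nr_pages--;
        }
        return ret;
    }

    int main(void)
    {
        /* Empty range: the loop body never executes, yet the result is defined. */
        printf("%ld\n", process_range(0x1000, 0x1000));
        return 0;
    }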
diff --git a/mm/mmap.c b/mm/mmap.c
index de14ac21e5b5..d4855a682ab6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1704,7 +1704,7 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
         vma = find_vma_prev(mm, addr, &prev);
         if (vma && (vma->vm_start <= addr))
                 return vma;
-        if (expand_stack(prev, addr))
+        if (!prev || expand_stack(prev, addr))
                 return NULL;
         if (prev->vm_flags & VM_LOCKED) {
                 if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0)
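The mmap.c change adds a NULL check before prev is dereferenced: when no VMA precedes the address, find_vma_prev() leaves prev NULL, and the old code would pass that straight to expand_stack() and then to prev->vm_flags. A trivial sketch of the same short-circuit guard, with illustrative types and a stand-in condition in place of expand_stack():

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative types only; this is not the kernel's find_extend_vma(). */
    struct vma { unsigned long vm_start; };

    static struct vma *extend_below(struct vma *prev, unsigned long addr)
    {
        /* "!prev ||" evaluates first, so the stand-in check on the right
         * (playing the role of expand_stack()) never sees a NULL pointer. */
        if (!prev || prev->vm_start > addr)
            return NULL;
        return prev;            /* only reached when prev is non-NULL */
    }

    int main(void)
    {
        printf("%p\n", (void *)extend_below(NULL, 0x1000));    /* NULL result, no crash */
        return 0;
    }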
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 54069e64e3a8..d8ac01474563 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1561,6 +1561,10 @@ nofail_alloc:
 
         /* We now go into synchronous reclaim */
         cpuset_memory_pressure_bump();
+        /*
+         * The task's cpuset might have expanded its set of allowable nodes
+         */
+        cpuset_update_task_memory_state();
         p->flags |= PF_MEMALLOC;
         reclaim_state.reclaimed_slab = 0;
         p->reclaim_state = &reclaim_state;
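The page_alloc.c hunk refreshes the task's cpuset-derived memory state before entering synchronous reclaim because, as the added comment says, the cpuset may have grown its set of allowable nodes since the allocation began. A generic sketch of that refresh-before-slow-path pattern; the names below are illustrative, not kernel APIs:

    #include <stdio.h>

    /* Generic sketch of the pattern in the hunk above: refresh a cached
     * snapshot of an externally changeable setting before taking the
     * expensive slow path.  All names here are illustrative, not kernel APIs. */
    static int allowed_nodes = 1;           /* value controlled from outside */
    static int cached_allowed_nodes = 1;    /* the task's possibly stale snapshot */

    static void update_cached_state(void)   /* role analogous to the added call */
    {
        cached_allowed_nodes = allowed_nodes;
    }

    int main(void)
    {
        allowed_nodes = 4;                  /* the allowed set was widened meanwhile */

        update_cached_state();              /* refresh before the slow path runs */
        printf("slow path sees %d allowed nodes\n", cached_allowed_nodes);
        return 0;
    }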
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index f59d797dc5a9..1223d927904d 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -165,7 +165,7 @@ int online_page_cgroup(unsigned long start_pfn,
         unsigned long start, end, pfn;
         int fail = 0;
 
-        start = start_pfn & (PAGES_PER_SECTION - 1);
+        start = start_pfn & ~(PAGES_PER_SECTION - 1);
         end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
 
         for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
@@ -188,7 +188,7 @@ int offline_page_cgroup(unsigned long start_pfn,
 {
         unsigned long start, end, pfn;
 
-        start = start_pfn & (PAGES_PER_SECTION - 1);
+        start = start_pfn & ~(PAGES_PER_SECTION - 1);
         end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
 
         for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
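The page_cgroup.c fix is a one-character mask bug: pfn & (PAGES_PER_SECTION - 1) yields the offset inside a section, while the intended pfn & ~(PAGES_PER_SECTION - 1) rounds the pfn down to a section boundary. A quick standalone check, with a PAGES_PER_SECTION value assumed purely for illustration:

    #include <stdio.h>

    /* PAGES_PER_SECTION is assumed to be a power of two; 32768 (a 128 MB
     * section of 4 KB pages) is used here purely for illustration. */
    #define PAGES_PER_SECTION 32768UL

    int main(void)
    {
        unsigned long start_pfn = 40000UL;

        /* Old expression: keeps only the offset inside a section. */
        unsigned long offset  = start_pfn & (PAGES_PER_SECTION - 1);

        /* Fixed expression: rounds the pfn down to a section boundary. */
        unsigned long aligned = start_pfn & ~(PAGES_PER_SECTION - 1);

        printf("offset-in-section=%lu section-aligned=%lu\n", offset, aligned);
        /* prints 7232 and 32768 */
        return 0;
    }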
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3b5860294bb6..c141b3e78071 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2368,39 +2368,6 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
         return 1;
 }
 
-static void show_page_path(struct page *page)
-{
-        char buf[256];
-        if (page_is_file_cache(page)) {
-                struct address_space *mapping = page->mapping;
-                struct dentry *dentry;
-                pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-                spin_lock(&mapping->i_mmap_lock);
-                dentry = d_find_alias(mapping->host);
-                printk(KERN_INFO "rescued: %s %lu\n",
-                       dentry_path(dentry, buf, 256), pgoff);
-                spin_unlock(&mapping->i_mmap_lock);
-        } else {
-#if defined(CONFIG_MM_OWNER) && defined(CONFIG_MMU)
-                struct anon_vma *anon_vma;
-                struct vm_area_struct *vma;
-
-                anon_vma = page_lock_anon_vma(page);
-                if (!anon_vma)
-                        return;
-
-                list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-                        printk(KERN_INFO "rescued: anon %s\n",
-                               vma->vm_mm->owner->comm);
-                        break;
-                }
-                page_unlock_anon_vma(anon_vma);
-#endif
-        }
-}
-
-
 /**
  * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
  * @page: page to check evictability and move to appropriate lru list
@@ -2421,8 +2388,6 @@ retry:
         if (page_evictable(page, NULL)) {
                 enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
 
-                show_page_path(page);
-
                 __dec_zone_state(zone, NR_UNEVICTABLE);
                 list_move(&page->lru, &zone->lru[l].list);
                 __inc_zone_state(zone, NR_INACTIVE_ANON + l);