Diffstat (limited to 'mm')
 mm/filemap.c    | 14 +++++++-------
 mm/hugetlb.c    |  7 ++++++-
 mm/madvise.c    |  4 ++--
 mm/memcontrol.c |  3 ---
 mm/memory.c     | 41 ++++++++++++++++++++++++++++++++++++-----
 mm/migrate.c    |  4 ++--
 mm/mlock.c      |  2 --
 mm/mm_init.c    |  8 ++++----
 mm/mmap.c       |  2 +-
 mm/nommu.c      | 21 +++++++++++++++++++++
 mm/page_alloc.c | 17 -----------------
 mm/rmap.c       |  5 +++--
 mm/shmem.c      |  4 ++--
 mm/swap.c       |  9 +++++----
 mm/swap_state.c |  8 ++++----
 mm/swapfile.c   |  6 +++---
 mm/truncate.c   |  6 ++----
 mm/vmscan.c     |  8 ++++----
 18 files changed, 103 insertions(+), 66 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 42bbc6909ba4..54e968650855 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -558,14 +558,14 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
  * The first mb is necessary to safely close the critical section opened by the
- * TestSetPageLocked(), the second mb is necessary to enforce ordering between
- * the clear_bit and the read of the waitqueue (to avoid SMP races with a
- * parallel wait_on_page_locked()).
+ * test_and_set_bit() to lock the page; the second mb is necessary to enforce
+ * ordering between the clear_bit and the read of the waitqueue (to avoid SMP
+ * races with a parallel wait_on_page_locked()).
  */
 void unlock_page(struct page *page)
 {
 	smp_mb__before_clear_bit();
-	if (!TestClearPageLocked(page))
+	if (!test_and_clear_bit(PG_locked, &page->flags))
 		BUG();
 	smp_mb__after_clear_bit();
 	wake_up_page(page, PG_locked);
@@ -931,7 +931,7 @@ grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
 	struct page *page = find_get_page(mapping, index);
 
 	if (page) {
-		if (!TestSetPageLocked(page))
+		if (trylock_page(page))
 			return page;
 		page_cache_release(page);
 		return NULL;
@@ -1027,7 +1027,7 @@ find_page:
 		if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
 					!mapping->a_ops->is_partially_uptodate)
 			goto page_not_up_to_date;
-		if (TestSetPageLocked(page))
+		if (!trylock_page(page))
 			goto page_not_up_to_date;
 		if (!mapping->a_ops->is_partially_uptodate(page,
 								desc, offset))
@@ -1879,7 +1879,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
 	 * The !iov->iov_len check ensures we skip over unlikely
 	 * zero-length segments (without overruning the iovec).
 	 */
-	while (bytes || unlikely(!iov->iov_len && i->count)) {
+	while (bytes || unlikely(i->count && !iov->iov_len)) {
 		int copy;
 
 		copy = min(bytes, iov->iov_len - base);
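
A note on the recurring conversion in these hunks: the old TestSetPageLocked() macro returned the previous value of PG_locked (nonzero if the page was already locked), while the new trylock_page() returns nonzero only when the caller actually acquired the lock, so every call site's test is inverted along with the rename. A minimal sketch of that relationship, assuming trylock_page() is a thin wrapper over test_and_set_bit() on PG_locked; the two helper names below are illustrative only and not part of this patch:

/* Illustrative only: shows why "if (TestSetPageLocked(p))" becomes
 * "if (!trylock_page(p))" -- the return convention is inverted. */
static inline int old_test_set_page_locked(struct page *page)
{
	/* nonzero means the page was ALREADY locked (we failed to lock it) */
	return test_and_set_bit(PG_locked, &page->flags);
}

static inline int trylock_page_sketch(struct page *page)
{
	/* nonzero means WE just took the lock */
	return !test_and_set_bit(PG_locked, &page->flags);
}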
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 254ce2b90158..757ca983fd99 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1283,7 +1283,12 @@ module_exit(hugetlb_exit);
 
 static int __init hugetlb_init(void)
 {
-	BUILD_BUG_ON(HPAGE_SHIFT == 0);
+	/* Some platforms decide whether they support huge pages at boot
+	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
+	 * there is no such support.
+	 */
+	if (HPAGE_SHIFT == 0)
+		return 0;
 
 	if (!size_to_hstate(default_hstate_size)) {
 		default_hstate_size = HPAGE_SIZE;
diff --git a/mm/madvise.c b/mm/madvise.c
index 23a0ec3e0ea0..f9349c18a1b5 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -132,10 +132,10 @@ static long madvise_willneed(struct vm_area_struct * vma,
  * Application no longer needs these pages.  If the pages are dirty,
  * it's OK to just throw them away.  The app will be more careful about
  * data it wants to keep.  Be sure to free swap resources too.  The
- * zap_page_range call sets things up for refill_inactive to actually free
+ * zap_page_range call sets things up for shrink_active_list to actually free
  * these pages later if no one else has touched them in the meantime,
  * although we could add these pages to a global reuse list for
- * refill_inactive to pick up before reclaiming other pages.
+ * shrink_active_list to pick up before reclaiming other pages.
  *
  * NB: This interface discards data rather than pushes it out to swap,
  * as some implementations do.  This has performance implications for
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fba566c51322..7056c3bdb478 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1168,9 +1168,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 	mem = mem_cgroup_from_cont(cont);
 	old_mem = mem_cgroup_from_cont(old_cont);
 
-	if (mem == old_mem)
-		goto out;
-
 	/*
 	 * Only thread group leaders are allowed to migrate, the mm_struct is
 	 * in effect owned by the leader
diff --git a/mm/memory.c b/mm/memory.c
index 67f0ab9077d9..1002f473f497 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -994,6 +994,29 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 	return end;
 }
 
+/**
+ * zap_vma_ptes - remove ptes mapping the vma
+ * @vma: vm_area_struct holding ptes to be zapped
+ * @address: starting address of pages to zap
+ * @size: number of bytes to zap
+ *
+ * This function only unmaps ptes assigned to VM_PFNMAP vmas.
+ *
+ * The entire address range must be fully contained within the vma.
+ *
+ * Returns 0 if successful.
+ */
+int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+		unsigned long size)
+{
+	if (address < vma->vm_start || address + size > vma->vm_end ||
+			!(vma->vm_flags & VM_PFNMAP))
+		return -1;
+	zap_page_range(vma, address, size, NULL);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(zap_vma_ptes);
+
 /*
  * Do a quick page-table lookup for a single page.
  */
@@ -1766,7 +1789,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * not dirty accountable.
 	 */
 	if (PageAnon(old_page)) {
-		if (!TestSetPageLocked(old_page)) {
+		if (trylock_page(old_page)) {
 			reuse = can_share_swap_page(old_page);
 			unlock_page(old_page);
 		}
@@ -2742,16 +2765,26 @@ int make_pages_present(unsigned long addr, unsigned long end)
 
 	vma = find_vma(current->mm, addr);
 	if (!vma)
-		return -1;
+		return -ENOMEM;
 	write = (vma->vm_flags & VM_WRITE) != 0;
 	BUG_ON(addr >= end);
 	BUG_ON(end > vma->vm_end);
 	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
 	ret = get_user_pages(current, current->mm, addr,
 			len, write, 0, NULL, NULL);
-	if (ret < 0)
+	if (ret < 0) {
+		/*
+		 * SUS requires strange return values from mlock:
+		 *  - an invalid address generates ENOMEM.
+		 *  - being out of memory should generate EAGAIN.
+		 */
+		if (ret == -EFAULT)
+			ret = -ENOMEM;
+		else if (ret == -ENOMEM)
+			ret = -EAGAIN;
 		return ret;
-	return ret == len ? 0 : -1;
+	}
+	return ret == len ? 0 : -ENOMEM;
 }
 
 #if !defined(__HAVE_ARCH_GATE_AREA)
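
The zap_vma_ptes() helper added above gives drivers a bounds-checked way to tear down the ptes covering (part of) a VM_PFNMAP mapping they established earlier. A minimal caller sketch, assuming only the exported function and the vm_area_struct fields shown in the hunk; my_drv_revoke_mapping() is a hypothetical driver routine, not part of this patch:

/* Hypothetical driver helper: drop every pte in a vma the driver previously
 * remapped.  zap_vma_ptes() returns nonzero if the range falls outside the
 * vma or the vma is not VM_PFNMAP. */
static int my_drv_revoke_mapping(struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	if (zap_vma_ptes(vma, vma->vm_start, len))
		return -EINVAL;
	return 0;
}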
diff --git a/mm/migrate.c b/mm/migrate.c
index 153572fb60b8..2a80136b23bb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -605,7 +605,7 @@ static int move_to_new_page(struct page *newpage, struct page *page)
 	 * establishing additional references. We are the only one
 	 * holding a reference to the new page at this point.
 	 */
-	if (TestSetPageLocked(newpage))
+	if (!trylock_page(newpage))
 		BUG();
 
 	/* Prepare mapping for the new page.*/
@@ -667,7 +667,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 		BUG_ON(charge);
 
 	rc = -EAGAIN;
-	if (TestSetPageLocked(page)) {
+	if (!trylock_page(page)) {
 		if (!force)
 			goto move_newpage;
 		lock_page(page);
diff --git a/mm/mlock.c b/mm/mlock.c
index 7b2656055d6a..01fbe93eff5c 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -78,8 +78,6 @@ success:
 
 	mm->locked_vm -= pages;
 out:
-	if (ret == -ENOMEM)
-		ret = -EAGAIN;
 	return ret;
 }
 
diff --git a/mm/mm_init.c b/mm/mm_init.c
index c6af41ea9994..936ef2efd892 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -14,6 +14,10 @@
 #ifdef CONFIG_DEBUG_MEMORY_INIT
 int __meminitdata mminit_loglevel;
 
+#ifndef SECTIONS_SHIFT
+#define SECTIONS_SHIFT 0
+#endif
+
 /* The zonelists are simply reported, validation is manual. */
 void mminit_verify_zonelist(void)
 {
@@ -74,11 +78,7 @@ void __init mminit_verify_pageflags_layout(void)
 			NR_PAGEFLAGS);
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
 		"Section %d Node %d Zone %d\n",
-#ifdef SECTIONS_SHIFT
 		SECTIONS_SHIFT,
-#else
-		0,
-#endif
 		NODES_SHIFT,
 		ZONES_SHIFT);
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_offsets",
diff --git a/mm/mmap.c b/mm/mmap.c
index 32a287b631d4..339cf5c4d5d8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -370,7 +370,7 @@ find_vma_prepare(struct mm_struct *mm, unsigned long addr,
 		if (vma_tmp->vm_end > addr) {
 			vma = vma_tmp;
 			if (vma_tmp->vm_start <= addr)
-				return vma;
+				break;
 			__rb_link = &__rb_parent->rb_left;
 		} else {
 			rb_prev = __rb_parent;
diff --git a/mm/nommu.c b/mm/nommu.c
index 5edccd9c9218..ed75bc962fbe 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -266,6 +266,27 @@ void *vmalloc_node(unsigned long size, int node)
 }
 EXPORT_SYMBOL(vmalloc_node);
 
+#ifndef PAGE_KERNEL_EXEC
+# define PAGE_KERNEL_EXEC PAGE_KERNEL
+#endif
+
+/**
+ * vmalloc_exec - allocate virtually contiguous, executable memory
+ * @size: allocation size
+ *
+ * Kernel-internal function to allocate enough pages to cover @size
+ * from the page level allocator and map them into contiguous and
+ * executable kernel virtual space.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
+
+void *vmalloc_exec(unsigned long size)
+{
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
+}
+
 /**
  * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
  * @size: allocation size
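
The nommu vmalloc_exec() above simply forwards to __vmalloc() with PAGE_KERNEL_EXEC, falling back to PAGE_KERNEL where no executable protection flag exists. A hedged usage sketch; alloc_exec_stub() and its emit callback are hypothetical, only vmalloc_exec() and vfree() come from the kernel API:

/* Hypothetical example: allocate an executable scratch buffer, let the
 * caller fill it with instructions, and return it (freed later via vfree). */
static void *alloc_exec_stub(unsigned long size, void (*emit)(void *buf))
{
	void *buf = vmalloc_exec(size);

	if (buf)
		emit(buf);
	return buf;
}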
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3cf3d05b6bd4..401d104d2bb6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3753,23 +3753,6 @@ unsigned long __init find_min_pfn_with_active_regions(void)
 	return find_min_pfn_for_node(MAX_NUMNODES);
 }
 
-/**
- * find_max_pfn_with_active_regions - Find the maximum PFN registered
- *
- * It returns the maximum PFN based on information provided via
- * add_active_range().
- */
-unsigned long __init find_max_pfn_with_active_regions(void)
-{
-	int i;
-	unsigned long max_pfn = 0;
-
-	for (i = 0; i < nr_nodemap_entries; i++)
-		max_pfn = max(max_pfn, early_node_map[i].end_pfn);
-
-	return max_pfn;
-}
-
 /*
  * early_calculate_totalpages()
  * Sum pages in active regions for movable zone.
diff --git a/mm/rmap.c b/mm/rmap.c
index 99bc3f9cd796..1ea4e6fcee77 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -422,7 +422,7 @@ int page_referenced(struct page *page, int is_locked,
 		referenced += page_referenced_anon(page, mem_cont);
 	else if (is_locked)
 		referenced += page_referenced_file(page, mem_cont);
-	else if (TestSetPageLocked(page))
+	else if (!trylock_page(page))
 		referenced++;
 	else {
 		if (page->mapping)
@@ -667,7 +667,8 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
-	if (page_test_dirty(page)) {
+	if ((!PageAnon(page) || PageSwapCache(page)) &&
+	    page_test_dirty(page)) {
 		page_clear_dirty(page);
 		set_page_dirty(page);
 	}
diff --git a/mm/shmem.c b/mm/shmem.c
index c1e5a3b4f758..04fb4f1ab88e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1265,7 +1265,7 @@ repeat:
 		}
 
 		/* We have to do this with page locked to prevent races */
-		if (TestSetPageLocked(swappage)) {
+		if (!trylock_page(swappage)) {
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
 			wait_on_page_locked(swappage);
@@ -1329,7 +1329,7 @@ repeat:
 			shmem_swp_unmap(entry);
 			filepage = find_get_page(mapping, idx);
 			if (filepage &&
-			    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
+			    (!PageUptodate(filepage) || !trylock_page(filepage))) {
 				spin_unlock(&info->lock);
 				wait_on_page_locked(filepage);
 				page_cache_release(filepage);
diff --git a/mm/swap.c b/mm/swap.c
index dd89234ee51f..9e0cb3118079 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -278,9 +278,10 @@ int lru_add_drain_all(void)
  * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
  * for the remainder of the operation.
  *
- * The locking in this function is against shrink_cache(): we recheck the
- * page count inside the lock to see whether shrink_cache grabbed the page
- * via the LRU. If it did, give up: shrink_cache will free it.
+ * The locking in this function is against shrink_inactive_list(): we recheck
+ * the page count inside the lock to see whether shrink_inactive_list()
+ * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
+ * will free it.
  */
 void release_pages(struct page **pages, int nr, int cold)
 {
@@ -443,7 +444,7 @@ void pagevec_strip(struct pagevec *pvec)
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 
-		if (PagePrivate(page) && !TestSetPageLocked(page)) {
+		if (PagePrivate(page) && trylock_page(page)) {
 			if (PagePrivate(page))
 				try_to_release_page(page, 0);
 			unlock_page(page);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b8035b055129..167cf2dc8a03 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -201,7 +201,7 @@ void delete_from_swap_cache(struct page *page)
  */
 static inline void free_swap_cache(struct page *page)
 {
-	if (PageSwapCache(page) && !TestSetPageLocked(page)) {
+	if (PageSwapCache(page) && trylock_page(page)) {
 		remove_exclusive_swap_page(page);
 		unlock_page(page);
 	}
@@ -302,9 +302,9 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * re-using the just freed swap entry for an existing page.
 		 * May fail (-ENOMEM) if radix-tree node allocation failed.
 		 */
-		SetPageLocked(new_page);
+		set_page_locked(new_page);
 		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
-		if (!err) {
+		if (likely(!err)) {
 			/*
 			 * Initiate read into locked page and return.
 			 */
@@ -312,7 +312,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			swap_readpage(NULL, new_page);
 			return new_page;
 		}
-		ClearPageLocked(new_page);
+		clear_page_locked(new_page);
 		swap_free(entry);
 	} while (err != -ENOMEM);
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6beb6251e99d..1e330f2998fa 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -403,7 +403,7 @@ void free_swap_and_cache(swp_entry_t entry)
 	if (p) {
 		if (swap_entry_free(p, swp_offset(entry)) == 1) {
 			page = find_get_page(&swapper_space, entry.val);
-			if (page && unlikely(TestSetPageLocked(page))) {
+			if (page && unlikely(!trylock_page(page))) {
 				page_cache_release(page);
 				page = NULL;
 			}
@@ -656,8 +656,8 @@ static int unuse_mm(struct mm_struct *mm,
 
 	if (!down_read_trylock(&mm->mmap_sem)) {
 		/*
-		 * Activate page so shrink_cache is unlikely to unmap its
-		 * ptes while lock is dropped, so swapoff can make progress.
+		 * Activate page so shrink_inactive_list is unlikely to unmap
+		 * its ptes while lock is dropped, so swapoff can make progress.
 		 */
 		activate_page(page);
 		unlock_page(page);
diff --git a/mm/truncate.c b/mm/truncate.c
index e68443d74567..250505091d37 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -104,7 +104,6 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 	cancel_dirty_page(page, PAGE_CACHE_SIZE);
 
 	remove_from_page_cache(page);
-	ClearPageUptodate(page);
 	ClearPageMappedToDisk(page);
 	page_cache_release(page);	/* pagecache ref */
 }
@@ -188,7 +187,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			if (page_index > next)
 				next = page_index;
 			next++;
-			if (TestSetPageLocked(page))
+			if (!trylock_page(page))
 				continue;
 			if (PageWriteback(page)) {
 				unlock_page(page);
@@ -281,7 +280,7 @@ unsigned long __invalidate_mapping_pages(struct address_space *mapping,
 		pgoff_t index;
 		int lock_failed;
 
-		lock_failed = TestSetPageLocked(page);
+		lock_failed = !trylock_page(page);
 
 		/*
 		 * We really shouldn't be looking at the ->index of an
@@ -356,7 +355,6 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	BUG_ON(PagePrivate(page));
 	__remove_from_page_cache(page);
 	spin_unlock_irq(&mapping->tree_lock);
-	ClearPageUptodate(page);
 	page_cache_release(page);	/* pagecache ref */
 	return 1;
 failed:
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8f71761bc4b7..1ff1a58e7c10 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -496,7 +496,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		page = lru_to_page(page_list);
 		list_del(&page->lru);
 
-		if (TestSetPageLocked(page))
+		if (!trylock_page(page))
 			goto keep;
 
 		VM_BUG_ON(PageActive(page));
@@ -582,7 +582,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				 * A synchronous write - probably a ramdisk.  Go
 				 * ahead and try to reclaim the page.
 				 */
-				if (TestSetPageLocked(page))
+				if (!trylock_page(page))
 					goto keep;
 				if (PageDirty(page) || PageWriteback(page))
 					goto keep_locked;
@@ -1408,7 +1408,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
 			congestion_wait(WRITE, HZ/10);
 	}
-	/* top priority shrink_caches still had more to do? don't OOM, then */
+	/* top priority shrink_zones still had more to do? don't OOM, then */
 	if (!sc->all_unreclaimable && scan_global_lru(sc))
 		ret = nr_reclaimed;
 out:
@@ -1979,7 +1979,7 @@ module_init(kswapd_init)
 int zone_reclaim_mode __read_mostly;
 
 #define RECLAIM_OFF 0
-#define RECLAIM_ZONE (1<<0)	/* Run shrink_cache on the zone */
+#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
 #define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
 #define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */
 