author     Ingo Molnar <mingo@elte.hu>  2011-03-09 04:38:55 -0500
committer  Ingo Molnar <mingo@elte.hu>  2011-03-09 04:38:59 -0500
commit     c8b44163b754612fc4769fe1c5df00e98fc9d3c6 (patch)
tree       77706ff1f2a72ed294885b6cf0a7c0de0f92d6df /mm/memory.c
parent     ac23f25355ef53f3d14352fcff3c6817527a9749 (diff)
parent     a5abba989deceb731047425812d268daf7536575 (diff)
Merge commit 'v2.6.38-rc8' into x86/asm
Merge reason: Update with the latest fixes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 34 ++++++++++++++--------------------
1 file changed, 14 insertions(+), 20 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 31250faff390..5823698c2b71 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2219,7 +2219,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 						 &ptl);
 		if (!pte_same(*page_table, orig_pte)) {
 			unlock_page(old_page);
-			page_cache_release(old_page);
 			goto unlock;
 		}
 		page_cache_release(old_page);
@@ -2289,7 +2288,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 						 &ptl);
 		if (!pte_same(*page_table, orig_pte)) {
 			unlock_page(old_page);
-			page_cache_release(old_page);
 			goto unlock;
 		}
 
@@ -2367,16 +2365,6 @@ gotten:
 	}
 	__SetPageUptodate(new_page);
 
-	/*
-	 * Don't let another task, with possibly unlocked vma,
-	 * keep the mlocked page.
-	 */
-	if ((vma->vm_flags & VM_LOCKED) && old_page) {
-		lock_page(old_page);	/* for LRU manipulation */
-		clear_page_mlock(old_page);
-		unlock_page(old_page);
-	}
-
 	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
 		goto oom_free_new;
 
@@ -2444,10 +2432,20 @@ gotten:
 
 	if (new_page)
 		page_cache_release(new_page);
-	if (old_page)
-		page_cache_release(old_page);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
+	if (old_page) {
+		/*
+		 * Don't let another task, with possibly unlocked vma,
+		 * keep the mlocked page.
+		 */
+		if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
+			lock_page(old_page);	/* LRU manipulation */
+			munlock_vma_page(old_page);
+			unlock_page(old_page);
+		}
+		page_cache_release(old_page);
+	}
 	return ret;
 oom_free_new:
 	page_cache_release(new_page);
@@ -2650,6 +2648,7 @@ void unmap_mapping_range(struct address_space *mapping,
 	details.last_index = ULONG_MAX;
 	details.i_mmap_lock = &mapping->i_mmap_lock;
 
+	mutex_lock(&mapping->unmap_mutex);
 	spin_lock(&mapping->i_mmap_lock);
 
 	/* Protect against endless unmapping loops */
@@ -2666,6 +2665,7 @@ void unmap_mapping_range(struct address_space *mapping,
 	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
 		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
 	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->unmap_mutex);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
@@ -3053,12 +3053,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			goto out;
 		}
 		charged = 1;
-		/*
-		 * Don't let another task, with possibly unlocked vma,
-		 * keep the mlocked page.
-		 */
-		if (vma->vm_flags & VM_LOCKED)
-			clear_page_mlock(vmf.page);
 		copy_user_highpage(page, vmf.page, address, vma);
 		__SetPageUptodate(page);
 	} else {