path: root/mm/memory.c
author     David Brown <davidb@codeaurora.org>    2011-03-17 01:13:16 -0400
committer  David Brown <davidb@codeaurora.org>    2011-03-17 01:13:16 -0400
commit     92c260f755c42337c550d8ac1f8ccd1b32bffb20 (patch)
tree       6d04fefc1adeecabfb2b00c201e0db78fa2b5529 /mm/memory.c
parent     8e76a80960bf06c245160a484d5a363ca6b520bb (diff)
parent     05e34754518b6a90d5c392790c032575fab12d66 (diff)
Merge remote branch 'rmk/for-linus' into for-linus
* rmk/for-linus: (1557 commits)
  ARM: 6806/1: irq: introduce entry and exit functions for chained handlers
  ARM: 6781/1: Thumb-2: Work around buggy Thumb-2 short branch relocations in gas
  ARM: 6747/1: P2V: Thumb2 support
  ARM: 6798/1: aout-core: zero thread debug registers in a.out core dump
  ARM: 6796/1: Footbridge: Fix I/O mappings for NOMMU mode
  ARM: 6784/1: errata: no automatic Store Buffer drain on Cortex-A9
  ARM: 6772/1: errata: possible fault MMU translations following an ASID switch
  ARM: 6776/1: mach-ux500: activate fix for errata 753970
  ARM: 6794/1: SPEAr: Append UL to device address macros.
  ARM: 6793/1: SPEAr: Remove unused *_SIZE macros from spear*.h files
  ARM: 6792/1: SPEAr: Replace SIZE macro's with SZ_4K macros
  ARM: 6791/1: SPEAr3xx: Declare device structures after shirq code
  ARM: 6790/1: SPEAr: Clock Framework: Rename usbd clock and align apb_clk entry
  ARM: 6789/1: SPEAr3xx: Rename sdio to sdhci
  ARM: 6788/1: SPEAr: Include mach/hardware.h instead of mach/spear.h
  ARM: 6787/1: SPEAr: Reorder #includes in .h & .c files.
  ARM: 6681/1: SPEAr: add debugfs support to clk API
  ARM: 6703/1: SPEAr: update clk API support
  ARM: 6679/1: SPEAr: make clk API functions more generic
  ARM: 6737/1: SPEAr: formalized timer support
  ...

Conflicts:
	arch/arm/mach-msm/board-msm7x27.c
	arch/arm/mach-msm/board-msm7x30.c
	arch/arm/mach-msm/board-qsd8x50.c
	arch/arm/mach-msm/board-sapphire.c
	arch/arm/mach-msm/include/mach/memory.h
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  |  34
1 file changed, 14 insertions(+), 20 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 31250faff390..5823698c2b71 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2219,7 +2219,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 							 &ptl);
 			if (!pte_same(*page_table, orig_pte)) {
 				unlock_page(old_page);
-				page_cache_release(old_page);
 				goto unlock;
 			}
 			page_cache_release(old_page);
@@ -2289,7 +2288,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 						 &ptl);
 		if (!pte_same(*page_table, orig_pte)) {
 			unlock_page(old_page);
-			page_cache_release(old_page);
 			goto unlock;
 		}
 
@@ -2367,16 +2365,6 @@ gotten:
 	}
 	__SetPageUptodate(new_page);
 
-	/*
-	 * Don't let another task, with possibly unlocked vma,
-	 * keep the mlocked page.
-	 */
-	if ((vma->vm_flags & VM_LOCKED) && old_page) {
-		lock_page(old_page);	/* for LRU manipulation */
-		clear_page_mlock(old_page);
-		unlock_page(old_page);
-	}
-
 	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
 		goto oom_free_new;
 
@@ -2444,10 +2432,20 @@ gotten:
 
 	if (new_page)
 		page_cache_release(new_page);
-	if (old_page)
-		page_cache_release(old_page);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
+	if (old_page) {
+		/*
+		 * Don't let another task, with possibly unlocked vma,
+		 * keep the mlocked page.
+		 */
+		if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
+			lock_page(old_page);	/* LRU manipulation */
+			munlock_vma_page(old_page);
+			unlock_page(old_page);
+		}
+		page_cache_release(old_page);
+	}
 	return ret;
 oom_free_new:
 	page_cache_release(new_page);
@@ -2650,6 +2648,7 @@ void unmap_mapping_range(struct address_space *mapping,
 	details.last_index = ULONG_MAX;
 	details.i_mmap_lock = &mapping->i_mmap_lock;
 
+	mutex_lock(&mapping->unmap_mutex);
 	spin_lock(&mapping->i_mmap_lock);
 
 	/* Protect against endless unmapping loops */
@@ -2666,6 +2665,7 @@ void unmap_mapping_range(struct address_space *mapping,
 	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
 		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
 	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->unmap_mutex);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
@@ -3053,12 +3053,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 				goto out;
 			}
 			charged = 1;
-			/*
-			 * Don't let another task, with possibly unlocked vma,
-			 * keep the mlocked page.
-			 */
-			if (vma->vm_flags & VM_LOCKED)
-				clear_page_mlock(vmf.page);
 			copy_user_highpage(page, vmf.page, address, vma);
 			__SetPageUptodate(page);
 		} else {