path: root/mm/memory.c
author		Nick Piggin <npiggin@suse.de>	2008-01-30 07:31:20 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:31:20 -0500
commit		95c354fe9f7d6decc08a92aa26eb233ecc2155bf (patch)
tree		ec9267032ea875e84216cfb20acb2cfc7c62149f	/mm/memory.c
parent		a95d67f87e1a5f1b4429be3ba3bf7b4051657908 (diff)
spinlock: lockbreak cleanup
The break_lock data structure and code for spinlocks is quite nasty. Not only does it double the size of a spinlock but it changes locking to a potentially less optimal trylock.

Put all of that under CONFIG_GENERIC_LOCKBREAK, and introduce a __raw_spin_is_contended that uses the lock data itself to determine whether there are waiters on the lock, to be used if CONFIG_GENERIC_LOCKBREAK is not set.

Rename need_lockbreak to spin_needbreak, make it use spin_is_contended to decouple it from the spinlock implementation, and make it typesafe (rwlocks do not have any need_lockbreak sites -- why do they even get bloated up with that break_lock then?).

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
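For illustration, a minimal user-space sketch of the idea behind __raw_spin_is_contended (this is not the kernel's implementation; the ticket_lock type and helper names below are hypothetical): with a ticket-style lock, contention can be read straight out of the lock word, so no separate break_lock field is needed.

#include <stdatomic.h>

/* Hypothetical ticket lock: "owner" is the ticket currently being
 * served, "next" is the ticket handed to the next arrival.  Equal
 * values mean the lock is free. */
struct ticket_lock {
	atomic_uint owner;
	atomic_uint next;
};

/* Contended iff more than one ticket is outstanding, i.e. someone
 * beyond the current holder is waiting.  Contention is derived from
 * the lock data itself, with no extra break_lock field. */
static inline int ticket_is_contended(struct ticket_lock *lock)
{
	unsigned int next  = atomic_load_explicit(&lock->next, memory_order_relaxed);
	unsigned int owner = atomic_load_explicit(&lock->owner, memory_order_relaxed);

	return (next - owner) > 1;
}

/* Shape of the renamed helper: break out of a long critical section
 * only when the lock is actually contended (in the kernel this also
 * compiles away to 0 unless CONFIG_PREEMPT is set). */
static inline int ticket_needbreak(struct ticket_lock *lock)
{
	return ticket_is_contended(lock);
}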
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 4b0144b24c12..673ebbf499c7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -513,8 +513,7 @@ again:
 		if (progress >= 32) {
 			progress = 0;
 			if (need_resched() ||
-			    need_lockbreak(src_ptl) ||
-			    need_lockbreak(dst_ptl))
+			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
 				break;
 		}
 		if (pte_none(*src_pte)) {
@@ -853,7 +852,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 			tlb_finish_mmu(*tlbp, tlb_start, start);
 
 			if (need_resched() ||
-				(i_mmap_lock && need_lockbreak(i_mmap_lock))) {
+				(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
 				if (i_mmap_lock) {
 					*tlbp = NULL;
 					goto out;
@@ -1768,8 +1767,7 @@ again:
 
 		restart_addr = zap_page_range(vma, start_addr,
 				end_addr - start_addr, details);
-		need_break = need_resched() ||
-				need_lockbreak(details->i_mmap_lock);
+		need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
 
 		if (restart_addr >= end_addr) {
 			/* We have now completed this vma: mark it so */
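All three hunks above instantiate the same lock-break pattern: do work in batches under a hot lock, and drop the lock when a reschedule is due or another CPU is spinning on it. A schematic of that loop, with hypothetical helpers standing in for the real batched work:

/* Schematic only -- struct my_work, has_more_work() and do_one_unit()
 * are hypothetical stand-ins for the real batched work above. */
static void process_all(struct my_work *w, spinlock_t *lock)
{
	int progress;

again:
	progress = 0;
	spin_lock(lock);
	while (has_more_work(w)) {
		do_one_unit(w);
		if (++progress >= 32) {
			progress = 0;
			/* Drop the lock if we should reschedule or if
			 * another CPU is waiting for it. */
			if (need_resched() || spin_needbreak(lock))
				break;
		}
	}
	spin_unlock(lock);

	if (has_more_work(w)) {
		cond_resched();	/* give the waiter/scheduler a chance */
		goto again;
	}
}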