author	Thomas Gleixner <tglx@linutronix.de>	2009-07-03 09:44:46 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-09-13 05:12:14 -0400
commit	bd31b85960a7fcb2d7ede216460b8da71a88411c (patch)
tree	f2ab1a1105705856c5cdfc71bcf3f7b5f897d30d /arch/arm/mm/copypage-v6.c
parent	a1741e7fcbc19a67520115df480ab17012cc3d0b (diff)
locking, ARM: Annotate low level hw locks as raw
Annotate the low level hardware locks which must not be preempted.

In mainline this change documents the low level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse checking
will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
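For readers unfamiliar with the distinction: on a mainline kernel raw_spinlock_t and spinlock_t behave identically, but on PREEMPT_RT a spinlock_t becomes a sleeping lock, while a raw_spinlock_t keeps spinning with preemption disabled. A minimal sketch of the annotation pattern follows; the hw_lock and hw_window_update names are illustrative only, not taken from the patch, which converts v6_lock in the diff below:

#include <linux/spinlock.h>

/* Hypothetical low level lock, mirroring the v6_lock conversion below. */
static DEFINE_RAW_SPINLOCK(hw_lock);

static void hw_window_update(void)
{
	/*
	 * raw_spin_lock() disables preemption even on PREEMPT_RT, so
	 * this critical section cannot be preempted while it touches
	 * per-CPU hardware state. A plain spin_lock() would be turned
	 * into a sleeping lock on -rt, which is not allowed here.
	 */
	raw_spin_lock(&hw_lock);
	/* ... program hardware / per-CPU mappings here ... */
	raw_spin_unlock(&hw_lock);
}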
Diffstat (limited to 'arch/arm/mm/copypage-v6.c')
-rw-r--r--	arch/arm/mm/copypage-v6.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 63cca009713..3d9a1552cef 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -27,7 +27,7 @@
 #define from_address	(0xffff8000)
 #define to_address	(0xffffc000)
 
-static DEFINE_SPINLOCK(v6_lock);
+static DEFINE_RAW_SPINLOCK(v6_lock);
 
 /*
  * Copy the user page. No aliasing to deal with so we can just
@@ -88,7 +88,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	 * Now copy the page using the same cache colour as the
	 * pages ultimate destination.
	 */
-	spin_lock(&v6_lock);
+	raw_spin_lock(&v6_lock);
 
 	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
 	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
@@ -101,7 +101,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 
 	copy_page((void *)kto, (void *)kfrom);
 
-	spin_unlock(&v6_lock);
+	raw_spin_unlock(&v6_lock);
 }
 
 /*
@@ -121,13 +121,13 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
 	 * Now clear the page using the same cache colour as
	 * the pages ultimate destination.
	 */
-	spin_lock(&v6_lock);
+	raw_spin_lock(&v6_lock);
 
 	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
-	spin_unlock(&v6_lock);
+	raw_spin_unlock(&v6_lock);
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {