author		David Howells <dhowells@redhat.com>	2006-10-16 09:10:49 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-16 11:32:29 -0400
commit		29da7eb0ec69245c6e9b4eb5bdaa04af685f5c4f (patch)
tree		14fb9ac86841af2cc17b999507b6879936616b1d
parent		8741ca71a3f626a56595b88200ebf952ce77ceef (diff)
[PATCH] FRV: Use the correct preemption primitives in kmap_atomic() and co
Use inc/dec_preempt_count() rather than preempt_enable/disable() and manually add in the compiler barriers that were provided by the latter. This makes FRV consistent with other archs.

Furthermore, the compiler barrier effects are now there unconditionally - at least as far as preemption is concerned - because we don't want the compiler moving memory accesses out of the section of code in which the mapping is in force. In effect, kmap_atomic() must imply a LOCK-class barrier and kunmap_atomic() must imply an UNLOCK-class barrier to the compiler.

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	include/asm-frv/highmem.h	27
1 file changed, 14 insertions(+), 13 deletions(-)
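The key claim in the log message is that the "memory" clobbers added below reproduce the compiler barrier that preempt_disable()/preempt_enable() used to provide. As background: the kernel's barrier() is an empty asm with a "memory" clobber, which forbids GCC from caching values in registers across the asm or from moving loads and stores past it. A minimal sketch of that effect follows; the variable and function names are hypothetical, not part of the patch.

	/* A "memory" clobber acts as a compiler barrier: GCC must assume
	 * the asm may read or write any memory, so it cannot merge,
	 * reorder, or cache memory accesses across it.  Sketch only;
	 * names are illustrative.
	 */
	static int mapping_live;		/* hypothetical flag */

	static void clobber_demo(void)
	{
		mapping_live = 1;		/* cannot sink below the asm */
		asm volatile("" ::: "memory");	/* same effect as barrier() */
		mapping_live = 2;		/* cannot hoist above the asm */
	}

Without the clobber, GCC could collapse the two stores into a single store of 2; with it, both stores must be emitted in order. The patch attaches the same clobber directly to the FRV mapping instructions, since preempt_disable()'s implicit barrier() is no longer present.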
diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index e2247c22a638..0f390f41f816 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -82,11 +82,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
 	dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
 									\
 	if (type != __KM_CACHE)						\
-		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr));	\
+		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory"); \
 	else								\
 		asm volatile("movgs %0,iampr"#ampr"\n"			\
 			     "movgs %0,dampr"#ampr"\n"			\
-			     :: "r"(dampr)				\
+			     :: "r"(dampr) : "memory"			\
 			     );						\
 									\
 	asm("movsg damlr"#ampr",%0" : "=r"(damlr));			\
@@ -104,7 +104,7 @@ extern struct page *kmap_atomic_to_page(void *ptr);
 	asm volatile("movgs %0,tplr \n"					\
 		     "movgs %1,tppr \n"					\
 		     "tlbpr %0,gr0,#2,#1"				\
-		     : : "r"(damlr), "r"(dampr));			\
+		     : : "r"(damlr), "r"(dampr) : "memory");		\
 									\
 	/*printk("TLB: SECN sl=%d L=%08lx P=%08lx\n", slot, damlr, dampr);*/ \
 									\
@@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
 	unsigned long paddr;
 
-	preempt_disable();
+	inc_preempt_count();
 	paddr = page_to_phys(page);
 
 	switch (type) {
@@ -138,16 +138,16 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	}
 }
 
 #define __kunmap_atomic_primary(type, ampr)				\
 do {									\
-	asm volatile("movgs gr0,dampr"#ampr"\n");			\
+	asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory");		\
 	if (type == __KM_CACHE)						\
-		asm volatile("movgs gr0,iampr"#ampr"\n");		\
+		asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory");	\
 } while(0)
 
 #define __kunmap_atomic_secondary(slot, vaddr)				\
 do {									\
-	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr));		\
+	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");	\
 } while(0)
 
 static inline void kunmap_atomic(void *kvaddr, enum km_type type)
@@ -170,7 +170,8 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	default:
 		BUG();
 	}
-	preempt_enable();
+	dec_preempt_count();
+	preempt_check_resched();
 }
 
 #endif /* !__ASSEMBLY__ */
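For reference, this is roughly how the primitives relate, paraphrased from the 2.6-era <linux/preempt.h> (CONFIG_PREEMPT case; exact definitions vary by version and config). preempt_disable() is inc_preempt_count() plus barrier(), and preempt_enable() additionally checks for a pending reschedule. Open-coding inc/dec_preempt_count() therefore loses both the barrier() - restored above by the "memory" clobbers - and the reschedule check, which the patched kunmap_atomic() performs explicitly via preempt_check_resched().

	/* Paraphrased sketch of the 2.6-era primitives, not the exact
	 * source text.
	 */
	#define preempt_disable()				\
	do {							\
		inc_preempt_count();	/* bump preempt_count */ \
		barrier();		/* compiler barrier */	\
	} while (0)

	#define preempt_enable_no_resched()			\
	do {							\
		barrier();		/* compiler barrier */	\
		dec_preempt_count();	/* drop preempt_count */ \
	} while (0)

	#define preempt_enable()				\
	do {							\
		preempt_enable_no_resched();			\
		preempt_check_resched();  /* preempt if needed */ \
	} while (0)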