path: root/include/asm-arm
author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2005-06-29 04:40:28 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2005-06-29 04:40:28 -0400
commit		99a0616bcdabec9005159e50af91a36cc3f8bda8 (patch)
tree		4c5bb08f6b392ffbc8a07d3b1d900971c6776243 /include/asm-arm
parent		a839688362e32f01608838516036697e30618b39 (diff)
parent		053a7b5b7617a72d7c61b6f84196d1c0f79b9849 (diff)
Merge with ../linux-2.6-smp
Diffstat (limited to 'include/asm-arm')
-rw-r--r--	include/asm-arm/system.h	12
-rw-r--r--	include/asm-arm/tlbflush.h	28
2 files changed, 31 insertions(+), 9 deletions(-)
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 3d0d2860b6db..cdf49f442fd2 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -290,7 +290,6 @@ do { \
 })
 
 #ifdef CONFIG_SMP
-#error SMP not supported
 
 #define smp_mb()		mb()
 #define smp_rmb()		rmb()
@@ -304,6 +303,8 @@ do { \
 #define smp_wmb()		barrier()
 #define smp_read_barrier_depends()	do { } while(0)
 
+#endif /* CONFIG_SMP */
+
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
 /*
  * On the StrongARM, "swp" is terminally broken since it bypasses the
@@ -316,9 +317,16 @@ do { \
  *
  * We choose (1) since its the "easiest" to achieve here and is not
  * dependent on the processor type.
+ *
+ * NOTE that this solution won't work on an SMP system, so explcitly
+ * forbid it here.
  */
+#ifdef CONFIG_SMP
+#error SMP is not supported on SA1100/SA110
+#else
 #define swp_is_buggy
 #endif
+#endif
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 {
@@ -361,8 +369,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	return ret;
 }
 
-#endif /* CONFIG_SMP */
-
 #endif /* __ASSEMBLY__ */
 
 #define arch_align_stack(x) (x)
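
Net effect of the system.h hunks above: the blanket "#error SMP not supported" under CONFIG_SMP is dropped, so this header no longer rejects SMP builds and they pick up the real mb()/rmb()/wmb()-based smp_* barriers; the closing "#endif /* CONFIG_SMP */" moves up so that the StrongARM swp workaround and __xchg() are no longer confined to the UP branch. The build-time rejection is instead narrowed to the one case that genuinely cannot work: the swp_is_buggy fallback disables interrupts around a non-atomic exchange, which only protects against the local CPU, hence the new NOTE and the SA1100/SA110-specific #error. A condensed sketch of the resulting preprocessor structure, paraphrased from the hunks above (barrier bodies elided, not copied verbatim from the header):

#ifdef CONFIG_SMP
#define smp_mb()	mb()		/* real barriers on SMP */
/* ... smp_rmb(), smp_wmb() likewise ... */
#else
#define smp_mb()	barrier()	/* compiler-only barriers on UP */
/* ... */
#endif /* CONFIG_SMP */

/* "swp"/"swpb" bypass the cache on StrongARM, so xchg() must fall back to
 * a disable-interrupts sequence -- which is only safe on a single CPU. */
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
#ifdef CONFIG_SMP
#error SMP is not supported on SA1100/SA110
#else
#define swp_is_buggy
#endif
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size);
/* ...now visible to both UP and SMP builds... */
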
diff --git a/include/asm-arm/tlbflush.h b/include/asm-arm/tlbflush.h
index 8a864b118569..9387a5e1ffe0 100644
--- a/include/asm-arm/tlbflush.h
+++ b/include/asm-arm/tlbflush.h
@@ -235,7 +235,7 @@ extern struct cpu_tlb_fns cpu_tlb;
 
 #define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
 
-static inline void flush_tlb_all(void)
+static inline void local_flush_tlb_all(void)
 {
 	const int zero = 0;
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
@@ -253,7 +253,7 @@ static inline void flush_tlb_all(void)
 		asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
 }
 
-static inline void flush_tlb_mm(struct mm_struct *mm)
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
 {
 	const int zero = 0;
 	const int asid = ASID(mm);
@@ -282,7 +282,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 }
 
 static inline void
-flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
 	const int zero = 0;
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
@@ -313,7 +313,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));
 }
 
-static inline void flush_tlb_kernel_page(unsigned long kaddr)
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 {
 	const int zero = 0;
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
@@ -384,8 +384,24 @@ static inline void clean_pmd_entry(pmd_t *pmd)
 /*
  * Convert calls to our calling convention.
  */
-#define flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
-#define flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
+#define local_flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
+#define local_flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
+
+#ifndef CONFIG_SMP
+#define flush_tlb_all		local_flush_tlb_all
+#define flush_tlb_mm		local_flush_tlb_mm
+#define flush_tlb_page		local_flush_tlb_page
+#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
+#define flush_tlb_range		local_flush_tlb_range
+#define flush_tlb_kernel_range	local_flush_tlb_kernel_range
+#else
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
+extern void flush_tlb_kernel_page(unsigned long kaddr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+#endif
 
 /*
  * if PG_dcache_dirty is set for the page, we need to ensure that any
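
The tlbflush.h change is a rename plus an indirection layer: the inline MCR-based flushes become local_flush_tlb_*() (they only touch the TLB of the CPU executing them), UP builds alias the old names straight back to the local versions, and SMP builds instead get out-of-line flush_tlb_*() declarations to be implemented elsewhere. A minimal sketch of how such an out-of-line SMP implementation typically looks; this is illustrative only and not part of this patch -- the ipi_flush_tlb_all() helper name and the exact on_each_cpu() argument list (func, info, retry, wait in kernels of this vintage) are assumptions:

#include <linux/smp.h>
#include <asm/tlbflush.h>

/* Runs on each CPU via cross-call: invalidate only this CPU's TLB. */
static void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

/* The global entry point declared for CONFIG_SMP builds: broadcast the
 * flush so every processor runs the local variant on itself. */
void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
}

The remaining externs (flush_tlb_mm(), flush_tlb_range(), ...) would follow the same pattern, packing their arguments into a small structure passed through the info pointer when more than a single value is needed.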