Diffstat (limited to 'include/asm-arm/system.h')

-rw-r--r--  include/asm-arm/system.h  | 130
1 file changed, 75 insertions(+), 55 deletions(-)
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 39dd7008013c..8efa4ebdcacb 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -85,7 +85,9 @@ struct pt_regs;
 void die(const char *msg, struct pt_regs *regs, int err)
 		__attribute__((noreturn));
 
-void die_if_kernel(const char *str, struct pt_regs *regs, int err);
+struct siginfo;
+void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
+		unsigned long err, unsigned long trap);
 
 void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
 				       struct pt_regs *),
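For context, a hedged sketch of how a trap handler might call the notify_die() hook declared in this hunk. The handler name, the siginfo field values, and the trap number are illustrative assumptions, not taken from this patch; the real call sites live in arch/arm/kernel/traps.c and arch/arm/mm/fault.c.

/*
 * Hypothetical caller of notify_die(), using the signature declared
 * above: (message, regs, siginfo, error code, trap number).
 */
#include <linux/signal.h>	/* struct siginfo, SIGILL, ILL_ILLOPC */
#include <asm/ptrace.h>		/* struct pt_regs, instruction_pointer() */
#include <asm/system.h>		/* notify_die() declaration */

static void example_undef_handler(struct pt_regs *regs)
{
	struct siginfo info;

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code  = ILL_ILLOPC;
	info.si_addr  = (void __user *)instruction_pointer(regs);

	/* error code and trap number chosen for illustration only */
	notify_die("undefined instruction", regs, &info, 0, 6);
}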
@@ -137,7 +139,12 @@ extern unsigned int user_debug;
 #define vectors_high()	(0)
 #endif
 
+#if __LINUX_ARM_ARCH__ >= 6
+#define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+				    : : "r" (0) : "memory")
+#else
 #define mb() __asm__ __volatile__ ("" : : : "memory")
+#endif
 #define rmb() mb()
 #define wmb() mb()
 #define read_barrier_depends() do { } while(0)
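To illustrate what these barriers buy on ARMv6 (where "mcr p15, 0, %0, c7, c10, 5" issues the CP15 Data Memory Barrier operation), here is a minimal producer/consumer sketch. The variables and functions are invented for the example; only mb()/wmb()/rmb() come from this header.

/*
 * Hypothetical producer/consumer pair built on the barriers above.
 * Without wmb()/rmb(), an ARMv6 core may reorder the data and flag
 * accesses so the consumer sees ready == 1 before payload is visible.
 */
static volatile unsigned long payload;
static volatile int ready;

static void producer(unsigned long value)
{
	payload = value;	/* 1: publish the data */
	wmb();			/* 2: order the data store before ... */
	ready = 1;		/* 3: ... raising the flag */
}

static unsigned long consumer(void)
{
	while (!ready)		/* wait for the flag */
		;
	rmb();			/* order the flag read before ... */
	return payload;		/* ... reading the data */
}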
@@ -145,34 +152,12 @@ extern unsigned int user_debug;
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
-#ifdef CONFIG_SMP
-/*
- * Define our own context switch locking.  This allows us to enable
- * interrupts over the context switch, otherwise we end up with high
- * interrupt latency.  The real problem area is switch_mm() which may
- * do a full cache flush.
- */
-#define prepare_arch_switch(rq,next)					\
-do {									\
-	spin_lock(&(next)->switch_lock);				\
-	spin_unlock_irq(&(rq)->lock);					\
-} while (0)
-
-#define finish_arch_switch(rq,prev)					\
-	spin_unlock(&(prev)->switch_lock)
-
-#define task_running(rq,p)						\
-	((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
-#else
 /*
- * Our UP-case is more simple, but we assume knowledge of how
- * spin_unlock_irq() and friends are implemented.  This avoids
- * us needlessly decrementing and incrementing the preempt count.
+ * switch_mm() may do a full cache flush over the context switch,
+ * so enable interrupts over the context switch to avoid high
+ * latency.
  */
-#define prepare_arch_switch(rq,next)	local_irq_enable()
-#define finish_arch_switch(rq,prev)	spin_unlock(&(rq)->lock)
-#define task_running(rq,p)		((rq)->curr == (p))
-#endif
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
 
 /*
  * switch_to(prev, next) should switch from task `prev' to `next'
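A hedged sketch of what selecting __ARCH_WANT_INTERRUPTS_ON_CTXSW means in practice: the generic scheduler, rather than arch code, decides how to drop the runqueue lock around the (potentially slow) switch_mm() cache flush. The macro below is purely illustrative and is not the scheduler's real interface.

/*
 * Hypothetical illustration only: with __ARCH_WANT_INTERRUPTS_ON_CTXSW
 * defined, the runqueue lock can be released with IRQs re-enabled before
 * the expensive part of the context switch; otherwise IRQs stay off.
 */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define example_unlock_rq(rq)	spin_unlock_irq(&(rq)->lock)	/* IRQs on early */
#else
# define example_unlock_rq(rq)	spin_unlock(&(rq)->lock)	/* IRQs still off */
#endif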
@@ -312,7 +297,6 @@ do { \
 	})
 
 #ifdef CONFIG_SMP
-#error SMP not supported
 
 #define smp_mb()		mb()
 #define smp_rmb()		rmb()
@@ -326,6 +310,8 @@ do { \
 #define smp_wmb()		barrier()
 #define smp_read_barrier_depends()		do { } while(0)
 
+#endif /* CONFIG_SMP */
+
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
 /*
  * On the StrongARM, "swp" is terminally broken since it bypasses the
@@ -338,6 +324,9 @@ do { \
  *
  * We choose (1) since its the "easiest" to achieve here and is not
  * dependent on the processor type.
+ *
+ * NOTE that this solution won't work on an SMP system, so explcitly
+ * forbid it here.
  */
 #define swp_is_buggy
 #endif
@@ -349,42 +338,73 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #ifdef swp_is_buggy
 	unsigned long flags;
 #endif
+#if __LINUX_ARM_ARCH__ >= 6
+	unsigned int tmp;
+#endif
 
 	switch (size) {
-#ifdef swp_is_buggy
-	case 1:
-		local_irq_save(flags);
-		ret = *(volatile unsigned char *)ptr;
-		*(volatile unsigned char *)ptr = x;
-		local_irq_restore(flags);
-		break;
-
-	case 4:
-		local_irq_save(flags);
-		ret = *(volatile unsigned long *)ptr;
-		*(volatile unsigned long *)ptr = x;
-		local_irq_restore(flags);
-		break;
+#if __LINUX_ARM_ARCH__ >= 6
+	case 1:
+		asm volatile("@	__xchg1\n"
+		"1:	ldrexb	%0, [%3]\n"
+		"	strexb	%1, %2, [%3]\n"
+		"	teq	%1, #0\n"
+		"	bne	1b"
+			: "=&r" (ret), "=&r" (tmp)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+	case 4:
+		asm volatile("@	__xchg4\n"
+		"1:	ldrex	%0, [%3]\n"
+		"	strex	%1, %2, [%3]\n"
+		"	teq	%1, #0\n"
+		"	bne	1b"
+			: "=&r" (ret), "=&r" (tmp)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+#elif defined(swp_is_buggy)
+#ifdef CONFIG_SMP
+#error SMP is not supported on this platform
+#endif
+	case 1:
+		local_irq_save(flags);
+		ret = *(volatile unsigned char *)ptr;
+		*(volatile unsigned char *)ptr = x;
+		local_irq_restore(flags);
+		break;
+
+	case 4:
+		local_irq_save(flags);
+		ret = *(volatile unsigned long *)ptr;
+		*(volatile unsigned long *)ptr = x;
+		local_irq_restore(flags);
+		break;
 #else
-	case 1:	__asm__ __volatile__ ("swpb %0, %1, [%2]"
-					: "=&r" (ret)
-					: "r" (x), "r" (ptr)
-					: "memory", "cc");
-		break;
-	case 4:	__asm__ __volatile__ ("swp %0, %1, [%2]"
-					: "=&r" (ret)
-					: "r" (x), "r" (ptr)
-					: "memory", "cc");
-		break;
+	case 1:
+		asm volatile("@	__xchg1\n"
+		"	swpb	%0, %1, [%2]"
+			: "=&r" (ret)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
+	case 4:
+		asm volatile("@	__xchg4\n"
+		"	swp	%0, %1, [%2]"
+			: "=&r" (ret)
+			: "r" (x), "r" (ptr)
+			: "memory", "cc");
+		break;
 #endif
-	default: __bad_xchg(ptr, size), ret = 0;
+	default:
+		__bad_xchg(ptr, size), ret = 0;
+		break;
 	}
 
 	return ret;
 }
 
-#endif /* CONFIG_SMP */
-
 #endif	/* __ASSEMBLY__ */
 
 #define arch_align_stack(x) (x)
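Finally, a usage-level sketch of the __xchg() path above, reached through the xchg() wrapper this header provides: a minimal test-and-set style lock. On ARMv6 the ldrexb/strexb retry loop makes the exchange atomic even on SMP; pre-v6 falls back to swp, or to an IRQ-disabled copy where swp is buggy. The type and function names are invented for illustration; real code should use the kernel's spinlock primitives.

/*
 * Hypothetical example: a minimal test-and-set "lock" built on xchg().
 * Illustrative only; not a replacement for spinlock_t.
 */
#include <asm/system.h>		/* xchg(), smp_mb() */

typedef struct { volatile unsigned long slock; } example_lock_t;

static inline void example_lock(example_lock_t *l)
{
	/* atomically swap in 1; the returned old value says who got there first */
	while (xchg(&l->slock, 1) != 0)
		/* spin until the holder stores 0 back */ ;
	smp_mb();		/* acquire: keep the critical section after the swap */
}

static inline void example_unlock(example_lock_t *l)
{
	smp_mb();		/* release: keep the critical section before the store */
	l->slock = 0;
}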