Diffstat (limited to 'arch/blackfin/include/asm/system.h')
 -rw-r--r--  arch/blackfin/include/asm/system.h | 185
 1 file changed, 90 insertions(+), 95 deletions(-)
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h
index 8f1627d8bf09..a4c8254bec55 100644
--- a/arch/blackfin/include/asm/system.h
+++ b/arch/blackfin/include/asm/system.h
@@ -37,114 +37,98 @@
 #include <linux/linkage.h>
 #include <linux/compiler.h>
 #include <mach/anomaly.h>
+#include <asm/pda.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
 
 /*
- * Interrupt configuring macros.
+ * Force strict CPU ordering.
  */
+#define nop()  __asm__ __volatile__ ("nop;\n\t" : : )
+#define mb()   __asm__ __volatile__ (""   : : : "memory")
+#define rmb()  __asm__ __volatile__ (""   : : : "memory")
+#define wmb()  __asm__ __volatile__ (""   : : : "memory")
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+#define read_barrier_depends()	do { } while(0)
 
-extern unsigned long irq_flags;
-
-#define local_irq_enable() \
-	__asm__ __volatile__( \
-		"sti %0;" \
-		: \
-		: "d" (irq_flags) \
-	)
-
-#define local_irq_disable() \
-	do { \
-		int __tmp_dummy; \
-		__asm__ __volatile__( \
-			"cli %0;" \
-			: "=d" (__tmp_dummy) \
-		); \
-	} while (0)
-
-#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
-# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
-#else
-# define NOP_PAD_ANOMALY_05000244
-#endif
-
-#define idle_with_irq_disabled() \
-	__asm__ __volatile__( \
-		NOP_PAD_ANOMALY_05000244 \
-		".align 8;" \
-		"sti %0;" \
-		"idle;" \
-		: \
-		: "d" (irq_flags) \
-	)
-
-#ifdef CONFIG_DEBUG_HWERR
-# define __save_and_cli(x) \
-	__asm__ __volatile__( \
-		"cli %0;" \
-		"sti %1;" \
-		: "=&d" (x) \
-		: "d" (0x3F) \
-	)
-#else
-# define __save_and_cli(x) \
-	__asm__ __volatile__( \
-		"cli %0;" \
-		: "=&d" (x) \
-	)
-#endif
-
-#define local_save_flags(x) \
-	__asm__ __volatile__( \
-		"cli %0;" \
-		"sti %0;" \
-		: "=d" (x) \
-	)
+#ifdef CONFIG_SMP
+asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
+asmlinkage unsigned long __raw_xchg_2_asm(volatile void *ptr, unsigned long value);
+asmlinkage unsigned long __raw_xchg_4_asm(volatile void *ptr, unsigned long value);
+asmlinkage unsigned long __raw_cmpxchg_1_asm(volatile void *ptr,
+					unsigned long new, unsigned long old);
+asmlinkage unsigned long __raw_cmpxchg_2_asm(volatile void *ptr,
+					unsigned long new, unsigned long old);
+asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
+					unsigned long new, unsigned long old);
+
+#ifdef __ARCH_SYNC_CORE_DCACHE
+# define smp_mb()	do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
+# define smp_rmb()	do { barrier(); smp_check_barrier(); } while (0)
+# define smp_wmb()	do { barrier(); smp_mark_barrier(); } while (0)
+#define smp_read_barrier_depends()	do { barrier(); smp_check_barrier(); } while (0)
 
-#ifdef CONFIG_DEBUG_HWERR
-#define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0)
 #else
-#define irqs_enabled_from_flags(x) ((x) != 0x1f)
+# define smp_mb()	barrier()
+# define smp_rmb()	barrier()
+# define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	barrier()
 #endif
 
-#define local_irq_restore(x) \
-	do { \
-		if (irqs_enabled_from_flags(x)) \
-			local_irq_enable(); \
-	} while (0)
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+				   int size)
+{
+	unsigned long tmp;
 
-/* For spinlocks etc */
-#define local_irq_save(x) __save_and_cli(x)
+	switch (size) {
+	case 1:
+		tmp = __raw_xchg_1_asm(ptr, x);
+		break;
+	case 2:
+		tmp = __raw_xchg_2_asm(ptr, x);
+		break;
+	case 4:
+		tmp = __raw_xchg_4_asm(ptr, x);
+		break;
+	}
 
-#define irqs_disabled() \
-({ \
-	unsigned long flags; \
-	local_save_flags(flags); \
-	!irqs_enabled_from_flags(flags); \
-})
+	return tmp;
+}
 
 /*
- * Force strict CPU ordering.
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
  */
-#define nop()  asm volatile ("nop;\n\t"::)
-#define mb()   asm volatile (""   : : :"memory")
-#define rmb()  asm volatile (""   : : :"memory")
-#define wmb()  asm volatile (""   : : :"memory")
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	unsigned long tmp;
 
-#define read_barrier_depends()		do { } while(0)
+	switch (size) {
+	case 1:
+		tmp = __raw_cmpxchg_1_asm(ptr, new, old);
+		break;
+	case 2:
+		tmp = __raw_cmpxchg_2_asm(ptr, new, old);
+		break;
+	case 4:
+		tmp = __raw_cmpxchg_4_asm(ptr, new, old);
+		break;
+	}
+
+	return tmp;
+}
+#define cmpxchg(ptr, o, n) \
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+		(unsigned long)(n), sizeof(*(ptr))))
+
+#else /* !CONFIG_SMP */
 
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#else
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #define smp_read_barrier_depends()	do { } while(0)
-#endif
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
 struct __xchg_dummy {
 	unsigned long a[100];
@@ -157,7 +141,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 	unsigned long tmp = 0;
 	unsigned long flags = 0;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 
 	switch (size) {
 	case 1:
@@ -179,7 +163,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 			: "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
 		break;
 	}
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 	return tmp;
 }
 
@@ -194,9 +178,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 		(unsigned long)(n), sizeof(*(ptr))))
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 
-#ifndef CONFIG_SMP
 #include <asm-generic/cmpxchg.h>
-#endif
+
+#endif /* !CONFIG_SMP */
+
+#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+#define tas(ptr) ((void)xchg((ptr), 1))
 
 #define prepare_to_switch()	do { } while(0)
 
@@ -205,10 +192,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
  * ptr isn't the current task, in which case it does nothing.
  */
 
-#include <asm/blackfin.h>
+#include <asm/l1layout.h>
+#include <asm/mem_map.h>
 
 asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_struct *next);
 
+#ifndef CONFIG_SMP
 #define switch_to(prev,next,last) \
 do {	\
 	memcpy (&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \
@@ -217,5 +206,11 @@ do { \
 	       sizeof *L1_SCRATCH_TASK_INFO); \
 	(last) = resume (prev, next); \
 } while (0)
+#else
+#define switch_to(prev, next, last) \
+do {	\
+	(last) = resume(prev, next); \
+} while (0)
+#endif
 
 #endif /* _BLACKFIN_SYSTEM_H */
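
The comment added above __cmpxchg() spells out the calling convention: the store happens only when the value found in memory equals the caller's expected old value, and the caller detects success by comparing the return value with that expectation. The sketch below is not part of the patch; it only illustrates the usual retry loop built on the cmpxchg() macro this header defines, and the helper name bfin_atomic_inc_return is invented for the example.

static inline unsigned long bfin_atomic_inc_return(volatile unsigned long *counter)
{
	unsigned long old, seen;

	do {
		old = *counter;	/* snapshot the current value */
		/*
		 * cmpxchg() returns whatever was in memory before the
		 * attempt; the new value was stored only if that matches
		 * the snapshot passed in as 'old'.
		 */
		seen = cmpxchg(counter, old, old + 1);
	} while (seen != old);

	return old + 1;	/* the value this CPU installed */
}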