diff options
author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
---|---|---|
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
commit | fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch) | |
tree | a57612d1888735a2ec7972891b68c1ac5ec8faea /arch/blackfin/include/asm/system.h | |
parent | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff) |
Diffstat (limited to 'arch/blackfin/include/asm/system.h')
-rw-r--r-- | arch/blackfin/include/asm/system.h | 192 |
1 file changed, 192 insertions, 0 deletions
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h new file mode 100644 index 00000000000..44bd0cced72 --- /dev/null +++ b/arch/blackfin/include/asm/system.h | |||
@@ -0,0 +1,192 @@ | |||
1 | /* | ||
2 | * Copyright 2004-2009 Analog Devices Inc. | ||
3 | * Tony Kou (tonyko@lineo.ca) | ||
4 | * | ||
5 | * Licensed under the GPL-2 or later | ||
6 | */ | ||
7 | |||
8 | #ifndef _BLACKFIN_SYSTEM_H | ||
9 | #define _BLACKFIN_SYSTEM_H | ||
10 | |||
11 | #include <linux/linkage.h> | ||
12 | #include <linux/irqflags.h> | ||
13 | #include <mach/anomaly.h> | ||
14 | #include <asm/cache.h> | ||
15 | #include <asm/pda.h> | ||
16 | #include <asm/irq.h> | ||
17 | |||
18 | /* | ||
19 | * Force strict CPU ordering. | ||
20 | */ | ||
21 | #define nop() __asm__ __volatile__ ("nop;\n\t" : : ) | ||
22 | #define smp_mb() mb() | ||
23 | #define smp_rmb() rmb() | ||
24 | #define smp_wmb() wmb() | ||
25 | #define set_mb(var, value) do { var = value; mb(); } while (0) | ||
26 | #define smp_read_barrier_depends() read_barrier_depends() | ||
27 | |||
28 | #ifdef CONFIG_SMP | ||
29 | asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value); | ||
30 | asmlinkage unsigned long __raw_xchg_2_asm(volatile void *ptr, unsigned long value); | ||
31 | asmlinkage unsigned long __raw_xchg_4_asm(volatile void *ptr, unsigned long value); | ||
32 | asmlinkage unsigned long __raw_cmpxchg_1_asm(volatile void *ptr, | ||
33 | unsigned long new, unsigned long old); | ||
34 | asmlinkage unsigned long __raw_cmpxchg_2_asm(volatile void *ptr, | ||
35 | unsigned long new, unsigned long old); | ||
36 | asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr, | ||
37 | unsigned long new, unsigned long old); | ||
38 | |||
39 | #ifdef __ARCH_SYNC_CORE_DCACHE | ||
40 | /* Force Core data cache coherence */ | ||
41 | # define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) | ||
42 | # define rmb() do { barrier(); smp_check_barrier(); } while (0) | ||
43 | # define wmb() do { barrier(); smp_mark_barrier(); } while (0) | ||
44 | # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) | ||
45 | #else | ||
46 | # define mb() barrier() | ||
47 | # define rmb() barrier() | ||
48 | # define wmb() barrier() | ||
49 | # define read_barrier_depends() do { } while (0) | ||
50 | #endif | ||
51 | |||
/*
 * Atomically exchange *ptr with x (SMP build).
 *
 * Dispatches on the operand size to the out-of-line assembly helpers,
 * which return the value previously stored at ptr.
 *
 * Fix vs. original: tmp was left uninitialized, so an unsupported
 * size (anything but 1/2/4) returned an indeterminate value -- UB.
 * Initialize it and add an explicit default, matching the UP variant
 * of __xchg further down which already does "tmp = 0".
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	unsigned long tmp = 0;

	switch (size) {
	case 1:
		tmp = __raw_xchg_1_asm(ptr, x);
		break;
	case 2:
		tmp = __raw_xchg_2_asm(ptr, x);
		break;
	case 4:
		tmp = __raw_xchg_4_asm(ptr, x);
		break;
	default:
		/* Unsupported operand size; xchg() only emits 1/2/4. */
		break;
	}

	return tmp;
}
71 | |||
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 *
 * Fix vs. original: tmp was left uninitialized, so an unsupported
 * size (anything but 1/2/4) returned an indeterminate value -- UB.
 * Initialize it and add an explicit default case.
 *
 * NOTE(review): the __raw_cmpxchg_*_asm helpers take (ptr, new, old)
 * -- new before old -- which is the reverse of this function's own
 * parameter order; the call sites below are correct as written.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long tmp = 0;

	switch (size) {
	case 1:
		tmp = __raw_cmpxchg_1_asm(ptr, new, old);
		break;
	case 2:
		tmp = __raw_cmpxchg_2_asm(ptr, new, old);
		break;
	case 4:
		tmp = __raw_cmpxchg_4_asm(ptr, new, old);
		break;
	default:
		/* Unsupported operand size; cmpxchg() only emits 1/2/4. */
		break;
	}

	return tmp;
}
/* Type-preserving front end: result has the pointee's type. */
#define cmpxchg(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
		(unsigned long)(n), sizeof(*(ptr))))
99 | |||
#else /* !CONFIG_SMP */

/* Uniprocessor: only the compiler can reorder, so barrier() suffices. */
#define mb()	barrier()
#define rmb()	barrier()
#define wmb()	barrier()
#define read_barrier_depends()	do { } while (0)

/*
 * __xg() casts a pointer to a large dummy struct so that an "m"
 * asm constraint on *__xg(ptr) tells the compiler the whole object
 * may be accessed, preventing it from caching the value around the
 * inline asm below.
 */
struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#include <mach/blackfin.h>
113 | |||
/*
 * Atomically exchange *ptr with x (UP build).
 *
 * On a uniprocessor the only source of interference is an interrupt,
 * so disabling IRQs around the load/store pair makes it atomic.
 * The "b"/"w" asm prefixes select byte/word accesses; "(z)" zero-
 * extends the loaded value into the full register.
 * Returns the value previously stored at ptr; 0 for an unsupported
 * size (anything but 1/2/4).
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	unsigned long tmp = 0;
	unsigned long flags;

	flags = hard_local_irq_save();

	switch (size) {
	case 1:
		__asm__ __volatile__
			("%0 = b%2 (z);\n\t"
			 "b%2 = %1;\n\t"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("%0 = w%2 (z);\n\t"
			 "w%2 = %1;\n\t"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("%0 = %2;\n\t"
			 "%2 = %1;\n\t"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	hard_local_irq_restore(flags);
	return tmp;
}
145 | |||
#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)				  	       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

/* Pulls in a generic cmpxchg() built on __xchg for the UP case. */
#include <asm-generic/cmpxchg.h>

#endif /* !CONFIG_SMP */

/* Type-preserving exchange; expands to the SMP or UP __xchg above. */
#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
/* Test-and-set: store 1, discard the old value. */
#define tas(ptr) ((void)xchg((ptr), 1))

#define prepare_to_switch() do { } while(0)

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing.
 */

#include <asm/l1layout.h>
#include <asm/mem_map.h>

/* Low-level context switch (assembly); returns the previous task. */
asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_struct *next);

#ifndef CONFIG_SMP
/*
 * UP: the per-task L1 scratchpad area is shared, so save the outgoing
 * task's copy and restore the incoming task's copy around resume().
 * The copy order (save prev before loading next) is essential.
 */
#define switch_to(prev,next,last) \
do {    \
	memcpy (&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \
		sizeof *L1_SCRATCH_TASK_INFO); \
	memcpy (L1_SCRATCH_TASK_INFO, &task_thread_info(next)->l1_task_info, \
		sizeof *L1_SCRATCH_TASK_INFO); \
	(last) = resume (prev, next); \
} while (0)
#else
/* SMP: each core has its own scratchpad; no copying needed. */
#define switch_to(prev, next, last) \
do {    \
	(last) = resume(prev, next); \
} while (0)
#endif

#endif	/* _BLACKFIN_SYSTEM_H */