Diffstat (limited to 'include/asm-mn10300/system.h')
-rw-r--r--  include/asm-mn10300/system.h  237
1 file changed, 0 insertions(+), 237 deletions(-)
diff --git a/include/asm-mn10300/system.h b/include/asm-mn10300/system.h
deleted file mode 100644
index 8214fb7e7fe4..000000000000
--- a/include/asm-mn10300/system.h
+++ /dev/null
@@ -1,237 +0,0 @@
/* MN10300 System definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <asm/cpu-regs.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/kernel.h>

struct task_struct;
struct thread_struct;

extern asmlinkage
struct task_struct *__switch_to(struct thread_struct *prev,
				struct thread_struct *next,
				struct task_struct *prev_task);

/* context switching is now performed out-of-line in switch_to.S */
#define switch_to(prev, next, last)					\
do {									\
	current->thread.wchan = (u_long) __builtin_return_address(0);	\
	(last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
	mb();								\
	current->thread.wchan = 0;					\
} while (0)

#define arch_align_stack(x) (x)

#define nop() asm volatile ("nop")

#endif /* !__ASSEMBLY__ */
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be
 * a nop for these.
 */

#define mb()	asm volatile ("" : : : "memory")
#define rmb()	mb()
#define wmb()	asm volatile ("" : : : "memory")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value)  do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define read_barrier_depends()		do {} while (0)
#define smp_read_barrier_depends()	do {} while (0)

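#ifndef __ASSEMBLY__
/*
 * Editor's sketch, not part of the original header: the usual
 * producer/consumer pairing of smp_wmb()/smp_rmb() above.  All of the
 * example_* names are hypothetical.
 */
static unsigned long example_payload;
static volatile unsigned long example_ready;

static inline void example_publish(unsigned long v)
{
	example_payload = v;
	smp_wmb();			/* payload store before flag store */
	example_ready = 1;
}

static inline int example_consume(unsigned long *v)
{
	if (!example_ready)
		return 0;
	smp_rmb();			/* flag load before payload load */
	*v = example_payload;
	return 1;
}
#endif /* !__ASSEMBLY__ */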
/*****************************************************************************/
/*
 * interrupt control
 * - "disabled": run in IM1/2
 *   - level 0 - GDB stub
 *   - level 1 - virtual serial DMA (if present)
 *   - level 5 - normal interrupt priority
 *   - level 6 - timer interrupt
 * - "enabled": run in IM7
 */
#ifdef CONFIG_MN10300_TTYSM
#define MN10300_CLI_LEVEL	EPSW_IM_2
#else
#define MN10300_CLI_LEVEL	EPSW_IM_1
#endif

#define local_save_flags(x)			\
do {						\
	typecheck(unsigned long, x);		\
	asm volatile(				\
		"	mov epsw,%0	\n"	\
		: "=d"(x)			\
		);				\
} while (0)

#define local_irq_disable()						\
do {									\
	asm volatile(							\
		"	and %0,epsw	\n"				\
		"	or %1,epsw	\n"				\
		"	nop		\n"				\
		"	nop		\n"				\
		"	nop		\n"				\
		:							\
		: "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL)	\
		);							\
} while (0)

#define local_irq_save(x)			\
do {						\
	local_save_flags(x);			\
	local_irq_disable();			\
} while (0)

/*
 * local_irq_enable() restores the interrupt level from
 * __mn10300_irq_enabled_epsw rather than unconditionally raising it to
 * IM7, so that enabling interrupts can't cause priority inversion
 */
#ifndef __ASSEMBLY__

extern unsigned long __mn10300_irq_enabled_epsw;

#endif

#define local_irq_enable()						\
do {									\
	unsigned long tmp;						\
									\
	asm volatile(							\
		"	mov epsw,%0	\n"				\
		"	and %1,%0	\n"				\
		"	or %2,%0	\n"				\
		"	mov %0,epsw	\n"				\
		: "=&d"(tmp)						\
		: "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw)	\
		);							\
} while (0)

#define local_irq_restore(x)			\
do {						\
	typecheck(unsigned long, x);		\
	asm volatile(				\
		"	mov %0,epsw	\n"	\
		"	nop		\n"	\
		"	nop		\n"	\
		"	nop		\n"	\
		:				\
		: "d"(x)			\
		: "memory", "cc"		\
		);				\
} while (0)

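#ifndef __ASSEMBLY__
/*
 * Editor's sketch, not part of the original header: the canonical
 * critical-section pattern built on local_irq_save()/local_irq_restore()
 * above.  example_counter and example_increment() are hypothetical names.
 */
static unsigned long example_counter;

static inline void example_increment(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* record EPSW, then mask interrupts */
	example_counter++;		/* can't be interrupted here */
	local_irq_restore(flags);	/* put EPSW back exactly as it was */
}
#endif /* !__ASSEMBLY__ */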
#define irqs_disabled()				\
({						\
	unsigned long flags;			\
	local_save_flags(flags);		\
	(flags & EPSW_IM) <= MN10300_CLI_LEVEL;	\
})

/* hook to save power by halting the CPU
 * - called from the idle loop
 * - must re-enable interrupts (which takes three instruction cycles to
 *   complete)
 */
#define safe_halt()							\
do {									\
	asm volatile("	or %0,epsw	\n"				\
		     "	nop		\n"				\
		     "	nop		\n"				\
		     "	bset %2,(%1)	\n"				\
		     :							\
		     : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)\
		     : "cc"						\
		     );							\
} while (0)

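#ifndef __ASSEMBLY__
/*
 * Editor's sketch, not part of the original header: how an idle loop
 * might drive safe_halt().  need_resched() is assumed to come from
 * <linux/sched.h>; example_idle() is a hypothetical name.
 */
static inline void example_idle(void)
{
	while (!need_resched())
		safe_halt();	/* re-enable interrupts and sleep the CPU */
}
#endif /* !__ASSEMBLY__ */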
#define STI	or EPSW_IE|EPSW_IM,epsw
#define CLI	and ~EPSW_IM,epsw; or EPSW_IE|MN10300_CLI_LEVEL,epsw; nop; nop; nop

/*****************************************************************************/
/*
 * MN10300 doesn't actually have an exchange instruction
 */
#ifndef __ASSEMBLY__

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
static inline
unsigned long __xchg(volatile unsigned long *m, unsigned long val)
{
	unsigned long retval;
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

#define xchg(ptr, v)						\
	((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),	\
				     (unsigned long)(v)))

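/*
 * Editor's sketch, not part of the original header: a minimal
 * test-and-set lock built on xchg() above.  example_lock() and
 * example_unlock() are hypothetical names.
 */
static inline void example_lock(volatile unsigned long *lock)
{
	while (xchg(lock, 1))	/* spin until we read back 0 */
		;
}

static inline void example_unlock(volatile unsigned long *lock)
{
	mb();			/* order critical-section accesses first */
	*lock = 0;
}
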
static inline unsigned long __cmpxchg(volatile unsigned long *m,
				      unsigned long old, unsigned long new)
{
	unsigned long retval;
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);
	return retval;
}

#define cmpxchg(ptr, o, n)					\
	((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr),	\
					(unsigned long)(o),	\
					(unsigned long)(n)))

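/*
 * Editor's sketch, not part of the original header: the usual
 * compare-and-swap retry loop, here a lock-free add built on cmpxchg()
 * above.  example_atomic_add() is a hypothetical name.
 */
static inline unsigned long example_atomic_add(volatile unsigned long *v,
					       unsigned long delta)
{
	unsigned long old;

	do {
		old = *v;		/* snapshot the current value */
	} while (cmpxchg(v, old, old + delta) != old);	/* retry if it moved */

	return old + delta;
}
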
#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_SYSTEM_H */