aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-ia64/intrinsics.h
diff options
context:
space:
mode:
authorTony Luck <tony.luck@intel.com>2008-08-01 13:13:32 -0400
committerTony Luck <tony.luck@intel.com>2008-08-01 13:21:21 -0400
commit7f30491ccd28627742e37899453ae20e3da8e18f (patch)
tree7291c0a26ed3a31acf9542857af3981d278f5de8 /include/asm-ia64/intrinsics.h
parent94ad374a0751f40d25e22e036c37f7263569d24c (diff)
[IA64] Move include/asm-ia64 to arch/ia64/include/asm
After moving the include files there were a few clean-ups: 1) Some files used #include <asm-ia64/xyz.h>, changed to <asm/xyz.h> 2) Some comments alerted maintainers to look at various header files to make matching updates if certain code were to be changed. Updated these comments to use the new include paths. 3) Some header files mentioned their own names in initial comments. Just deleted these self references. Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'include/asm-ia64/intrinsics.h')
-rw-r--r-- include/asm-ia64/intrinsics.h | 241
1 file changed, 0 insertions, 241 deletions
diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h
deleted file mode 100644
index 47d686dba1eb..000000000000
--- a/include/asm-ia64/intrinsics.h
+++ /dev/null
@@ -1,241 +0,0 @@
#ifndef _ASM_IA64_INTRINSICS_H
#define _ASM_IA64_INTRINSICS_H

/*
 * Compiler-dependent intrinsics.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#ifndef __ASSEMBLY__

/* include compiler specific intrinsics */
#include <asm/ia64regs.h>
#ifdef __INTEL_COMPILER
# include <asm/intel_intrin.h>
#else
# include <asm/gcc_intrin.h>
#endif

/* Read the processor status register and mask off the PSR.i (interrupt enable) bit. */
#define ia64_native_get_psr_i() (ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I)
22
/*
 * Program region registers rr0..rr4 in one go.  Each constant is the
 * base address of one of the five regions (the region number lives in
 * the top three address bits: 0x0, 0x2, 0x4, 0x6, 0x8 << 60).
 */
#define ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4)	\
do {									\
	ia64_native_set_rr(0x0000000000000000UL, (val0));		\
	ia64_native_set_rr(0x2000000000000000UL, (val1));		\
	ia64_native_set_rr(0x4000000000000000UL, (val2));		\
	ia64_native_set_rr(0x6000000000000000UL, (val3));		\
	ia64_native_set_rr(0x8000000000000000UL, (val4));		\
} while (0)
31
/*
 * Force an unresolved reference if someone tries to use
 * ia64_fetch_and_add() with a bad value.
 *
 * These functions are deliberately never defined anywhere: reaching a
 * call to them turns a bad size/increment into a link-time error
 * instead of a silent runtime misbehavior.
 */
extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
38
/*
 * IA64_FETCHADD - dispatch a fetchadd intrinsic by operand size.
 * @tmp: lvalue that receives the value of *v BEFORE the addition
 * @v:   pointer to the 4- or 8-byte object to update atomically
 * @n:   increment to apply
 * @sz:  sizeof(*v); any size other than 4 or 8 provokes a link error
 *       via __bad_size_for_ia64_fetch_and_add()
 * @sem: memory-ordering suffix pasted onto the intrinsic name (acq/rel)
 *
 * Fix vs. original: @v is now parenthesized in the casts.  The old
 * expansion "(unsigned int *) v" bound the cast to only the first
 * token of the argument, so e.g. IA64_FETCHADD(t, p + 1, ...) would
 * cast p alone and then add 1 to the converted pointer.
 */
#define IA64_FETCHADD(tmp,v,n,sz,sem)						\
({										\
	switch (sz) {								\
	      case 4:								\
	        tmp = ia64_fetchadd4_##sem((unsigned int *) (v), n);		\
		break;								\
										\
	      case 8:								\
	        tmp = ia64_fetchadd8_##sem((unsigned long *) (v), n);		\
		break;								\
										\
	      default:								\
		__bad_size_for_ia64_fetch_and_add();				\
		break;								\
	}									\
})
54
/*
 * ia64_fetchadd(i, v, sem) - atomically add i to *v with ordering
 * "sem" (acq or rel); the expression evaluates to the OLD value of *v.
 *
 * i must be one of the increments the fetchadd instruction encodes:
 * -16, -8, -4, -1, 1, 4, 8, 16.  Any other value falls through to
 * __bad_increment_for_ia64_fetch_and_add() and fails at link time.
 */
#define ia64_fetchadd(i,v,sem)						\
({									\
	__u64 _tmp;							\
	volatile __typeof__(*(v)) *_v = (v);				\
	/* Can't use a switch () here: gcc isn't always smart enough for that... */ \
	if ((i) == -16)							\
		IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);	\
	else if ((i) == -8)						\
		IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);		\
	else if ((i) == -4)						\
		IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);		\
	else if ((i) == -1)						\
		IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);		\
	else if ((i) == 1)						\
		IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);		\
	else if ((i) == 4)						\
		IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);		\
	else if ((i) == 8)						\
		IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);		\
	else if ((i) == 16)						\
		IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);		\
	else								\
		_tmp = __bad_increment_for_ia64_fetch_and_add();	\
	(__typeof__(*(v))) (_tmp);	/* return old value */		\
})
80
/* Atomically add i to *v with release semantics; unlike ia64_fetchadd(), this yields the NEW value. */
#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
82
/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalid xchg() (i.e. with an operand size
 * other than 1, 2, 4 or 8 bytes).
 */
extern void ia64_xchg_called_with_bad_pointer (void);
88
/*
 * __xchg - size-dispatched atomic exchange.
 * @x:    new value to store
 * @ptr:  address of the 1-, 2-, 4- or 8-byte object
 * @size: sizeof(*ptr); any other size resolves to the undefined
 *        ia64_xchg_called_with_bad_pointer() => link error
 *
 * The expression evaluates to the previous value of *ptr, widened to
 * unsigned long.  Use the xchg() wrapper rather than calling this
 * directly.
 *
 * Fix vs. original: @ptr and @x are now parenthesized in the
 * expansion.  The old "(__u8 *)ptr" bound the cast to only the first
 * token of the argument, so __xchg(v, p + 1, 1) would have cast p
 * alone before the addition.
 */
#define __xchg(x,ptr,size)						\
({									\
	unsigned long __xchg_result;					\
									\
	switch (size) {							\
	      case 1:							\
		__xchg_result = ia64_xchg1((__u8 *) (ptr), (x));	\
		break;							\
									\
	      case 2:							\
		__xchg_result = ia64_xchg2((__u16 *) (ptr), (x));	\
		break;							\
									\
	      case 4:							\
		__xchg_result = ia64_xchg4((__u32 *) (ptr), (x));	\
		break;							\
									\
	      case 8:							\
		__xchg_result = ia64_xchg8((__u64 *) (ptr), (x));	\
		break;							\
	      default:							\
		ia64_xchg_called_with_bad_pointer();			\
		break;							\
	}								\
	__xchg_result;							\
})
114
/* xchg(ptr, x): atomically store x in *ptr; evaluates to the old value, cast back to the pointed-to type. */
#define xchg(ptr,x)							     \
  ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
117
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

/* Advertise to generic code that this architecture supplies a real cmpxchg. */
#define __HAVE_ARCH_CMPXCHG 1
125
/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg() (i.e. with an operand
 * size other than 1, 2, 4 or 8 bytes).
 */
extern long ia64_cmpxchg_called_with_bad_pointer (void);
131
/*
 * ia64_cmpxchg - size-dispatched compare-and-exchange.
 * @sem:  memory-ordering suffix pasted onto the intrinsic name (acq/rel)
 * @ptr:  address of the 1-, 2-, 4- or 8-byte object
 * @old:  value expected in *ptr
 * @new:  replacement value
 * @size: sizeof(*ptr); any other size resolves to the undefined
 *        ia64_cmpxchg_called_with_bad_pointer() => link error
 *
 * Evaluates to the value *ptr held before the operation, cast to the
 * type of @old (compare against @old to detect success).
 *
 * Fix vs. original: @ptr and @new are now parenthesized in the
 * expansion.  The old "(__u8 *) ptr" bound the cast to only the first
 * token of the argument, miscompiling arguments like "p + 1".
 */
#define ia64_cmpxchg(sem,ptr,old,new,size)					\
({										\
	__u64 _o_, _r_;								\
										\
	/* Narrow @old to the operand width so the comparison sees a		\
	   properly truncated/zero-extended value.  */				\
	switch (size) {								\
	      case 1: _o_ = (__u8 ) (long) (old); break;			\
	      case 2: _o_ = (__u16) (long) (old); break;			\
	      case 4: _o_ = (__u32) (long) (old); break;			\
	      case 8: _o_ = (__u64) (long) (old); break;			\
	      default: break;							\
	}									\
	switch (size) {								\
	      case 1:								\
		_r_ = ia64_cmpxchg1_##sem((__u8 *) (ptr), (new), _o_);		\
		break;								\
										\
	      case 2:								\
		_r_ = ia64_cmpxchg2_##sem((__u16 *) (ptr), (new), _o_);		\
		break;								\
										\
	      case 4:								\
		_r_ = ia64_cmpxchg4_##sem((__u32 *) (ptr), (new), _o_);		\
		break;								\
										\
	      case 8:								\
		_r_ = ia64_cmpxchg8_##sem((__u64 *) (ptr), (new), _o_);		\
		break;								\
										\
	      default:								\
		_r_ = ia64_cmpxchg_called_with_bad_pointer();			\
		break;								\
	}									\
	(__typeof__(old)) _r_;							\
})
166
/* cmpxchg with explicit acquire / release ordering. */
#define cmpxchg_acq(ptr, o, n)	\
	ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
#define cmpxchg_rel(ptr, o, n)	\
	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))

/* for compatibility with other platforms: */
#define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))

/* CPU-local variants simply alias the regular ones here. */
#define cmpxchg_local cmpxchg
#define cmpxchg64_local cmpxchg64
178
/*
 * Debug aid for cmpxchg-based retry loops: put CMPXCHG_BUGCHECK_DECL
 * in the enclosing function and CMPXCHG_BUGCHECK(v) inside the loop.
 * After 128 iterations it prints the caller's IP and the contended
 * word, then breaks out of the do/while so the loop can continue.
 * Both macros compile to nothing unless CONFIG_IA64_DEBUG_CMPXCHG.
 */
#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
# define CMPXCHG_BUGCHECK(v)						\
  do {									\
	if (_cmpxchg_bugcheck_count-- <= 0) {				\
		void *ip;						\
		/* local prototype avoids pulling in printk's header here */ \
		extern int printk(const char *fmt, ...);		\
		ip = (void *) ia64_getreg(_IA64_REG_IP);		\
		printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
		break;							\
	}								\
  } while (0)
#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
# define CMPXCHG_BUGCHECK_DECL
# define CMPXCHG_BUGCHECK(v)
#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
195
#endif /* !__ASSEMBLY__ */

/* Paravirtualized privileged-operation hooks (kernel builds only). */
#ifdef __KERNEL__
#include <asm/paravirt_privop.h>
#endif
201
#ifndef __ASSEMBLY__
/*
 * Select the implementation behind the generic ia64_* names below:
 * with CONFIG_PARAVIRT in a kernel build they route through the
 * pv_cpu_ops table / paravirt_* macros; otherwise they bind directly
 * to the ia64_native_* intrinsics.
 */
#if defined(CONFIG_PARAVIRT) && defined(__KERNEL__)
#define IA64_INTRINSIC_API(name)	pv_cpu_ops.name
#define IA64_INTRINSIC_MACRO(name)	paravirt_ ## name
#else
#define IA64_INTRINSIC_API(name)	ia64_native_ ## name
#define IA64_INTRINSIC_MACRO(name)	ia64_native_ ## name
#endif

/************************************************/
/* Instructions paravirtualized for correctness */
/************************************************/
/* fc, thash, get_cpuid, get_pmd, get_eflags, set_eflags */
/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
 * is not currently used (though it may be in a long-format VHPT system!)
 */
#define ia64_fc				IA64_INTRINSIC_API(fc)
#define ia64_thash			IA64_INTRINSIC_API(thash)
#define ia64_get_cpuid			IA64_INTRINSIC_API(get_cpuid)
#define ia64_get_pmd			IA64_INTRINSIC_API(get_pmd)


/************************************************/
/* Instructions paravirtualized for performance */
/************************************************/
#define ia64_ssm			IA64_INTRINSIC_MACRO(ssm)
#define ia64_rsm			IA64_INTRINSIC_MACRO(rsm)
#define ia64_getreg			IA64_INTRINSIC_API(getreg)
#define ia64_setreg			IA64_INTRINSIC_API(setreg)
#define ia64_set_rr			IA64_INTRINSIC_API(set_rr)
#define ia64_get_rr			IA64_INTRINSIC_API(get_rr)
#define ia64_ptcga			IA64_INTRINSIC_API(ptcga)
#define ia64_get_psr_i			IA64_INTRINSIC_API(get_psr_i)
#define ia64_intrin_local_irq_restore	\
	IA64_INTRINSIC_API(intrin_local_irq_restore)
#define ia64_set_rr0_to_rr4		IA64_INTRINSIC_API(set_rr0_to_rr4)

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_INTRINSICS_H */