diff options
Diffstat (limited to 'arch/ia64/include/uapi/asm/intrinsics.h')
| -rw-r--r-- | arch/ia64/include/uapi/asm/intrinsics.h | 124 |
1 file changed, 124 insertions, 0 deletions
diff --git a/arch/ia64/include/uapi/asm/intrinsics.h b/arch/ia64/include/uapi/asm/intrinsics.h new file mode 100644 index 000000000000..5829978ff466 --- /dev/null +++ b/arch/ia64/include/uapi/asm/intrinsics.h | |||
| @@ -0,0 +1,124 @@ | |||
| 1 | /* | ||
| 2 | * Compiler-dependent intrinsics. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2002-2003 Hewlett-Packard Co | ||
| 5 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
| 6 | */ | ||
| 7 | #ifndef _UAPI_ASM_IA64_INTRINSICS_H | ||
| 8 | #define _UAPI_ASM_IA64_INTRINSICS_H | ||
| 9 | |||
| 10 | |||
| 11 | #ifndef __ASSEMBLY__ | ||
| 12 | |||
| 13 | #include <linux/types.h> | ||
| 14 | /* include compiler specific intrinsics */ | ||
| 15 | #include <asm/ia64regs.h> | ||
| 16 | #ifdef __INTEL_COMPILER | ||
| 17 | # include <asm/intel_intrin.h> | ||
| 18 | #else | ||
| 19 | # include <asm/gcc_intrin.h> | ||
| 20 | #endif | ||
| 21 | #include <asm/cmpxchg.h> | ||
| 22 | |||
/*
 * Read the processor status register and mask it with IA64_PSR_I,
 * i.e. test the PSR.i (interruption-enable) bit.  Non-zero result
 * means interrupt delivery is currently enabled.
 */
#define ia64_native_get_psr_i()	(ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I)

/*
 * Load region registers rr0 through rr4 in one go.  The first argument
 * of each ia64_native_set_rr() call is an address whose top bits select
 * the region (0x0, 0x2, 0x4, 0x6, 0x8 in the uppermost nibble), so the
 * five stores hit regions 0..4 in order.
 */
#define ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4)	\
do {									\
	ia64_native_set_rr(0x0000000000000000UL, (val0));		\
	ia64_native_set_rr(0x2000000000000000UL, (val1));		\
	ia64_native_set_rr(0x4000000000000000UL, (val2));		\
	ia64_native_set_rr(0x6000000000000000UL, (val3));		\
	ia64_native_set_rr(0x8000000000000000UL, (val4));		\
} while (0)

/*
 * Force an unresolved reference if someone tries to use
 * ia64_fetch_and_add() with a bad value.  These functions are never
 * defined anywhere: calling them is a deliberate link-time error, which
 * is how a compile-time-constant check is enforced from a macro.
 */
extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);

/*
 * Expand to a 4- or 8-byte fetchadd of n on *v, storing the fetched
 * (old) value in tmp.  sem is token-pasted onto the intrinsic name to
 * select the ordering completer (e.g. "rel", as used by
 * ia64_fetch_and_add() below).  sz must be a compile-time 4 or 8; the
 * compiler folds the switch away, and any other size leaves a call to
 * the intentionally-undefined __bad_size_for_ia64_fetch_and_add().
 */
#define IA64_FETCHADD(tmp,v,n,sz,sem)						\
({										\
	switch (sz) {								\
	      case 4:								\
	        tmp = ia64_fetchadd4_##sem((unsigned int *) v, n);		\
		break;								\
										\
	      case 8:								\
	        tmp = ia64_fetchadd8_##sem((unsigned long *) v, n);		\
		break;								\
										\
	      default:								\
		__bad_size_for_ia64_fetch_and_add();				\
	}									\
})

/*
 * Atomically add the compile-time-constant increment i to *v and return
 * the OLD value, cast back to the type of *v.  Only the increments the
 * hardware fetchadd instruction supports (-16, -8, -4, -1, 1, 4, 8, 16)
 * resolve to real code; any other constant falls through to the
 * intentionally-undefined __bad_increment_for_ia64_fetch_and_add(),
 * producing a link error.  sem is the ordering completer forwarded to
 * IA64_FETCHADD.  Note (i) may be evaluated several times, so it must
 * be side-effect free.
 */
#define ia64_fetchadd(i,v,sem)								\
({											\
	__u64 _tmp;									\
	volatile __typeof__(*(v)) *_v = (v);						\
	/* Can't use a switch () here: gcc isn't always smart enough for that... */	\
	if ((i) == -16)									\
		IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);			\
	else if ((i) == -8)								\
		IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);				\
	else if ((i) == -4)								\
		IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);				\
	else if ((i) == -1)								\
		IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);				\
	else if ((i) == 1)								\
		IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);				\
	else if ((i) == 4)								\
		IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);				\
	else if ((i) == 8)								\
		IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);				\
	else if ((i) == 16)								\
		IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);				\
	else										\
		_tmp = __bad_increment_for_ia64_fetch_and_add();			\
	(__typeof__(*(v))) (_tmp);	/* return old value */				\
})

/* Like ia64_fetchadd() but yields the NEW value, using release ordering. */
#define ia64_fetch_and_add(i,v)	(ia64_fetchadd(i, v, rel) + (i)) /* return new value */

| 85 | #endif | ||
| 86 | |||
| 87 | |||
| 88 | #ifndef __ASSEMBLY__ | ||
| 89 | |||
/*
 * Map the generic ia64_* intrinsic names onto their ia64_native_*
 * implementations.  NOTE(review): the API/MACRO indirection presumably
 * exists so a paravirtualized build can redefine these mappings to
 * point at hypervisor-aware variants — confirm against the kernel's
 * paravirt headers.
 */
#define IA64_INTRINSIC_API(name)	ia64_native_ ## name
#define IA64_INTRINSIC_MACRO(name)	ia64_native_ ## name


/************************************************/
/* Instructions paravirtualized for correctness */
/************************************************/
/* fc, thash, get_cpuid, get_pmd, get_eflags, set_eflags */
/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
 * is not currently used (though it may be in a long-format VHPT system!)
 */
#define ia64_fc				IA64_INTRINSIC_API(fc)
#define ia64_thash			IA64_INTRINSIC_API(thash)
#define ia64_get_cpuid			IA64_INTRINSIC_API(get_cpuid)
#define ia64_get_pmd			IA64_INTRINSIC_API(get_pmd)


/************************************************/
/* Instructions paravirtualized for performance */
/************************************************/
#define ia64_ssm			IA64_INTRINSIC_MACRO(ssm)
#define ia64_rsm			IA64_INTRINSIC_MACRO(rsm)
#define ia64_getreg			IA64_INTRINSIC_MACRO(getreg)
#define ia64_setreg			IA64_INTRINSIC_API(setreg)
#define ia64_set_rr			IA64_INTRINSIC_API(set_rr)
#define ia64_get_rr			IA64_INTRINSIC_API(get_rr)
#define ia64_ptcga			IA64_INTRINSIC_API(ptcga)
#define ia64_get_psr_i			IA64_INTRINSIC_API(get_psr_i)
#define ia64_intrin_local_irq_restore	\
	IA64_INTRINSIC_API(intrin_local_irq_restore)
#define ia64_set_rr0_to_rr4		IA64_INTRINSIC_API(set_rr0_to_rr4)

| 121 | |||
| 122 | #endif /* !__ASSEMBLY__ */ | ||
| 123 | |||
| 124 | #endif /* _UAPI_ASM_IA64_INTRINSICS_H */ | ||
