-rw-r--r--   arch/ia64/sn/kernel/Makefile      3
-rw-r--r--   arch/ia64/sn/kernel/pio_phys.S   71
-rw-r--r--   include/asm-ia64/intel_intrin.h 134
-rw-r--r--   include/asm-ia64/sn/rw_mmr.h     56
4 files changed, 95 insertions, 169 deletions
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 3e9b4eea7418..ab9c48c88012 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -10,7 +10,8 @@
 CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
 
 obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
-	 huberror.o io_init.o iomv.o klconflib.o sn2/
+	 huberror.o io_init.o iomv.o klconflib.o pio_phys.o \
+	 sn2/
 obj-$(CONFIG_IA64_GENERIC) += machvec.o
 obj-$(CONFIG_SGI_TIOCX) += tiocx.o
 obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o
diff --git a/arch/ia64/sn/kernel/pio_phys.S b/arch/ia64/sn/kernel/pio_phys.S
new file mode 100644
index 000000000000..3c7d48d6ecb8
--- /dev/null
+++ b/arch/ia64/sn/kernel/pio_phys.S
@@ -0,0 +1,71 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ *
+ * This file contains macros used to access MMR registers via
+ * uncached physical addresses.
+ *	pio_phys_read_mmr - read an MMR
+ *	pio_phys_write_mmr - write an MMR
+ *	pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
+ *		Second MMR will be skipped if address is NULL
+ *
+ * Addresses passed to these routines should be uncached physical addresses
+ *	ie., 0x80000....
+ */
+
+
+
+#include <asm/asmmacro.h>
+#include <asm/page.h>
+
+GLOBAL_ENTRY(pio_phys_read_mmr)
+	.prologue
+	.regstk 1,0,0,0
+	.body
+	mov r2=psr
+	rsm psr.i | psr.dt
+	;;
+	srlz.d
+	ld8.acq r8=[r32]
+	;;
+	mov psr.l=r2;;
+	srlz.d
+	br.ret.sptk.many rp
+END(pio_phys_read_mmr)
+
+GLOBAL_ENTRY(pio_phys_write_mmr)
+	.prologue
+	.regstk 2,0,0,0
+	.body
+	mov r2=psr
+	rsm psr.i | psr.dt
+	;;
+	srlz.d
+	st8.rel [r32]=r33
+	;;
+	mov psr.l=r2;;
+	srlz.d
+	br.ret.sptk.many rp
+END(pio_phys_write_mmr)
+
+GLOBAL_ENTRY(pio_atomic_phys_write_mmrs)
+	.prologue
+	.regstk 4,0,0,0
+	.body
+	mov r2=psr
+	cmp.ne p9,p0=r34,r0;
+	rsm psr.i | psr.dt | psr.ic
+	;;
+	srlz.d
+	st8.rel [r32]=r33
+(p9)	st8.rel [r34]=r35
+	;;
+	mov psr.l=r2;;
+	srlz.d
+	br.ret.sptk.many rp
+END(pio_atomic_phys_write_mmrs)
+
+
diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h
index a7122d850177..d069b6acddce 100644
--- a/include/asm-ia64/intel_intrin.h
+++ b/include/asm-ia64/intel_intrin.h
@@ -5,113 +5,10 @@
  *
  * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
  * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ * Copyright (C) 2005,2006 Hongjiu Lu <hongjiu.lu@intel.com>
  *
  */
-#include <asm/types.h>
-
-void __lfetch(int lfhint, void *y);
-void __lfetch_excl(int lfhint, void *y);
-void __lfetch_fault(int lfhint, void *y);
-void __lfetch_fault_excl(int lfhint, void *y);
-
-/* In the following, whichFloatReg should be an integer from 0-127 */
-void __ldfs(const int whichFloatReg, void *src);
-void __ldfd(const int whichFloatReg, void *src);
-void __ldfe(const int whichFloatReg, void *src);
-void __ldf8(const int whichFloatReg, void *src);
-void __ldf_fill(const int whichFloatReg, void *src);
-void __stfs(void *dst, const int whichFloatReg);
-void __stfd(void *dst, const int whichFloatReg);
-void __stfe(void *dst, const int whichFloatReg);
-void __stf8(void *dst, const int whichFloatReg);
-void __stf_spill(void *dst, const int whichFloatReg);
-
-void __st1_rel(void *dst, const __s8 value);
-void __st2_rel(void *dst, const __s16 value);
-void __st4_rel(void *dst, const __s32 value);
-void __st8_rel(void *dst, const __s64 value);
-__u8 __ld1_acq(void *src);
-__u16 __ld2_acq(void *src);
-__u32 __ld4_acq(void *src);
-__u64 __ld8_acq(void *src);
-
-__u64 __fetchadd4_acq(__u32 *addend, const int increment);
-__u64 __fetchadd4_rel(__u32 *addend, const int increment);
-__u64 __fetchadd8_acq(__u64 *addend, const int increment);
-__u64 __fetchadd8_rel(__u64 *addend, const int increment);
-
-__u64 __getf_exp(double d);
-
-/* OS Related Itanium(R) Intrinsics */
-
-/* The names to use for whichReg and whichIndReg below come from
-   the include file asm/ia64regs.h */
-
-__u64 __getIndReg(const int whichIndReg, __s64 index);
-__u64 __getReg(const int whichReg);
-
-void __setIndReg(const int whichIndReg, __s64 index, __u64 value);
-void __setReg(const int whichReg, __u64 value);
-
-void __mf(void);
-void __mfa(void);
-void __synci(void);
-void __itcd(__s64 pa);
-void __itci(__s64 pa);
-void __itrd(__s64 whichTransReg, __s64 pa);
-void __itri(__s64 whichTransReg, __s64 pa);
-void __ptce(__s64 va);
-void __ptcl(__s64 va, __s64 pagesz);
-void __ptcg(__s64 va, __s64 pagesz);
-void __ptcga(__s64 va, __s64 pagesz);
-void __ptri(__s64 va, __s64 pagesz);
-void __ptrd(__s64 va, __s64 pagesz);
-void __invala (void);
-void __invala_gr(const int whichGeneralReg /* 0-127 */ );
-void __invala_fr(const int whichFloatReg /* 0-127 */ );
-void __nop(const int);
-void __fc(__u64 *addr);
-void __sum(int mask);
-void __rum(int mask);
-void __ssm(int mask);
-void __rsm(int mask);
-__u64 __thash(__s64);
-__u64 __ttag(__s64);
-__s64 __tpa(__s64);
-
-/* Intrinsics for implementing get/put_user macros */
-void __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val);
-void __ld_user(const char *tableName, __u64 addr, char size, char relocType);
-
-/* This intrinsic does not generate code, it creates a barrier across which
- * the compiler will not schedule data access instructions.
- */
-void __memory_barrier(void);
-
-void __isrlz(void);
-void __dsrlz(void);
-
-__u64 _m64_mux1(__u64 a, const int n);
-__u64 __thash(__u64);
-
-/* Lock and Atomic Operation Related Intrinsics */
-__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value);
-__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value);
-__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value);
-__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value);
-
-__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp);
-
-__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len);
-__s64 _m64_shrp(__s64 a, __s64 b, const int count);
-__s64 _m64_popcnt(__s64 a);
+#include <ia64intrin.h>
 
 #define ia64_barrier()		__memory_barrier()
 
@@ -122,15 +19,16 @@ __s64 _m64_popcnt(__s64 a);
 #define ia64_getreg		__getReg
 #define ia64_setreg		__setReg
 
-#define ia64_hint(x)
+#define ia64_hint		__hint
+#define ia64_hint_pause		__hint_pause
 
-#define ia64_mux1_brcst		0
-#define ia64_mux1_mix		8
-#define ia64_mux1_shuf		9
-#define ia64_mux1_alt		10
-#define ia64_mux1_rev		11
+#define ia64_mux1_brcst		_m64_mux1_brcst
+#define ia64_mux1_mix		_m64_mux1_mix
+#define ia64_mux1_shuf		_m64_mux1_shuf
+#define ia64_mux1_alt		_m64_mux1_alt
+#define ia64_mux1_rev		_m64_mux1_rev
 
-#define ia64_mux1		_m64_mux1
+#define ia64_mux1(x,v)		_m_to_int64(_m64_mux1(_m_from_int64(x), (v)))
 #define ia64_popcnt		_m64_popcnt
 #define ia64_getf_exp		__getf_exp
 #define ia64_shrp		_m64_shrp
@@ -158,7 +56,7 @@ __s64 _m64_popcnt(__s64 a);
 #define ia64_stf8		__stf8
 #define ia64_stf_spill		__stf_spill
 
-#define ia64_mf		__mf
+#define ia64_mf			__mf
 #define ia64_mfa		__mfa
 
 #define ia64_fetchadd4_acq	__fetchadd4_acq
@@ -234,10 +132,10 @@ __s64 _m64_popcnt(__s64 a);
 
 /* Values for lfhint in __lfetch and __lfetch_fault */
 
-#define ia64_lfhint_none	0
-#define ia64_lfhint_nt1		1
-#define ia64_lfhint_nt2		2
-#define ia64_lfhint_nta		3
+#define ia64_lfhint_none	__lfhint_none
+#define ia64_lfhint_nt1		__lfhint_nt1
+#define ia64_lfhint_nt2		__lfhint_nt2
+#define ia64_lfhint_nta		__lfhint_nta
 
 #define ia64_lfetch		__lfetch
 #define ia64_lfetch_excl	__lfetch_excl
@@ -254,4 +152,6 @@ do {						\
 	}					\
 } while (0)
 
+#define __builtin_trap()	__break(0);
+
 #endif /* _ASM_IA64_INTEL_INTRIN_H */
diff --git a/include/asm-ia64/sn/rw_mmr.h b/include/asm-ia64/sn/rw_mmr.h
index f40fd1a5510d..2d78f4c5a45e 100644
--- a/include/asm-ia64/sn/rw_mmr.h
+++ b/include/asm-ia64/sn/rw_mmr.h
@@ -3,15 +3,14 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2002-2006 Silicon Graphics, Inc. All Rights Reserved.
  */
 #ifndef _ASM_IA64_SN_RW_MMR_H
 #define _ASM_IA64_SN_RW_MMR_H
 
 
 /*
- * This file contains macros used to access MMR registers via
- * uncached physical addresses.
+ * This file that access MMRs via uncached physical addresses.
  * pio_phys_read_mmr - read an MMR
  * pio_phys_write_mmr - write an MMR
  * pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
@@ -22,53 +21,8 @@
  */
 
 
-extern inline long
-pio_phys_read_mmr(volatile long *mmr)
-{
-	long val;
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt;;"
-	     "srlz.i;;"
-	     "ld8.acq %0=[%1];;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     : "=r"(val)
-	     : "r"(mmr)
-	     : "r2");
-	return val;
-}
-
-
-
-extern inline void
-pio_phys_write_mmr(volatile long *mmr, long val)
-{
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt;;"
-	     "srlz.i;;"
-	     "st8.rel [%0]=%1;;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     :: "r"(mmr), "r"(val)
-	     : "r2", "memory");
-}
-
-extern inline void
-pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2)
-{
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt | psr.ic;;"
-	     "cmp.ne p9,p0=%2,r0;"
-	     "srlz.i;;"
-	     "st8.rel [%0]=%1;"
-	     "(p9) st8.rel [%2]=%3;;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     :: "r"(mmr1), "r"(val1), "r"(mmr2), "r"(val2)
-	     : "p9", "r2", "memory");
-}
+extern long pio_phys_read_mmr(volatile long *mmr);
+extern void pio_phys_write_mmr(volatile long *mmr, long val);
+extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);
 
 #endif /* _ASM_IA64_SN_RW_MMR_H */
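
Editor's note (not part of the patch): the rw_mmr.h hunk above replaces the old inline-asm helpers with extern prototypes backed by the new out-of-line routines in arch/ia64/sn/kernel/pio_phys.S. The minimal sketch below shows how a caller might use them, assuming the MMR pointers are already uncached physical addresses (0x8000... form) obtained elsewhere in the SN code; the function and variable names (show_pio_phys, mmr_a, mmr_b) are hypothetical.

#include <linux/stddef.h>
#include <asm/sn/rw_mmr.h>

static void show_pio_phys(volatile long *mmr_a, volatile long *mmr_b)
{
	long val;

	/* Read one MMR: ld8.acq performed with psr.i and psr.dt cleared. */
	val = pio_phys_read_mmr(mmr_a);

	/* Write it back: st8.rel under the same protection. */
	pio_phys_write_mmr(mmr_a, val | 1L);

	/* Write two MMRs back to back with psr.ic also cleared; per the
	 * header comment, a NULL second address skips the second store. */
	pio_atomic_phys_write_mmrs(mmr_a, 0L, mmr_b, 1L);
	pio_atomic_phys_write_mmrs(mmr_a, 0L, NULL, 0L);
}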
