author	Tony Luck <tony.luck@intel.com>	2006-03-21 11:22:17 -0500
committer	Tony Luck <tony.luck@intel.com>	2006-03-21 11:22:17 -0500
commit	ae02e964b669f19fb08d953032463ab7dc6f79eb (patch)
tree	e02ff418110b06c5b7cac8302f9930573f2f35b0
parent	409761bb6a06bd61e2d8e27a1af534371d9537ed (diff)
parent	dcc1dd2366a7c355fd8b6543c52685b864a2044f (diff)
Pull icc-cleanup into release branch
-rw-r--r--	arch/ia64/sn/kernel/Makefile	3
-rw-r--r--	arch/ia64/sn/kernel/pio_phys.S	71
-rw-r--r--	include/asm-ia64/intel_intrin.h	134
-rw-r--r--	include/asm-ia64/sn/rw_mmr.h	56
4 files changed, 95 insertions, 169 deletions
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 3e9b4eea7418..ab9c48c88012 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -10,7 +10,8 @@
 CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
 
 obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
-	 huberror.o io_init.o iomv.o klconflib.o sn2/
+	 huberror.o io_init.o iomv.o klconflib.o pio_phys.o \
+	 sn2/
 obj-$(CONFIG_IA64_GENERIC)      += machvec.o
 obj-$(CONFIG_SGI_TIOCX)		+= tiocx.o
 obj-$(CONFIG_IA64_SGI_SN_XP)	+= xp.o
diff --git a/arch/ia64/sn/kernel/pio_phys.S b/arch/ia64/sn/kernel/pio_phys.S
new file mode 100644
index 000000000000..3c7d48d6ecb8
--- /dev/null
+++ b/arch/ia64/sn/kernel/pio_phys.S
@@ -0,0 +1,71 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ *
+ * This file contains macros used to access MMR registers via
+ * uncached physical addresses.
+ *	pio_phys_read_mmr  - read an MMR
+ *	pio_phys_write_mmr - write an MMR
+ *	pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
+ *		Second MMR will be skipped if address is NULL
+ *
+ *	Addresses passed to these routines should be uncached physical addresses
+ *	ie., 0x80000....
+ */
+
+
+
+#include <asm/asmmacro.h>
+#include <asm/page.h>
+
+GLOBAL_ENTRY(pio_phys_read_mmr)
+	.prologue
+	.regstk 1,0,0,0
+	.body
+	mov r2=psr
+	rsm psr.i | psr.dt
+	;;
+	srlz.d
+	ld8.acq r8=[r32]
+	;;
+	mov psr.l=r2;;
+	srlz.d
+	br.ret.sptk.many rp
+END(pio_phys_read_mmr)
+
+GLOBAL_ENTRY(pio_phys_write_mmr)
+	.prologue
+	.regstk 2,0,0,0
+	.body
+	mov r2=psr
+	rsm psr.i | psr.dt
+	;;
+	srlz.d
+	st8.rel [r32]=r33
+	;;
+	mov psr.l=r2;;
+	srlz.d
+	br.ret.sptk.many rp
+END(pio_phys_write_mmr)
+
+GLOBAL_ENTRY(pio_atomic_phys_write_mmrs)
+	.prologue
+	.regstk 4,0,0,0
+	.body
+	mov r2=psr
+	cmp.ne p9,p0=r34,r0;
+	rsm psr.i | psr.dt | psr.ic
+	;;
+	srlz.d
+	st8.rel [r32]=r33
+(p9)	st8.rel [r34]=r35
+	;;
+	mov psr.l=r2;;
+	srlz.d
+	br.ret.sptk.many rp
+END(pio_atomic_phys_write_mmrs)
+
+
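Each routine above brackets its MMR access with rsm psr.i | psr.dt (the atomic variant also clears psr.ic), so the ld8.acq/st8.rel go straight to the uncached physical window before psr is restored. A minimal caller sketch in C, assuming the declarations from include/asm-ia64/sn/rw_mmr.h below; SOME_SHUB_MMR_PA is a made-up example address, not a real SHub register:

	/*
	 * Hypothetical caller, for illustration only.  The 0x8000...
	 * constant is invented; real callers derive the uncached
	 * physical address of an actual SHub MMR.
	 */
	#include <asm/sn/rw_mmr.h>

	#define SOME_SHUB_MMR_PA	((volatile long *)0x8000000000000110UL)

	static void example_mmr_access(void)
	{
		/* read with acquire semantics (ld8.acq path) */
		long old = pio_phys_read_mmr(SOME_SHUB_MMR_PA);

		/* write back with release semantics (st8.rel path) */
		pio_phys_write_mmr(SOME_SHUB_MMR_PA, old | 0x1);
	}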
diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h
index a7122d850177..d069b6acddce 100644
--- a/include/asm-ia64/intel_intrin.h
+++ b/include/asm-ia64/intel_intrin.h
@@ -5,113 +5,10 @@
  *
  * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
  * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ * Copyright (C) 2005,2006 Hongjiu Lu <hongjiu.lu@intel.com>
  *
  */
-#include <asm/types.h>
-
-void __lfetch(int lfhint, void *y);
-void __lfetch_excl(int lfhint, void *y);
-void __lfetch_fault(int lfhint, void *y);
-void __lfetch_fault_excl(int lfhint, void *y);
-
-/* In the following, whichFloatReg should be an integer from 0-127 */
-void __ldfs(const int whichFloatReg, void *src);
-void __ldfd(const int whichFloatReg, void *src);
-void __ldfe(const int whichFloatReg, void *src);
-void __ldf8(const int whichFloatReg, void *src);
-void __ldf_fill(const int whichFloatReg, void *src);
-void __stfs(void *dst, const int whichFloatReg);
-void __stfd(void *dst, const int whichFloatReg);
-void __stfe(void *dst, const int whichFloatReg);
-void __stf8(void *dst, const int whichFloatReg);
-void __stf_spill(void *dst, const int whichFloatReg);
-
-void __st1_rel(void *dst, const __s8 value);
-void __st2_rel(void *dst, const __s16 value);
-void __st4_rel(void *dst, const __s32 value);
-void __st8_rel(void *dst, const __s64 value);
-__u8 __ld1_acq(void *src);
-__u16 __ld2_acq(void *src);
-__u32 __ld4_acq(void *src);
-__u64 __ld8_acq(void *src);
-
-__u64 __fetchadd4_acq(__u32 *addend, const int increment);
-__u64 __fetchadd4_rel(__u32 *addend, const int increment);
-__u64 __fetchadd8_acq(__u64 *addend, const int increment);
-__u64 __fetchadd8_rel(__u64 *addend, const int increment);
-
-__u64 __getf_exp(double d);
-
-/* OS Related Itanium(R) Intrinsics */
-
-/* The names to use for whichReg and whichIndReg below come from
-   the include file asm/ia64regs.h */
-
-__u64 __getIndReg(const int whichIndReg, __s64 index);
-__u64 __getReg(const int whichReg);
-
-void __setIndReg(const int whichIndReg, __s64 index, __u64 value);
-void __setReg(const int whichReg, __u64 value);
-
-void __mf(void);
-void __mfa(void);
-void __synci(void);
-void __itcd(__s64 pa);
-void __itci(__s64 pa);
-void __itrd(__s64 whichTransReg, __s64 pa);
-void __itri(__s64 whichTransReg, __s64 pa);
-void __ptce(__s64 va);
-void __ptcl(__s64 va, __s64 pagesz);
-void __ptcg(__s64 va, __s64 pagesz);
-void __ptcga(__s64 va, __s64 pagesz);
-void __ptri(__s64 va, __s64 pagesz);
-void __ptrd(__s64 va, __s64 pagesz);
-void __invala (void);
-void __invala_gr(const int whichGeneralReg /* 0-127 */ );
-void __invala_fr(const int whichFloatReg /* 0-127 */ );
-void __nop(const int);
-void __fc(__u64 *addr);
-void __sum(int mask);
-void __rum(int mask);
-void __ssm(int mask);
-void __rsm(int mask);
-__u64 __thash(__s64);
-__u64 __ttag(__s64);
-__s64 __tpa(__s64);
-
-/* Intrinsics for implementing get/put_user macros */
-void __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val);
-void __ld_user(const char *tableName, __u64 addr, char size, char relocType);
-
-/* This intrinsic does not generate code, it creates a barrier across which
- * the compiler will not schedule data access instructions.
- */
-void __memory_barrier(void);
-
-void __isrlz(void);
-void __dsrlz(void);
-
-__u64 _m64_mux1(__u64 a, const int n);
-__u64 __thash(__u64);
-
-/* Lock and Atomic Operation Related Intrinsics */
-__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value);
-__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value);
-__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value);
-__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value);
-
-__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp);
-
-__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len);
-__s64 _m64_shrp(__s64 a, __s64 b, const int count);
-__s64 _m64_popcnt(__s64 a);
+#include <ia64intrin.h>
 
 #define ia64_barrier()		__memory_barrier()
 
@@ -122,15 +19,16 @@ __s64 _m64_popcnt(__s64 a);
 #define ia64_getreg		__getReg
 #define ia64_setreg		__setReg
 
-#define ia64_hint(x)
+#define ia64_hint		__hint
+#define ia64_hint_pause		__hint_pause
 
-#define ia64_mux1_brcst	0
-#define ia64_mux1_mix		8
-#define ia64_mux1_shuf		9
-#define ia64_mux1_alt		10
-#define ia64_mux1_rev		11
+#define ia64_mux1_brcst	_m64_mux1_brcst
+#define ia64_mux1_mix		_m64_mux1_mix
+#define ia64_mux1_shuf		_m64_mux1_shuf
+#define ia64_mux1_alt		_m64_mux1_alt
+#define ia64_mux1_rev		_m64_mux1_rev
 
-#define ia64_mux1		_m64_mux1
+#define ia64_mux1(x,v)	_m_to_int64(_m64_mux1(_m_from_int64(x), (v)))
 #define ia64_popcnt		_m64_popcnt
 #define ia64_getf_exp		__getf_exp
 #define ia64_shrp		_m64_shrp
@@ -158,7 +56,7 @@ __s64 _m64_popcnt(__s64 a);
 #define ia64_stf8		__stf8
 #define ia64_stf_spill		__stf_spill
 
-#define ia64_mf	__mf
+#define ia64_mf		__mf
 #define ia64_mfa		__mfa
 
 #define ia64_fetchadd4_acq	__fetchadd4_acq
@@ -234,10 +132,10 @@ __s64 _m64_popcnt(__s64 a);
 
 /* Values for lfhint in __lfetch and __lfetch_fault */
 
-#define ia64_lfhint_none	0
-#define ia64_lfhint_nt1	1
-#define ia64_lfhint_nt2	2
-#define ia64_lfhint_nta	3
+#define ia64_lfhint_none	__lfhint_none
+#define ia64_lfhint_nt1	__lfhint_nt1
+#define ia64_lfhint_nt2	__lfhint_nt2
+#define ia64_lfhint_nta	__lfhint_nta
 
 #define ia64_lfetch		__lfetch
 #define ia64_lfetch_excl	__lfetch_excl
@@ -254,4 +152,6 @@ do {						\
 	} \
 } while (0)
 
+#define __builtin_trap()	__break(0);
+
 #endif /* _ASM_IA64_INTEL_INTRIN_H */
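With icc, ia64_mux1 is now routed through the compiler's _m64_mux1 intrinsic, converting via _m_from_int64/_m_to_int64 instead of passing raw numeric permutation codes. As a rough reference for what two of the mux1 byte permutations compute, a hedged C model (illustration only; on real hardware this is a single mux1 instruction, and the @mix/@shuf/@alt forms use other fixed byte patterns described in the Itanium ISA manual):

	#include <stdint.h>

	/* @brcst: replicate the least-significant byte across all 8 bytes */
	static uint64_t mux1_brcst(uint64_t x)
	{
		return (x & 0xffULL) * 0x0101010101010101ULL;
	}

	/* @rev: reverse the byte order of the 64-bit value */
	static uint64_t mux1_rev(uint64_t x)
	{
		uint64_t r = 0;
		for (int i = 0; i < 8; i++)
			r = (r << 8) | ((x >> (8 * i)) & 0xff);
		return r;
	}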
diff --git a/include/asm-ia64/sn/rw_mmr.h b/include/asm-ia64/sn/rw_mmr.h
index f40fd1a5510d..2d78f4c5a45e 100644
--- a/include/asm-ia64/sn/rw_mmr.h
+++ b/include/asm-ia64/sn/rw_mmr.h
@@ -3,15 +3,14 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2002-2006 Silicon Graphics, Inc. All Rights Reserved.
  */
 #ifndef _ASM_IA64_SN_RW_MMR_H
 #define _ASM_IA64_SN_RW_MMR_H
 
 
 /*
- * This file contains macros used to access MMR registers via
- * uncached physical addresses.
+ * This file declares routines that access MMRs via uncached physical addresses.
  *	pio_phys_read_mmr  - read an MMR
  *	pio_phys_write_mmr - write an MMR
  *	pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
@@ -22,53 +21,8 @@
  */
 
 
-extern inline long
-pio_phys_read_mmr(volatile long *mmr)
-{
-	long val;
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt;;"
-	     "srlz.i;;"
-	     "ld8.acq %0=[%1];;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     : "=r"(val)
-	     : "r"(mmr)
-	     : "r2");
-	return val;
-}
-
-
-
-extern inline void
-pio_phys_write_mmr(volatile long *mmr, long val)
-{
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt;;"
-	     "srlz.i;;"
-	     "st8.rel [%0]=%1;;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     :: "r"(mmr), "r"(val)
-	     : "r2", "memory");
-}
-
-extern inline void
-pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2)
-{
-	asm volatile
-	    ("mov r2=psr;;"
-	     "rsm psr.i | psr.dt | psr.ic;;"
-	     "cmp.ne p9,p0=%2,r0;"
-	     "srlz.i;;"
-	     "st8.rel [%0]=%1;"
-	     "(p9) st8.rel [%2]=%3;;"
-	     "mov psr.l=r2;;"
-	     "srlz.i;;"
-	     :: "r"(mmr1), "r"(val1), "r"(mmr2), "r"(val2)
-	     : "p9", "r2", "memory");
-}
+extern long pio_phys_read_mmr(volatile long *mmr);
+extern void pio_phys_write_mmr(volatile long *mmr, long val);
+extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);
 
 #endif /* _ASM_IA64_SN_RW_MMR_H */
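The header now only declares the routines; the implementations live in arch/ia64/sn/kernel/pio_phys.S above, where the pair write also clears psr.ic so no interruption state is collected between the two stores. A hedged usage sketch of the atomic variant (the MMR addresses are invented for illustration):

	/*
	 * Hypothetical pair-write, for illustration only.  FAKE_MMR_A/B
	 * are made-up uncached physical addresses, not real SHub MMRs.
	 */
	#include <stddef.h>
	#include <asm/sn/rw_mmr.h>

	#define FAKE_MMR_A	((volatile long *)0x8000000000000200UL)
	#define FAKE_MMR_B	((volatile long *)0x8000000000000208UL)

	static void example_pair_write(long a, long b)
	{
		/* both stores complete inside one psr.ic=0 window */
		pio_atomic_phys_write_mmrs(FAKE_MMR_A, a, FAKE_MMR_B, b);

		/* second MMR is skipped when its address is NULL */
		pio_atomic_phys_write_mmrs(FAKE_MMR_A, a, NULL, 0);
	}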