author	Max Filippov <jcmvbkbc@gmail.com>	2012-10-14 19:55:38 -0400
committer	Chris Zankel <chris@zankel.net>	2012-10-16 00:48:08 -0400
commit	bc5378fcba974317f9657c4fdc78af227e1e1068 (patch)
tree	998b8f8badf1d7a5dc0fd796f3e8501f8c5394ae
parent	f4349b6e01c8927a04795885702a173b6a60573c (diff)
xtensa: reorganize SR referencing
- reference SRs by names where possible, not by numbers;
- get rid of __stringify around SR names where possible;
- remove unneeded SR names from asm/regs.h;
- add SREG_ prefix to remaining SR names;

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
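An illustrative before/after (editor's sketch, distilled from the delay.h hunk below; xtensa_get_ccount is real kernel code, only the comments are added):

	/* Before: the SR is referenced by its number (CCOUNT is special
	 * register 234), or via __stringify() of a numeric constant from
	 * asm/regs.h.
	 */
	static __inline__ u32 xtensa_get_ccount(void)
	{
		u32 ccount;
		asm volatile ("rsr %0, 234; # CCOUNT\n" : "=r" (ccount));
		return ccount;
	}

	/* After: the assembler resolves the architectural register name. */
	static __inline__ u32 xtensa_get_ccount(void)
	{
		u32 ccount;
		asm volatile ("rsr %0, ccount\n" : "=r" (ccount));
		return ccount;
	}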
-rw-r--r--  arch/xtensa/boot/boot-redboot/bootstrap.S  |   8
-rw-r--r--  arch/xtensa/include/asm/atomic.h           |  12
-rw-r--r--  arch/xtensa/include/asm/cacheflush.h       |   2
-rw-r--r--  arch/xtensa/include/asm/cmpxchg.h          |   4
-rw-r--r--  arch/xtensa/include/asm/coprocessor.h      |   5
-rw-r--r--  arch/xtensa/include/asm/delay.h            |   2
-rw-r--r--  arch/xtensa/include/asm/irqflags.h         |   4
-rw-r--r--  arch/xtensa/include/asm/mmu_context.h      |   4
-rw-r--r--  arch/xtensa/include/asm/regs.h             |  55
-rw-r--r--  arch/xtensa/include/asm/timex.h            |   8
-rw-r--r--  arch/xtensa/include/asm/tlbflush.h         |   8
-rw-r--r--  arch/xtensa/kernel/align.S                 |  38
-rw-r--r--  arch/xtensa/kernel/coprocessor.S           |  20
-rw-r--r--  arch/xtensa/kernel/entry.S                 | 252
-rw-r--r--  arch/xtensa/kernel/head.S                  |  36
-rw-r--r--  arch/xtensa/kernel/irq.c                   |   6
-rw-r--r--  arch/xtensa/kernel/traps.c                 |  18
-rw-r--r--  arch/xtensa/kernel/vectors.S               |  44
-rw-r--r--  arch/xtensa/platforms/iss/setup.c          |  10
-rw-r--r--  arch/xtensa/platforms/xt2000/setup.c       |  10
20 files changed, 254 insertions, 292 deletions
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S
index 4c316cd28a54..86c34dbc9cd0 100644
--- a/arch/xtensa/boot/boot-redboot/bootstrap.S
+++ b/arch/xtensa/boot/boot-redboot/bootstrap.S
@@ -51,17 +51,17 @@ _start:
 	/* 'reset' window registers */
 
 	movi	a4, 1
-	wsr	a4, PS
+	wsr	a4, ps
 	rsync
 
-	rsr	a5, WINDOWBASE
+	rsr	a5, windowbase
 	ssl	a5
 	sll	a4, a4
-	wsr	a4, WINDOWSTART
+	wsr	a4, windowstart
 	rsync
 
 	movi	a4, 0x00040000
-	wsr	a4, PS
+	wsr	a4, ps
 	rsync
 
 	/* copy the loader to its address
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index b40989308775..24f50cada70c 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -73,7 +73,7 @@ static inline void atomic_add(int i, atomic_t * v)
 	"l32i %0, %2, 0 \n\t"
 	"add %0, %0, %1 \n\t"
 	"s32i %0, %2, 0 \n\t"
-	"wsr a15, "__stringify(PS)" \n\t"
+	"wsr a15, ps \n\t"
 	"rsync \n"
 	: "=&a" (vval)
 	: "a" (i), "a" (v)
@@ -97,7 +97,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 	"l32i %0, %2, 0 \n\t"
 	"sub %0, %0, %1 \n\t"
 	"s32i %0, %2, 0 \n\t"
-	"wsr a15, "__stringify(PS)" \n\t"
+	"wsr a15, ps \n\t"
 	"rsync \n"
 	: "=&a" (vval)
 	: "a" (i), "a" (v)
@@ -118,7 +118,7 @@ static inline int atomic_add_return(int i, atomic_t * v)
 	"l32i %0, %2, 0 \n\t"
 	"add %0, %0, %1 \n\t"
 	"s32i %0, %2, 0 \n\t"
-	"wsr a15, "__stringify(PS)" \n\t"
+	"wsr a15, ps \n\t"
 	"rsync \n"
 	: "=&a" (vval)
 	: "a" (i), "a" (v)
@@ -137,7 +137,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 	"l32i %0, %2, 0 \n\t"
 	"sub %0, %0, %1 \n\t"
 	"s32i %0, %2, 0 \n\t"
-	"wsr a15, "__stringify(PS)" \n\t"
+	"wsr a15, ps \n\t"
 	"rsync \n"
 	: "=&a" (vval)
 	: "a" (i), "a" (v)
@@ -260,7 +260,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 	"xor %1, %4, %3 \n\t"
 	"and %0, %0, %4 \n\t"
 	"s32i %0, %2, 0 \n\t"
-	"wsr a15, "__stringify(PS)" \n\t"
+	"wsr a15, ps \n\t"
 	"rsync \n"
 	: "=&a" (vval), "=a" (mask)
 	: "a" (v), "a" (all_f), "1" (mask)
@@ -277,7 +277,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 	"l32i %0, %2, 0 \n\t"
 	"or %0, %0, %1 \n\t"
 	"s32i %0, %2, 0 \n\t"
-	"wsr a15, "__stringify(PS)" \n\t"
+	"wsr a15, ps \n\t"
 	"rsync \n"
 	: "=&a" (vval)
 	: "a" (mask), "a" (v)
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 376cd9d5f455..569fec4f9a20 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -165,7 +165,7 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
 static inline u32 xtensa_get_cacheattr(void)
 {
 	u32 r;
-	asm volatile("	rsr %0, CACHEATTR" : "=a"(r));
+	asm volatile("	rsr %0, cacheattr" : "=a"(r));
 	return r;
 }
 
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index e32149063d83..64dad04a9d27 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -27,7 +27,7 @@ __cmpxchg_u32(volatile int *p, int old, int new)
 		"bne %0, %2, 1f \n\t"
 		"s32i %3, %1, 0 \n\t"
 		"1: \n\t"
-		"wsr a15, "__stringify(PS)" \n\t"
+		"wsr a15, ps \n\t"
 		"rsync \n\t"
 		: "=&a" (old)
 		: "a" (p), "a" (old), "r" (new)
@@ -97,7 +97,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 	__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
 			"l32i %0, %1, 0 \n\t"
 			"s32i %2, %1, 0 \n\t"
-			"wsr a15, "__stringify(PS)" \n\t"
+			"wsr a15, ps \n\t"
 			"rsync \n\t"
 			: "=&a" (tmp)
 			: "a" (m), "a" (val)
diff --git a/arch/xtensa/include/asm/coprocessor.h b/arch/xtensa/include/asm/coprocessor.h
index 75c94a1658b0..677501b32dfc 100644
--- a/arch/xtensa/include/asm/coprocessor.h
+++ b/arch/xtensa/include/asm/coprocessor.h
@@ -94,11 +94,10 @@
 #if XCHAL_HAVE_CP
 
 #define RSR_CPENABLE(x)	do { \
-	__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
+	__asm__ __volatile__("rsr %0, cpenable" : "=a" (x)); \
 	} while(0);
 #define WSR_CPENABLE(x)	do { \
-	__asm__ __volatile__("wsr %0," __stringify(CPENABLE) "; rsync" \
-		:: "a" (x)); \
+	__asm__ __volatile__("wsr %0, cpenable; rsync" :: "a" (x)); \
 	} while(0);
 
 #endif /* XCHAL_HAVE_CP */
diff --git a/arch/xtensa/include/asm/delay.h b/arch/xtensa/include/asm/delay.h
index e1d8c9e010c1..58c0a4fd4003 100644
--- a/arch/xtensa/include/asm/delay.h
+++ b/arch/xtensa/include/asm/delay.h
@@ -27,7 +27,7 @@ static inline void __delay(unsigned long loops)
 static __inline__ u32 xtensa_get_ccount(void)
 {
 	u32 ccount;
-	asm volatile ("rsr %0, 234; # CCOUNT\n" : "=r" (ccount));
+	asm volatile ("rsr %0, ccount\n" : "=r" (ccount));
 	return ccount;
 }
 
diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h
index dae9a8bdcb17..f865b1c1eae4 100644
--- a/arch/xtensa/include/asm/irqflags.h
+++ b/arch/xtensa/include/asm/irqflags.h
@@ -16,7 +16,7 @@
 static inline unsigned long arch_local_save_flags(void)
 {
 	unsigned long flags;
-	asm volatile("rsr %0,"__stringify(PS) : "=a" (flags));
+	asm volatile("rsr %0, ps" : "=a" (flags));
 	return flags;
 }
 
@@ -41,7 +41,7 @@ static inline void arch_local_irq_enable(void)
 
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	asm volatile("wsr %0, "__stringify(PS)" ; rsync"
+	asm volatile("wsr %0, ps; rsync"
 		     :: "a" (flags) : "memory");
 }
 
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index dbd8731a876a..feb10af96519 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -51,14 +51,14 @@ extern unsigned long asid_cache;
 
 static inline void set_rasid_register (unsigned long val)
 {
-	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
+	__asm__ __volatile__ (" wsr %0, rasid\n\t"
 			      " isync\n" : : "a" (val));
 }
 
 static inline unsigned long get_rasid_register (void)
 {
 	unsigned long tmp;
-	__asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp));
+	__asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
 	return tmp;
 }
 
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
index a3075b12aff1..8a8aa61ccc8d 100644
--- a/arch/xtensa/include/asm/regs.h
+++ b/arch/xtensa/include/asm/regs.h
@@ -27,52 +27,15 @@
 
 /* Special registers. */
 
-#define LBEG		0
-#define LEND		1
-#define LCOUNT		2
-#define SAR		3
-#define BR		4
-#define SCOMPARE1	12
-#define ACCHI		16
-#define ACCLO		17
-#define MR		32
-#define WINDOWBASE	72
-#define WINDOWSTART	73
-#define PTEVADDR	83
-#define RASID		90
-#define ITLBCFG		91
-#define DTLBCFG		92
-#define IBREAKENABLE	96
-#define DDR		104
-#define IBREAKA		128
-#define DBREAKA		144
-#define DBREAKC		160
-#define EPC		176
-#define EPC_1		177
-#define DEPC		192
-#define EPS		192
-#define EPS_1		193
-#define EXCSAVE		208
-#define EXCSAVE_1	209
-#define INTERRUPT	226
-#define INTENABLE	228
-#define PS		230
-#define THREADPTR	231
-#define EXCCAUSE	232
-#define DEBUGCAUSE	233
-#define CCOUNT		234
-#define PRID		235
-#define ICOUNT		236
-#define ICOUNTLEVEL	237
-#define EXCVADDR	238
-#define CCOMPARE	240
-#define MISC_SR		244
-
-/* Special names for read-only and write-only interrupt registers. */
-
-#define INTREAD		226
-#define INTSET		226
-#define INTCLEAR	227
+#define SREG_MR		32
+#define SREG_IBREAKA	128
+#define SREG_DBREAKA	144
+#define SREG_DBREAKC	160
+#define SREG_EPC	176
+#define SREG_EPS	192
+#define SREG_EXCSAVE	208
+#define SREG_CCOMPARE	240
+#define SREG_MISC	244
 
 /* EXCCAUSE register fields */
 
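Why a handful of SREG_ constants survive (editor's note, based on the head.S and timex.h hunks below): indexed special registers such as ccompare0..N, dbreakc0..N, or epc/eps/excsave at the debug level are addressed as a numeric base plus an index computed at assembly time, which a symbolic register name cannot express. From head.S:

	.set	_index, 0
	.rept	XCHAL_NUM_TIMERS - 1
	wsr	a0, SREG_CCOMPARE + _index	# ccompare0, ccompare1, ...
	.set	_index, _index + 1
	.endr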
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
index 053bc4272106..175b3d5e1b01 100644
--- a/arch/xtensa/include/asm/timex.h
+++ b/arch/xtensa/include/asm/timex.h
@@ -63,10 +63,10 @@ extern cycles_t cacheflush_time;
  * Register access.
  */
 
-#define WSR_CCOUNT(r)	  asm volatile ("wsr %0,"__stringify(CCOUNT) :: "a" (r))
-#define RSR_CCOUNT(r)	  asm volatile ("rsr %0,"__stringify(CCOUNT) : "=a" (r))
-#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(CCOMPARE)"+"__stringify(x) :: "a"(r))
-#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(CCOMPARE)"+"__stringify(x) : "=a"(r))
+#define WSR_CCOUNT(r)	  asm volatile ("wsr %0, ccount" :: "a" (r))
+#define RSR_CCOUNT(r)	  asm volatile ("rsr %0, ccount" : "=a" (r))
+#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) :: "a"(r))
+#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) : "=a"(r))
 
 static inline unsigned long get_ccount (void)
 {
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h
index 46d240074f74..43dd348a5a47 100644
--- a/arch/xtensa/include/asm/tlbflush.h
+++ b/arch/xtensa/include/asm/tlbflush.h
@@ -86,26 +86,26 @@ static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
 
 static inline void set_itlbcfg_register (unsigned long val)
 {
-	__asm__ __volatile__("wsr %0, "__stringify(ITLBCFG)"\n\t" "isync\n\t"
+	__asm__ __volatile__("wsr %0, itlbcfg\n\t" "isync\n\t"
 			     : : "a" (val));
 }
 
 static inline void set_dtlbcfg_register (unsigned long val)
 {
-	__asm__ __volatile__("wsr %0, "__stringify(DTLBCFG)"; dsync\n\t"
+	__asm__ __volatile__("wsr %0, dtlbcfg; dsync\n\t"
 			     : : "a" (val));
 }
 
 static inline void set_ptevaddr_register (unsigned long val)
 {
-	__asm__ __volatile__(" wsr %0, "__stringify(PTEVADDR)"; isync\n"
+	__asm__ __volatile__(" wsr %0, ptevaddr; isync\n"
 			     : : "a" (val));
 }
 
 static inline unsigned long read_ptevaddr_register (void)
 {
 	unsigned long tmp;
-	__asm__ __volatile__("rsr %0, "__stringify(PTEVADDR)"\n\t" : "=a" (tmp));
+	__asm__ __volatile__("rsr %0, ptevaddr\n\t" : "=a" (tmp));
 	return tmp;
 }
 
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index 33d6e9d2e83c..934ae58e2c79 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -170,15 +170,15 @@ ENTRY(fast_unaligned)
 	s32i	a7, a2, PT_AREG7
 	s32i	a8, a2, PT_AREG8
 
-	rsr	a0, DEPC
-	xsr	a3, EXCSAVE_1
+	rsr	a0, depc
+	xsr	a3, excsave1
 	s32i	a0, a2, PT_AREG2
 	s32i	a3, a2, PT_AREG3
 
 	/* Keep value of SAR in a0 */
 
-	rsr	a0, SAR
-	rsr	a8, EXCVADDR	# load unaligned memory address
+	rsr	a0, sar
+	rsr	a8, excvaddr	# load unaligned memory address
 
 	/* Now, identify one of the following load/store instructions.
 	 *
@@ -197,7 +197,7 @@ ENTRY(fast_unaligned)
 
 	/* Extract the instruction that caused the unaligned access. */
 
-	rsr	a7, EPC_1	# load exception address
+	rsr	a7, epc1	# load exception address
 	movi	a3, ~3
 	and	a3, a3, a7	# mask lower bits
 
@@ -275,16 +275,16 @@ ENTRY(fast_unaligned)
 1:
 
 #if XCHAL_HAVE_LOOPS
-	rsr	a5, LEND	# check if we reached LEND
+	rsr	a5, lend	# check if we reached LEND
 	bne	a7, a5, 1f
-	rsr	a5, LCOUNT	# and LCOUNT != 0
+	rsr	a5, lcount	# and LCOUNT != 0
 	beqz	a5, 1f
 	addi	a5, a5, -1	# decrement LCOUNT and set
-	rsr	a7, LBEG	# set PC to LBEGIN
-	wsr	a5, LCOUNT
+	rsr	a7, lbeg	# set PC to LBEGIN
+	wsr	a5, lcount
 #endif
 
-1:	wsr	a7, EPC_1	# skip load instruction
+1:	wsr	a7, epc1	# skip load instruction
 	extui	a4, a4, INSN_T, 4	# extract target register
 	movi	a5, .Lload_table
 	addx8	a4, a4, a5
@@ -355,16 +355,16 @@ ENTRY(fast_unaligned)
 
 1:
 #if XCHAL_HAVE_LOOPS
-	rsr	a4, LEND	# check if we reached LEND
+	rsr	a4, lend	# check if we reached LEND
 	bne	a7, a4, 1f
-	rsr	a4, LCOUNT	# and LCOUNT != 0
+	rsr	a4, lcount	# and LCOUNT != 0
 	beqz	a4, 1f
 	addi	a4, a4, -1	# decrement LCOUNT and set
-	rsr	a7, LBEG	# set PC to LBEGIN
-	wsr	a4, LCOUNT
+	rsr	a7, lbeg	# set PC to LBEGIN
+	wsr	a4, lcount
 #endif
 
-1:	wsr	a7, EPC_1	# skip store instruction
+1:	wsr	a7, epc1	# skip store instruction
 	movi	a4, ~3
 	and	a4, a4, a8	# align memory address
 
@@ -406,7 +406,7 @@ ENTRY(fast_unaligned)
 
 .Lexit:
 	movi	a4, 0
-	rsr	a3, EXCSAVE_1
+	rsr	a3, excsave1
 	s32i	a4, a3, EXC_TABLE_FIXUP
 
 	/* Restore working register */
@@ -420,7 +420,7 @@ ENTRY(fast_unaligned)
 
 	/* restore SAR and return */
 
-	wsr	a0, SAR
+	wsr	a0, sar
 	l32i	a0, a2, PT_AREG0
 	l32i	a2, a2, PT_AREG2
 	rfe
@@ -438,10 +438,10 @@ ENTRY(fast_unaligned)
 	l32i	a6, a2, PT_AREG6
 	l32i	a5, a2, PT_AREG5
 	l32i	a4, a2, PT_AREG4
-	wsr	a0, SAR
+	wsr	a0, sar
 	mov	a1, a2
 
-	rsr	a0, PS
+	rsr	a0, ps
 	bbsi.l	a2, PS_UM_BIT, 1f	# jump if user mode
 
 	movi	a0, _kernel_exception
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 2bc1e145c0a4..54c3be313bfa 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -43,7 +43,7 @@
 /* IO protection is currently unsupported. */
 
 ENTRY(fast_io_protect)
-	wsr	a0, EXCSAVE_1
+	wsr	a0, excsave1
 	movi	a0, unrecoverable_exception
 	callx0	a0
 
@@ -220,7 +220,7 @@ ENTRY(coprocessor_restore)
  */
 
 ENTRY(fast_coprocessor_double)
-	wsr	a0, EXCSAVE_1
+	wsr	a0, excsave1
 	movi	a0, unrecoverable_exception
 	callx0	a0
 
@@ -229,13 +229,13 @@ ENTRY(fast_coprocessor)
 
 	/* Save remaining registers a1-a3 and SAR */
 
-	xsr	a3, EXCSAVE_1
+	xsr	a3, excsave1
 	s32i	a3, a2, PT_AREG3
-	rsr	a3, SAR
+	rsr	a3, sar
 	s32i	a1, a2, PT_AREG1
 	s32i	a3, a2, PT_SAR
 	mov	a1, a2
-	rsr	a2, DEPC
+	rsr	a2, depc
 	s32i	a2, a1, PT_AREG2
 
 	/*
@@ -248,17 +248,17 @@ ENTRY(fast_coprocessor)
 
 	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
 
-	rsr	a3, EXCCAUSE
+	rsr	a3, exccause
 	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
 
 	/* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/
 
 	ssl	a3			# SAR: 32 - coprocessor_number
 	movi	a2, 1
-	rsr	a0, CPENABLE
+	rsr	a0, cpenable
 	sll	a2, a2
 	or	a0, a0, a2
-	wsr	a0, CPENABLE
+	wsr	a0, cpenable
 	rsync
 
 	/* Retrieve previous owner. (a3 still holds CP number) */
@@ -291,7 +291,7 @@ ENTRY(fast_coprocessor)
 
 	/* Note that only a0 and a1 were preserved. */
 
-2:	rsr	a3, EXCCAUSE
+2:	rsr	a3, exccause
 	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
 	movi	a0, coprocessor_owner
 	addx4	a0, a3, a0
@@ -321,7 +321,7 @@ ENTRY(fast_coprocessor)
 	l32i	a0, a1, PT_SAR
 	l32i	a3, a1, PT_AREG3
 	l32i	a2, a1, PT_AREG2
-	wsr	a0, SAR
+	wsr	a0, sar
 	l32i	a0, a1, PT_AREG0
 	l32i	a1, a1, PT_AREG1
 
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 7e6236073397..30b5c5f7b7eb 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -112,8 +112,8 @@ ENTRY(user_exception)
 
 	/* Save a2, a3, and depc, restore excsave_1 and set SP. */
 
-	xsr	a3, EXCSAVE_1
-	rsr	a0, DEPC
+	xsr	a3, excsave1
+	rsr	a0, depc
 	s32i	a1, a2, PT_AREG1
 	s32i	a0, a2, PT_AREG2
 	s32i	a3, a2, PT_AREG3
@@ -125,16 +125,16 @@ _user_exception:
 	/* Save SAR and turn off single stepping */
 
 	movi	a2, 0
-	rsr	a3, SAR
-	xsr	a2, ICOUNTLEVEL
+	rsr	a3, sar
+	xsr	a2, icountlevel
 	s32i	a3, a1, PT_SAR
 	s32i	a2, a1, PT_ICOUNTLEVEL
 
 	/* Rotate ws so that the current windowbase is at bit0. */
 	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
 
-	rsr	a2, WINDOWBASE
-	rsr	a3, WINDOWSTART
+	rsr	a2, windowbase
+	rsr	a3, windowstart
 	ssr	a2
 	s32i	a2, a1, PT_WINDOWBASE
 	s32i	a3, a1, PT_WINDOWSTART
@@ -205,12 +205,12 @@ _user_exception:
 
 	/* WINDOWBASE still in SAR! */
 
-	rsr	a2, SAR		# original WINDOWBASE
+	rsr	a2, sar		# original WINDOWBASE
 	movi	a3, 1
 	ssl	a2
 	sll	a3, a3
-	wsr	a3, WINDOWSTART	# set corresponding WINDOWSTART bit
-	wsr	a2, WINDOWBASE	# and WINDOWSTART
+	wsr	a3, windowstart	# set corresponding WINDOWSTART bit
+	wsr	a2, windowbase	# and WINDOWSTART
 	rsync
 
 	/* We are back to the original stack pointer (a1) */
@@ -252,8 +252,8 @@ ENTRY(kernel_exception)
 
 	/* Save a0, a2, a3, DEPC and set SP. */
 
-	xsr	a3, EXCSAVE_1		# restore a3, excsave_1
-	rsr	a0, DEPC		# get a2
+	xsr	a3, excsave1		# restore a3, excsave_1
+	rsr	a0, depc		# get a2
 	s32i	a1, a2, PT_AREG1
 	s32i	a0, a2, PT_AREG2
 	s32i	a3, a2, PT_AREG3
@@ -265,16 +265,16 @@ _kernel_exception:
 	/* Save SAR and turn off single stepping */
 
 	movi	a2, 0
-	rsr	a3, SAR
-	xsr	a2, ICOUNTLEVEL
+	rsr	a3, sar
+	xsr	a2, icountlevel
 	s32i	a3, a1, PT_SAR
 	s32i	a2, a1, PT_ICOUNTLEVEL
 
 	/* Rotate ws so that the current windowbase is at bit0. */
 	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
 
-	rsr	a2, WINDOWBASE	# don't need to save these, we only
-	rsr	a3, WINDOWSTART	# need shifted windowstart: windowmask
+	rsr	a2, windowbase	# don't need to save these, we only
+	rsr	a3, windowstart	# need shifted windowstart: windowmask
 	ssr	a2
 	slli	a2, a3, 32-WSBITS
 	src	a2, a3, a2
@@ -323,24 +323,24 @@ common_exception:
 
 	/* Save some registers, disable loops and clear the syscall flag. */
 
-	rsr	a2, DEBUGCAUSE
-	rsr	a3, EPC_1
+	rsr	a2, debugcause
+	rsr	a3, epc1
 	s32i	a2, a1, PT_DEBUGCAUSE
 	s32i	a3, a1, PT_PC
 
 	movi	a2, -1
-	rsr	a3, EXCVADDR
+	rsr	a3, excvaddr
 	s32i	a2, a1, PT_SYSCALL
 	movi	a2, 0
 	s32i	a3, a1, PT_EXCVADDR
-	xsr	a2, LCOUNT
+	xsr	a2, lcount
 	s32i	a2, a1, PT_LCOUNT
 
 	/* It is now save to restore the EXC_TABLE_FIXUP variable. */
 
-	rsr	a0, EXCCAUSE
+	rsr	a0, exccause
 	movi	a3, 0
-	rsr	a2, EXCSAVE_1
+	rsr	a2, excsave1
 	s32i	a0, a1, PT_EXCCAUSE
 	s32i	a3, a2, EXC_TABLE_FIXUP
 
@@ -352,22 +352,22 @@ common_exception:
 	 * (interrupts disabled) and if this exception is not an interrupt.
 	 */
 
-	rsr	a3, PS
+	rsr	a3, ps
 	addi	a0, a0, -4
 	movi	a2, 1
 	extui	a3, a3, 0, 1		# a3 = PS.INTLEVEL[0]
 	moveqz	a3, a2, a0		# a3 = 1 iff interrupt exception
 	movi	a2, 1 << PS_WOE_BIT
 	or	a3, a3, a2
-	rsr	a0, EXCCAUSE
-	xsr	a3, PS
+	rsr	a0, exccause
+	xsr	a3, ps
 
 	s32i	a3, a1, PT_PS		# save ps
 
-	/* Save LBEG, LEND */
+	/* Save lbeg, lend */
 
-	rsr	a2, LBEG
-	rsr	a3, LEND
+	rsr	a2, lbeg
+	rsr	a3, lend
 	s32i	a2, a1, PT_LBEG
 	s32i	a3, a1, PT_LEND
 
@@ -432,7 +432,7 @@ common_exception_return:
 
 	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
 
-	wsr	a3, PS		/* disable interrupts */
+	wsr	a3, ps		/* disable interrupts */
 
 	_bbci.l	a3, PS_UM_BIT, kernel_exception_exit
 
@@ -444,12 +444,12 @@ user_exception_exit:
 
 	l32i	a2, a1, PT_WINDOWBASE
 	l32i	a3, a1, PT_WINDOWSTART
-	wsr	a1, DEPC		# use DEPC as temp storage
-	wsr	a3, WINDOWSTART		# restore WINDOWSTART
+	wsr	a1, depc		# use DEPC as temp storage
+	wsr	a3, windowstart		# restore WINDOWSTART
 	ssr	a2			# preserve user's WB in the SAR
-	wsr	a2, WINDOWBASE		# switch to user's saved WB
+	wsr	a2, windowbase		# switch to user's saved WB
 	rsync
-	rsr	a1, DEPC		# restore stack pointer
+	rsr	a1, depc		# restore stack pointer
 	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
 	rotw	-1			# we restore a4..a7
 	_bltui	a6, 16, 1f		# only have to restore current window?
@@ -475,8 +475,8 @@ user_exception_exit:
 
 	/* Clear unrestored registers (don't leak anything to user-land */
 
-1:	rsr	a0, WINDOWBASE
-	rsr	a3, SAR
+1:	rsr	a0, windowbase
+	rsr	a3, sar
 	sub	a3, a0, a3
 	beqz	a3, 2f
 	extui	a3, a3, 0, WBBITS
@@ -556,7 +556,7 @@ kernel_exception_exit:
 
 	/* Test WINDOWSTART now. If spilled, do the movsp */
 
-	rsr	a3, WINDOWSTART
+	rsr	a3, windowstart
 	addi	a0, a3, -1
 	and	a3, a3, a0
 	_bnez	a3, common_exception_exit
@@ -604,24 +604,24 @@ common_exception_exit:
 
 1:	l32i	a2, a1, PT_PC
 	l32i	a3, a1, PT_SAR
-	wsr	a2, EPC_1
-	wsr	a3, SAR
+	wsr	a2, epc1
+	wsr	a3, sar
 
 	/* Restore LBEG, LEND, LCOUNT */
 
 	l32i	a2, a1, PT_LBEG
 	l32i	a3, a1, PT_LEND
-	wsr	a2, LBEG
+	wsr	a2, lbeg
 	l32i	a2, a1, PT_LCOUNT
-	wsr	a3, LEND
-	wsr	a2, LCOUNT
+	wsr	a3, lend
+	wsr	a2, lcount
 
 	/* We control single stepping through the ICOUNTLEVEL register. */
 
 	l32i	a2, a1, PT_ICOUNTLEVEL
 	movi	a3, -2
-	wsr	a2, ICOUNTLEVEL
-	wsr	a3, ICOUNT
+	wsr	a2, icountlevel
+	wsr	a3, icount
 
 	/* Check if it was double exception. */
 
@@ -636,7 +636,7 @@ common_exception_exit:
 	l32i	a1, a1, PT_AREG1
 	rfe
 
-1:	wsr	a0, DEPC
+1:	wsr	a0, depc
 	l32i	a0, a1, PT_AREG0
 	l32i	a1, a1, PT_AREG1
 	rfde
@@ -651,25 +651,25 @@ common_exception_exit:
 
 ENTRY(debug_exception)
 
-	rsr	a0, EPS + XCHAL_DEBUGLEVEL
+	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
 	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode
 
-	/* Set EPC_1 and EXCCAUSE */
+	/* Set EPC1 and EXCCAUSE */
 
-	wsr	a2, DEPC		# save a2 temporarily
-	rsr	a2, EPC + XCHAL_DEBUGLEVEL
-	wsr	a2, EPC_1
+	wsr	a2, depc		# save a2 temporarily
+	rsr	a2, SREG_EPC + XCHAL_DEBUGLEVEL
+	wsr	a2, epc1
 
 	movi	a2, EXCCAUSE_MAPPED_DEBUG
-	wsr	a2, EXCCAUSE
+	wsr	a2, exccause
 
 	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/
 
 	movi	a2, 1 << PS_EXCM_BIT
 	or	a2, a0, a2
 	movi	a0, debug_exception	# restore a3, debug jump vector
-	wsr	a2, PS
-	xsr	a0, EXCSAVE + XCHAL_DEBUGLEVEL
+	wsr	a2, ps
+	xsr	a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
 
 	/* Switch to kernel/user stack, restore jump vector, and save a0 */
 
@@ -680,19 +680,19 @@ ENTRY(debug_exception)
 	movi	a0, 0
 	s32i	a1, a2, PT_AREG1
 	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
-	xsr	a0, DEPC
+	xsr	a0, depc
 	s32i	a3, a2, PT_AREG3
 	s32i	a0, a2, PT_AREG2
 	mov	a1, a2
 	j	_kernel_exception
 
-2:	rsr	a2, EXCSAVE_1
+2:	rsr	a2, excsave1
 	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
 	s32i	a0, a2, PT_AREG0
 	movi	a0, 0
 	s32i	a1, a2, PT_AREG1
 	s32i	a0, a2, PT_DEPC
-	xsr	a0, DEPC
+	xsr	a0, depc
 	s32i	a3, a2, PT_AREG3
 	s32i	a0, a2, PT_AREG2
 	mov	a1, a2
@@ -732,12 +732,12 @@ ENTRY(unrecoverable_exception)
 	movi	a0, 1
 	movi	a1, 0
 
-	wsr	a0, WINDOWSTART
-	wsr	a1, WINDOWBASE
+	wsr	a0, windowstart
+	wsr	a1, windowbase
 	rsync
 
 	movi	a1, (1 << PS_WOE_BIT) | 1
-	wsr	a1, PS
+	wsr	a1, ps
 	rsync
 
 	movi	a1, init_task
@@ -793,7 +793,7 @@ ENTRY(fast_alloca)
 	l32i	a0, a2, PT_DEPC
 	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double
 
-	rsr	a0, DEPC		# get a2
+	rsr	a0, depc		# get a2
 	s32i	a4, a2, PT_AREG4	# save a4 and
 	s32i	a0, a2, PT_AREG2	# a2 to stack
 
@@ -804,8 +804,8 @@ ENTRY(fast_alloca)
 
 	/* Restore a3, excsave_1 */
 
-	xsr	a3, EXCSAVE_1		# make sure excsave_1 is valid for dbl.
-	rsr	a4, EPC_1		# get exception address
+	xsr	a3, excsave1		# make sure excsave_1 is valid for dbl.
+	rsr	a4, epc1		# get exception address
 	s32i	a3, a2, PT_AREG3	# save a3 to stack
 
 #ifdef ALLOCA_EXCEPTION_IN_IRAM
@@ -820,7 +820,7 @@ ENTRY(fast_alloca)
 	jx	a3
 
 .Lunhandled_double:
-	wsr	a0, EXCSAVE_1
+	wsr	a0, excsave1
 	movi	a0, unrecoverable_exception
 	callx0	a0
 
@@ -852,7 +852,7 @@ ENTRY(fast_alloca)
 #endif
 	addi	a4, a4, 3		# step over movsp
 	_EXTUI_MOVSP_DST(a0)		# extract destination register
-	wsr	a4, EPC_1		# save new epc_1
+	wsr	a4, epc1		# save new epc_1
 
 	_bnei	a0, 1, 1f		# no 'movsp a1, ax': jump
 
@@ -953,14 +953,14 @@ ENTRY(fast_syscall_kernel)
 
 	/* Skip syscall. */
 
-	rsr	a0, EPC_1
+	rsr	a0, epc1
 	addi	a0, a0, 3
-	wsr	a0, EPC_1
+	wsr	a0, epc1
 
 	l32i	a0, a2, PT_DEPC
 	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
 
-	rsr	a0, DEPC		# get syscall-nr
+	rsr	a0, depc		# get syscall-nr
 	_beqz	a0, fast_syscall_spill_registers
 	_beqi	a0, __NR_xtensa, fast_syscall_xtensa
 
@@ -970,14 +970,14 @@ ENTRY(fast_syscall_user)
 
 	/* Skip syscall. */
 
-	rsr	a0, EPC_1
+	rsr	a0, epc1
 	addi	a0, a0, 3
-	wsr	a0, EPC_1
+	wsr	a0, epc1
 
 	l32i	a0, a2, PT_DEPC
 	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
 
-	rsr	a0, DEPC		# get syscall-nr
+	rsr	a0, depc		# get syscall-nr
 	_beqz	a0, fast_syscall_spill_registers
 	_beqi	a0, __NR_xtensa, fast_syscall_xtensa
 
@@ -988,10 +988,10 @@ ENTRY(fast_syscall_unrecoverable)
 	/* Restore all states. */
 
 	l32i	a0, a2, PT_AREG0	# restore a0
-	xsr	a2, DEPC		# restore a2, depc
-	rsr	a3, EXCSAVE_1
+	xsr	a2, depc		# restore a2, depc
+	rsr	a3, excsave1
 
-	wsr	a0, EXCSAVE_1
+	wsr	a0, excsave1
 	movi	a0, unrecoverable_exception
 	callx0	a0
 
@@ -1047,7 +1047,7 @@ ENTRY(fast_syscall_unrecoverable)
 
 ENTRY(fast_syscall_xtensa)
 
-	xsr	a3, EXCSAVE_1		# restore a3, excsave1
+	xsr	a3, excsave1		# restore a3, excsave1
 
 	s32i	a7, a2, PT_AREG7	# we need an additional register
 	movi	a7, 4			# sizeof(unsigned int)
@@ -1124,13 +1124,13 @@ ENTRY(fast_syscall_spill_registers)
 
 	movi	a0, fast_syscall_spill_registers_fixup
 	s32i	a0, a3, EXC_TABLE_FIXUP
-	rsr	a0, WINDOWBASE
+	rsr	a0, windowbase
 	s32i	a0, a3, EXC_TABLE_PARAM
 
 	/* Save a3 and SAR on stack. */
 
-	rsr	a0, SAR
-	xsr	a3, EXCSAVE_1		# restore a3 and excsave_1
+	rsr	a0, sar
+	xsr	a3, excsave1		# restore a3 and excsave_1
 	s32i	a3, a2, PT_AREG3
 	s32i	a4, a2, PT_AREG4
 	s32i	a0, a2, PT_AREG5	# store SAR to PT_AREG5
@@ -1148,7 +1148,7 @@ ENTRY(fast_syscall_spill_registers)
 	l32i	a3, a2, PT_AREG5
 	l32i	a4, a2, PT_AREG4
 	l32i	a0, a2, PT_AREG0
-	wsr	a3, SAR
+	wsr	a3, sar
 	l32i	a3, a2, PT_AREG3
 
 	/* Restore clobbered registers. */
@@ -1173,8 +1173,8 @@ ENTRY(fast_syscall_spill_registers)
 
 fast_syscall_spill_registers_fixup:
 
-	rsr	a2, WINDOWBASE	# get current windowbase (a2 is saved)
-	xsr	a0, DEPC	# restore depc and a0
+	rsr	a2, windowbase	# get current windowbase (a2 is saved)
+	xsr	a0, depc	# restore depc and a0
 	ssl	a2		# set shift (32 - WB)
 
 	/* We need to make sure the current registers (a0-a3) are preserved.
@@ -1182,12 +1182,12 @@ fast_syscall_spill_registers_fixup:
 	 * in WS, so that the exception handlers save them to the task stack.
 	 */
 
-	rsr	a3, EXCSAVE_1	# get spill-mask
+	rsr	a3, excsave1	# get spill-mask
 	slli	a2, a3, 1	# shift left by one
 
 	slli	a3, a2, 32-WSBITS
 	src	a2, a2, a3	# a1 = xxwww1yyxxxwww1yy......
-	wsr	a2, WINDOWSTART	# set corrected windowstart
+	wsr	a2, windowstart	# set corrected windowstart
 
 	movi	a3, exc_table
 	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
@@ -1201,7 +1201,7 @@ fast_syscall_spill_registers_fixup:
 	 * excsave_1:	a3
 	 */
 
-	wsr	a3, WINDOWBASE
+	wsr	a3, windowbase
 	rsync
 
 	/* We are now in the original frame when we entered _spill_registers:
@@ -1227,7 +1227,7 @@ fast_syscall_spill_registers_fixup:
 	/* Jump to the exception handler. */
 
 	movi	a3, exc_table
-	rsr	a0, EXCCAUSE
+	rsr	a0, exccause
 	addx4	a0, a0, a3			# find entry in table
 	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
 	jx	a0
@@ -1236,28 +1236,28 @@ fast_syscall_spill_registers_fixup_return:
 
 	/* When we return here, all registers have been restored (a2: DEPC) */
 
-	wsr	a2, DEPC		# exception address
+	wsr	a2, depc		# exception address
 
 	/* Restore fixup handler. */
 
-	xsr	a3, EXCSAVE_1
+	xsr	a3, excsave1
 	movi	a2, fast_syscall_spill_registers_fixup
 	s32i	a2, a3, EXC_TABLE_FIXUP
-	rsr	a2, WINDOWBASE
+	rsr	a2, windowbase
 	s32i	a2, a3, EXC_TABLE_PARAM
 	l32i	a2, a3, EXC_TABLE_KSTK
 
 	/* Load WB at the time the exception occurred. */
 
-	rsr	a3, SAR			# WB is still in SAR
+	rsr	a3, sar			# WB is still in SAR
 	neg	a3, a3
-	wsr	a3, WINDOWBASE
+	wsr	a3, windowbase
 	rsync
 
 	/* Restore a3 and return. */
 
 	movi	a3, exc_table
-	xsr	a3, EXCSAVE_1
+	xsr	a3, excsave1
 
 	rfde
 
@@ -1283,8 +1283,8 @@ ENTRY(_spill_registers)
 	 * Rotate ws right so that a4 = yyxxxwww1.
 	 */
 
-	rsr	a4, WINDOWBASE
-	rsr	a3, WINDOWSTART	# a3 = xxxwww1yy
+	rsr	a4, windowbase
+	rsr	a3, windowstart	# a3 = xxxwww1yy
 	ssr	a4		# holds WB
 	slli	a4, a3, WSBITS
 	or	a3, a3, a4	# a3 = xxxwww1yyxxxwww1yy
@@ -1302,7 +1302,7 @@ ENTRY(_spill_registers)
 
 	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
 
-	wsr	a3, WINDOWSTART	# save shifted windowstart
+	wsr	a3, windowstart	# save shifted windowstart
 	neg	a4, a3
 	and	a3, a4, a3	# first bit set from right: 000010000
 
@@ -1311,12 +1311,12 @@ ENTRY(_spill_registers)
 	sub	a4, a3, a4	# WSBITS-a4:number of 0-bits from right
 	ssr	a4		# save in SAR for later.
 
-	rsr	a3, WINDOWBASE
+	rsr	a3, windowbase
 	add	a3, a3, a4
-	wsr	a3, WINDOWBASE
+	wsr	a3, windowbase
 	rsync
 
-	rsr	a3, WINDOWSTART
+	rsr	a3, windowstart
 	srl	a3, a3		# shift windowstart
 
 	/* WB is now just one frame below the oldest frame in the register
@@ -1364,11 +1364,11 @@ ENTRY(_spill_registers)
 .Lexit: /* Done. Do the final rotation, set WS, and return. */
 
 	rotw	1
-	rsr	a3, WINDOWBASE
+	rsr	a3, windowbase
 	ssl	a3
 	movi	a3, 1
 	sll	a3, a3
-	wsr	a3, WINDOWSTART
+	wsr	a3, windowstart
 	ret
 
 .Lc4:	s32e	a4, a9, -16
@@ -1429,7 +1429,7 @@ ENTRY(_spill_registers)
 	 * however, this condition is unrecoverable in kernel space.
 	 */
 
-	rsr	a0, PS
+	rsr	a0, ps
 	_bbci.l	a0, PS_UM_BIT, 1f
 
 	/* User space: Setup a dummy frame and kill application.
@@ -1439,18 +1439,18 @@ ENTRY(_spill_registers)
 	movi	a0, 1
 	movi	a1, 0
 
-	wsr	a0, WINDOWSTART
-	wsr	a1, WINDOWBASE
+	wsr	a0, windowstart
+	wsr	a1, windowbase
 	rsync
 
 	movi	a0, 0
 
 	movi	a3, exc_table
 	l32i	a1, a3, EXC_TABLE_KSTK
-	wsr	a3, EXCSAVE_1
+	wsr	a3, excsave1
 
 	movi	a4, (1 << PS_WOE_BIT) | 1
-	wsr	a4, PS
+	wsr	a4, ps
 	rsync
 
 	movi	a6, SIGSEGV
@@ -1459,7 +1459,7 @@ ENTRY(_spill_registers)
 
 1:	/* Kernel space: PANIC! */
 
-	wsr	a0, EXCSAVE_1
+	wsr	a0, excsave1
 	movi	a0, unrecoverable_exception
 	callx0	a0		# should not return
 1:	j	1b
@@ -1524,7 +1524,7 @@ ENTRY(fast_second_level_miss)
 
 	/* We deliberately destroy a3 that holds the exception table. */
 
-8:	rsr	a3, EXCVADDR	# fault address
+8:	rsr	a3, excvaddr	# fault address
 	_PGD_OFFSET(a0, a3, a1)
 	l32i	a0, a0, 0	# read pmdval
 	beqz	a0, 2f
@@ -1561,7 +1561,7 @@ ENTRY(fast_second_level_miss)
 	 */
 
 	extui	a3, a3, 28, 2	# addr. bit 28 and 29	0,1,2,3
-	rsr	a1, PTEVADDR
+	rsr	a1, ptevaddr
 	addx2	a3, a3, a3	# ->			0,3,6,9
 	srli	a1, a1, PAGE_SHIFT
 	extui	a3, a3, 2, 2	# ->			0,0,1,2
@@ -1583,18 +1583,18 @@ ENTRY(fast_second_level_miss)
 	l32i	a0, a2, PT_AREG0
 	l32i	a1, a2, PT_AREG1
 	l32i	a2, a2, PT_DEPC
-	xsr	a3, EXCSAVE_1
+	xsr	a3, excsave1
 
 	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
 
 	/* Restore excsave1 and return. */
 
-	rsr	a2, DEPC
+	rsr	a2, depc
 	rfe
 
 	/* Return from double exception. */
 
-1:	xsr	a2, DEPC
+1:	xsr	a2, depc
 	esync
 	rfde
 
@@ -1618,7 +1618,7 @@ ENTRY(fast_second_level_miss)
 	/* Make sure the exception originated in the special functions */
 
 	movi	a0, __tlbtemp_mapping_start
-	rsr	a3, EPC_1
+	rsr	a3, epc1
 	bltu	a3, a0, 2f
 	movi	a0, __tlbtemp_mapping_end
 	bgeu	a3, a0, 2f
@@ -1626,7 +1626,7 @@ ENTRY(fast_second_level_miss)
 	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
 
 	movi	a3, TLBTEMP_BASE_1
-	rsr	a0, EXCVADDR
+	rsr	a0, excvaddr
 	bltu	a0, a3, 2f
 
 	addi	a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
@@ -1635,7 +1635,7 @@ ENTRY(fast_second_level_miss)
 	/* Check if we have to restore an ITLB mapping. */
 
 	movi	a1, __tlbtemp_mapping_itlb
-	rsr	a3, EPC_1
+	rsr	a3, epc1
 	sub	a3, a3, a1
 
 	/* Calculate VPN */
@@ -1671,13 +1671,13 @@ ENTRY(fast_second_level_miss)
 2:	/* Invalid PGD, default exception handling */
 
 	movi	a3, exc_table
-	rsr	a1, DEPC
-	xsr	a3, EXCSAVE_1
+	rsr	a1, depc
+	xsr	a3, excsave1
 	s32i	a1, a2, PT_AREG2
 	s32i	a3, a2, PT_AREG3
 	mov	a1, a2
 
-	rsr	a2, PS
+	rsr	a2, ps
 	bbsi.l	a2, PS_UM_BIT, 1f
 	j	_kernel_exception
 1:	j	_user_exception
@@ -1712,7 +1712,7 @@ ENTRY(fast_store_prohibited)
 	l32i	a0, a1, TASK_MM		# tsk->mm
 	beqz	a0, 9f
 
-8:	rsr	a1, EXCVADDR		# fault address
+8:	rsr	a1, excvaddr		# fault address
 	_PGD_OFFSET(a0, a1, a4)
 	l32i	a0, a0, 0
 	beqz	a0, 2f
@@ -1725,7 +1725,7 @@ ENTRY(fast_store_prohibited)
 
 	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
 	or	a4, a4, a1
-	rsr	a1, EXCVADDR
+	rsr	a1, excvaddr
 	s32i	a4, a0, 0
 
 	/* We need to flush the cache if we have page coloring. */
@@ -1749,15 +1749,15 @@ ENTRY(fast_store_prohibited)
 
 	/* Restore excsave1 and a3. */
 
-	xsr	a3, EXCSAVE_1
+	xsr	a3, excsave1
 	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
 
-	rsr	a2, DEPC
+	rsr	a2, depc
 	rfe
 
 	/* Double exception. Restore FIXUP handler and return. */
 
-1:	xsr	a2, DEPC
+1:	xsr	a2, depc
 	esync
 	rfde
 
@@ -1766,14 +1766,14 @@ ENTRY(fast_store_prohibited)
 
 2:	/* If there was a problem, handle fault in C */
 
-	rsr	a4, DEPC	# still holds a2
-	xsr	a3, EXCSAVE_1
+	rsr	a4, depc	# still holds a2
+	xsr	a3, excsave1
 	s32i	a4, a2, PT_AREG2
 	s32i	a3, a2, PT_AREG3
 	l32i	a4, a2, PT_AREG4
 	mov	a1, a2
 
-	rsr	a2, PS
+	rsr	a2, ps
 	bbsi.l	a2, PS_UM_BIT, 1f
 	j	_kernel_exception
 1:	j	_user_exception
@@ -1901,8 +1901,8 @@ ENTRY(_switch_to)
 	/* Disable ints while we manipulate the stack pointer. */
 
 	movi	a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
-	xsr	a14, PS
-	rsr	a3, EXCSAVE_1
+	xsr	a14, ps
+	rsr	a3, excsave1
 	rsync
 	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */
 
@@ -1910,7 +1910,7 @@ ENTRY(_switch_to)
 
 #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
 	l32i	a3, a5, THREAD_CPENABLE
-	xsr	a3, CPENABLE
+	xsr	a3, cpenable
 	s32i	a3, a4, THREAD_CPENABLE
 #endif
 
@@ -1924,7 +1924,7 @@ ENTRY(_switch_to)
 	 * we return from kernel space.
 	 */
 
-	rsr	a3, EXCSAVE_1	# exc_table
+	rsr	a3, excsave1	# exc_table
 	movi	a6, 0
 	addi	a7, a5, PT_REGS_OFFSET
 	s32i	a6, a3, EXC_TABLE_FIXUP
@@ -1937,7 +1937,7 @@ ENTRY(_switch_to)
 
 	load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
 
-	wsr	a14, PS
+	wsr	a14, ps
 	mov	a2, a12		# return 'prev'
 	rsync
 
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 3ef91a73652d..bdc50788f35e 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -61,18 +61,18 @@ _startup:
 	/* Disable interrupts and exceptions. */
 
 	movi	a0, LOCKLEVEL
-	wsr	a0, PS
+	wsr	a0, ps
 
 	/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
 
-	wsr	a2, EXCSAVE_1
+	wsr	a2, excsave1
 
 	/* Start with a fresh windowbase and windowstart. */
 
 	movi	a1, 1
 	movi	a0, 0
-	wsr	a1, WINDOWSTART
-	wsr	a0, WINDOWBASE
+	wsr	a1, windowstart
+	wsr	a0, windowbase
 	rsync
 
 	/* Set a0 to 0 for the remaining initialization. */
@@ -82,46 +82,46 @@ _startup:
 	/* Clear debugging registers. */
 
 #if XCHAL_HAVE_DEBUG
-	wsr	a0, IBREAKENABLE
-	wsr	a0, ICOUNT
+	wsr	a0, ibreakenable
+	wsr	a0, icount
 	movi	a1, 15
-	wsr	a0, ICOUNTLEVEL
+	wsr	a0, icountlevel
 
 	.set	_index, 0
 	.rept	XCHAL_NUM_DBREAK - 1
-	wsr	a0, DBREAKC + _index
+	wsr	a0, SREG_DBREAKC + _index
 	.set	_index, _index + 1
 	.endr
 #endif
 
 	/* Clear CCOUNT (not really necessary, but nice) */
 
-	wsr	a0, CCOUNT	# not really necessary, but nice
+	wsr	a0, ccount	# not really necessary, but nice
 
 	/* Disable zero-loops. */
 
 #if XCHAL_HAVE_LOOPS
-	wsr	a0, LCOUNT
+	wsr	a0, lcount
 #endif
 
 	/* Disable all timers. */
 
 	.set	_index, 0
 	.rept	XCHAL_NUM_TIMERS - 1
-	wsr	a0, CCOMPARE + _index
+	wsr	a0, SREG_CCOMPARE + _index
 	.set	_index, _index + 1
 	.endr
 
 	/* Interrupt initialization. */
 
 	movi	a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
-	wsr	a0, INTENABLE
-	wsr	a2, INTCLEAR
+	wsr	a0, intenable
+	wsr	a2, intclear
 
 	/* Disable coprocessors. */
 
 #if XCHAL_CP_NUM > 0
-	wsr	a0, CPENABLE
+	wsr	a0, cpenable
 #endif
 
 	/* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
@@ -132,7 +132,7 @@ _startup:
 	 */
 
 	movi	a1, 1
-	wsr	a1, PS
+	wsr	a1, ps
 	rsync
 
 	/* Initialize the caches.
@@ -206,18 +206,18 @@ _startup:
 	addi	a1, a1, KERNEL_STACK_SIZE
 
 	movi	a2, 0x00040001		# WOE=1, INTLEVEL=1, UM=0
-	wsr	a2, PS			# (enable reg-windows; progmode stack)
+	wsr	a2, ps			# (enable reg-windows; progmode stack)
 	rsync
 
 	/* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
 
 	movi	a2, debug_exception
-	wsr	a2, EXCSAVE + XCHAL_DEBUGLEVEL
+	wsr	a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
 
 	/* Set up EXCSAVE[1] to point to the exc_table. */
 
 	movi	a6, exc_table
-	xsr	a6, EXCSAVE_1
+	xsr	a6, excsave1
 
 	/* init_arch kick-starts the linux kernel */
 
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 98e77c3ef1c3..a6ce3e563739 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -72,13 +72,13 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 static void xtensa_irq_mask(struct irq_data *d)
 {
 	cached_irq_mask &= ~(1 << d->irq);
-	set_sr (cached_irq_mask, INTENABLE);
+	set_sr (cached_irq_mask, intenable);
 }
 
 static void xtensa_irq_unmask(struct irq_data *d)
 {
 	cached_irq_mask |= 1 << d->irq;
-	set_sr (cached_irq_mask, INTENABLE);
+	set_sr (cached_irq_mask, intenable);
 }
 
 static void xtensa_irq_enable(struct irq_data *d)
@@ -95,7 +95,7 @@ static void xtensa_irq_disable(struct irq_data *d)
95 95
96static void xtensa_irq_ack(struct irq_data *d) 96static void xtensa_irq_ack(struct irq_data *d)
97{ 97{
98 set_sr(1 << d->irq, INTCLEAR); 98 set_sr(1 << d->irq, intclear);
99} 99}
100 100
101static int xtensa_irq_retrigger(struct irq_data *d) 101static int xtensa_irq_retrigger(struct irq_data *d)
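
With symbolic SR names, the set_sr() calls above no longer need __stringify() around the name at the call site; the macro can paste the lowercase token straight into the mnemonic. A hedged sketch of such a helper (xtensa_wsr is a hypothetical name; the kernel's actual set_sr() definition may differ):

	#include <linux/stringify.h>

	#define xtensa_wsr(v, sr) \
		__asm__ __volatile__ ("wsr %0, " __stringify(sr) : : "a" (v))

	/* roughly what xtensa_irq_mask() boils down to */
	static inline void write_irq_mask(unsigned long cached_irq_mask)
	{
		xtensa_wsr(cached_irq_mask, intenable);
	}
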
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index bc1e14cf9369..92ba9f83eaaf 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -202,8 +202,8 @@ extern void do_IRQ(int, struct pt_regs *);
202 202
203void do_interrupt (struct pt_regs *regs) 203void do_interrupt (struct pt_regs *regs)
204{ 204{
205 unsigned long intread = get_sr (INTREAD); 205 unsigned long intread = get_sr (interrupt);
206 unsigned long intenable = get_sr (INTENABLE); 206 unsigned long intenable = get_sr (intenable);
207 int i, mask; 207 int i, mask;
208 208
209 /* Handle all interrupts (no priorities). 209 /* Handle all interrupts (no priorities).
@@ -213,7 +213,7 @@ void do_interrupt (struct pt_regs *regs)
213 213
214 for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) { 214 for (i=0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
215 if (mask & (intread & intenable)) { 215 if (mask & (intread & intenable)) {
216 set_sr (mask, INTCLEAR); 216 set_sr (mask, intclear);
217 do_IRQ (i,regs); 217 do_IRQ (i,regs);
218 } 218 }
219 } 219 }
@@ -339,7 +339,7 @@ void __init trap_init(void)
339 /* Initialize EXCSAVE_1 to hold the address of the exception table. */ 339 /* Initialize EXCSAVE_1 to hold the address of the exception table. */
340 340
341 i = (unsigned long)exc_table; 341 i = (unsigned long)exc_table;
342 __asm__ __volatile__("wsr %0, "__stringify(EXCSAVE_1)"\n" : : "a" (i)); 342 __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (i));
343} 343}
344 344
345/* 345/*
@@ -386,16 +386,16 @@ static inline void spill_registers(void)
386 unsigned int a0, ps; 386 unsigned int a0, ps;
387 387
388 __asm__ __volatile__ ( 388 __asm__ __volatile__ (
389 "movi a14," __stringify (PS_EXCM_BIT) " | 1\n\t" 389 "movi a14, " __stringify(PS_EXCM_BIT | 1) "\n\t"
390 "mov a12, a0\n\t" 390 "mov a12, a0\n\t"
391 "rsr a13," __stringify(SAR) "\n\t" 391 "rsr a13, sar\n\t"
392 "xsr a14," __stringify(PS) "\n\t" 392 "xsr a14, ps\n\t"
393 "movi a0, _spill_registers\n\t" 393 "movi a0, _spill_registers\n\t"
394 "rsync\n\t" 394 "rsync\n\t"
395 "callx0 a0\n\t" 395 "callx0 a0\n\t"
396 "mov a0, a12\n\t" 396 "mov a0, a12\n\t"
397 "wsr a13," __stringify(SAR) "\n\t" 397 "wsr a13, sar\n\t"
398 "wsr a14," __stringify(PS) "\n\t" 398 "wsr a14, ps\n\t"
399 :: "a" (&a0), "a" (&ps) 399 :: "a" (&a0), "a" (&ps)
400 : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory"); 400 : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
401} 401}
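
The do_interrupt() change swaps the old INTREAD name for the architectural SR name "interrupt" (pending sources), keeping "intenable" for the enabled mask. A self-contained sketch of the read side (helper name hypothetical):

	static inline unsigned long pending_enabled_irqs(void)
	{
		unsigned long pending, enabled;

		__asm__ __volatile__ ("rsr %0, interrupt" : "=a" (pending));
		__asm__ __volatile__ ("rsr %0, intenable" : "=a" (enabled));
		return pending & enabled;	/* the bits do_interrupt() walks */
	}
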
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 70066e3582d0..4462c1e595c2 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -69,11 +69,11 @@
69 69
70ENTRY(_UserExceptionVector) 70ENTRY(_UserExceptionVector)
71 71
72 xsr a3, EXCSAVE_1 # save a3 and get dispatch table 72 xsr a3, excsave1 # save a3 and get dispatch table
73 wsr a2, DEPC # save a2 73 wsr a2, depc # save a2
74 l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2 74 l32i a2, a3, EXC_TABLE_KSTK # load kernel stack to a2
75 s32i a0, a2, PT_AREG0 # save a0 to ESF 75 s32i a0, a2, PT_AREG0 # save a0 to ESF
76 rsr a0, EXCCAUSE # retrieve exception cause 76 rsr a0, exccause # retrieve exception cause
77 s32i a0, a2, PT_DEPC # mark it as a regular exception 77 s32i a0, a2, PT_DEPC # mark it as a regular exception
78 addx4 a0, a0, a3 # find entry in table 78 addx4 a0, a0, a3 # find entry in table
79 l32i a0, a0, EXC_TABLE_FAST_USER # load handler 79 l32i a0, a0, EXC_TABLE_FAST_USER # load handler
@@ -93,11 +93,11 @@ ENTRY(_UserExceptionVector)
93 93
94ENTRY(_KernelExceptionVector) 94ENTRY(_KernelExceptionVector)
95 95
96 xsr a3, EXCSAVE_1 # save a3, and get dispatch table 96 xsr a3, excsave1 # save a3, and get dispatch table
97 wsr a2, DEPC # save a2 97 wsr a2, depc # save a2
98 addi a2, a1, -16-PT_SIZE # adjust stack pointer 98 addi a2, a1, -16-PT_SIZE # adjust stack pointer
99 s32i a0, a2, PT_AREG0 # save a0 to ESF 99 s32i a0, a2, PT_AREG0 # save a0 to ESF
100 rsr a0, EXCCAUSE # retrieve exception cause 100 rsr a0, exccause # retrieve exception cause
101 s32i a0, a2, PT_DEPC # mark it as a regular exception 101 s32i a0, a2, PT_DEPC # mark it as a regular exception
102 addx4 a0, a0, a3 # find entry in table 102 addx4 a0, a0, a3 # find entry in table
103 l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address 103 l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address
@@ -205,17 +205,17 @@ ENTRY(_DoubleExceptionVector)
205 205
206 /* Deliberately destroy excsave (don't assume its value was valid). */ 206 /* Deliberately destroy excsave (don't assume its value was valid). */
207 207
208 wsr a3, EXCSAVE_1 # save a3 208 wsr a3, excsave1 # save a3
209 209
210 /* Check for kernel double exception (usually fatal). */ 210 /* Check for kernel double exception (usually fatal). */
211 211
212 rsr a3, PS 212 rsr a3, ps
213 _bbci.l a3, PS_UM_BIT, .Lksp 213 _bbci.l a3, PS_UM_BIT, .Lksp
214 214
215 /* Check if we are currently handling a window exception. */ 215 /* Check if we are currently handling a window exception. */
216 /* Note: We don't need to indicate that we enter a critical section. */ 216 /* Note: We don't need to indicate that we enter a critical section. */
217 217
218 xsr a0, DEPC # get DEPC, save a0 218 xsr a0, depc # get DEPC, save a0
219 219
220 movi a3, XCHAL_WINDOW_VECTORS_VADDR 220 movi a3, XCHAL_WINDOW_VECTORS_VADDR
221 _bltu a0, a3, .Lfixup 221 _bltu a0, a3, .Lfixup
@@ -243,21 +243,21 @@ ENTRY(_DoubleExceptionVector)
243 * Note: We can trash the current window frame (a0...a3) and depc! 243 * Note: We can trash the current window frame (a0...a3) and depc!
244 */ 244 */
245 245
246 wsr a2, DEPC # save stack pointer temporarily 246 wsr a2, depc # save stack pointer temporarily
247 rsr a0, PS 247 rsr a0, ps
248 extui a0, a0, PS_OWB_SHIFT, 4 248 extui a0, a0, PS_OWB_SHIFT, 4
249 wsr a0, WINDOWBASE 249 wsr a0, windowbase
250 rsync 250 rsync
251 251
252 /* We are now in the previous window frame. Save registers again. */ 252 /* We are now in the previous window frame. Save registers again. */
253 253
254 xsr a2, DEPC # save a2 and get stack pointer 254 xsr a2, depc # save a2 and get stack pointer
255 s32i a0, a2, PT_AREG0 255 s32i a0, a2, PT_AREG0
256 256
257 wsr a3, EXCSAVE_1 # save a3 257 wsr a3, excsave1 # save a3
258 movi a3, exc_table 258 movi a3, exc_table
259 259
260 rsr a0, EXCCAUSE 260 rsr a0, exccause
261 s32i a0, a2, PT_DEPC # mark it as a regular exception 261 s32i a0, a2, PT_DEPC # mark it as a regular exception
262 addx4 a0, a0, a3 262 addx4 a0, a0, a3
263 l32i a0, a0, EXC_TABLE_FAST_USER 263 l32i a0, a0, EXC_TABLE_FAST_USER
@@ -290,14 +290,14 @@ ENTRY(_DoubleExceptionVector)
290 290
291 /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */ 291 /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */
292 292
293 xsr a3, DEPC 293 xsr a3, depc
294 s32i a0, a2, PT_DEPC 294 s32i a0, a2, PT_DEPC
295 s32i a3, a2, PT_AREG0 295 s32i a3, a2, PT_AREG0
296 296
297 /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */ 297 /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */
298 298
299 movi a3, exc_table 299 movi a3, exc_table
300 rsr a0, EXCCAUSE 300 rsr a0, exccause
301 addx4 a0, a0, a3 301 addx4 a0, a0, a3
302 l32i a0, a0, EXC_TABLE_FAST_USER 302 l32i a0, a0, EXC_TABLE_FAST_USER
303 jx a0 303 jx a0
@@ -312,7 +312,7 @@ ENTRY(_DoubleExceptionVector)
312 312
313.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */ 313.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
314 314
315 rsr a3, EXCCAUSE 315 rsr a3, exccause
316 beqi a3, EXCCAUSE_ITLB_MISS, 1f 316 beqi a3, EXCCAUSE_ITLB_MISS, 1f
317 addi a3, a3, -EXCCAUSE_DTLB_MISS 317 addi a3, a3, -EXCCAUSE_DTLB_MISS
318 bnez a3, .Lunrecoverable 318 bnez a3, .Lunrecoverable
@@ -328,11 +328,11 @@ ENTRY(_DoubleExceptionVector)
328 328
329.Lunrecoverable_fixup: 329.Lunrecoverable_fixup:
330 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE 330 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
331 xsr a0, DEPC 331 xsr a0, depc
332 332
333.Lunrecoverable: 333.Lunrecoverable:
334 rsr a3, EXCSAVE_1 334 rsr a3, excsave1
335 wsr a0, EXCSAVE_1 335 wsr a0, excsave1
336 movi a0, unrecoverable_exception 336 movi a0, unrecoverable_exception
337 callx0 a0 337 callx0 a0
338 338
@@ -349,7 +349,7 @@ ENTRY(_DoubleExceptionVector)
349 .section .DebugInterruptVector.text, "ax" 349 .section .DebugInterruptVector.text, "ax"
350 350
351ENTRY(_DebugInterruptVector) 351ENTRY(_DebugInterruptVector)
352 xsr a0, EXCSAVE + XCHAL_DEBUGLEVEL 352 xsr a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
353 jx a0 353 jx a0
354 354
355 355
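
The vector bodies above all follow one dispatch shape: excsave1 parks a pointer to exc_table, exccause indexes it (addx4 a0, a0, a3 computes a3 + 4*a0), and the vector jumps through the loaded handler. A loose C rendering, assuming a flat array of 4-byte handler pointers (the real table also carries entries at the EXC_TABLE_FAST_USER/EXC_TABLE_FAST_KERNEL offsets used above):

	typedef void (*exc_handler_t)(void);

	extern exc_handler_t exc_table[];	/* pointer normally held in excsave1 */

	static void dispatch_sketch(unsigned long exccause)
	{
		exc_handler_t handler = exc_table[exccause];	/* addx4 + l32i */
		handler();					/* jx a0 */
	}
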
diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c
index 927acf378ea3..e1700102f35e 100644
--- a/arch/xtensa/platforms/iss/setup.c
+++ b/arch/xtensa/platforms/iss/setup.c
@@ -61,13 +61,13 @@ void platform_restart(void)
61 * jump to the reset vector. */ 61 * jump to the reset vector. */
62 62
63 __asm__ __volatile__("movi a2, 15\n\t" 63 __asm__ __volatile__("movi a2, 15\n\t"
64 "wsr a2, " __stringify(ICOUNTLEVEL) "\n\t" 64 "wsr a2, icountlevel\n\t"
65 "movi a2, 0\n\t" 65 "movi a2, 0\n\t"
66 "wsr a2, " __stringify(ICOUNT) "\n\t" 66 "wsr a2, icount\n\t"
67 "wsr a2, " __stringify(IBREAKENABLE) "\n\t" 67 "wsr a2, ibreakenable\n\t"
68 "wsr a2, " __stringify(LCOUNT) "\n\t" 68 "wsr a2, lcount\n\t"
69 "movi a2, 0x1f\n\t" 69 "movi a2, 0x1f\n\t"
70 "wsr a2, " __stringify(PS) "\n\t" 70 "wsr a2, ps\n\t"
71 "isync\n\t" 71 "isync\n\t"
72 "jx %0\n\t" 72 "jx %0\n\t"
73 : 73 :
diff --git a/arch/xtensa/platforms/xt2000/setup.c b/arch/xtensa/platforms/xt2000/setup.c
index 9e83940ac265..c7d90f17886e 100644
--- a/arch/xtensa/platforms/xt2000/setup.c
+++ b/arch/xtensa/platforms/xt2000/setup.c
@@ -66,13 +66,13 @@ void platform_restart(void)
66 * jump to the reset vector. */ 66 * jump to the reset vector. */
67 67
68 __asm__ __volatile__ ("movi a2, 15\n\t" 68 __asm__ __volatile__ ("movi a2, 15\n\t"
69 "wsr a2, " __stringify(ICOUNTLEVEL) "\n\t" 69 "wsr a2, icountlevel\n\t"
70 "movi a2, 0\n\t" 70 "movi a2, 0\n\t"
71 "wsr a2, " __stringify(ICOUNT) "\n\t" 71 "wsr a2, icount\n\t"
72 "wsr a2, " __stringify(IBREAKENABLE) "\n\t" 72 "wsr a2, ibreakenable\n\t"
73 "wsr a2, " __stringify(LCOUNT) "\n\t" 73 "wsr a2, lcount\n\t"
74 "movi a2, 0x1f\n\t" 74 "movi a2, 0x1f\n\t"
75 "wsr a2, " __stringify(PS) "\n\t" 75 "wsr a2, ps\n\t"
76 "isync\n\t" 76 "isync\n\t"
77 "jx %0\n\t" 77 "jx %0\n\t"
78 : 78 :
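
The two platform_restart() hunks program an identical sequence; folded into one hedged C sketch (the function name and reset_vector argument are illustrative, the asm body is taken directly from the hunks above):

	static void restart_sketch(unsigned long reset_vector)
	{
		__asm__ __volatile__ ("movi a2, 15\n\t"
				      "wsr a2, icountlevel\n\t"	/* reset single-step state */
				      "movi a2, 0\n\t"
				      "wsr a2, icount\n\t"
				      "wsr a2, ibreakenable\n\t"	/* drop hw breakpoints */
				      "wsr a2, lcount\n\t"	/* stop any zero-overhead loop */
				      "movi a2, 0x1f\n\t"
				      "wsr a2, ps\n\t"	/* INTLEVEL=15, EXCM=1 */
				      "isync\n\t"
				      "jx %0\n\t"
				      :
				      : "a" (reset_vector)
				      : "a2");
	}
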