author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/tile/include/asm/irqflags.h
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'arch/tile/include/asm/irqflags.h')
-rw-r--r--	arch/tile/include/asm/irqflags.h	118
1 file changed, 67 insertions(+), 51 deletions(-)
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 45cf67c2f286..5db0ce54284d 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -18,6 +18,8 @@
 #include <arch/interrupts.h>
 #include <arch/chip.h>
 
+#if !defined(__tilegx__) && defined(__ASSEMBLY__)
+
 /*
  * The set of interrupts we want to allow when interrupts are nominally
  * disabled. The remainder are effectively "NMI" interrupts from
@@ -25,6 +27,16 @@
  * interrupts (aka "non-queued") are not blocked by the mask in any case.
  */
 #if CHIP_HAS_AUX_PERF_COUNTERS()
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+	(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
+#else
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+	(~(INT_MASK_HI(INT_PERF_COUNT)))
+#endif
+
+#else
+
+#if CHIP_HAS_AUX_PERF_COUNTERS()
 #define LINUX_MASKABLE_INTERRUPTS \
 	(~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
 #else
@@ -32,6 +44,8 @@
 	(~(INT_MASK(INT_PERF_COUNT)))
 #endif
 
+#endif
+
 #ifndef __ASSEMBLY__
 
 /* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
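Note on the hunks above: the new LINUX_MASKABLE_INTERRUPTS_HI constant exists because the 32-bit (tilepro) assembly path writes the low and high halves of the 64-bit interrupt mask through separate SPRs, so it needs a 32-bit constant covering just the high word. A minimal sketch of the relationship, assuming INT_MASK()/INT_MASK_HI() are defined along these lines in <arch/interrupts.h> (those definitions are not part of this patch):

	/* Hypothetical sketch of the mask helpers this patch assumes. */
	#define INT_MASK(intno)		(1ULL << (intno))		/* bit in the full 64-bit mask */
	#define INT_MASK_HI(intno)	(1ULL << ((intno) - 32))	/* same bit, within the high word */

So for an interrupt number of 32 or above (the perf-count interrupts here), ~INT_MASK_HI(n) is the 32-bit value the assembly stores to the high-word mask SPR, while C code keeps using the single 64-bit constant.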
@@ -47,53 +61,53 @@
 	int __n = (n); \
 	int __mask = 1 << (__n & 0x1f); \
 	if (__n < 32) \
-		__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
 	else \
-		__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
 } while (0)
 #define interrupt_mask_reset(n) do { \
 	int __n = (n); \
 	int __mask = 1 << (__n & 0x1f); \
 	if (__n < 32) \
-		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
 	else \
-		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
 } while (0)
 #define interrupt_mask_check(n) ({ \
 	int __n = (n); \
 	(((__n < 32) ? \
-	   __insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \
-	   __insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \
+	   __insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
+	   __insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
 	  >> (__n & 0x1f)) & 1; \
 })
 #define interrupt_mask_set_mask(mask) do { \
 	unsigned long long __m = (mask); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
 #define interrupt_mask_reset_mask(mask) do { \
 	unsigned long long __m = (mask); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
 #else
 #define interrupt_mask_set(n) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n)))
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
 #define interrupt_mask_reset(n) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n)))
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
 #define interrupt_mask_check(n) \
-	((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1)
+	((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
 #define interrupt_mask_set_mask(mask) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask))
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
 #define interrupt_mask_reset_mask(mask) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask))
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
 #endif
 
 /*
  * The set of interrupts we want active if irqs are enabled.
  * Note that in particular, the tile timer interrupt comes and goes
  * from this set, since we have no other way to turn off the timer.
- * Likewise, INTCTRL_1 is removed and re-added during device
+ * Likewise, INTCTRL_K is removed and re-added during device
  * interrupts, as is the the hardwall UDN_FIREWALL interrupt.
  * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
  * is always claimed as an "active interrupt" so we can query that bit
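The "sentinel" comment at the end of the hunk above is what makes the disabled-check cheap: INT_MEM_ERROR is always kept in interrupts_enabled_mask, so its bit reads back as 1 from the mask SPR only after a blanket mask-set, i.e. a local_irq_disable(). An illustration of the idea (a hypothetical helper, not code from the patch):

	/*
	 * One always-enabled bit is enough to answer "are irqs disabled?"
	 * without comparing the whole 64-bit mask.
	 */
	static inline int irqs_look_disabled(void)
	{
		return interrupt_mask_check(INT_MEM_ERROR);
	}

This is exactly the bit that arch_irqs_disabled() queries in the next hunk.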
@@ -103,55 +117,57 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 #define INITIAL_INTERRUPTS_ENABLED	INT_MASK(INT_MEM_ERROR)
 
 /* Disable interrupts. */
-#define raw_local_irq_disable() \
+#define arch_local_irq_disable() \
 	interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)
 
 /* Disable all interrupts, including NMIs. */
-#define raw_local_irq_disable_all() \
+#define arch_local_irq_disable_all() \
 	interrupt_mask_set_mask(-1UL)
 
 /* Re-enable all maskable interrupts. */
-#define raw_local_irq_enable() \
+#define arch_local_irq_enable() \
 	interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask))
 
 /* Disable or enable interrupts based on flag argument. */
-#define raw_local_irq_restore(disabled) do { \
+#define arch_local_irq_restore(disabled) do { \
 	if (disabled) \
-		raw_local_irq_disable(); \
+		arch_local_irq_disable(); \
 	else \
-		raw_local_irq_enable(); \
+		arch_local_irq_enable(); \
 } while (0)
 
 /* Return true if "flags" argument means interrupts are disabled. */
-#define raw_irqs_disabled_flags(flags) ((flags) != 0)
+#define arch_irqs_disabled_flags(flags) ((flags) != 0)
 
 /* Return true if interrupts are currently disabled. */
-#define raw_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)
+#define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)
 
 /* Save whether interrupts are currently disabled. */
-#define raw_local_save_flags(flags) ((flags) = raw_irqs_disabled())
+#define arch_local_save_flags() arch_irqs_disabled()
 
 /* Save whether interrupts are currently disabled, then disable them. */
-#define raw_local_irq_save(flags) \
-	do { raw_local_save_flags(flags); raw_local_irq_disable(); } while (0)
+#define arch_local_irq_save() ({ \
+	unsigned long __flags = arch_local_save_flags(); \
+	arch_local_irq_disable(); \
+	__flags; })
 
 /* Prevent the given interrupt from being enabled next time we enable irqs. */
-#define raw_local_irq_mask(interrupt) \
+#define arch_local_irq_mask(interrupt) \
 	(__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))
 
 /* Prevent the given interrupt from being enabled immediately. */
-#define raw_local_irq_mask_now(interrupt) do { \
-	raw_local_irq_mask(interrupt); \
+#define arch_local_irq_mask_now(interrupt) do { \
+	arch_local_irq_mask(interrupt); \
 	interrupt_mask_set(interrupt); \
 } while (0)
 
 /* Allow the given interrupt to be enabled next time we enable irqs. */
-#define raw_local_irq_unmask(interrupt) \
+#define arch_local_irq_unmask(interrupt) \
 	(__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))
 
 /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
-#define raw_local_irq_unmask_now(interrupt) do { \
-	raw_local_irq_unmask(interrupt); \
+#define arch_local_irq_unmask_now(interrupt) do { \
+	arch_local_irq_unmask(interrupt); \
 	if (!irqs_disabled()) \
 		interrupt_mask_reset(interrupt); \
 } while (0)
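The arch_local_irq_save() change in the hunk above is more than a rename: the old macro assigned into a named argument, while the new one is a GCC statement expression that returns the flags value, matching the generic <linux/irqflags.h> interface. A caller-side sketch of the difference (illustrative only, not from the patch):

	unsigned long flags;

	flags = arch_local_irq_save();	/* old form: raw_local_irq_save(flags); */
	/* ... critical section with interrupts masked ... */
	arch_local_irq_restore(flags);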
@@ -168,14 +184,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Return 0 or 1 to indicate whether interrupts are currently disabled. */
 #define IRQS_DISABLED(tmp)					\
-	mfspr   tmp, INTERRUPT_MASK_1;				\
+	mfspr   tmp, SPR_INTERRUPT_MASK_K;			\
 	andi    tmp, tmp, 1
 
 /* Load up a pointer to &interrupts_enabled_mask. */
 #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)			\
 	moveli reg, hw2_last(interrupts_enabled_mask);		\
 	shl16insli reg, reg, hw1(interrupts_enabled_mask);	\
 	shl16insli reg, reg, hw0(interrupts_enabled_mask);	\
 	add     reg, reg, tp
 
 /* Disable interrupts. */
@@ -183,18 +199,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	moveli  tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS);	\
 	shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS);	\
 	shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS);	\
-	mtspr   INTERRUPT_MASK_SET_1, tmp0
+	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp0
 
 /* Disable ALL synchronous interrupts (used by NMI entry). */
 #define IRQ_DISABLE_ALL(tmp)					\
 	movei   tmp, -1;					\
-	mtspr   INTERRUPT_MASK_SET_1, tmp
+	mtspr   SPR_INTERRUPT_MASK_SET_K, tmp
 
 /* Enable interrupts. */
 #define IRQ_ENABLE(tmp0, tmp1)					\
 	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);			\
 	ld      tmp0, tmp0;					\
-	mtspr   INTERRUPT_MASK_RESET_1, tmp0
+	mtspr   SPR_INTERRUPT_MASK_RESET_K, tmp0
 
 #else /* !__tilegx__ */
 
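In IRQ_DISABLE above, the tilegx code materializes the 64-bit LINUX_MASKABLE_INTERRUPTS constant 16 bits at a time: moveli loads a sign-extended 16-bit immediate, and each shl16insli shifts left by 16 and inserts the next halfword. A C model of the equivalent arithmetic, assuming hw0/hw1/hw2_last select bits 15:0, 31:16, and a sign-extending 47:32 respectively (a hypothetical rendering, not kernel code):

	/* Hypothetical model of the moveli + 2x shl16insli sequence. */
	static unsigned long build_imm64(unsigned long v)
	{
		unsigned long r = (unsigned long)(long)(short)(v >> 32);	/* moveli: hw2_last, sign-extends bit 47 */
		r = (r << 16) | ((v >> 16) & 0xffff);				/* shl16insli: hw1 */
		r = (r << 16) | (v & 0xffff);					/* shl16insli: hw0 */
		return r;
	}

The sign extension is why three instructions suffice for constants like these negated masks, whose top 16 bits replicate bit 47.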
@@ -208,33 +224,33 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
  * (making the original code's write of the "high" mask word idempotent).
  */
 #define IRQS_DISABLED(tmp)					\
-	mfspr   tmp, INTERRUPT_MASK_1_0;			\
+	mfspr   tmp, SPR_INTERRUPT_MASK_K_0;			\
 	shri    tmp, tmp, INT_MEM_ERROR;			\
 	andi    tmp, tmp, 1
 
 /* Load up a pointer to &interrupts_enabled_mask. */
 #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)			\
 	moveli  reg, lo16(interrupts_enabled_mask);		\
-	auli    reg, reg, ha16(interrupts_enabled_mask);\
+	auli    reg, reg, ha16(interrupts_enabled_mask);	\
 	add     reg, reg, tp
 
 /* Disable interrupts. */
 #define IRQ_DISABLE(tmp0, tmp1)					\
 	{							\
 	 movei  tmp0, -1;					\
-	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS)		\
+	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI)	\
 	};							\
 	{							\
-	 mtspr  INTERRUPT_MASK_SET_1_0, tmp0;			\
-	 auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS)	\
+	 mtspr  SPR_INTERRUPT_MASK_SET_K_0, tmp0;		\
+	 auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI)	\
 	};							\
-	mtspr   INTERRUPT_MASK_SET_1_1, tmp1
+	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp1
 
 /* Disable ALL synchronous interrupts (used by NMI entry). */
 #define IRQ_DISABLE_ALL(tmp)					\
 	movei   tmp, -1;					\
-	mtspr   INTERRUPT_MASK_SET_1_0, tmp;			\
-	mtspr   INTERRUPT_MASK_SET_1_1, tmp
+	mtspr   SPR_INTERRUPT_MASK_SET_K_0, tmp;		\
+	mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp
 
 /* Enable interrupts. */
 #define IRQ_ENABLE(tmp0, tmp1)					\
@@ -244,8 +260,8 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	 addi   tmp1, tmp0, 4					\
 	};							\
 	lw      tmp1, tmp1;					\
-	mtspr   INTERRUPT_MASK_RESET_1_0, tmp0;			\
-	mtspr   INTERRUPT_MASK_RESET_1_1, tmp1
+	mtspr   SPR_INTERRUPT_MASK_RESET_K_0, tmp0;		\
+	mtspr   SPR_INTERRUPT_MASK_RESET_K_1, tmp1
 #endif
 
 /*