author	Chris Metcalf <cmetcalf@tilera.com>	2010-10-14 16:23:03 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2010-10-15 15:38:09 -0400
commit	a78c942df64ef4cf495fd4d8715e48501bd7f8a4 (patch)
tree	fe44212d36e6ca23dbe9f2c633824389216a3d1d /arch/tile/include/asm/irqflags.h
parent	bf65e440e8248f22b2eacf8d47961bb9d52260f7 (diff)
arch/tile: parameterize system PLs to support KVM port
While not a port to KVM (yet), this change modifies the kernel
to be able to build either at PL1 or at PL2 with a suitable
config switch. Pushing up this change avoids handling branch
merge issues going forward with the KVM work.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Diffstat (limited to 'arch/tile/include/asm/irqflags.h')
-rw-r--r--	arch/tile/include/asm/irqflags.h	64
1 file changed, 32 insertions, 32 deletions
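The substance of the change visible in this file is a mechanical rename of the hard-coded PL1 SPR names (SPR_INTERRUPT_MASK_*_1*) to generic "_K" (kernel) names, so that the protection level the kernel runs at becomes a build-time parameter. A minimal sketch of that idea is shown below; CONFIG_KERNEL_PL and the paste helpers here are illustrative assumptions, not the literal definitions from the tile arch headers touched elsewhere in this commit:

/*
 * Illustrative sketch only: derive the generic "_K" SPR aliases from a
 * build-time kernel protection level.  Names below are assumptions.
 */
#ifndef CONFIG_KERNEL_PL
#define CONFIG_KERNEL_PL 1                  /* default: kernel runs at PL1 */
#endif

#define __SPR_AT_PL(base, pl)  base##pl     /* paste the PL onto the base name */
#define _SPR_AT_PL(base, pl)   __SPR_AT_PL(base, pl)

/* The generic names used throughout irqflags.h resolve to the PL-specific SPRs. */
#define SPR_INTERRUPT_MASK_K        _SPR_AT_PL(SPR_INTERRUPT_MASK_, CONFIG_KERNEL_PL)
#define SPR_INTERRUPT_MASK_SET_K    _SPR_AT_PL(SPR_INTERRUPT_MASK_SET_, CONFIG_KERNEL_PL)
#define SPR_INTERRUPT_MASK_RESET_K  _SPR_AT_PL(SPR_INTERRUPT_MASK_RESET_, CONFIG_KERNEL_PL)

With CONFIG_KERNEL_PL=1 these expand to the existing _1 registers, so the generated code is unchanged; building with PL2 only requires flipping the config switch, which is what the eventual KVM port needs.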
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 45cf67c2f286..6ebdd7d1e67a 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -47,53 +47,53 @@
 	int __n = (n); \
 	int __mask = 1 << (__n & 0x1f); \
 	if (__n < 32) \
-		__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
 	else \
-		__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
 } while (0)
 #define interrupt_mask_reset(n) do { \
 	int __n = (n); \
 	int __mask = 1 << (__n & 0x1f); \
 	if (__n < 32) \
-		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
 	else \
-		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \
+		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
 } while (0)
 #define interrupt_mask_check(n) ({ \
 	int __n = (n); \
 	(((__n < 32) ? \
-	  __insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \
-	  __insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \
+	  __insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
+	  __insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
 	  >> (__n & 0x1f)) & 1; \
 })
 #define interrupt_mask_set_mask(mask) do { \
 	unsigned long long __m = (mask); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
 #define interrupt_mask_reset_mask(mask) do { \
 	unsigned long long __m = (mask); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
 #else
 #define interrupt_mask_set(n) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n)))
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
 #define interrupt_mask_reset(n) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n)))
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
 #define interrupt_mask_check(n) \
-	((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1)
+	((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
 #define interrupt_mask_set_mask(mask) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask))
+	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
 #define interrupt_mask_reset_mask(mask) \
-	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask))
+	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
 #endif
 
 /*
  * The set of interrupts we want active if irqs are enabled.
  * Note that in particular, the tile timer interrupt comes and goes
  * from this set, since we have no other way to turn off the timer.
- * Likewise, INTCTRL_1 is removed and re-added during device
+ * Likewise, INTCTRL_K is removed and re-added during device
  * interrupts, as is the the hardwall UDN_FIREWALL interrupt.
  * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
  * is always claimed as an "active interrupt" so we can query that bit
@@ -168,14 +168,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Return 0 or 1 to indicate whether interrupts are currently disabled. */
 #define IRQS_DISABLED(tmp) \
-	mfspr tmp, INTERRUPT_MASK_1; \
+	mfspr tmp, SPR_INTERRUPT_MASK_K; \
 	andi tmp, tmp, 1
 
 /* Load up a pointer to &interrupts_enabled_mask. */
 #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
 	moveli reg, hw2_last(interrupts_enabled_mask); \
 	shl16insli reg, reg, hw1(interrupts_enabled_mask); \
 	shl16insli reg, reg, hw0(interrupts_enabled_mask); \
 	add reg, reg, tp
 
 /* Disable interrupts. */
@@ -183,18 +183,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	moveli tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \
 	shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \
 	shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \
-	mtspr INTERRUPT_MASK_SET_1, tmp0
+	mtspr SPR_INTERRUPT_MASK_SET_K, tmp0
 
 /* Disable ALL synchronous interrupts (used by NMI entry). */
 #define IRQ_DISABLE_ALL(tmp) \
 	movei tmp, -1; \
-	mtspr INTERRUPT_MASK_SET_1, tmp
+	mtspr SPR_INTERRUPT_MASK_SET_K, tmp
 
 /* Enable interrupts. */
 #define IRQ_ENABLE(tmp0, tmp1) \
 	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
 	ld tmp0, tmp0; \
-	mtspr INTERRUPT_MASK_RESET_1, tmp0
+	mtspr SPR_INTERRUPT_MASK_RESET_K, tmp0
 
 #else /* !__tilegx__ */
 
@@ -208,14 +208,14 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
  * (making the original code's write of the "high" mask word idempotent).
  */
 #define IRQS_DISABLED(tmp) \
-	mfspr tmp, INTERRUPT_MASK_1_0; \
+	mfspr tmp, SPR_INTERRUPT_MASK_K_0; \
 	shri tmp, tmp, INT_MEM_ERROR; \
 	andi tmp, tmp, 1
 
 /* Load up a pointer to &interrupts_enabled_mask. */
 #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
 	moveli reg, lo16(interrupts_enabled_mask); \
-	auli reg, reg, ha16(interrupts_enabled_mask);\
+	auli reg, reg, ha16(interrupts_enabled_mask); \
 	add reg, reg, tp
 
 /* Disable interrupts. */
@@ -225,16 +225,16 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \
 	}; \
 	{ \
-	 mtspr INTERRUPT_MASK_SET_1_0, tmp0; \
+	 mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp0; \
 	 auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \
 	}; \
-	mtspr INTERRUPT_MASK_SET_1_1, tmp1
+	mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp1
 
 /* Disable ALL synchronous interrupts (used by NMI entry). */
 #define IRQ_DISABLE_ALL(tmp) \
 	movei tmp, -1; \
-	mtspr INTERRUPT_MASK_SET_1_0, tmp; \
-	mtspr INTERRUPT_MASK_SET_1_1, tmp
+	mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp; \
+	mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp
 
 /* Enable interrupts. */
 #define IRQ_ENABLE(tmp0, tmp1) \
@@ -244,8 +244,8 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	 addi tmp1, tmp0, 4 \
 	}; \
 	lw tmp1, tmp1; \
-	mtspr INTERRUPT_MASK_RESET_1_0, tmp0; \
-	mtspr INTERRUPT_MASK_RESET_1_1, tmp1
+	mtspr SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
+	mtspr SPR_INTERRUPT_MASK_RESET_K_1, tmp1
 #endif
 
 /*
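For context beyond the excerpt: the interrupt_mask_* macros and the per-cpu interrupts_enabled_mask referenced above are the building blocks for the usual irq-flags entry points, which is why the sentinel-bit comment matters. A hedged usage sketch follows; these are not the file's verbatim definitions, and the helper names are assumptions based on the standard arch_local_irq_* convention:

/*
 * Hedged sketch of how the generic "_K" mask macros compose into the
 * irq-flags helpers; treat the exact names and details as assumptions.
 */

/* Disable all maskable interrupts for this cpu. */
#define arch_local_irq_disable() \
	interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)

/* Re-enable exactly the set this cpu wants active (timer, INTCTRL_K, etc.). */
#define arch_local_irq_enable() \
	interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask))

/* Query: the low MEM_ERROR sentinel bit tracks whether irqs are disabled. */
#define arch_irqs_disabled() \
	interrupt_mask_check(INT_MEM_ERROR)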