diff options
author | Arnd Bergmann <arnd@arndb.de> | 2006-01-04 14:31:30 -0500 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2006-01-08 23:44:49 -0500 |
commit | f0831acc4b78e2d9737e8ed91b8b7505b21ddb83 (patch) | |
tree | 0c901e45cdc932776d3953cfcdf66015d6853bec /arch/powerpc/platforms/cell/spu_base.c | |
parent | ce8ab8541203f6c7be5b2eeaa97f14f1d8d44e4f (diff) |
[PATCH] spufs: abstract priv1 register access.
In a hypervisor based setup, direct access to the first
privileged register space can typically not be allowed
to the kernel and has to be implemented through hypervisor
calls.
As suggested by Masato Noguchi, let's abstract the register
access through a number of function calls. Since there is
currently no public specification of actual hypervisor
calls to implement this, I only provide a place that
makes it easier to hook into.
Cc: Masato Noguchi <Masato.Noguchi@jp.sony.com>
Cc: Geoff Levand <geoff.levand@am.sony.com>
Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms/cell/spu_base.c')
-rw-r--r-- | arch/powerpc/platforms/cell/spu_base.c | 51 |
1 files changed, 18 insertions, 33 deletions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index ae835474072..081b3dcbaf1 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c | |||
@@ -142,8 +142,7 @@ static int __spu_trap_mailbox(struct spu *spu) | |||
142 | 142 | ||
143 | /* atomically disable SPU mailbox interrupts */ | 143 | /* atomically disable SPU mailbox interrupts */ |
144 | spin_lock(&spu->register_lock); | 144 | spin_lock(&spu->register_lock); |
145 | out_be64(&spu->priv1->int_mask_class2_RW, | 145 | spu_int_mask_and(spu, 2, ~0x1); |
146 | in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1); | ||
147 | spin_unlock(&spu->register_lock); | 146 | spin_unlock(&spu->register_lock); |
148 | return 0; | 147 | return 0; |
149 | } | 148 | } |
@@ -180,8 +179,7 @@ static int __spu_trap_spubox(struct spu *spu) | |||
180 | 179 | ||
181 | /* atomically disable SPU mailbox interrupts */ | 180 | /* atomically disable SPU mailbox interrupts */ |
182 | spin_lock(&spu->register_lock); | 181 | spin_lock(&spu->register_lock); |
183 | out_be64(&spu->priv1->int_mask_class2_RW, | 182 | spu_int_mask_and(spu, 2, ~0x10); |
184 | in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10); | ||
185 | spin_unlock(&spu->register_lock); | 183 | spin_unlock(&spu->register_lock); |
186 | return 0; | 184 | return 0; |
187 | } | 185 | } |
@@ -206,8 +204,8 @@ spu_irq_class_0_bottom(struct spu *spu) | |||
206 | 204 | ||
207 | spu->class_0_pending = 0; | 205 | spu->class_0_pending = 0; |
208 | 206 | ||
209 | mask = in_be64(&spu->priv1->int_mask_class0_RW); | 207 | mask = spu_int_mask_get(spu, 0); |
210 | stat = in_be64(&spu->priv1->int_stat_class0_RW); | 208 | stat = spu_int_stat_get(spu, 0); |
211 | 209 | ||
212 | stat &= mask; | 210 | stat &= mask; |
213 | 211 | ||
@@ -220,7 +218,7 @@ spu_irq_class_0_bottom(struct spu *spu) | |||
220 | if (stat & 4) /* error on SPU */ | 218 | if (stat & 4) /* error on SPU */ |
221 | __spu_trap_error(spu); | 219 | __spu_trap_error(spu); |
222 | 220 | ||
223 | out_be64(&spu->priv1->int_stat_class0_RW, stat); | 221 | spu_int_stat_clear(spu, 0, stat); |
224 | 222 | ||
225 | return (stat & 0x7) ? -EIO : 0; | 223 | return (stat & 0x7) ? -EIO : 0; |
226 | } | 224 | } |
@@ -236,13 +234,13 @@ spu_irq_class_1(int irq, void *data, struct pt_regs *regs) | |||
236 | 234 | ||
237 | /* atomically read & clear class1 status. */ | 235 | /* atomically read & clear class1 status. */ |
238 | spin_lock(&spu->register_lock); | 236 | spin_lock(&spu->register_lock); |
239 | mask = in_be64(&spu->priv1->int_mask_class1_RW); | 237 | mask = spu_int_mask_get(spu, 1); |
240 | stat = in_be64(&spu->priv1->int_stat_class1_RW) & mask; | 238 | stat = spu_int_stat_get(spu, 1) & mask; |
241 | dar = in_be64(&spu->priv1->mfc_dar_RW); | 239 | dar = spu_mfc_dar_get(spu); |
242 | dsisr = in_be64(&spu->priv1->mfc_dsisr_RW); | 240 | dsisr = spu_mfc_dsisr_get(spu); |
243 | if (stat & 2) /* mapping fault */ | 241 | if (stat & 2) /* mapping fault */ |
244 | out_be64(&spu->priv1->mfc_dsisr_RW, 0UL); | 242 | spu_mfc_dsisr_set(spu, 0ul); |
245 | out_be64(&spu->priv1->int_stat_class1_RW, stat); | 243 | spu_int_stat_clear(spu, 1, stat); |
246 | spin_unlock(&spu->register_lock); | 244 | spin_unlock(&spu->register_lock); |
247 | 245 | ||
248 | if (stat & 1) /* segment fault */ | 246 | if (stat & 1) /* segment fault */ |
@@ -270,8 +268,8 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs) | |||
270 | unsigned long mask; | 268 | unsigned long mask; |
271 | 269 | ||
272 | spu = data; | 270 | spu = data; |
273 | stat = in_be64(&spu->priv1->int_stat_class2_RW); | 271 | stat = spu_int_stat_get(spu, 2); |
274 | mask = in_be64(&spu->priv1->int_mask_class2_RW); | 272 | mask = spu_int_mask_get(spu, 2); |
275 | 273 | ||
276 | pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask); | 274 | pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask); |
277 | 275 | ||
@@ -292,7 +290,7 @@ spu_irq_class_2(int irq, void *data, struct pt_regs *regs) | |||
292 | if (stat & 0x10) /* SPU mailbox threshold */ | 290 | if (stat & 0x10) /* SPU mailbox threshold */ |
293 | __spu_trap_spubox(spu); | 291 | __spu_trap_spubox(spu); |
294 | 292 | ||
295 | out_be64(&spu->priv1->int_stat_class2_RW, stat); | 293 | spu_int_stat_clear(spu, 2, stat); |
296 | return stat ? IRQ_HANDLED : IRQ_NONE; | 294 | return stat ? IRQ_HANDLED : IRQ_NONE; |
297 | } | 295 | } |
298 | 296 | ||
@@ -309,21 +307,18 @@ spu_request_irqs(struct spu *spu) | |||
309 | spu_irq_class_0, 0, spu->irq_c0, spu); | 307 | spu_irq_class_0, 0, spu->irq_c0, spu); |
310 | if (ret) | 308 | if (ret) |
311 | goto out; | 309 | goto out; |
312 | out_be64(&spu->priv1->int_mask_class0_RW, 0x7); | ||
313 | 310 | ||
314 | snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number); | 311 | snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number); |
315 | ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, | 312 | ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, |
316 | spu_irq_class_1, 0, spu->irq_c1, spu); | 313 | spu_irq_class_1, 0, spu->irq_c1, spu); |
317 | if (ret) | 314 | if (ret) |
318 | goto out1; | 315 | goto out1; |
319 | out_be64(&spu->priv1->int_mask_class1_RW, 0x3); | ||
320 | 316 | ||
321 | snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number); | 317 | snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number); |
322 | ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, | 318 | ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, |
323 | spu_irq_class_2, 0, spu->irq_c2, spu); | 319 | spu_irq_class_2, 0, spu->irq_c2, spu); |
324 | if (ret) | 320 | if (ret) |
325 | goto out2; | 321 | goto out2; |
326 | out_be64(&spu->priv1->int_mask_class2_RW, 0xe); | ||
327 | goto out; | 322 | goto out; |
328 | 323 | ||
329 | out2: | 324 | out2: |
@@ -383,13 +378,6 @@ static void spu_init_channels(struct spu *spu) | |||
383 | } | 378 | } |
384 | } | 379 | } |
385 | 380 | ||
386 | static void spu_init_regs(struct spu *spu) | ||
387 | { | ||
388 | out_be64(&spu->priv1->int_mask_class0_RW, 0x7); | ||
389 | out_be64(&spu->priv1->int_mask_class1_RW, 0x3); | ||
390 | out_be64(&spu->priv1->int_mask_class2_RW, 0xe); | ||
391 | } | ||
392 | |||
393 | struct spu *spu_alloc(void) | 381 | struct spu *spu_alloc(void) |
394 | { | 382 | { |
395 | struct spu *spu; | 383 | struct spu *spu; |
@@ -405,10 +393,8 @@ struct spu *spu_alloc(void) | |||
405 | } | 393 | } |
406 | up(&spu_mutex); | 394 | up(&spu_mutex); |
407 | 395 | ||
408 | if (spu) { | 396 | if (spu) |
409 | spu_init_channels(spu); | 397 | spu_init_channels(spu); |
410 | spu_init_regs(spu); | ||
411 | } | ||
412 | 398 | ||
413 | return spu; | 399 | return spu; |
414 | } | 400 | } |
@@ -579,8 +565,7 @@ static int __init spu_map_device(struct spu *spu, struct device_node *spe) | |||
579 | goto out_unmap; | 565 | goto out_unmap; |
580 | 566 | ||
581 | spu->priv1= map_spe_prop(spe, "priv1"); | 567 | spu->priv1= map_spe_prop(spe, "priv1"); |
582 | if (!spu->priv1) | 568 | /* priv1 is not available on a hypervisor */ |
583 | goto out_unmap; | ||
584 | 569 | ||
585 | spu->priv2= map_spe_prop(spe, "priv2"); | 570 | spu->priv2= map_spe_prop(spe, "priv2"); |
586 | if (!spu->priv2) | 571 | if (!spu->priv2) |
@@ -633,8 +618,8 @@ static int __init create_spu(struct device_node *spe) | |||
633 | spu->dsisr = 0UL; | 618 | spu->dsisr = 0UL; |
634 | spin_lock_init(&spu->register_lock); | 619 | spin_lock_init(&spu->register_lock); |
635 | 620 | ||
636 | out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1)); | 621 | spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1)); |
637 | out_be64(&spu->priv1->mfc_sr1_RW, 0x33); | 622 | spu_mfc_sr1_set(spu, 0x33); |
638 | 623 | ||
639 | spu->ibox_callback = NULL; | 624 | spu->ibox_callback = NULL; |
640 | spu->wbox_callback = NULL; | 625 | spu->wbox_callback = NULL; |