author     Paul Mackerras <paulus@samba.org>   2007-04-23 21:45:03 -0400
committer  Paul Mackerras <paulus@samba.org>   2007-04-23 21:45:03 -0400
commit     13177c8b7eaf7ab238e79533c746153ae116f5f8 (patch)
tree       4a4675749672f201c23d6df82ee2b39eedff76f2 /arch
parent     445c9b5507b9d09a2e9b0b4dbb16517708aa40e6 (diff)
parent     ccf17e9d008dfebbf90dfa4ee1a56e81c784c73e (diff)
Merge branch 'spufs' of master.kernel.org:/pub/scm/linux/kernel/git/arnd/cell-2.6 into for-2.6.22
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c         161
-rw-r--r--  arch/powerpc/platforms/cell/spu_coredump.c      34
-rw-r--r--  arch/powerpc/platforms/cell/spufs/Makefile        2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/backing_ops.c   6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c      45
-rw-r--r--  arch/powerpc/platforms/cell/spufs/coredump.c     19
-rw-r--r--  arch/powerpc/platforms/cell/spufs/fault.c       211
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c        152
-rw-r--r--  arch/powerpc/platforms/cell/spufs/hw_ops.c        9
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c        42
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c         123
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c       109
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h        34
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c        8
14 files changed, 589 insertions, 366 deletions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index eba7a2641dce..8086eb1ed60d 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -36,6 +36,8 @@
 #include <asm/xmon.h>
 
 const struct spu_management_ops *spu_management_ops;
+EXPORT_SYMBOL_GPL(spu_management_ops);
+
 const struct spu_priv1_ops *spu_priv1_ops;
 
 static struct list_head spu_list[MAX_NUMNODES];
@@ -290,7 +292,6 @@ spu_irq_class_1(int irq, void *data)
 
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
-EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
 
 static irqreturn_t
 spu_irq_class_2(int irq, void *data)
@@ -431,10 +432,11 @@ struct spu *spu_alloc_node(int node)
 		spu = list_entry(spu_list[node].next, struct spu, list);
 		list_del_init(&spu->list);
 		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
-		spu_init_channels(spu);
 	}
 	mutex_unlock(&spu_mutex);
 
+	if (spu)
+		spu_init_channels(spu);
 	return spu;
 }
 EXPORT_SYMBOL_GPL(spu_alloc_node);
@@ -461,108 +463,6 @@ void spu_free(struct spu *spu)
 }
 EXPORT_SYMBOL_GPL(spu_free);
 
-static int spu_handle_mm_fault(struct spu *spu)
-{
-	struct mm_struct *mm = spu->mm;
-	struct vm_area_struct *vma;
-	u64 ea, dsisr, is_write;
-	int ret;
-
-	ea = spu->dar;
-	dsisr = spu->dsisr;
-#if 0
-	if (!IS_VALID_EA(ea)) {
-		return -EFAULT;
-	}
-#endif /* XXX */
-	if (mm == NULL) {
-		return -EFAULT;
-	}
-	if (mm->pgd == NULL) {
-		return -EFAULT;
-	}
-
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, ea);
-	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= ea)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-#if 0
-	if (expand_stack(vma, ea))
-		goto bad_area;
-#endif /* XXX */
-good_area:
-	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
-	if (is_write) {
-		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
-	} else {
-		if (dsisr & MFC_DSISR_ACCESS_DENIED)
-			goto bad_area;
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-			goto bad_area;
-	}
-	ret = 0;
-	switch (handle_mm_fault(mm, vma, ea, is_write)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		ret = -EFAULT;
-		goto bad_area;
-	case VM_FAULT_OOM:
-		ret = -ENOMEM;
-		goto bad_area;
-	default:
-		BUG();
-	}
-	up_read(&mm->mmap_sem);
-	return ret;
-
-bad_area:
-	up_read(&mm->mmap_sem);
-	return -EFAULT;
-}
-
-int spu_irq_class_1_bottom(struct spu *spu)
-{
-	u64 ea, dsisr, access, error = 0UL;
-	int ret = 0;
-
-	ea = spu->dar;
-	dsisr = spu->dsisr;
-	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
-		u64 flags;
-
-		access = (_PAGE_PRESENT | _PAGE_USER);
-		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
-		local_irq_save(flags);
-		if (hash_page(ea, access, 0x300) != 0)
-			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
-		local_irq_restore(flags);
-	}
-	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
-		if ((ret = spu_handle_mm_fault(spu)) != 0)
-			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
-		else
-			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
-	}
-	spu->dar = 0UL;
-	spu->dsisr = 0UL;
-	if (!error) {
-		spu_restart_dma(spu);
-	} else {
-		spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);
-	}
-	return ret;
-}
-
 struct sysdev_class spu_sysdev_class = {
 	set_kset_name("spu")
 };
@@ -636,12 +536,6 @@ static int spu_create_sysdev(struct spu *spu)
 	return 0;
 }
 
-static void spu_destroy_sysdev(struct spu *spu)
-{
-	sysfs_remove_device_from_node(&spu->sysdev, spu->node);
-	sysdev_unregister(&spu->sysdev);
-}
-
 static int __init create_spu(void *data)
 {
 	struct spu *spu;
@@ -693,58 +587,37 @@ out:
 	return ret;
 }
 
-static void destroy_spu(struct spu *spu)
-{
-	list_del_init(&spu->list);
-	list_del_init(&spu->full_list);
-
-	spu_destroy_sysdev(spu);
-	spu_free_irqs(spu);
-	spu_destroy_spu(spu);
-	kfree(spu);
-}
-
-static void cleanup_spu_base(void)
-{
-	struct spu *spu, *tmp;
-	int node;
-
-	mutex_lock(&spu_mutex);
-	for (node = 0; node < MAX_NUMNODES; node++) {
-		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
-			destroy_spu(spu);
-	}
-	mutex_unlock(&spu_mutex);
-	sysdev_class_unregister(&spu_sysdev_class);
-}
-module_exit(cleanup_spu_base);
-
 static int __init init_spu_base(void)
 {
-	int i, ret;
+	int i, ret = 0;
+
+	for (i = 0; i < MAX_NUMNODES; i++)
+		INIT_LIST_HEAD(&spu_list[i]);
 
 	if (!spu_management_ops)
-		return 0;
+		goto out;
 
 	/* create sysdev class for spus */
 	ret = sysdev_class_register(&spu_sysdev_class);
 	if (ret)
-		return ret;
-
-	for (i = 0; i < MAX_NUMNODES; i++)
-		INIT_LIST_HEAD(&spu_list[i]);
+		goto out;
 
 	ret = spu_enumerate_spus(create_spu);
 
 	if (ret) {
 		printk(KERN_WARNING "%s: Error initializing spus\n",
 			__FUNCTION__);
-		cleanup_spu_base();
-		return ret;
+		goto out_unregister_sysdev_class;
 	}
 
 	xmon_register_spus(&spu_full_list);
 
+	return 0;
+
+ out_unregister_sysdev_class:
+	sysdev_class_unregister(&spu_sysdev_class);
+ out:
+
 	return ret;
 }
 module_init(init_spu_base);
diff --git a/arch/powerpc/platforms/cell/spu_coredump.c b/arch/powerpc/platforms/cell/spu_coredump.c
index 6915b418ee73..4fd37ff1e210 100644
--- a/arch/powerpc/platforms/cell/spu_coredump.c
+++ b/arch/powerpc/platforms/cell/spu_coredump.c
@@ -26,19 +26,18 @@
 
 #include <asm/spu.h>
 
-static struct spu_coredump_calls spu_coredump_calls;
+static struct spu_coredump_calls *spu_coredump_calls;
 static DEFINE_MUTEX(spu_coredump_mutex);
 
 int arch_notes_size(void)
 {
 	long ret;
-	struct module *owner = spu_coredump_calls.owner;
 
 	ret = -ENOSYS;
 	mutex_lock(&spu_coredump_mutex);
-	if (owner && try_module_get(owner)) {
-		ret = spu_coredump_calls.arch_notes_size();
-		module_put(owner);
+	if (spu_coredump_calls && try_module_get(spu_coredump_calls->owner)) {
+		ret = spu_coredump_calls->arch_notes_size();
+		module_put(spu_coredump_calls->owner);
 	}
 	mutex_unlock(&spu_coredump_mutex);
 	return ret;
@@ -46,36 +45,35 @@ int arch_notes_size(void)
 
 void arch_write_notes(struct file *file)
 {
-	struct module *owner = spu_coredump_calls.owner;
-
 	mutex_lock(&spu_coredump_mutex);
-	if (owner && try_module_get(owner)) {
-		spu_coredump_calls.arch_write_notes(file);
-		module_put(owner);
+	if (spu_coredump_calls && try_module_get(spu_coredump_calls->owner)) {
+		spu_coredump_calls->arch_write_notes(file);
+		module_put(spu_coredump_calls->owner);
 	}
 	mutex_unlock(&spu_coredump_mutex);
 }
 
 int register_arch_coredump_calls(struct spu_coredump_calls *calls)
 {
-	if (spu_coredump_calls.owner)
-		return -EBUSY;
+	int ret = 0;
+
 
 	mutex_lock(&spu_coredump_mutex);
-	spu_coredump_calls.arch_notes_size = calls->arch_notes_size;
-	spu_coredump_calls.arch_write_notes = calls->arch_write_notes;
-	spu_coredump_calls.owner = calls->owner;
+	if (spu_coredump_calls)
+		ret = -EBUSY;
+	else
+		spu_coredump_calls = calls;
 	mutex_unlock(&spu_coredump_mutex);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(register_arch_coredump_calls);
 
 void unregister_arch_coredump_calls(struct spu_coredump_calls *calls)
 {
-	BUG_ON(spu_coredump_calls.owner != calls->owner);
+	BUG_ON(spu_coredump_calls != calls);
 
 	mutex_lock(&spu_coredump_mutex);
-	spu_coredump_calls.owner = NULL;
+	spu_coredump_calls = NULL;
 	mutex_unlock(&spu_coredump_mutex);
 }
 EXPORT_SYMBOL_GPL(unregister_arch_coredump_calls);
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index 472217d19faf..2cd89c11af5a 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,4 +1,4 @@
-obj-y += switch.o
+obj-y += switch.o fault.o
 
 obj-$(CONFIG_SPU_FS) += spufs.o
 spufs-y += inode.o file.o context.o syscalls.o coredump.o
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 1898f0d3a8b8..3322528fa6eb 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -350,6 +350,11 @@ static int spu_backing_send_mfc_command(struct spu_context *ctx,
 	return ret;
 }
 
+static void spu_backing_restart_dma(struct spu_context *ctx)
+{
+	/* nothing to do here */
+}
+
 struct spu_context_ops spu_backing_ops = {
 	.mbox_read = spu_backing_mbox_read,
 	.mbox_stat_read = spu_backing_mbox_stat_read,
@@ -376,4 +381,5 @@ struct spu_context_ops spu_backing_ops = {
 	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
 	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
 	.send_mfc_command = spu_backing_send_mfc_command,
+	.restart_dma = spu_backing_restart_dma,
 };
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 04ad2e364e97..a87d9ca3dba2 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -41,9 +41,10 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 		goto out_free;
 	}
 	spin_lock_init(&ctx->mmio_lock);
+	spin_lock_init(&ctx->mapping_lock);
 	kref_init(&ctx->kref);
 	mutex_init(&ctx->state_mutex);
-	init_MUTEX(&ctx->run_sema);
+	mutex_init(&ctx->run_mutex);
 	init_waitqueue_head(&ctx->ibox_wq);
 	init_waitqueue_head(&ctx->wbox_wq);
 	init_waitqueue_head(&ctx->stop_wq);
@@ -51,6 +52,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	ctx->state = SPU_STATE_SAVED;
 	ctx->ops = &spu_backing_ops;
 	ctx->owner = get_task_mm(current);
+	INIT_LIST_HEAD(&ctx->rq);
 	if (gang)
 		spu_gang_add_ctx(gang, ctx);
 	ctx->rt_priority = current->rt_priority;
@@ -75,6 +77,7 @@ void destroy_spu_context(struct kref *kref)
 	spu_fini_csa(&ctx->csa);
 	if (ctx->gang)
 		spu_gang_remove_ctx(ctx->gang, ctx);
+	BUG_ON(!list_empty(&ctx->rq));
 	kfree(ctx);
 }
 
@@ -119,46 +122,6 @@ void spu_unmap_mappings(struct spu_context *ctx)
 }
 
 /**
- * spu_acquire_exclusive - lock spu contex and protect against userspace access
- * @ctx: spu contex to lock
- *
- * Note:
- *	Returns 0 and with the context locked on success
- *	Returns negative error and with the context _unlocked_ on failure.
- */
-int spu_acquire_exclusive(struct spu_context *ctx)
-{
-	int ret = -EINVAL;
-
-	spu_acquire(ctx);
-	/*
-	 * Context is about to be freed, so we can't acquire it anymore.
-	 */
-	if (!ctx->owner)
-		goto out_unlock;
-
-	if (ctx->state == SPU_STATE_SAVED) {
-		ret = spu_activate(ctx, 0);
-		if (ret)
-			goto out_unlock;
-	} else {
-		/*
-		 * We need to exclude userspace access to the context.
-		 *
-		 * To protect against memory access we invalidate all ptes
-		 * and make sure the pagefault handlers block on the mutex.
-		 */
-		spu_unmap_mappings(ctx);
-	}
-
-	return 0;
-
- out_unlock:
-	spu_release(ctx);
-	return ret;
-}
-
-/**
  * spu_acquire_runnable - lock spu contex and make sure it is in runnable state
  * @ctx: spu contex to lock
  *
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 725e19561159..5d9ad5a0307b 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -169,12 +169,12 @@ static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
 	struct spu_context *ctx;
 	loff_t pos = 0;
 	int sz, dfd, rc, total = 0;
-	const int bufsz = 4096;
+	const int bufsz = PAGE_SIZE;
 	char *name;
 	char fullname[80], *buf;
 	struct elf_note en;
 
-	buf = kmalloc(bufsz, GFP_KERNEL);
+	buf = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!buf)
 		return;
 
@@ -187,9 +187,8 @@ static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
 	sz = spufs_coredump_read[i].size;
 
 	ctx = ctx_info->ctx;
-	if (!ctx) {
-		return;
-	}
+	if (!ctx)
+		goto out;
 
 	sprintf(fullname, "SPU/%d/%s", dfd, name);
 	en.n_namesz = strlen(fullname) + 1;
@@ -197,23 +196,25 @@ static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
 	en.n_type = NT_SPU;
 
 	if (!spufs_dump_write(file, &en, sizeof(en)))
-		return;
+		goto out;
 	if (!spufs_dump_write(file, fullname, en.n_namesz))
-		return;
+		goto out;
 	if (!spufs_dump_seek(file, roundup((unsigned long)file->f_pos, 4)))
-		return;
+		goto out;
 
 	do {
 		rc = do_coredump_read(i, ctx, buf, bufsz, &pos);
 		if (rc > 0) {
 			if (!spufs_dump_write(file, buf, rc))
-				return;
+				goto out;
 			total += rc;
 		}
 	} while (rc == bufsz && total < sz);
 
 	spufs_dump_seek(file, roundup((unsigned long)file->f_pos
 						- total + sz, 4));
+out:
+	free_page((unsigned long)buf);
 }
 
 static void spufs_arch_write_notes(struct file *file)
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
new file mode 100644
index 000000000000..0f75c07e29d8
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -0,0 +1,211 @@
+/*
+ * Low-level SPU handling
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/spu.h>
+#include <asm/spu_csa.h>
+
+#include "spufs.h"
+
+/*
+ * This ought to be kept in sync with the powerpc specific do_page_fault
+ * function. Currently, there are a few corner cases that we haven't had
+ * to handle fortunately.
+ */
+static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, unsigned long dsisr)
+{
+	struct vm_area_struct *vma;
+	unsigned long is_write;
+	int ret;
+
+#if 0
+	if (!IS_VALID_EA(ea)) {
+		return -EFAULT;
+	}
+#endif /* XXX */
+	if (mm == NULL) {
+		return -EFAULT;
+	}
+	if (mm->pgd == NULL) {
+		return -EFAULT;
+	}
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, ea);
+	if (!vma)
+		goto bad_area;
+	if (vma->vm_start <= ea)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+	if (expand_stack(vma, ea))
+		goto bad_area;
+good_area:
+	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
+	if (is_write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	} else {
+		if (dsisr & MFC_DSISR_ACCESS_DENIED)
+			goto bad_area;
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto bad_area;
+	}
+	ret = 0;
+	switch (handle_mm_fault(mm, vma, ea, is_write)) {
+	case VM_FAULT_MINOR:
+		current->min_flt++;
+		break;
+	case VM_FAULT_MAJOR:
+		current->maj_flt++;
+		break;
+	case VM_FAULT_SIGBUS:
+		ret = -EFAULT;
+		goto bad_area;
+	case VM_FAULT_OOM:
+		ret = -ENOMEM;
+		goto bad_area;
+	default:
+		BUG();
+	}
+	up_read(&mm->mmap_sem);
+	return ret;
+
+bad_area:
+	up_read(&mm->mmap_sem);
+	return -EFAULT;
+}
+
+static void spufs_handle_dma_error(struct spu_context *ctx,
+				unsigned long ea, int type)
+{
+	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
+		ctx->event_return |= type;
+		wake_up_all(&ctx->stop_wq);
+	} else {
+		siginfo_t info;
+		memset(&info, 0, sizeof(info));
+
+		switch (type) {
+		case SPE_EVENT_INVALID_DMA:
+			info.si_signo = SIGBUS;
+			info.si_code = BUS_OBJERR;
+			break;
+		case SPE_EVENT_SPE_DATA_STORAGE:
+			info.si_signo = SIGBUS;
+			info.si_addr = (void __user *)ea;
+			info.si_code = BUS_ADRERR;
+			break;
+		case SPE_EVENT_DMA_ALIGNMENT:
+			info.si_signo = SIGBUS;
+			/* DAR isn't set for an alignment fault :( */
+			info.si_code = BUS_ADRALN;
+			break;
+		case SPE_EVENT_SPE_ERROR:
+			info.si_signo = SIGILL;
+			info.si_addr = (void __user *)(unsigned long)
+				ctx->ops->npc_read(ctx) - 4;
+			info.si_code = ILL_ILLOPC;
+			break;
+		}
+		if (info.si_signo)
+			force_sig_info(info.si_signo, &info, current);
+	}
+}
+
+void spufs_dma_callback(struct spu *spu, int type)
+{
+	spufs_handle_dma_error(spu->ctx, spu->dar, type);
+}
+EXPORT_SYMBOL_GPL(spufs_dma_callback);
+
+/*
+ * bottom half handler for page faults, we can't do this from
+ * interrupt context, since we might need to sleep.
+ * we also need to give up the mutex so we can get scheduled
+ * out while waiting for the backing store.
+ *
+ * TODO: try calling hash_page from the interrupt handler first
+ *       in order to speed up the easy case.
+ */
+int spufs_handle_class1(struct spu_context *ctx)
+{
+	u64 ea, dsisr, access;
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * dar and dsisr get passed from the registers
+	 * to the spu_context, to this function, but not
+	 * back to the spu if it gets scheduled again.
+	 *
+	 * if we don't handle the fault for a saved context
+	 * in time, we can still expect to get the same fault
+	 * the immediately after the context restore.
+	 */
+	if (ctx->state == SPU_STATE_RUNNABLE) {
+		ea = ctx->spu->dar;
+		dsisr = ctx->spu->dsisr;
+		ctx->spu->dar= ctx->spu->dsisr = 0;
+	} else {
+		ea = ctx->csa.priv1.mfc_dar_RW;
+		dsisr = ctx->csa.priv1.mfc_dsisr_RW;
+		ctx->csa.priv1.mfc_dar_RW = 0;
+		ctx->csa.priv1.mfc_dsisr_RW = 0;
+	}
+
+	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
+		return 0;
+
+	pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
+		dsisr, ctx->state);
+
+	/* we must not hold the lock when entering spu_handle_mm_fault */
+	spu_release(ctx);
+
+	access = (_PAGE_PRESENT | _PAGE_USER);
+	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
+	local_irq_save(flags);
+	ret = hash_page(ea, access, 0x300);
+	local_irq_restore(flags);
+
+	/* hashing failed, so try the actual fault handler */
+	if (ret)
+		ret = spu_handle_mm_fault(current->mm, ea, dsisr);
+
+	spu_acquire(ctx);
+	/*
+	 * If we handled the fault successfully and are in runnable
+	 * state, restart the DMA.
+	 * In case of unhandled error report the problem to user space.
+	 */
+	if (!ret) {
+		if (ctx->spu)
+			ctx->ops->restart_dma(ctx);
+	} else
+		spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spufs_handle_class1);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 505266a568d4..d010b2464a98 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -44,9 +44,25 @@ spufs_mem_open(struct inode *inode, struct file *file)
 {
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
-	ctx->local_store = inode->i_mapping;
-	smp_wmb();
+	if (!i->i_openers++)
+		ctx->local_store = inode->i_mapping;
+	spin_unlock(&ctx->mapping_lock);
+	return 0;
+}
+
+static int
+spufs_mem_release(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
+	if (!--i->i_openers)
+		ctx->local_store = NULL;
+	spin_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -149,6 +165,7 @@ spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
 
 static const struct file_operations spufs_mem_fops = {
 	.open = spufs_mem_open,
+	.release = spufs_mem_release,
 	.read = spufs_mem_read,
 	.write = spufs_mem_write,
 	.llseek = generic_file_llseek,
@@ -238,16 +255,33 @@ static int spufs_cntl_open(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
+	spin_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
-	ctx->cntl = inode->i_mapping;
-	smp_wmb();
+	if (!i->i_openers++)
+		ctx->cntl = inode->i_mapping;
+	spin_unlock(&ctx->mapping_lock);
 	return simple_attr_open(inode, file, spufs_cntl_get,
 					spufs_cntl_set, "0x%08lx");
 }
 
+static int
+spufs_cntl_release(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	simple_attr_close(inode, file);
+
+	spin_lock(&ctx->mapping_lock);
+	if (!--i->i_openers)
+		ctx->cntl = NULL;
+	spin_unlock(&ctx->mapping_lock);
+	return 0;
+}
+
 static const struct file_operations spufs_cntl_fops = {
 	.open = spufs_cntl_open,
-	.release = simple_attr_close,
+	.release = spufs_cntl_release,
 	.read = simple_attr_read,
 	.write = simple_attr_write,
 	.mmap = spufs_cntl_mmap,
@@ -723,12 +757,28 @@ static int spufs_signal1_open(struct inode *inode, struct file *file)
 {
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
-	ctx->signal1 = inode->i_mapping;
-	smp_wmb();
+	if (!i->i_openers++)
+		ctx->signal1 = inode->i_mapping;
+	spin_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
+static int
+spufs_signal1_release(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
+	if (!--i->i_openers)
+		ctx->signal1 = NULL;
+	spin_unlock(&ctx->mapping_lock);
+	return 0;
+}
+
 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
 			size_t len, loff_t *pos)
 {
@@ -821,6 +871,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
 
 static const struct file_operations spufs_signal1_fops = {
 	.open = spufs_signal1_open,
+	.release = spufs_signal1_release,
 	.read = spufs_signal1_read,
 	.write = spufs_signal1_write,
 	.mmap = spufs_signal1_mmap,
@@ -830,12 +881,28 @@ static int spufs_signal2_open(struct inode *inode, struct file *file)
 {
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
-	ctx->signal2 = inode->i_mapping;
-	smp_wmb();
+	if (!i->i_openers++)
+		ctx->signal2 = inode->i_mapping;
+	spin_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
+static int
+spufs_signal2_release(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
+	if (!--i->i_openers)
+		ctx->signal2 = NULL;
+	spin_unlock(&ctx->mapping_lock);
+	return 0;
+}
+
 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
 			size_t len, loff_t *pos)
 {
@@ -932,6 +999,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
 
 static const struct file_operations spufs_signal2_fops = {
 	.open = spufs_signal2_open,
+	.release = spufs_signal2_release,
 	.read = spufs_signal2_read,
 	.write = spufs_signal2_write,
 	.mmap = spufs_signal2_mmap,
@@ -1031,13 +1099,30 @@ static int spufs_mss_open(struct inode *inode, struct file *file)
 	struct spu_context *ctx = i->i_ctx;
 
 	file->private_data = i->i_ctx;
-	ctx->mss = inode->i_mapping;
-	smp_wmb();
+
+	spin_lock(&ctx->mapping_lock);
+	if (!i->i_openers++)
+		ctx->mss = inode->i_mapping;
+	spin_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
+static int
+spufs_mss_release(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
+	if (!--i->i_openers)
+		ctx->mss = NULL;
+	spin_unlock(&ctx->mapping_lock);
+	return 0;
+}
+
 static const struct file_operations spufs_mss_fops = {
 	.open = spufs_mss_open,
+	.release = spufs_mss_release,
 	.mmap = spufs_mss_mmap,
 };
 
@@ -1072,14 +1157,30 @@ static int spufs_psmap_open(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
+	spin_lock(&ctx->mapping_lock);
 	file->private_data = i->i_ctx;
-	ctx->psmap = inode->i_mapping;
-	smp_wmb();
+	if (!i->i_openers++)
+		ctx->psmap = inode->i_mapping;
+	spin_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
+static int
+spufs_psmap_release(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
+	if (!--i->i_openers)
+		ctx->psmap = NULL;
+	spin_unlock(&ctx->mapping_lock);
+	return 0;
+}
+
 static const struct file_operations spufs_psmap_fops = {
 	.open = spufs_psmap_open,
+	.release = spufs_psmap_release,
 	.mmap = spufs_psmap_mmap,
 };
 
@@ -1126,12 +1227,27 @@ static int spufs_mfc_open(struct inode *inode, struct file *file)
 	if (atomic_read(&inode->i_count) != 1)
 		return -EBUSY;
 
+	spin_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
-	ctx->mfc = inode->i_mapping;
-	smp_wmb();
+	if (!i->i_openers++)
+		ctx->mfc = inode->i_mapping;
+	spin_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
+static int
+spufs_mfc_release(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	spin_lock(&ctx->mapping_lock);
+	if (!--i->i_openers)
+		ctx->mfc = NULL;
+	spin_unlock(&ctx->mapping_lock);
+	return 0;
+}
+
 /* interrupt-level mfc callback function. */
 void spufs_mfc_callback(struct spu *spu)
 {
@@ -1313,7 +1429,10 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
 	if (ret)
 		goto out;
 
-	spu_acquire_runnable(ctx, 0);
+	ret = spu_acquire_runnable(ctx, 0);
+	if (ret)
+		goto out;
+
 	if (file->f_flags & O_NONBLOCK) {
 		ret = ctx->ops->send_mfc_command(ctx, &cmd);
 	} else {
@@ -1399,6 +1518,7 @@ static int spufs_mfc_fasync(int fd, struct file *file, int on)
 
 static const struct file_operations spufs_mfc_fops = {
 	.open = spufs_mfc_open,
+	.release = spufs_mfc_release,
 	.read = spufs_mfc_read,
 	.write = spufs_mfc_write,
 	.poll = spufs_mfc_poll,
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index ae42e03b8c86..428875c5e4ec 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -296,6 +296,14 @@ static int spu_hw_send_mfc_command(struct spu_context *ctx,
 	}
 }
 
+static void spu_hw_restart_dma(struct spu_context *ctx)
+{
+	struct spu_priv2 __iomem *priv2 = ctx->spu->priv2;
+
+	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags))
+		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+}
+
 struct spu_context_ops spu_hw_ops = {
 	.mbox_read = spu_hw_mbox_read,
 	.mbox_stat_read = spu_hw_mbox_stat_read,
@@ -320,4 +328,5 @@ struct spu_context_ops spu_hw_ops = {
 	.read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
 	.get_mfc_free_elements = spu_hw_get_mfc_free_elements,
 	.send_mfc_command = spu_hw_send_mfc_command,
+	.restart_dma = spu_hw_restart_dma,
 };
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index e3f4ee97c913..13e4f70ec8c0 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -36,6 +36,7 @@
 #include <asm/prom.h>
 #include <asm/semaphore.h>
 #include <asm/spu.h>
+#include <asm/spu_priv1.h>
 #include <asm/uaccess.h>
 
 #include "spufs.h"
@@ -54,6 +55,7 @@ spufs_alloc_inode(struct super_block *sb)
 
 	ei->i_gang = NULL;
 	ei->i_ctx = NULL;
+	ei->i_openers = 0;
 
 	return &ei->vfs_inode;
 }
@@ -520,13 +522,14 @@ out:
 
 /* File system initialization */
 enum {
-	Opt_uid, Opt_gid, Opt_err,
+	Opt_uid, Opt_gid, Opt_mode, Opt_err,
 };
 
 static match_table_t spufs_tokens = {
 	{ Opt_uid, "uid=%d" },
 	{ Opt_gid, "gid=%d" },
-	{ Opt_err, NULL },
+	{ Opt_mode, "mode=%o" },
+	{ Opt_err, NULL },
 };
 
 static int
@@ -553,6 +556,11 @@ spufs_parse_options(char *options, struct inode *root)
 			return 0;
 		root->i_gid = option;
 		break;
+	case Opt_mode:
+		if (match_octal(&args[0], &option))
+			return 0;
+		root->i_mode = option | S_IFDIR;
+		break;
 	default:
 		return 0;
 	}
@@ -560,6 +568,11 @@ spufs_parse_options(char *options, struct inode *root)
 	return 1;
 }
 
+static void spufs_exit_isolated_loader(void)
+{
+	kfree(isolated_loader);
+}
+
 static void
 spufs_init_isolated_loader(void)
 {
@@ -653,6 +666,10 @@ static int __init spufs_init(void)
 {
 	int ret;
 
+	ret = -ENODEV;
+	if (!spu_management_ops)
+		goto out;
+
 	ret = -ENOMEM;
 	spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
 			sizeof(struct spufs_inode_info), 0,
@@ -660,25 +677,29 @@ static int __init spufs_init(void)
 
 	if (!spufs_inode_cache)
 		goto out;
-	if (spu_sched_init() != 0) {
-		kmem_cache_destroy(spufs_inode_cache);
-		goto out;
-	}
-	ret = register_filesystem(&spufs_type);
+	ret = spu_sched_init();
 	if (ret)
 		goto out_cache;
+	ret = register_filesystem(&spufs_type);
+	if (ret)
+		goto out_sched;
 	ret = register_spu_syscalls(&spufs_calls);
 	if (ret)
 		goto out_fs;
 	ret = register_arch_coredump_calls(&spufs_coredump_calls);
 	if (ret)
-		goto out_fs;
+		goto out_syscalls;
 
 	spufs_init_isolated_loader();
 
 	return 0;
+
+out_syscalls:
+	unregister_spu_syscalls(&spufs_calls);
 out_fs:
 	unregister_filesystem(&spufs_type);
+out_sched:
+	spu_sched_exit();
 out_cache:
 	kmem_cache_destroy(spufs_inode_cache);
 out:
@@ -689,6 +710,7 @@ module_init(spufs_init);
 static void __exit spufs_exit(void)
 {
 	spu_sched_exit();
+	spufs_exit_isolated_loader();
 	unregister_arch_coredump_calls(&spufs_coredump_calls);
 	unregister_spu_syscalls(&spufs_calls);
 	unregister_filesystem(&spufs_type);
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index f95a611ca362..57626600b1a4 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -18,27 +18,6 @@ void spufs_stop_callback(struct spu *spu)
 	wake_up_all(&ctx->stop_wq);
 }
 
-void spufs_dma_callback(struct spu *spu, int type)
-{
-	struct spu_context *ctx = spu->ctx;
-
-	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
-		ctx->event_return |= type;
-		wake_up_all(&ctx->stop_wq);
-	} else {
-		switch (type) {
-		case SPE_EVENT_DMA_ALIGNMENT:
-		case SPE_EVENT_SPE_DATA_STORAGE:
-		case SPE_EVENT_INVALID_DMA:
-			force_sig(SIGBUS, /* info, */ current);
-			break;
-		case SPE_EVENT_SPE_ERROR:
-			force_sig(SIGILL, /* info */ current);
-			break;
-		}
-	}
-}
-
 static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
 {
 	struct spu *spu;
@@ -63,13 +42,18 @@ static int spu_setup_isolated(struct spu_context *ctx)
 	const u32 status_loading = SPU_STATUS_RUNNING
 		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;
 
+	ret = -ENODEV;
 	if (!isolated_loader)
-		return -ENODEV;
-
-	ret = spu_acquire_exclusive(ctx);
-	if (ret)
 		goto out;
 
+	/*
+	 * We need to exclude userspace access to the context.
+	 *
+	 * To protect against memory access we invalidate all ptes
+	 * and make sure the pagefault handlers block on the mutex.
+	 */
+	spu_unmap_mappings(ctx);
+
 	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
 
 	/* purge the MFC DMA queue to ensure no spurious accesses before we
@@ -82,7 +66,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
 			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
 					__FUNCTION__);
 			ret = -EIO;
-			goto out_unlock;
+			goto out;
 		}
 		cond_resched();
 	}
@@ -119,12 +103,15 @@ static int spu_setup_isolated(struct spu_context *ctx)
 		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
 		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 		ret = -EACCES;
+		goto out_drop_priv;
+	}
 
-	} else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
+	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
 		/* This isn't allowed by the CBEA, but check anyway */
 		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
 		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
 		ret = -EINVAL;
+		goto out_drop_priv;
 	}
 
 out_drop_priv:
@@ -132,30 +119,19 @@ out_drop_priv:
 	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
 	spu_mfc_sr1_set(ctx->spu, sr1);
 
-out_unlock:
-	spu_release(ctx);
 out:
 	return ret;
 }
 
-static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
+static int spu_run_init(struct spu_context *ctx, u32 * npc)
 {
-	int ret;
-	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
-
-	ret = spu_acquire_runnable(ctx, 0);
-	if (ret)
-		return ret;
-
 	if (ctx->flags & SPU_CREATE_ISOLATE) {
+		unsigned long runcntl;
+
 		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
-			/* Need to release ctx, because spu_setup_isolated will
-			 * acquire it exclusively.
-			 */
-			spu_release(ctx);
-			ret = spu_setup_isolated(ctx);
-			if (!ret)
-				ret = spu_acquire_runnable(ctx, 0);
+			int ret = spu_setup_isolated(ctx);
+			if (ret)
+				return ret;
 		}
 
 		/* if userspace has set the runcntrl register (eg, to issue an
@@ -164,16 +140,17 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 				(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
 		if (runcntl == 0)
 			runcntl = SPU_RUNCNTL_RUNNABLE;
+		ctx->ops->runcntl_write(ctx, runcntl);
 	} else {
 		spu_start_tick(ctx);
 		ctx->ops->npc_write(ctx, *npc);
+		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 	}
 
-	ctx->ops->runcntl_write(ctx, runcntl);
-	return ret;
+	return 0;
 }
 
-static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
+static int spu_run_fini(struct spu_context *ctx, u32 * npc,
 			u32 * status)
 {
 	int ret = 0;
@@ -189,19 +166,27 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
 	return ret;
 }
 
-static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
+static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
 			u32 *status)
 {
 	int ret;
 
-	if ((ret = spu_run_fini(ctx, npc, status)) != 0)
+	ret = spu_run_fini(ctx, npc, status);
+	if (ret)
 		return ret;
-	if (*status & (SPU_STATUS_STOPPED_BY_STOP |
-	    SPU_STATUS_STOPPED_BY_HALT)) {
+
+	if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
 		return *status;
-	}
-	if ((ret = spu_run_init(ctx, npc)) != 0)
+
+	ret = spu_acquire_runnable(ctx, 0);
+	if (ret)
+		return ret;
+
+	ret = spu_run_init(ctx, npc);
+	if (ret) {
+		spu_release(ctx);
 		return ret;
+	}
 	return 0;
 }
 
@@ -253,17 +238,17 @@ int spu_process_callback(struct spu_context *ctx)
 {
 	struct spu_syscall_block s;
 	u32 ls_pointer, npc;
-	char *ls;
+	void __iomem *ls;
 	long spu_ret;
 	int ret;
 
 	/* get syscall block from local store */
-	npc = ctx->ops->npc_read(ctx);
-	ls = ctx->ops->get_ls(ctx);
-	ls_pointer = *(u32*)(ls + npc);
+	npc = ctx->ops->npc_read(ctx) & ~3;
+	ls = (void __iomem *)ctx->ops->get_ls(ctx);
+	ls_pointer = in_be32(ls + npc);
 	if (ls_pointer > (LS_SIZE - sizeof(s)))
 		return -EFAULT;
-	memcpy(&s, ls + ls_pointer, sizeof (s));
+	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));
 
 	/* do actual syscall without pinning the spu */
 	ret = 0;
@@ -283,7 +268,7 @@ int spu_process_callback(struct spu_context *ctx)
 	}
 
 	/* write result, jump over indirect pointer */
-	memcpy(ls + ls_pointer, &spu_ret, sizeof (spu_ret));
+	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
 	ctx->ops->npc_write(ctx, npc);
 	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 	return ret;
@@ -292,11 +277,8 @@ int spu_process_callback(struct spu_context *ctx)
 static inline int spu_process_events(struct spu_context *ctx)
 {
 	struct spu *spu = ctx->spu;
-	u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
 	int ret = 0;
 
-	if (spu->dsisr & pte_fault)
-		ret = spu_irq_class_1_bottom(spu);
 	if (spu->class_0_pending)
 		ret = spu_irq_class_0_bottom(spu);
 	if (!ret && signal_pending(current))
@@ -310,14 +292,21 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 	int ret;
 	u32 status;
 
-	if (down_interruptible(&ctx->run_sema))
+	if (mutex_lock_interruptible(&ctx->run_mutex))
 		return -ERESTARTSYS;
 
 	ctx->ops->master_start(ctx);
 	ctx->event_return = 0;
-	ret = spu_run_init(ctx, npc);
+
+	ret = spu_acquire_runnable(ctx, 0);
 	if (ret)
+		return ret;
+
+	ret = spu_run_init(ctx, npc);
+	if (ret) {
+		spu_release(ctx);
 		goto out;
+	}
 
 	do {
 		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
@@ -330,6 +319,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 			break;
 		status &= ~SPU_STATUS_STOPPED_BY_STOP;
 		}
+		ret = spufs_handle_class1(ctx);
+		if (ret)
+			break;
+
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
 			ret = spu_reacquire_runnable(ctx, npc, &status);
 			if (ret) {
@@ -363,6 +356,6 @@ out2:
 
 out:
 	*event = ctx->event_return;
-	up(&ctx->run_sema);
+	mutex_unlock(&ctx->run_mutex);
 	return ret;
 }
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index c9561582ce2a..91030b8abdca 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -71,14 +71,27 @@ static inline int node_allowed(int node)
 
 void spu_start_tick(struct spu_context *ctx)
 {
-	if (ctx->policy == SCHED_RR)
+	if (ctx->policy == SCHED_RR) {
+		/*
+		 * Make sure the exiting bit is cleared.
+		 */
+		clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
+		mb();
 		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
+	}
 }
 
 void spu_stop_tick(struct spu_context *ctx)
 {
-	if (ctx->policy == SCHED_RR)
+	if (ctx->policy == SCHED_RR) {
+		/*
+		 * While the work can be rearming normally setting this flag
+		 * makes sure it does not rearm itself anymore.
+		 */
+		set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
+		mb();
 		cancel_delayed_work(&ctx->sched_work);
+	}
 }
 
 void spu_sched_tick(struct work_struct *work)
@@ -86,7 +99,15 @@ void spu_sched_tick(struct work_struct *work)
 	struct spu_context *ctx =
 		container_of(work, struct spu_context, sched_work.work);
 	struct spu *spu;
-	int rearm = 1;
+	int preempted = 0;
+
+	/*
+	 * If this context is being stopped avoid rescheduling from the
+	 * scheduler tick because we would block on the state_mutex.
+	 * The caller will yield the spu later on anyway.
+	 */
+	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
+		return;
 
 	mutex_lock(&ctx->state_mutex);
 	spu = ctx->spu;
@@ -94,12 +115,19 @@ void spu_sched_tick(struct work_struct *work)
 		int best = sched_find_first_bit(spu_prio->bitmap);
 		if (best <= ctx->prio) {
 			spu_deactivate(ctx);
-			rearm = 0;
+			preempted = 1;
 		}
 	}
 	mutex_unlock(&ctx->state_mutex);
 
-	if (rearm)
+	if (preempted) {
+		/*
+		 * We need to break out of the wait loop in spu_run manually
+		 * to ensure this context gets put on the runqueue again
+		 * ASAP.
+		 */
+		wake_up(&ctx->stop_wq);
+	} else
 		spu_start_tick(ctx);
 }
 
@@ -208,58 +236,40 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
  * spu_add_to_rq - add a context to the runqueue
  * @ctx: context to add
  */
-static void spu_add_to_rq(struct spu_context *ctx)
+static void __spu_add_to_rq(struct spu_context *ctx)
 {
-	spin_lock(&spu_prio->runq_lock);
-	list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
-	set_bit(ctx->prio, spu_prio->bitmap);
-	spin_unlock(&spu_prio->runq_lock);
-}
-
-/**
- * spu_del_from_rq - remove a context from the runqueue
- * @ctx: context to remove
- */
-static void spu_del_from_rq(struct spu_context *ctx)
-{
-	spin_lock(&spu_prio->runq_lock);
-	list_del_init(&ctx->rq);
-	if (list_empty(&spu_prio->runq[ctx->prio]))
-		clear_bit(ctx->prio, spu_prio->bitmap);
-	spin_unlock(&spu_prio->runq_lock);
+	int prio = ctx->prio;
+
+	list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
+	set_bit(prio, spu_prio->bitmap);
 }
 
-/**
- * spu_grab_context - remove one context from the runqueue
- * @prio: priority of the context to be removed
- *
- * This function removes one context from the runqueue for priority @prio.
- * If there is more than one context with the given priority the first
- * task on the runqueue will be taken.
- *
- * Returns the spu_context it just removed.
- *
- * Must be called with spu_prio->runq_lock held.
- */
-static struct spu_context *spu_grab_context(int prio)
+static void __spu_del_from_rq(struct spu_context *ctx)
 {
-	struct list_head *rq = &spu_prio->runq[prio];
+	int prio = ctx->prio;
 
-	if (list_empty(rq))
-		return NULL;
-	return list_entry(rq->next, struct spu_context, rq);
+	if (!list_empty(&ctx->rq))
+		list_del_init(&ctx->rq);
+	if (list_empty(&spu_prio->runq[prio]))
+		clear_bit(prio, spu_prio->bitmap);
 }
 
 static void spu_prio_wait(struct spu_context *ctx)
 {
 	DEFINE_WAIT(wait);
 
+	spin_lock(&spu_prio->runq_lock);
 	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
 	if (!signal_pending(current)) {
+		__spu_add_to_rq(ctx);
+		spin_unlock(&spu_prio->runq_lock);
 		mutex_unlock(&ctx->state_mutex);
 		schedule();
 		mutex_lock(&ctx->state_mutex);
+		spin_lock(&spu_prio->runq_lock);
+		__spu_del_from_rq(ctx);
 	}
+	spin_unlock(&spu_prio->runq_lock);
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&ctx->stop_wq, &wait);
 }
@@ -280,9 +290,14 @@ static void spu_reschedule(struct spu *spu)
280 spin_lock(&spu_prio->runq_lock); 290 spin_lock(&spu_prio->runq_lock);
281 best = sched_find_first_bit(spu_prio->bitmap); 291 best = sched_find_first_bit(spu_prio->bitmap);
282 if (best < MAX_PRIO) { 292 if (best < MAX_PRIO) {
283 struct spu_context *ctx = spu_grab_context(best);
293 struct list_head *rq = &spu_prio->runq[best];
284 if (ctx)
294 struct spu_context *ctx;
285 wake_up(&ctx->stop_wq); 295
296 BUG_ON(list_empty(rq));
297
298 ctx = list_entry(rq->next, struct spu_context, rq);
299 __spu_del_from_rq(ctx);
300 wake_up(&ctx->stop_wq);
286 } 301 }
287 spin_unlock(&spu_prio->runq_lock); 302 spin_unlock(&spu_prio->runq_lock);
288} 303
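spu_reschedule now picks the next context inline: find the lowest set bit in the priority bitmap, pop the first entry of that priority's list, clear the bit once the list drains, and wake the chosen context. The underlying "bitmap plus per-priority FIFO" structure can be shown with a small self-contained C example (a plain array scan stands in for sched_find_first_bit, and the types are simplified, not the spufs ones):

#include <stdio.h>
#include <string.h>

#define MAX_PRIO 100

struct task {
        int prio;
        struct task *next;              /* next waiter at the same priority */
};

struct runqueue {
        unsigned char bitmap[MAX_PRIO]; /* bitmap[p] != 0 when runq[p] is non-empty */
        struct task *runq[MAX_PRIO];    /* one FIFO per priority */
};

static void rq_add(struct runqueue *rq, struct task *t)
{
        struct task **pp = &rq->runq[t->prio];

        while (*pp)                     /* append, keeping FIFO order per priority */
                pp = &(*pp)->next;
        *pp = t;
        t->next = NULL;
        rq->bitmap[t->prio] = 1;
}

/* Return the highest-priority waiter (lowest prio value), or NULL if empty. */
static struct task *rq_pick(struct runqueue *rq)
{
        for (int p = 0; p < MAX_PRIO; p++) {    /* sched_find_first_bit() analogue */
                if (!rq->bitmap[p])
                        continue;
                struct task *t = rq->runq[p];
                rq->runq[p] = t->next;
                if (!rq->runq[p])
                        rq->bitmap[p] = 0;      /* clear the bit once the list drains */
                return t;
        }
        return NULL;
}

int main(void)
{
        struct runqueue rq;
        struct task a = { .prio = 30 }, b = { .prio = 5 };

        memset(&rq, 0, sizeof(rq));
        rq_add(&rq, &a);
        rq_add(&rq, &b);
        printf("picked prio %d\n", rq_pick(&rq)->prio);   /* prints "picked prio 5" */
        return 0;
}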
@@ -365,6 +380,12 @@ static struct spu *find_victim(struct spu_context *ctx)
365 } 380
366 spu_unbind_context(spu, victim); 381
367 mutex_unlock(&victim->state_mutex); 382
383 /*
384 * We need to break out of the wait loop in spu_run
385 * manually to ensure this context gets put on the
386 * runqueue again ASAP.
387 */
388 wake_up(&victim->stop_wq);
368 return spu; 389
369 } 390
370 } 391
@@ -377,7 +398,7 @@ static struct spu *find_victim(struct spu_context *ctx)
377 * @ctx: spu context to schedule 398 * @ctx: spu context to schedule
378 * @flags: flags (currently ignored) 399 * @flags: flags (currently ignored)
379 * 400 *
380 * Tries to find a free spu to run @ctx. If no free spu is availble 401 * Tries to find a free spu to run @ctx. If no free spu is available
381 * add the context to the runqueue so it gets woken up once an spu 402 * add the context to the runqueue so it gets woken up once an spu
382 * is available. 403 * is available.
383 */ 404 */
@@ -402,9 +423,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
402 return 0; 423 return 0;
403 } 424 }
404 425
405 spu_add_to_rq(ctx);
406 spu_prio_wait(ctx); 426 spu_prio_wait(ctx);
407 spu_del_from_rq(ctx);
408 } while (!signal_pending(current)); 427 } while (!signal_pending(current));
409 428
410 return -ERESTARTSYS; 429 return -ERESTARTSYS;
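After these hunks spu_activate is the only place left that blocks: it repeatedly tries to bind the context to an SPU and otherwise parks itself in spu_prio_wait, which now does the runqueue add and remove internally, until it either succeeds or a signal interrupts it. The skeleton of that loop, reduced to plain C with stub helpers standing in for spu_get_idle()/find_victim(), spu_prio_wait() and signal_pending() (none of these stubs are spufs functions), is roughly:

#include <errno.h>
#include <stdbool.h>

/* Illustrative stubs only. */
static bool try_get_resource(void *ctx) { (void)ctx; return false; }
static void wait_for_resource(void *ctx) { (void)ctx; }
static bool interrupted(void) { return true; }

int activate(void *ctx)
{
        do {
                if (try_get_resource(ctx))
                        return 0;       /* bound to an SPU, caller can run */
                /*
                 * Nothing free: the wait helper queues the context on the
                 * run queue, sleeps, and dequeues it on wakeup, so the
                 * explicit add/del calls around the wait are gone.
                 */
                wait_for_resource(ctx);
        } while (!interrupted());

        return -EINTR;                  /* the kernel returns -ERESTARTSYS here */
}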
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 5c4e47d69d79..0a947fd7de57 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -41,7 +41,7 @@ struct spu_gang;
41 41
42/* ctx->sched_flags */ 42
43enum { 43
44 SPU_SCHED_WAKE = 0, /* currently unused */
44 SPU_SCHED_EXITING = 0,
45}; 45
46 46
47struct spu_context { 47
@@ -50,16 +50,17 @@ struct spu_context {
50 spinlock_t mmio_lock; /* protects mmio access */ 50
51 struct address_space *local_store; /* local store mapping. */ 51
52 struct address_space *mfc; /* 'mfc' area mappings. */ 52
53 struct address_space *cntl; /* 'control' area mappings. */ 53
54 struct address_space *signal1; /* 'signal1' area mappings. */ 54
55 struct address_space *signal2; /* 'signal2' area mappings. */ 55
56 struct address_space *mss; /* 'mss' area mappings. */ 56
57 struct address_space *psmap; /* 'psmap' area mappings. */ 57
58 spinlock_t mapping_lock;
58 u64 object_id; /* user space pointer for oprofile */ 59 u64 object_id; /* user space pointer for oprofile */
59 60
60 enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state; 61 enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
61 struct mutex state_mutex; 62 struct mutex state_mutex;
62 struct semaphore run_sema;
63 struct mutex run_mutex;
63 64
64 struct mm_struct *owner; 65 struct mm_struct *owner;
65 66
@@ -140,6 +141,7 @@ struct spu_context_ops {
140 struct spu_dma_info * info); 141
141 void (*proxydma_info_read) (struct spu_context * ctx, 142
142 struct spu_proxydma_info * info); 143
144 void (*restart_dma)(struct spu_context *ctx);
143}; 145
144 146
145extern struct spu_context_ops spu_hw_ops; 147extern struct spu_context_ops spu_hw_ops;
@@ -149,6 +151,7 @@ struct spufs_inode_info {
149 struct spu_context *i_ctx; 151
150 struct spu_gang *i_gang; 152
151 struct inode vfs_inode; 153
154 int i_openers;
152}; 155
153#define SPUFS_I(inode) \ 156
154 container_of(inode, struct spufs_inode_info, vfs_inode) 157
@@ -170,6 +173,9 @@ int put_spu_gang(struct spu_gang *gang);
170void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx); 173
171void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx); 174
172 175
176/* fault handling */
177int spufs_handle_class1(struct spu_context *ctx);
178
173/* context management */ 179
174static inline void spu_acquire(struct spu_context *ctx) 180
175{ 181
@@ -190,7 +196,6 @@ void spu_unmap_mappings(struct spu_context *ctx);
190void spu_forget(struct spu_context *ctx); 196
191int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags); 197
192void spu_acquire_saved(struct spu_context *ctx); 198
193int spu_acquire_exclusive(struct spu_context *ctx);
194 199
195int spu_activate(struct spu_context *ctx, unsigned long flags); 200
196void spu_deactivate(struct spu_context *ctx); 201
@@ -218,14 +223,13 @@ extern char *isolated_loader;
218 prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \ 223
219 if (condition) \ 224
220 break; \ 225
221 if (!signal_pending(current)) { \
226 if (signal_pending(current)) { \
222 spu_release(ctx); \
227 __ret = -ERESTARTSYS; \
223 schedule(); \
228 break; \
224 spu_acquire(ctx); \
225 continue; \
226 } \ 229
227 __ret = -ERESTARTSYS; \
230 spu_release(ctx); \
228 break; \
231 schedule(); \
232 spu_acquire(ctx); \
229 } \ 233
230 finish_wait(&(wq), &__wait); \ 234
231 __ret; \ 235
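The spufs_wait() hunk inverts the macro's test: a pending signal now breaks out with -ERESTARTSYS right away, and the common path falls through to drop the context lock, schedule, and retake the lock before re-checking the condition. Written as an ordinary function instead of a macro, with a pthread mutex standing in for ctx->state_mutex and sched_yield() standing in for schedule() (an illustration of the control flow, not the spufs macro), it looks like:

#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

/*
 * The caller holds *lock on entry and gets it back on return, whatever the
 * outcome; the lock is only dropped across the sleep, as in spufs_wait().
 */
int wait_event_locked(pthread_mutex_t *lock, bool (*done)(void *), void *arg,
                      volatile int *sig)
{
        int ret = 0;

        for (;;) {
                if (done(arg))                  /* condition met: return 0 */
                        break;
                if (*sig) {                     /* signal first: bail out, lock still held */
                        ret = -EINTR;           /* the kernel uses -ERESTARTSYS */
                        break;
                }
                pthread_mutex_unlock(lock);     /* spu_release(ctx) */
                sched_yield();                  /* schedule()       */
                pthread_mutex_lock(lock);       /* spu_acquire(ctx) */
        }
        return ret;
}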
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index fd91c73de34e..8347c4a3f894 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -2084,6 +2084,10 @@ int spu_save(struct spu_state *prev, struct spu *spu)
2084 int rc; 2084
2085 2085
2086 acquire_spu_lock(spu); /* Step 1. */ 2086
2087 prev->dar = spu->dar;
2088 prev->dsisr = spu->dsisr;
2089 spu->dar = 0;
2090 spu->dsisr = 0;
2087 rc = __do_spu_save(prev, spu); /* Steps 2-53. */ 2091
2088 release_spu_lock(spu); 2092
2089 if (rc != 0 && rc != 2 && rc != 6) { 2093
@@ -2109,9 +2113,9 @@ int spu_restore(struct spu_state *new, struct spu *spu)
2109 2113
2110 acquire_spu_lock(spu); 2114
2111 harvest(NULL, spu); 2115
2112 spu->dar = 0;
2113 spu->dsisr = 0;
2114 spu->slb_replace = 0; 2116
2117 new->dar = 0;
2118 new->dsisr = 0;
2115 spu->class_0_pending = 0; 2119
2116 rc = __do_spu_restore(new, spu); 2120
2117 release_spu_lock(spu); 2121
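The switch.c hunks move the pending fault information into the per-context save area: spu_save() copies the software DAR/DSISR values into the outgoing state and clears them for the physical SPU, and spu_restore() clears the fields of the incoming state rather than poking the SPU, so a fault taken just before the switch can still be resolved on behalf of the saved context. A simplified sketch of that hand-off, with made-up struct names in place of struct spu and struct spu_state, is:

#include <stdint.h>

struct phys_spu {               /* software copy of the physical SPU's fault regs */
        uint64_t dar, dsisr;
};

struct saved_state {            /* per-context save area, cf. struct spu_state */
        uint64_t dar, dsisr;
};

void save_ctx(struct saved_state *prev, struct phys_spu *spu)
{
        /* Park any pending fault with the outgoing context... */
        prev->dar   = spu->dar;
        prev->dsisr = spu->dsisr;
        /* ...and leave the physical SPU with a clean slate. */
        spu->dar   = 0;
        spu->dsisr = 0;
}

void restore_ctx(struct saved_state *incoming, struct phys_spu *spu)
{
        (void)spu;              /* the physical copies were already reset at save time */
        /* The incoming context must not start with stale fault state. */
        incoming->dar   = 0;
        incoming->dsisr = 0;
}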