author	Arnd Bergmann <abergman@de.ibm.com>	2006-03-22 18:00:11 -0500
committer	Paul Mackerras <paulus@samba.org>	2006-03-26 22:48:26 -0500
commit	a33a7d7309d79656bc19a0e96fc4547a1633283e (patch)
tree	f3de6d139af1f1cbdf6d37800c9e13c07e9bc7f6
parent	2dd14934c9138c562d93c501e88c6d6f061eb8ba (diff)
[PATCH] spufs: implement mfc access for PPE-side DMA
This patch adds a new file called 'mfc' to each spufs directory. The file
accepts DMA commands that are a subset of what would be legal DMA commands
for problem state register access. Upon reading the file, a bitmask is
returned with the completed tag groups set.

The file is meant to be used from an abstraction in libspe that is added
by a different patch. From the kernel perspective, this means a process
can now offload a memory copy from or into an SPE local store without
having to run code on the SPE itself.

The transfer will only be performed while the SPE is owned by one thread
that is waiting in the spu_run system call, and the data will be
transferred into that thread's address space, independent of which thread
started the transfer.

Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
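To make the new interface concrete, here is a minimal, hypothetical userspace
sketch (not part of the patch): it queues one 'get' DMA through the mfc file
and blocks until the tag group completes. The struct mirrors mfc_dma_command
from spufs.h in this patch; the context path /spu/myctx and the MFC_GET_CMD
opcode value (0x40, taken from the Cell BE architecture documents) are
assumptions for illustration.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct mfc_dma_command {
	int32_t  pad;		/* reserved, must be 0 */
	uint32_t lsa;		/* local storage address */
	uint64_t ea;		/* effective address */
	uint16_t size;		/* transfer size, multiple of 16, max 16k */
	uint16_t tag;		/* tag group, only 0..15 allowed from user space */
	uint16_t class;		/* class ID, must be 0 in this version */
	uint16_t cmd;		/* command opcode */
};

#define MFC_GET_CMD	0x40	/* assumed opcode: DMA from EA into local store */

/* 16-byte alignment so (ea & 0xf) matches (lsa & 0xf) for lsa = 0 */
static char buf[4096] __attribute__((aligned(16)));

int main(void)
{
	/* "/spu/myctx" stands in for a real spufs context directory;
	   another thread must be waiting in spu_run on this context,
	   or the transfer will not be performed */
	int fd = open("/spu/myctx/mfc", O_RDWR);
	struct mfc_dma_command cmd = {
		.lsa  = 0,
		.ea   = (uint64_t)(uintptr_t)buf,
		.size = sizeof buf,
		.tag  = 1,
		.cmd  = MFC_GET_CMD,
	};
	uint32_t tags;

	if (fd < 0 || write(fd, &cmd, sizeof cmd) < 0)
		return 1;
	/* a blocking 4-byte read returns the completed tag-group bitmask */
	if (read(fd, &tags, sizeof tags) == sizeof tags)
		printf("completed tag groups: 0x%08x\n", tags);
	close(fd);
	return 0;
}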
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c          |   7
-rw-r--r--  arch/powerpc/platforms/cell/spufs/backing_ops.c |  47
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c     |   5
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c        | 294
-rw-r--r--  arch/powerpc/platforms/cell/spufs/hw_ops.c      |  57
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c       |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h       |  20
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c      |   3
-rw-r--r--  include/asm-powerpc/spu.h                       |   1
9 files changed, 431 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index a8fa1eeeb174..162b6cfa8a43 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -111,7 +111,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
 static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 {
-	pr_debug("%s\n", __FUNCTION__);
+	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);
 
 	/* Handle kernel space hash faults immediately.
 	   User hash faults need to be deferred to process context. */
@@ -168,7 +168,7 @@ static int __spu_trap_halt(struct spu *spu)
 static int __spu_trap_tag_group(struct spu *spu)
 {
 	pr_debug("%s\n", __FUNCTION__);
-	/* wake_up(&spu->dma_wq); */
+	spu->mfc_callback(spu);
 	return 0;
 }
 
@@ -242,6 +242,8 @@ spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
 	spu_mfc_dsisr_set(spu, 0ul);
 	spu_int_stat_clear(spu, 1, stat);
 	spin_unlock(&spu->register_lock);
+	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
+			dar, dsisr);
 
 	if (stat & 1) /* segment fault */
 		__spu_trap_data_seg(spu, dar);
@@ -632,6 +634,7 @@ static int __init create_spu(struct device_node *spe)
 	spu->ibox_callback = NULL;
 	spu->wbox_callback = NULL;
 	spu->stop_callback = NULL;
+	spu->mfc_callback = NULL;
 
 	mutex_lock(&spu_mutex);
 	spu->number = number++;
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index a5c489a53c61..f1d35ddc9df3 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -285,6 +285,49 @@ static void spu_backing_runcntl_stop(struct spu_context *ctx)
 	spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
 }
 
+static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask,
+					u32 mode)
+{
+	struct spu_problem_collapsed *prob = &ctx->csa.prob;
+	int ret;
+
+	spin_lock(&ctx->csa.register_lock);
+	ret = -EAGAIN;
+	if (prob->dma_querytype_RW)
+		goto out;
+	ret = 0;
+	/* FIXME: what are the side-effects of this? */
+	prob->dma_querymask_RW = mask;
+	prob->dma_querytype_RW = mode;
+out:
+	spin_unlock(&ctx->csa.register_lock);
+
+	return ret;
+}
+
+static u32 spu_backing_read_mfc_tagstatus(struct spu_context * ctx)
+{
+	return ctx->csa.prob.dma_tagstatus_R;
+}
+
+static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx)
+{
+	return ctx->csa.prob.dma_qstatus_R;
+}
+
+static int spu_backing_send_mfc_command(struct spu_context *ctx,
+					struct mfc_dma_command *cmd)
+{
+	int ret;
+
+	spin_lock(&ctx->csa.register_lock);
+	ret = -EAGAIN;
+	/* FIXME: set up priv2->puq */
+	spin_unlock(&ctx->csa.register_lock);
+
+	return ret;
+}
+
 struct spu_context_ops spu_backing_ops = {
 	.mbox_read = spu_backing_mbox_read,
 	.mbox_stat_read = spu_backing_mbox_stat_read,
@@ -305,4 +348,8 @@ struct spu_context_ops spu_backing_ops = {
 	.get_ls = spu_backing_get_ls,
 	.runcntl_write = spu_backing_runcntl_write,
 	.runcntl_stop = spu_backing_runcntl_stop,
+	.set_mfc_query = spu_backing_set_mfc_query,
+	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
+	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
+	.send_mfc_command = spu_backing_send_mfc_command,
 };
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 336f238102fd..7e016b9eab21 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -47,8 +47,11 @@ struct spu_context *alloc_spu_context(struct address_space *local_store)
 	init_waitqueue_head(&ctx->ibox_wq);
 	init_waitqueue_head(&ctx->wbox_wq);
 	init_waitqueue_head(&ctx->stop_wq);
+	init_waitqueue_head(&ctx->mfc_wq);
 	ctx->ibox_fasync = NULL;
 	ctx->wbox_fasync = NULL;
+	ctx->mfc_fasync = NULL;
+	ctx->tagwait = 0;
 	ctx->state = SPU_STATE_SAVED;
 	ctx->local_store = local_store;
 	ctx->spu = NULL;
@@ -68,8 +71,6 @@ void destroy_spu_context(struct kref *kref)
 	ctx = container_of(kref, struct spu_context, kref);
 	down_write(&ctx->state_sema);
 	spu_deactivate(ctx);
-	ctx->ibox_fasync = NULL;
-	ctx->wbox_fasync = NULL;
 	up_write(&ctx->state_sema);
 	spu_fini_csa(&ctx->csa);
 	kfree(ctx);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index dfa649c9b956..62fe9941ccee 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -20,6 +20,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#undef DEBUG
+
 #include <linux/fs.h>
 #include <linux/ioctl.h>
 #include <linux/module.h>
@@ -641,6 +643,297 @@ static u64 spufs_signal2_type_get(void *data)
 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
 			spufs_signal2_type_set, "%llu");
 
+
+static int spufs_mfc_open(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+	struct spu_context *ctx = i->i_ctx;
+
+	/* we don't want to deal with DMA into other processes */
+	if (ctx->owner != current->mm)
+		return -EINVAL;
+
+	if (atomic_read(&inode->i_count) != 1)
+		return -EBUSY;
+
+	file->private_data = ctx;
+	return nonseekable_open(inode, file);
+}
+
+/* interrupt-level mfc callback function. */
+void spufs_mfc_callback(struct spu *spu)
+{
+	struct spu_context *ctx = spu->ctx;
+
+	wake_up_all(&ctx->mfc_wq);
+
+	pr_debug("%s %s\n", __FUNCTION__, spu->name);
+	if (ctx->mfc_fasync) {
+		u32 free_elements, tagstatus;
+		unsigned int mask;
+
+		/* no need for spu_acquire in interrupt context */
+		free_elements = ctx->ops->get_mfc_free_elements(ctx);
+		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
+
+		mask = 0;
+		if (free_elements & 0xffff)
+			mask |= POLLOUT;
+		if (tagstatus & ctx->tagwait)
+			mask |= POLLIN;
+
+		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
+	}
+}
+
+static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
+{
+	/* See if there is one tag group is complete */
+	/* FIXME we need locking around tagwait */
+	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
+	ctx->tagwait &= ~*status;
+	if (*status)
+		return 1;
+
+	/* enable interrupt waiting for any tag group,
+	   may silently fail if interrupts are already enabled */
+	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
+	return 0;
+}
+
+static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
+			size_t size, loff_t *pos)
+{
+	struct spu_context *ctx = file->private_data;
+	int ret = -EINVAL;
+	u32 status;
+
+	if (size != 4)
+		goto out;
+
+	spu_acquire(ctx);
+	if (file->f_flags & O_NONBLOCK) {
+		status = ctx->ops->read_mfc_tagstatus(ctx);
+		if (!(status & ctx->tagwait))
+			ret = -EAGAIN;
+		else
+			ctx->tagwait &= ~status;
+	} else {
+		ret = spufs_wait(ctx->mfc_wq,
+			   spufs_read_mfc_tagstatus(ctx, &status));
+	}
+	spu_release(ctx);
+
+	if (ret)
+		goto out;
+
+	ret = 4;
+	if (copy_to_user(buffer, &status, 4))
+		ret = -EFAULT;
+
+out:
+	return ret;
+}
+
+static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
+{
+	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
+		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
+
+	switch (cmd->cmd) {
+	case MFC_PUT_CMD:
+	case MFC_PUTF_CMD:
+	case MFC_PUTB_CMD:
+	case MFC_GET_CMD:
+	case MFC_GETF_CMD:
+	case MFC_GETB_CMD:
+		break;
+	default:
+		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
+		return -EIO;
+	}
+
+	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
+		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
+			 cmd->ea, cmd->lsa);
+		return -EIO;
+	}
+
+	switch (cmd->size & 0xf) {
+	case 1:
+		break;
+	case 2:
+		if (cmd->lsa & 1)
+			goto error;
+		break;
+	case 4:
+		if (cmd->lsa & 3)
+			goto error;
+		break;
+	case 8:
+		if (cmd->lsa & 7)
+			goto error;
+		break;
+	case 0:
+		if (cmd->lsa & 15)
+			goto error;
+		break;
+	error:
+	default:
+		pr_debug("invalid DMA alignment %x for size %x\n",
+			 cmd->lsa & 0xf, cmd->size);
+		return -EIO;
+	}
+
+	if (cmd->size > 16 * 1024) {
+		pr_debug("invalid DMA size %x\n", cmd->size);
+		return -EIO;
+	}
+
+	if (cmd->tag & 0xfff0) {
+		/* we reserve the higher tag numbers for kernel use */
+		pr_debug("invalid DMA tag\n");
+		return -EIO;
+	}
+
+	if (cmd->class) {
+		/* not supported in this version */
+		pr_debug("invalid DMA class\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int spu_send_mfc_command(struct spu_context *ctx,
+				struct mfc_dma_command cmd,
+				int *error)
+{
+	*error = ctx->ops->send_mfc_command(ctx, &cmd);
+	if (*error == -EAGAIN) {
+		/* wait for any tag group to complete
+		   so we have space for the new command */
+		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
+		/* try again, because the queue might be
+		   empty again */
+		*error = ctx->ops->send_mfc_command(ctx, &cmd);
+		if (*error == -EAGAIN)
+			return 0;
+	}
+	return 1;
+}
+
+static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
+			size_t size, loff_t *pos)
+{
+	struct spu_context *ctx = file->private_data;
+	struct mfc_dma_command cmd;
+	int ret = -EINVAL;
+
+	if (size != sizeof cmd)
+		goto out;
+
+	ret = -EFAULT;
+	if (copy_from_user(&cmd, buffer, sizeof cmd))
+		goto out;
+
+	ret = spufs_check_valid_dma(&cmd);
+	if (ret)
+		goto out;
+
+	spu_acquire_runnable(ctx);
+	if (file->f_flags & O_NONBLOCK) {
+		ret = ctx->ops->send_mfc_command(ctx, &cmd);
+	} else {
+		int status;
+		ret = spufs_wait(ctx->mfc_wq,
+				 spu_send_mfc_command(ctx, cmd, &status));
+		if (status)
+			ret = status;
+	}
+	spu_release(ctx);
+
+	if (ret)
+		goto out;
+
+	ctx->tagwait |= 1 << cmd.tag;
+
+out:
+	return ret;
+}
+
+static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
+{
+	struct spu_context *ctx = file->private_data;
+	u32 free_elements, tagstatus;
+	unsigned int mask;
+
+	spu_acquire(ctx);
+	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
+	free_elements = ctx->ops->get_mfc_free_elements(ctx);
+	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
+	spu_release(ctx);
+
+	poll_wait(file, &ctx->mfc_wq, wait);
+
+	mask = 0;
+	if (free_elements & 0xffff)
+		mask |= POLLOUT | POLLWRNORM;
+	if (tagstatus & ctx->tagwait)
+		mask |= POLLIN | POLLRDNORM;
+
+	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
+		free_elements, tagstatus, ctx->tagwait);
+
+	return mask;
+}
+
+static int spufs_mfc_flush(struct file *file)
+{
+	struct spu_context *ctx = file->private_data;
+	int ret;
+
+	spu_acquire(ctx);
+#if 0
+/* this currently hangs */
+	ret = spufs_wait(ctx->mfc_wq,
+			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
+	if (ret)
+		goto out;
+	ret = spufs_wait(ctx->mfc_wq,
+			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
+out:
+#else
+	ret = 0;
+#endif
+	spu_release(ctx);
+
+	return ret;
+}
+
+static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
+			   int datasync)
+{
+	return spufs_mfc_flush(file);
+}
+
+static int spufs_mfc_fasync(int fd, struct file *file, int on)
+{
+	struct spu_context *ctx = file->private_data;
+
+	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
+}
+
+static struct file_operations spufs_mfc_fops = {
+	.open	 = spufs_mfc_open,
+	.read	 = spufs_mfc_read,
+	.write	 = spufs_mfc_write,
+	.poll	 = spufs_mfc_poll,
+	.flush	 = spufs_mfc_flush,
+	.fsync	 = spufs_mfc_fsync,
+	.fasync	 = spufs_mfc_fasync,
+};
+
 static void spufs_npc_set(void *data, u64 val)
 {
 	struct spu_context *ctx = data;
@@ -783,6 +1076,7 @@ struct tree_descr spufs_dir_contents[] = {
783 { "signal2", &spufs_signal2_fops, 0666, }, 1076 { "signal2", &spufs_signal2_fops, 0666, },
784 { "signal1_type", &spufs_signal1_type, 0666, }, 1077 { "signal1_type", &spufs_signal1_type, 0666, },
785 { "signal2_type", &spufs_signal2_type, 0666, }, 1078 { "signal2_type", &spufs_signal2_type, 0666, },
1079 { "mfc", &spufs_mfc_fops, 0666, },
786 { "npc", &spufs_npc_ops, 0666, }, 1080 { "npc", &spufs_npc_ops, 0666, },
787 { "fpcr", &spufs_fpcr_fops, 0666, }, 1081 { "fpcr", &spufs_fpcr_fops, 0666, },
788 { "decr", &spufs_decr_ops, 0666, }, 1082 { "decr", &spufs_decr_ops, 0666, },
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index 5445719bff79..a13a8b5a014d 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -232,6 +232,59 @@ static void spu_hw_runcntl_stop(struct spu_context *ctx)
 	spin_unlock_irq(&ctx->spu->register_lock);
 }
 
+static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode)
+{
+	struct spu_problem *prob = ctx->spu->problem;
+	int ret;
+
+	spin_lock_irq(&ctx->spu->register_lock);
+	ret = -EAGAIN;
+	if (in_be32(&prob->dma_querytype_RW))
+		goto out;
+	ret = 0;
+	out_be32(&prob->dma_querymask_RW, mask);
+	out_be32(&prob->dma_querytype_RW, mode);
+out:
+	spin_unlock_irq(&ctx->spu->register_lock);
+	return ret;
+}
+
+static u32 spu_hw_read_mfc_tagstatus(struct spu_context * ctx)
+{
+	return in_be32(&ctx->spu->problem->dma_tagstatus_R);
+}
+
+static u32 spu_hw_get_mfc_free_elements(struct spu_context *ctx)
+{
+	return in_be32(&ctx->spu->problem->dma_qstatus_R);
+}
+
+static int spu_hw_send_mfc_command(struct spu_context *ctx,
+				   struct mfc_dma_command *cmd)
+{
+	u32 status;
+	struct spu_problem *prob = ctx->spu->problem;
+
+	spin_lock_irq(&ctx->spu->register_lock);
+	out_be32(&prob->mfc_lsa_W, cmd->lsa);
+	out_be64(&prob->mfc_ea_W, cmd->ea);
+	out_be32(&prob->mfc_union_W.by32.mfc_size_tag32,
+		 cmd->size << 16 | cmd->tag);
+	out_be32(&prob->mfc_union_W.by32.mfc_class_cmd32,
+		 cmd->class << 16 | cmd->cmd);
+	status = in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
+	spin_unlock_irq(&ctx->spu->register_lock);
+
+	switch (status & 0xffff) {
+	case 0:
+		return 0;
+	case 2:
+		return -EAGAIN;
+	default:
+		return -EINVAL;
+	}
+}
+
 struct spu_context_ops spu_hw_ops = {
 	.mbox_read = spu_hw_mbox_read,
 	.mbox_stat_read = spu_hw_mbox_stat_read,
@@ -252,4 +305,8 @@ struct spu_context_ops spu_hw_ops = {
 	.get_ls = spu_hw_get_ls,
 	.runcntl_write = spu_hw_runcntl_write,
 	.runcntl_stop = spu_hw_runcntl_stop,
+	.set_mfc_query = spu_hw_set_mfc_query,
+	.read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
+	.get_mfc_free_elements = spu_hw_get_mfc_free_elements,
+	.send_mfc_command = spu_hw_send_mfc_command,
 };
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 963182fbd1aa..bf652cd77000 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -180,6 +180,7 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
 	spu->ibox_callback = spufs_ibox_callback;
 	spu->wbox_callback = spufs_wbox_callback;
 	spu->stop_callback = spufs_stop_callback;
+	spu->mfc_callback = spufs_mfc_callback;
 	mb();
 	spu_unmap_mappings(ctx);
 	spu_restore(&ctx->csa, spu);
@@ -197,6 +198,7 @@ static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
 	spu->ibox_callback = NULL;
 	spu->wbox_callback = NULL;
 	spu->stop_callback = NULL;
+	spu->mfc_callback = NULL;
 	spu->mm = NULL;
 	spu->pid = 0;
 	spu->prio = MAX_PRIO;
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index db2601f0abd5..57d687ca3f03 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -55,13 +55,27 @@ struct spu_context {
 	wait_queue_head_t ibox_wq;
 	wait_queue_head_t wbox_wq;
 	wait_queue_head_t stop_wq;
+	wait_queue_head_t mfc_wq;
 	struct fasync_struct *ibox_fasync;
 	struct fasync_struct *wbox_fasync;
+	struct fasync_struct *mfc_fasync;
+	u32 tagwait;
 	struct spu_context_ops *ops;
 	struct work_struct reap_work;
 	u64 flags;
 };
 
+struct mfc_dma_command {
+	int32_t pad;	/* reserved */
+	uint32_t lsa;	/* local storage address */
+	uint64_t ea;	/* effective address */
+	uint16_t size;	/* transfer size */
+	uint16_t tag;	/* command tag */
+	uint16_t class;	/* class ID */
+	uint16_t cmd;	/* command opcode */
+};
+
+
 /* SPU context query/set operations. */
 struct spu_context_ops {
 	int (*mbox_read) (struct spu_context * ctx, u32 * data);
@@ -84,6 +98,11 @@ struct spu_context_ops {
 	char*(*get_ls) (struct spu_context * ctx);
 	void (*runcntl_write) (struct spu_context * ctx, u32 data);
 	void (*runcntl_stop) (struct spu_context * ctx);
+	int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode);
+	u32 (*read_mfc_tagstatus)(struct spu_context * ctx);
+	u32 (*get_mfc_free_elements)(struct spu_context *ctx);
+	int (*send_mfc_command)(struct spu_context *ctx,
+				struct mfc_dma_command *cmd);
 };
 
 extern struct spu_context_ops spu_hw_ops;
@@ -159,5 +178,6 @@ size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
 void spufs_ibox_callback(struct spu *spu);
 void spufs_wbox_callback(struct spu *spu);
 void spufs_stop_callback(struct spu *spu);
+void spufs_mfc_callback(struct spu *spu);
 
 #endif
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 212db28531fa..97898d5d34e5 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -2145,7 +2145,8 @@ static void init_priv1(struct spu_state *csa)
 	csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
 					CLASS1_ENABLE_STORAGE_FAULT_INTR;
 	csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
-					CLASS2_ENABLE_SPU_HALT_INTR;
+					CLASS2_ENABLE_SPU_HALT_INTR |
+					CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
 }
 
 static void init_priv2(struct spu_state *csa)
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index b5c90d6fdceb..8564b8234069 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -137,6 +137,7 @@ struct spu {
 	void (* wbox_callback)(struct spu *spu);
 	void (* ibox_callback)(struct spu *spu);
 	void (* stop_callback)(struct spu *spu);
+	void (* mfc_callback)(struct spu *spu);
 
 	char irq_c0[8];
 	char irq_c1[8];