Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/context.c')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c | 114
 1 file changed, 96 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 41eea4576b6..5d6195fc107 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -20,39 +20,38 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/fs.h>
+#include <linux/mm.h>
 #include <linux/slab.h>
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
 #include "spufs.h"
 
-struct spu_context *alloc_spu_context(void)
+struct spu_context *alloc_spu_context(struct address_space *local_store)
 {
 	struct spu_context *ctx;
 	ctx = kmalloc(sizeof *ctx, GFP_KERNEL);
 	if (!ctx)
 		goto out;
-	/* Future enhancement: do not call spu_alloc()
-	 * here. This step should be deferred until
-	 * spu_run()!!
-	 *
-	 * More work needs to be done to read(),
-	 * write(), mmap(), etc., so that operations
-	 * are performed on CSA when the context is
-	 * not currently being run. In this way we
-	 * can support arbitrarily large number of
-	 * entries in /spu, allow state queries, etc.
+	/* Binding to physical processor deferred
+	 * until spu_activate().
 	 */
-	ctx->spu = spu_alloc();
-	if (!ctx->spu)
-		goto out_free;
 	spu_init_csa(&ctx->csa);
 	if (!ctx->csa.lscsa) {
-		spu_free(ctx->spu);
 		goto out_free;
 	}
-	init_rwsem(&ctx->backing_sema);
 	spin_lock_init(&ctx->mmio_lock);
 	kref_init(&ctx->kref);
+	init_rwsem(&ctx->state_sema);
+	init_waitqueue_head(&ctx->ibox_wq);
+	init_waitqueue_head(&ctx->wbox_wq);
+	ctx->ibox_fasync = NULL;
+	ctx->wbox_fasync = NULL;
+	ctx->state = SPU_STATE_SAVED;
+	ctx->local_store = local_store;
+	ctx->spu = NULL;
+	ctx->ops = &spu_backing_ops;
+	ctx->owner = get_task_mm(current);
 	goto out;
 out_free:
 	kfree(ctx);
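
The reworked alloc_spu_context() above uses the kernel's goto-based unwind idiom: each failure label undoes only what has already succeeded, and with spu_alloc() gone the only failure after kmalloc() is the CSA setup. A minimal userspace sketch of the same idiom, not part of the patch, with hypothetical toy_* names standing in for the spufs structures:

/* Userspace sketch of the goto-unwind idiom (hypothetical names). */
#include <stdlib.h>

struct toy_ctx {
	void *ls_backing;	/* stands in for ctx->csa.lscsa */
	int state;
};

struct toy_ctx *alloc_toy_ctx(void)
{
	struct toy_ctx *ctx;

	ctx = malloc(sizeof *ctx);
	if (!ctx)
		goto out;
	ctx->ls_backing = malloc(256 * 1024);	/* deepest allocation */
	if (!ctx->ls_backing)
		goto out_free;	/* undo only what already succeeded */
	ctx->state = 0;		/* plain assignments cannot fail */
	goto out;
out_free:
	free(ctx);
	ctx = NULL;
out:
	return ctx;
}
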
@@ -65,8 +64,11 @@ void destroy_spu_context(struct kref *kref)
 {
 	struct spu_context *ctx;
 	ctx = container_of(kref, struct spu_context, kref);
-	if (ctx->spu)
-		spu_free(ctx->spu);
+	down_write(&ctx->state_sema);
+	spu_deactivate(ctx);
+	ctx->ibox_fasync = NULL;
+	ctx->wbox_fasync = NULL;
+	up_write(&ctx->state_sema);
 	spu_fini_csa(&ctx->csa);
 	kfree(ctx);
 }
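
destroy_spu_context() above is never called directly; put_spu_context() (next hunk) hands it to kref_put(), which invokes it only when the last reference drops, so teardown runs exactly once. A standalone sketch of that refcounting pattern, assumed rather than taken from the patch, using C11 atomics and made-up toy_* names in place of the kernel's kref API:

/* Sketch of the kref idiom: last put runs the release callback. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_kref { atomic_int refcount; };

static void toy_kref_init(struct toy_kref *k) { atomic_init(&k->refcount, 1); }
static void toy_kref_get(struct toy_kref *k)  { atomic_fetch_add(&k->refcount, 1); }

/* Returns 1 if this put released the object, like kref_put(). */
static int toy_kref_put(struct toy_kref *k, void (*release)(struct toy_kref *))
{
	if (atomic_fetch_sub(&k->refcount, 1) == 1) {
		release(k);
		return 1;
	}
	return 0;
}

struct toy_ctx { struct toy_kref kref; };

static void toy_destroy(struct toy_kref *k)
{
	/* container_of() in the kernel; here kref is the first member */
	struct toy_ctx *ctx = (struct toy_ctx *)k;
	free(ctx);
	puts("destroyed");
}

int main(void)
{
	struct toy_ctx *ctx = malloc(sizeof *ctx);
	if (!ctx)
		return 1;
	toy_kref_init(&ctx->kref);		/* refcount = 1, owner holds it */
	toy_kref_get(&ctx->kref);		/* second user */
	toy_kref_put(&ctx->kref, toy_destroy);	/* drops to 1, no destroy */
	toy_kref_put(&ctx->kref, toy_destroy);	/* drops to 0, destroys */
	return 0;
}

This is why destroy_spu_context() takes a struct kref * and recovers the context with container_of(): the kref core only knows about the embedded refcount.
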
@@ -82,4 +84,80 @@ int put_spu_context(struct spu_context *ctx)
 	return kref_put(&ctx->kref, &destroy_spu_context);
 }
 
+/* give up the mm reference when the context is about to be destroyed */
+void spu_forget(struct spu_context *ctx)
+{
+	struct mm_struct *mm;
+	spu_acquire_saved(ctx);
+	mm = ctx->owner;
+	ctx->owner = NULL;
+	mmput(mm);
+	spu_release(ctx);
+}
+
+void spu_acquire(struct spu_context *ctx)
+{
+	down_read(&ctx->state_sema);
+}
+
+void spu_release(struct spu_context *ctx)
+{
+	up_read(&ctx->state_sema);
+}
+
+static void spu_unmap_mappings(struct spu_context *ctx)
+{
+	unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
+}
+
+int spu_acquire_runnable(struct spu_context *ctx)
+{
+	int ret = 0;
 
+	down_read(&ctx->state_sema);
+	if (ctx->state == SPU_STATE_RUNNABLE)
+		return 0;
+	/* ctx is about to be freed, can't acquire any more */
+	if (!ctx->owner) {
+		ret = -EINVAL;
+		goto out;
+	}
+	up_read(&ctx->state_sema);
+
+	down_write(&ctx->state_sema);
+	if (ctx->state == SPU_STATE_SAVED) {
+		spu_unmap_mappings(ctx);
+		ret = spu_activate(ctx, 0);
+		ctx->state = SPU_STATE_RUNNABLE;
+	}
+	downgrade_write(&ctx->state_sema);
+	if (ret)
+		goto out;
+
+	/* On success, we return holding the lock */
+	return ret;
+out:
+	/* Release here, to simplify calling code. */
+	up_read(&ctx->state_sema);
+
+	return ret;
+}
+
+void spu_acquire_saved(struct spu_context *ctx)
+{
+	down_read(&ctx->state_sema);
+
+	if (ctx->state == SPU_STATE_SAVED)
+		return;
+
+	up_read(&ctx->state_sema);
+	down_write(&ctx->state_sema);
+
+	if (ctx->state == SPU_STATE_RUNNABLE) {
+		spu_unmap_mappings(ctx);
+		spu_deactivate(ctx);
+		ctx->state = SPU_STATE_SAVED;
+	}
+
+	downgrade_write(&ctx->state_sema);
+}
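
Both spu_acquire_runnable() and spu_acquire_saved() above follow the same lock-upgrade dance on state_sema: take the semaphore for reading, and only if the context must change state, drop it, retake it for writing, re-check the state (another thread may have changed it in the unlocked window), perform the transition, then downgrade back to a read lock. A userspace sketch of that pattern, assumed rather than from the patch, with hypothetical toy_* names; POSIX rwlocks have no downgrade_write(), so the last step here is an unlock-then-relock approximation of the kernel's atomic downgrade:

/* Sketch of the read/upgrade/re-check/downgrade pattern. */
#include <pthread.h>

enum toy_state { TOY_SAVED, TOY_RUNNABLE };

struct toy_ctx {
	pthread_rwlock_t state_lock;	/* init with pthread_rwlock_init() */
	enum toy_state state;
};

/* Returns holding the lock for reading, like spu_acquire_runnable(). */
void toy_acquire_runnable(struct toy_ctx *ctx)
{
	pthread_rwlock_rdlock(&ctx->state_lock);
	if (ctx->state == TOY_RUNNABLE)
		return;				/* fast path: already runnable */
	pthread_rwlock_unlock(&ctx->state_lock);

	pthread_rwlock_wrlock(&ctx->state_lock);
	if (ctx->state == TOY_SAVED)		/* re-check under write lock */
		ctx->state = TOY_RUNNABLE;	/* spu_activate() would go here */
	/* no atomic downgrade in POSIX: release and retake for reading */
	pthread_rwlock_unlock(&ctx->state_lock);
	pthread_rwlock_rdlock(&ctx->state_lock);
}

The re-check under the write lock is what makes the temporary unlock safe: whichever thread wins the race performs the transition, and the others see the new state and do nothing.
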