author    Luke Browning <lukebr@linux.vnet.ibm.com>  2007-12-20 02:39:59 -0500
committer Paul Mackerras <paulus@samba.org>          2007-12-21 03:46:20 -0500
commit    91569531d1297db42d68136ac0c85cd85223d0b9
tree      5e03e7782bd21a3557678c19930ed52e0cae3b9c /arch/powerpc/platforms/cell
parent    d6ad39bc53521275d14fde86bfb94d9b2ddb7a08
[POWERPC] spufs: reorganize spu_run_init
This cleans up spu_run_init so that it does all of the spu
initialization for spufs_run_spu. It initializes the spu context as
much as possible before it activates the spu and writes the runcntl
register.
Signed-off-by: Luke Browning <lukebr@linux.vnet.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
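
For orientation, the sketch below models the reorganized spu_run_init flow in plain user-space C. Every type and helper here is a stub invented for illustration; only the ordering mirrors the patch below: initialize the context, activate it if it was saved, and write the run-control register last.

/*
 * Hypothetical user-space model of the patched flow; toy_ctx,
 * toy_activate and toy_run_init are stubs, not spufs code.
 */
#include <stdio.h>

enum ctx_state { CTX_SAVED, CTX_RUNNABLE };

struct toy_ctx {
        int isolated;                   /* stands in for SPU_CREATE_ISOLATE */
        enum ctx_state state;
        unsigned long runcntl;
};

static int toy_activate(struct toy_ctx *c)
{
        c->state = CTX_RUNNABLE;        /* context now owns an spu */
        return 0;
}

static int toy_run_init(struct toy_ctx *c)
{
        /* accounting: system time starts here (SPU_UTIL_SYSTEM) */

        if (c->isolated) {
                /* isolated state needs the loader running on an spu,
                 * so a saved context is activated before anything else */
                if (c->state == CTX_SAVED && toy_activate(c))
                        return -1;
                /* ... set up the isolated loader, keep user runcntl bits ... */
        } else {
                /* ... program privcntl and npc ... */
                if (c->state == CTX_SAVED && toy_activate(c))
                        return -1;
        }

        /* only now: flip to user-time accounting and start the spu */
        c->runcntl = 1;
        return 0;
}

int main(void)
{
        struct toy_ctx c = { .isolated = 0, .state = CTX_SAVED };

        if (toy_run_init(&c) == 0)
                printf("started: state=%d runcntl=%lu\n", c.state, c.runcntl);
        return 0;
}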
Diffstat (limited to 'arch/powerpc/platforms/cell')
 -rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c   | 55
 -rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 35
 2 files changed, 56 insertions(+), 34 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 3b3de6c7ee5..652ae1366dc 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -152,23 +152,41 @@ out:
 static int spu_run_init(struct spu_context *ctx, u32 *npc)
 {
         unsigned long runcntl;
+        int ret;
 
         spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
         if (ctx->flags & SPU_CREATE_ISOLATE) {
+                /*
+                 * Force activation of spu.  Isolated state assumes that
+                 * special loader context is loaded and running on spu.
+                 */
+                if (ctx->state == SPU_STATE_SAVED) {
+                        spu_set_timeslice(ctx);
+
+                        ret = spu_activate(ctx, 0);
+                        if (ret)
+                                return ret;
+                }
 
                 if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
-                        int ret = spu_setup_isolated(ctx);
+                        ret = spu_setup_isolated(ctx);
                         if (ret)
                                 return ret;
                 }
 
-                /* if userspace has set the runcntrl register (eg, to issue an
-                 * isolated exit), we need to re-set it here */
+                /*
+                 * If userspace has set the runcntrl register (eg, to
+                 * issue an isolated exit), we need to re-set it here
+                 */
                 runcntl = ctx->ops->runcntl_read(ctx) &
                         (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
                 if (runcntl == 0)
                         runcntl = SPU_RUNCNTL_RUNNABLE;
+
+                spuctx_switch_state(ctx, SPU_UTIL_USER);
+                ctx->ops->runcntl_write(ctx, runcntl);
+
         } else {
                 unsigned long privcntl;
 
@@ -180,11 +198,17 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
 
                 ctx->ops->npc_write(ctx, *npc);
                 ctx->ops->privcntl_write(ctx, privcntl);
-        }
 
-        ctx->ops->runcntl_write(ctx, runcntl);
+                if (ctx->state == SPU_STATE_SAVED) {
+                        spu_set_timeslice(ctx);
+                        ret = spu_activate(ctx, 0);
+                        if (ret)
+                                return ret;
+                }
 
-        spuctx_switch_state(ctx, SPU_UTIL_USER);
+                spuctx_switch_state(ctx, SPU_UTIL_USER);
+                ctx->ops->runcntl_write(ctx, runcntl);
+        }
 
         return 0;
 }
@@ -323,25 +347,8 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
         ctx->event_return = 0;
 
         spu_acquire(ctx);
-        if (ctx->state == SPU_STATE_SAVED) {
-                __spu_update_sched_info(ctx);
-                spu_set_timeslice(ctx);
 
-                ret = spu_activate(ctx, 0);
-                if (ret) {
-                        spu_release(ctx);
-                        goto out;
-                }
-        } else {
-                /*
-                 * We have to update the scheduling priority under active_mutex
-                 * to protect against find_victim().
-                 *
-                 * No need to update the timeslice ASAP, it will get updated
-                 * once the current one has expired.
-                 */
-                spu_update_sched_info(ctx);
-        }
+        spu_update_sched_info(ctx);
 
         ret = spu_run_init(ctx, npc);
         if (ret) {
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 82ea576c53a..ef0e5e230fb 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -105,6 +105,12 @@ void spu_set_timeslice(struct spu_context *ctx)
 void __spu_update_sched_info(struct spu_context *ctx)
 {
         /*
+         * assert that the context is not on the runqueue, so it is safe
+         * to change its scheduling parameters.
+         */
+        BUG_ON(!list_empty(&ctx->rq));
+
+        /*
          * 32-Bit assignments are atomic on powerpc, and we don't care about
          * memory ordering here because retrieving the controlling thread is
          * per definition racy.
@@ -124,23 +130,28 @@ void __spu_update_sched_info(struct spu_context *ctx)
         ctx->policy = current->policy;
 
         /*
-         * A lot of places that don't hold list_mutex poke into
-         * cpus_allowed, including grab_runnable_context which
-         * already holds the runq_lock.  So abuse runq_lock
-         * to protect this field as well.
+         * TO DO: the context may be loaded, so we may need to activate
+         * it again on a different node. But it shouldn't hurt anything
+         * to update its parameters, because we know that the scheduler
+         * is not actively looking at this field, since it is not on the
+         * runqueue. The context will be rescheduled on the proper node
+         * if it is timesliced or preempted.
          */
-        spin_lock(&spu_prio->runq_lock);
         ctx->cpus_allowed = current->cpus_allowed;
-        spin_unlock(&spu_prio->runq_lock);
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
 {
-        int node = ctx->spu->node;
+        int node;
 
-        mutex_lock(&cbe_spu_info[node].list_mutex);
-        __spu_update_sched_info(ctx);
-        mutex_unlock(&cbe_spu_info[node].list_mutex);
+        if (ctx->state == SPU_STATE_RUNNABLE) {
+                node = ctx->spu->node;
+                mutex_lock(&cbe_spu_info[node].list_mutex);
+                __spu_update_sched_info(ctx);
+                mutex_unlock(&cbe_spu_info[node].list_mutex);
+        } else {
+                __spu_update_sched_info(ctx);
+        }
 }
 
 static int __node_allowed(struct spu_context *ctx, int node)
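
The spu_update_sched_info hunk above makes the locking conditional on context state. A minimal user-space analogue of that rule might look like the following sketch (pthreads standing in for list_mutex; all names invented for illustration):

/*
 * Take the per-node lock only when the context is runnable and thus
 * visible to scheduler code walking the node lists; a saved context
 * sits on no such list, so it can be updated without the lock.
 */
#include <pthread.h>
#include <stdio.h>

enum ctx_state { CTX_SAVED, CTX_RUNNABLE };

struct toy_node { pthread_mutex_t list_mutex; };
struct toy_ctx  { enum ctx_state state; struct toy_node *node; int prio; };

static void __toy_update(struct toy_ctx *c, int prio)
{
        c->prio = prio;                 /* __spu_update_sched_info analogue */
}

static void toy_update(struct toy_ctx *c, int prio)
{
        if (c->state == CTX_RUNNABLE) {
                /* scheduler may be looking: serialize with list walkers */
                pthread_mutex_lock(&c->node->list_mutex);
                __toy_update(c, prio);
                pthread_mutex_unlock(&c->node->list_mutex);
        } else {
                /* on no list: safe without the lock */
                __toy_update(c, prio);
        }
}

int main(void)
{
        struct toy_node n = { PTHREAD_MUTEX_INITIALIZER };
        struct toy_ctx  c = { CTX_SAVED, &n, 0 };

        toy_update(&c, 10);             /* saved: lock-free path */
        c.state = CTX_RUNNABLE;
        toy_update(&c, 20);             /* runnable: locked path */
        printf("prio=%d\n", c.prio);
        return 0;
}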
@@ -604,6 +615,10 @@ static struct spu *find_victim(struct spu_context *ctx)
                          * higher priority contexts before lower priority
                          * ones, so this is safe until we introduce
                          * priority inheritance schemes.
+                         *
+                         * XXX if the highest priority context is locked,
+                         * this can loop a long time.  Might be better to
+                         * look at another context or give up after X retries.
                          */
                         if (!mutex_trylock(&victim->state_mutex)) {
                                 victim = NULL;
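
The new XXX comment suggests bounding the retries. One possible shape for that, sketched in user space with pthread_mutex_trylock standing in for the kernel's mutex_trylock (max_tries and the helper are invented here, not spufs code):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Bound the number of trylock attempts instead of spinning
 * indefinitely on a long-held lock. */
static int trylock_bounded(pthread_mutex_t *m, int max_tries)
{
        int i;

        for (i = 0; i < max_tries; i++) {
                if (pthread_mutex_trylock(m) == 0)
                        return 0;       /* acquired */
                sched_yield();          /* let the holder make progress */
        }
        return -1;                      /* give up; pick another victim */
}

int main(void)
{
        pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

        if (trylock_bounded(&m, 5) == 0) {
                puts("acquired");
                pthread_mutex_unlock(&m);
        }
        return 0;
}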