Diffstat (limited to 'arch')

 -rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c   | 23
 -rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 34
 -rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h |  6

 3 files changed, 55 insertions, 8 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index c0238ea5b55a..0b50fa5cb39d 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -18,15 +18,17 @@ void spufs_stop_callback(struct spu *spu)
 	wake_up_all(&ctx->stop_wq);
 }
 
-static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
+static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
 {
 	struct spu *spu;
 	u64 pte_fault;
 
 	*stat = ctx->ops->status_read(ctx);
-	if (ctx->state != SPU_STATE_RUNNABLE)
-		return 1;
+
 	spu = ctx->spu;
+	if (ctx->state != SPU_STATE_RUNNABLE ||
+	    test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
+		return 1;
 	pte_fault = spu->dsisr &
 		    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
 	return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
@@ -124,7 +126,7 @@ out:
 	return ret;
 }
 
-static int spu_run_init(struct spu_context *ctx, u32 * npc)
+static int spu_run_init(struct spu_context *ctx, u32 *npc)
 {
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
@@ -158,8 +160,8 @@ static int spu_run_init(struct spu_context *ctx, u32 * npc)
 	return 0;
 }
 
-static int spu_run_fini(struct spu_context *ctx, u32 * npc,
-		       u32 * status)
+static int spu_run_fini(struct spu_context *ctx, u32 *npc,
+		       u32 *status)
 {
 	int ret = 0;
 
@@ -298,6 +300,7 @@ static inline int spu_process_events(struct spu_context *ctx)
 long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 {
 	int ret;
+	struct spu *spu;
 	u32 status;
 
 	if (mutex_lock_interruptible(&ctx->run_mutex))
@@ -333,6 +336,14 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
 		if (unlikely(ret))
 			break;
+		spu = ctx->spu;
+		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
+						&ctx->sched_flags))) {
+			if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
+				spu_switch_notify(spu, ctx);
+				continue;
+			}
+		}
 
 		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
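The run.c side of this patch is one half of a set-flag/wake/act handshake: notify_spus_active() (added in sched.c below) sets SPU_SCHED_NOTIFY_ACTIVE on every active context and wakes its stop_wq, while the woken spufs_run_spu() loop atomically clears the bit and issues spu_switch_notify() on the context's behalf. The following is a minimal userspace analogue of that pattern, not spufs code; waker(), waiter() and switch_notify() are illustrative stand-ins, with pthreads and C11 atomics playing the roles of wake_up_all(), mb() and test_and_clear_bit(). It shows why the flag is set before the wakeup and cleared atomically by the woken side, so a notification is neither lost nor delivered twice.

/* Userspace analogue of the NOTIFY_ACTIVE handshake: one thread sets a
 * flag and wakes a waiter; the waiter atomically clears the flag and,
 * only if it was set, performs the notification on its own behalf. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t stop_wq = PTHREAD_COND_INITIALIZER; /* stands in for ctx->stop_wq */
static atomic_bool notify_active = false;                  /* stands in for SPU_SCHED_NOTIFY_ACTIVE */
static bool stopped = false;                               /* stands in for "SPU stopped" */

static void switch_notify(void)   /* illustrative stand-in for spu_switch_notify() */
{
	printf("notify: context is active\n");
}

static void *waker(void *arg)     /* plays the role of notify_spus_active() */
{
	(void)arg;
	atomic_store(&notify_active, true);  /* set the flag first (the mb() in the patch)... */
	pthread_mutex_lock(&lock);
	pthread_cond_broadcast(&stop_wq);    /* ...then wake the waiter, like wake_up_all() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void *waiter(void *arg)    /* plays the role of the spufs_run_spu() wait loop */
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stopped && !atomic_load(&notify_active))
		pthread_cond_wait(&stop_wq, &lock);  /* like spufs_wait(ctx->stop_wq, ...) */
	pthread_mutex_unlock(&lock);

	/* test_and_clear_bit(): only the thread that observes the flag acts on it */
	if (atomic_exchange(&notify_active, false))
		switch_notify();
	return NULL;
}

int main(void)
{
	pthread_t w, n;
	pthread_create(&w, NULL, waiter, NULL);
	pthread_create(&n, NULL, waker, NULL);
	pthread_join(n, NULL);
	pthread_join(w, NULL);
	return 0;
}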
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 49b8f6867a96..88ec333e90d3 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -204,21 +204,51 @@ static void spu_remove_from_active_list(struct spu *spu)
 
 static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
 
-static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
+void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
 {
 	blocking_notifier_call_chain(&spu_switch_notifier,
 			    ctx ? ctx->object_id : 0, spu);
 }
 
+static void notify_spus_active(void)
+{
+	int node;
+
+	/*
+	 * Wake up the active spu_contexts.
+	 *
+	 * When the awakened processes see their "notify_active" flag is set,
+	 * they will call spu_switch_notify();
+	 */
+	for_each_online_node(node) {
+		struct spu *spu;
+		mutex_lock(&spu_prio->active_mutex[node]);
+		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
+			struct spu_context *ctx = spu->ctx;
+			set_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags);
+			mb();	/* make sure any tasks woken up below */
+				/* can see the bit(s) set above */
+			wake_up_all(&ctx->stop_wq);
+		}
+		mutex_unlock(&spu_prio->active_mutex[node]);
+	}
+}
+
 int spu_switch_event_register(struct notifier_block * n)
 {
-	return blocking_notifier_chain_register(&spu_switch_notifier, n);
+	int ret;
+	ret = blocking_notifier_chain_register(&spu_switch_notifier, n);
+	if (!ret)
+		notify_spus_active();
+	return ret;
 }
+EXPORT_SYMBOL_GPL(spu_switch_event_register);
 
 int spu_switch_event_unregister(struct notifier_block * n)
 {
 	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
 }
+EXPORT_SYMBOL_GPL(spu_switch_event_unregister);
 
 /**
  * spu_bind_context - bind spu context to physical spu
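With spu_switch_event_register() exported and now replaying already-active contexts through notify_spus_active(), an out-of-tree profiler can attach after SPU jobs have started and still learn which context occupies which SPU. A minimal sketch of such a consumer module follows; it relies only on what is visible in this patch plus the standard notifier API, assumes spu_switch_event_register()/spu_switch_event_unregister() and struct spu are declared in <asm/spu.h>, and the module itself, its name and its printk body are hypothetical.

/* Hypothetical consumer of the exported SPU context-switch notifier.
 * The call-chain arguments (object_id as 'val', struct spu * as 'data')
 * match what spu_switch_notify() passes in this patch. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/spu.h>

static int spu_ctx_switch_event(struct notifier_block *nb,
				unsigned long object_id, void *data)
{
	struct spu *spu = data;

	/* object_id is 0 when the SPU is being unbound from a context */
	printk(KERN_INFO "spu %d now runs context object_id=0x%lx\n",
	       spu->number, object_id);
	return NOTIFY_OK;
}

static struct notifier_block spu_switch_nb = {
	.notifier_call = spu_ctx_switch_event,
};

static int __init spu_watch_init(void)
{
	/* Registration also replays already-active contexts via
	 * notify_spus_active(), so long-running SPU jobs are not missed. */
	return spu_switch_event_register(&spu_switch_nb);
}

static void __exit spu_watch_exit(void)
{
	spu_switch_event_unregister(&spu_switch_nb);
}

module_init(spu_watch_init);
module_exit(spu_watch_exit);
MODULE_LICENSE("GPL");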
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 42d8da8f0fb5..692dbd0edc37 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -44,6 +44,11 @@ enum {
 	SPU_SCHED_WAS_ACTIVE,	/* was active upon spu_acquire_saved()  */
 };
 
+/* ctx->sched_flags */
+enum {
+	SPU_SCHED_NOTIFY_ACTIVE,
+};
+
 struct spu_context {
 	struct spu *spu;		  /* pointer to a physical SPU */
 	struct spu_state csa;		  /* SPU context save area. */
@@ -240,6 +245,7 @@ void spu_release_saved(struct spu_context *ctx);
 int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
+void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
 void spu_set_timeslice(struct spu_context *ctx);
 void spu_update_sched_info(struct spu_context *ctx);
 void __spu_update_sched_info(struct spu_context *ctx);