Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/gang.c     4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c  142
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h    6
3 files changed, 151 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c
index 0a752ce67c8a..71a443253021 100644
--- a/arch/powerpc/platforms/cell/spufs/gang.c
+++ b/arch/powerpc/platforms/cell/spufs/gang.c
@@ -75,8 +75,10 @@ void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx)
 {
         mutex_lock(&gang->mutex);
         WARN_ON(ctx->gang != gang);
-        if (!list_empty(&ctx->aff_list))
+        if (!list_empty(&ctx->aff_list)) {
                 list_del_init(&ctx->aff_list);
+                gang->aff_flags &= ~AFF_OFFSETS_SET;
+        }
         list_del_init(&ctx->gang_list);
         gang->contexts--;
         mutex_unlock(&gang->mutex);
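
Note on the hunk above: removing a context from an affinity chain invalidates
the per-context offsets computed for its gang, so AFF_OFFSETS_SET is cleared
alongside the list removal. A minimal sketch of the consequence, using only
helpers added later in this patch (the wrapper function itself is
hypothetical; the real code open-codes these checks in affinity_check()):

        /* Hypothetical helper: recompute the gang placement state that
         * spu_gang_remove_ctx() invalidated. */
        static void aff_revalidate(struct spu_gang *gang)
        {
                mutex_lock(&gang->aff_mutex);
                if (!(gang->aff_flags & AFF_MERGED))
                        aff_merge_remaining_ctxs(gang);
                if (!(gang->aff_flags & AFF_OFFSETS_SET))
                        aff_set_offsets(gang);
                mutex_unlock(&gang->aff_mutex);
        }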
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 6d0ab72cc70e..a9569de4c141 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -233,6 +233,8 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 
         if (ctx->flags & SPU_CREATE_NOSCHED)
                 atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
+        if (!list_empty(&ctx->aff_list))
+                atomic_inc(&ctx->gang->aff_sched_count);
 
         ctx->stats.slb_flt_base = spu->stats.slb_flt;
         ctx->stats.class2_intr_base = spu->stats.class2_intr;
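
The increment above is balanced by spu_unbind_context() in the last hunk of
this file: the gang counts how many of its affinity contexts are currently
running, and when the last of them unbinds, the cached reference SPU is
dropped so that the next schedule recomputes placement. The pairing,
expressed as hypothetical wrappers (the patch open-codes both sides):

        static void gang_aff_bind(struct spu_gang *gang)
        {
                atomic_inc(&gang->aff_sched_count);
        }

        static void gang_aff_unbind(struct spu_gang *gang)
        {
                /* last running affinity context: forget the reference
                 * SPU so the next schedule recomputes placement */
                if (atomic_dec_and_test(&gang->aff_sched_count))
                        gang->aff_ref_spu = NULL;
        }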
@@ -259,6 +261,143 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
         spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
 }
 
+/*
+ * XXX(hch): needs locking.
+ */
+static inline int sched_spu(struct spu *spu)
+{
+        return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
+}
+
+static void aff_merge_remaining_ctxs(struct spu_gang *gang)
+{
+        struct spu_context *ctx;
+
+        list_for_each_entry(ctx, &gang->list, gang_list) {
+                if (list_empty(&ctx->aff_list))
+                        list_add(&ctx->aff_list, &gang->aff_list_head);
+        }
+        gang->aff_flags |= AFF_MERGED;
+}
+
+static void aff_set_offsets(struct spu_gang *gang)
+{
+        struct spu_context *ctx;
+        int offset;
+
+        offset = -1;
+        list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
+                                                        aff_list) {
+                if (&ctx->aff_list == &gang->aff_list_head)
+                        break;
+                ctx->aff_offset = offset--;
+        }
+
+        offset = 0;
+        list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
+                if (&ctx->aff_list == &gang->aff_list_head)
+                        break;
+                ctx->aff_offset = offset++;
+        }
+
+        gang->aff_flags |= AFF_OFFSETS_SET;
+}
+
+static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
+                int group_size, int lowest_offset)
+{
+        struct spu *spu;
+        int node, n;
+
+        /*
+         * TODO: A better algorithm could be used to find a good spu to be
+         *       used as reference location for the ctxs chain.
+         */
+        node = cpu_to_node(raw_smp_processor_id());
+        for (n = 0; n < MAX_NUMNODES; n++, node++) {
+                node = (node < MAX_NUMNODES) ? node : 0;
+                if (!node_allowed(ctx, node))
+                        continue;
+                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
+                        if ((!mem_aff || spu->has_mem_affinity) &&
+                                                        sched_spu(spu))
+                                return spu;
+                }
+        }
+        return NULL;
+}
+
+static void aff_set_ref_point_location(struct spu_gang *gang)
+{
+        int mem_aff, gs, lowest_offset;
+        struct spu_context *ctx;
+        struct spu_context *tmp;
+
+        mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
+        lowest_offset = 0;
+        gs = 0;
+
+        list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
+                gs++;
+
+        list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
+                                                        aff_list) {
+                if (&ctx->aff_list == &gang->aff_list_head)
+                        break;
+                lowest_offset = ctx->aff_offset;
+        }
+
+        gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs, lowest_offset);
+}
+
+static struct spu *ctx_location(struct spu *ref, int offset)
+{
+        struct spu *spu;
+
+        spu = NULL;
+        if (offset >= 0) {
+                list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
+                        if (offset == 0)
+                                break;
+                        if (sched_spu(spu))
+                                offset--;
+                }
+        } else {
+                list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
+                        if (offset == 0)
+                                break;
+                        if (sched_spu(spu))
+                                offset++;
+                }
+        }
+        return spu;
+}
+
+/*
+ * affinity_check is called each time a context is going to be scheduled.
+ * It returns the spu ptr on which the context must run.
+ */
+struct spu *affinity_check(struct spu_context *ctx)
+{
+        struct spu_gang *gang;
+
+        if (list_empty(&ctx->aff_list))
+                return NULL;
+        gang = ctx->gang;
+        mutex_lock(&gang->aff_mutex);
+        if (!gang->aff_ref_spu) {
+                if (!(gang->aff_flags & AFF_MERGED))
+                        aff_merge_remaining_ctxs(gang);
+                if (!(gang->aff_flags & AFF_OFFSETS_SET))
+                        aff_set_offsets(gang);
+                aff_set_ref_point_location(gang);
+        }
+        mutex_unlock(&gang->aff_mutex);
+        if (!gang->aff_ref_spu)
+                return NULL;
+        return ctx_location(gang->aff_ref_spu, ctx->aff_offset);
+}
+
 /**
  * spu_unbind_context - unbind spu context from physical spu
  * @spu: physical spu to unbind from
@@ -272,6 +411,9 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 
         if (spu->ctx->flags & SPU_CREATE_NOSCHED)
                 atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
+        if (!list_empty(&ctx->aff_list))
+                if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
+                        ctx->gang->aff_ref_spu = NULL;
         spu_switch_notify(spu, NULL);
         spu_unmap_mappings(ctx);
         spu_save(&ctx->csa, spu);
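
A worked example of the placement scheme implemented above (illustrative
values only, not taken from the patch): consider a gang whose affinity list
is A <-> B <-> C <-> D with aff_ref_ctx == C.

        /*
         * aff_set_offsets() walks backwards from C assigning -1, -2, ...
         * and forwards from C itself assigning 0, 1, ...:
         *
         *        A = -2, B = -1, C = 0, D = 1
         *
         * If aff_set_ref_point_location() picks, say, SPU 4 on the chosen
         * node as aff_ref_spu, ctx_location() resolves each context to a
         * neighbouring SPU on that node's aff_list, skipping SPUs held by
         * NOSCHED contexts (for which sched_spu() fails):
         *
         *        A -> SPU 2, B -> SPU 3, C -> SPU 4, D -> SPU 5
         */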
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 36da17987e9c..42d8da8f0fb5 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -112,6 +112,7 @@ struct spu_context {
 
         struct list_head aff_list;
         int aff_head;
+        int aff_offset;
 };
 
 struct spu_gang {
@@ -124,6 +125,8 @@ struct spu_gang {
         struct list_head aff_list_head;
         struct mutex aff_mutex;
         int aff_flags;
+        struct spu *aff_ref_spu;
+        atomic_t aff_sched_count;
 };
 
 /* Flag bits for spu_gang aff_flags */
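
For reference, the semantics of the two new fields as derived from the
sched.c hunks above (the comments below are editorial, not part of the
patch):

        struct spu *aff_ref_spu;     /* reference SPU cached by
                                      * aff_set_ref_point_location(),
                                      * cleared on last unbind */
        atomic_t aff_sched_count;    /* number of the gang's affinity
                                      * contexts currently bound */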
@@ -208,6 +211,9 @@ void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
 /* fault handling */
 int spufs_handle_class1(struct spu_context *ctx);
 
+/* affinity */
+struct spu *affinity_check(struct spu_context *ctx);
+
 /* context management */
 extern atomic_t nr_spu_contexts;
 static inline void spu_acquire(struct spu_context *ctx)
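
affinity_check() is exported here for the scheduler; its call site is not
part of this patch. A minimal sketch of how a caller might consult it
(pick_spu() is hypothetical; spu_get_idle() is the existing fallback path
in sched.c):

        static struct spu *pick_spu(struct spu_context *ctx)
        {
                /* gang affinity dictates placement when available */
                struct spu *spu = affinity_check(ctx);

                if (spu)
                        return spu;
                return spu_get_idle(ctx);        /* normal idle search */
        }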