author     Christoph Hellwig <hch@lst.de>       2007-06-28 20:57:52 -0400
committer  Paul Mackerras <paulus@samba.org>    2007-07-03 01:24:44 -0400
commit     fe443ef2ac421c9c652e251e8733e2479d8e411a
tree       60914cfaf29232b9c3d4f555eac3b19b85a84254 /arch/powerpc/platforms/cell/spufs/sched.c
parent     379018022071489a7dffee74db2a267465dab561
[POWERPC] spusched: Dynamic timeslicing for SCHED_OTHER
Enable preemptive scheduling for non-RT contexts.
We use the same algorithms as the CPU scheduler to calculate the time
slice length, and for now we also use the same timeslice length as the
CPU scheduler. This might not be enough for good performance and can be
changed after some benchmarking.
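As a rough sketch of what the SCALE_PRIO() macro added below works out to
(assuming the usual MAX_PRIO = 140 and MAX_RT_PRIO = 100 from the CPU
scheduler, so MAX_USER_PRIO = 40 and MAX_USER_PRIO / 2 = 20; these values
are not part of this patch):

  nice -20 (prio 100):  4 * DEF_SPU_TIMESLICE * (140 - 100) / 20 = 8 * DEF   (~800 msecs)
  nice   0 (prio 120):      DEF_SPU_TIMESLICE * (140 - 120) / 20 = 1 * DEF   (~100 msecs)
  nice  19 (prio 139):      DEF_SPU_TIMESLICE * (140 - 139) / 20 = DEF / 20,
                            clamped up to MIN_SPU_TIMESLICE                  (~5 msecs)

which matches the [800ms ... 100ms ... 5ms] range documented in the patch.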
Note that currently we do not boost the priority for contexts waiting
on the runqueue for a long time, so higher-priority contexts could
starve ones with a higher nice value. This could easily be fixed once
the rework of the spu lists that Luke and I discussed is done.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/sched.c')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 54
1 file changed, 45 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index d673353b6d33..1b2916bdc1c8 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -44,10 +44,6 @@
 #include <asm/spu_priv1.h>
 #include "spufs.h"
 
-#define SPU_TIMESLICE (HZ)
-
-#define SPUSCHED_TICK (HZ / 100)
-
 struct spu_prio_array {
     DECLARE_BITMAP(bitmap, MAX_PRIO);
     struct list_head runq[MAX_PRIO];
@@ -60,6 +56,46 @@ static struct spu_prio_array *spu_prio;
 static struct task_struct *spusched_task;
 static struct timer_list spusched_timer;
 
+/*
+ * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
+ */
+#define NORMAL_PRIO 120
+
+/*
+ * Frequency of the spu scheduler tick. By default we do one SPU scheduler
+ * tick for every 10 CPU scheduler ticks.
+ */
+#define SPUSCHED_TICK (10)
+
+/*
+ * These are the 'tuning knobs' of the scheduler:
+ *
+ * Minimum timeslice is 5 msecs (or 10 jiffies, whichever is larger),
+ * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ */
+#define MIN_SPU_TIMESLICE max(5 * HZ / 100, 10)
+#define DEF_SPU_TIMESLICE (100 * HZ / 100)
+
+#define MAX_USER_PRIO (MAX_PRIO - MAX_RT_PRIO)
+#define SCALE_PRIO(x, prio) \
+    max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
+
+/*
+ * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
+ * [800ms ... 100ms ... 5ms]
+ *
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_TIMESLICE worth of execution time.
+ */
+void spu_set_timeslice(struct spu_context *ctx)
+{
+    if (ctx->prio < NORMAL_PRIO)
+        ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
+    else
+        ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
+}
+
 static inline int node_allowed(int node)
 {
     cpumask_t mask;
@@ -265,8 +301,8 @@ static struct spu *find_victim(struct spu_context *ctx)
         list_for_each_entry(spu, &spu_prio->active_list[node], list) {
             struct spu_context *tmp = spu->ctx;
 
-            if (tmp->rt_priority < ctx->rt_priority &&
-                (!victim || tmp->rt_priority < victim->rt_priority))
+            if (tmp->prio > ctx->prio &&
+                (!victim || tmp->prio > victim->prio))
                 victim = spu->ctx;
         }
         mutex_unlock(&spu_prio->active_mutex[node]);
@@ -333,7 +369,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
         * If this is a realtime thread we try to get it running by
         * preempting a lower priority thread.
         */
-        if (!spu && ctx->rt_priority)
+        if (!spu && rt_prio(ctx->prio))
            spu = find_victim(ctx);
        if (spu) {
            spu_bind_context(spu, ctx);
@@ -424,7 +460,7 @@ void spu_yield(struct spu_context *ctx)
 
 static void spusched_tick(struct spu_context *ctx)
 {
-    if (ctx->policy != SCHED_RR || --ctx->time_slice)
+    if (ctx->policy == SCHED_FIFO || --ctx->time_slice)
        return;
 
    /*
@@ -448,7 +484,7 @@ static void spusched_tick(struct spu_context *ctx)
             */
            wake_up(&ctx->stop_wq);
        }
-       ctx->time_slice = SPU_DEF_TIMESLICE;
+       spu_set_timeslice(ctx);
        mutex_unlock(&ctx->state_mutex);
    } else {
        ctx->time_slice++;