author:    Christoph Hellwig <hch@lst.de>        2007-06-28 20:58:02 -0400
committer: Paul Mackerras <paulus@samba.org>     2007-07-03 01:24:46 -0400
commit:    65de66f0b8bcb7431d9df82cf32b002062b3a611
tree:      a81eef8195d7f2f1c8c0ad110577b1ca92999c7e /arch/powerpc/platforms/cell/spufs/sched.c
parent:    476273adc7277333aed9963bc4dc9b39066d3038
[POWERPC] spufs: Implement /proc/spu_loadavg
Provide load average information for spu contexts. The format
is identical to /proc/loadavg, from which a lot of the code
and concepts are also borrowed.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/sched.c')
 -rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 127
 1 file changed, 119 insertions(+), 8 deletions(-)
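For readers who don't have /proc/loadavg's layout memorized, the fields are the 1-, 5- and 15-minute load averages, the number of running-or-waiting contexts over the total number of contexts, and the most recently assigned pid. A read of the new file would return a line like the following (values illustrative only, matching the seq_printf() format in the diff below):

0.31 0.08 0.02 1/4 1523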
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 540067550e88..9fc09306c9ae 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -36,6 +36,9 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
+#include <linux/pid_namespace.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 
 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -50,8 +53,11 @@ struct spu_prio_array {
 	spinlock_t runq_lock;
 	struct list_head active_list[MAX_NUMNODES];
 	struct mutex active_mutex[MAX_NUMNODES];
+	int nr_active[MAX_NUMNODES];
+	int nr_waiting;
 };
 
+static unsigned long spu_avenrun[3];
 static struct spu_prio_array *spu_prio;
 static struct task_struct *spusched_task;
 static struct timer_list spusched_timer;
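The new spu_avenrun[] array mirrors the kernel's global avenrun[]: three fixed-point values holding the 1-, 5- and 15-minute averages, updated from the scheduler tick further down.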
@@ -169,14 +175,18 @@ static int node_allowed(struct spu_context *ctx, int node)
  */
 static void spu_add_to_active_list(struct spu *spu)
 {
-	mutex_lock(&spu_prio->active_mutex[spu->node]);
-	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
-	mutex_unlock(&spu_prio->active_mutex[spu->node]);
+	int node = spu->node;
+
+	mutex_lock(&spu_prio->active_mutex[node]);
+	spu_prio->nr_active[node]++;
+	list_add_tail(&spu->list, &spu_prio->active_list[node]);
+	mutex_unlock(&spu_prio->active_mutex[node]);
 }
 
 static void __spu_remove_from_active_list(struct spu *spu)
 {
 	list_del_init(&spu->list);
+	spu_prio->nr_active[spu->node]--;
 }
 
 /**
@@ -275,6 +285,7 @@ static void __spu_add_to_rq(struct spu_context *ctx)
 {
 	int prio = ctx->prio;
 
+	spu_prio->nr_waiting++;
 	list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
 	set_bit(prio, spu_prio->bitmap);
 }
@@ -283,8 +294,10 @@ static void __spu_del_from_rq(struct spu_context *ctx)
 {
 	int prio = ctx->prio;
 
-	if (!list_empty(&ctx->rq))
+	if (!list_empty(&ctx->rq)) {
 		list_del_init(&ctx->rq);
+		spu_prio->nr_waiting--;
+	}
 	if (list_empty(&spu_prio->runq[prio]))
 		clear_bit(prio, spu_prio->bitmap);
 }
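Note the asymmetry the new braces introduce: nr_waiting is incremented unconditionally on insertion, but only decremented when the context was actually still on the run queue. __spu_del_from_rq() can be reached for a context that is no longer queued (hence the pre-existing list_empty() check), and decrementing in that case would let the counter drift negative.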
@@ -567,10 +580,56 @@ static void spusched_tick(struct spu_context *ctx)
 	}
 }
 
+/**
+ * count_active_contexts - count nr of active tasks
+ *
+ * Return the number of tasks currently running or waiting to run.
+ *
+ * Note that we don't take runq_lock / active_mutex here.  Reading
+ * a single 32bit value is atomic on powerpc, and we don't care
+ * about memory ordering issues here.
+ */
+static unsigned long count_active_contexts(void)
+{
+	int nr_active = 0, node;
+
+	for (node = 0; node < MAX_NUMNODES; node++)
+		nr_active += spu_prio->nr_active[node];
+	nr_active += spu_prio->nr_waiting;
+
+	return nr_active;
+}
+
+/**
+ * spu_calc_load - given tick count, update the avenrun load estimates.
+ * @ticks: tick count
+ *
+ * No locking against reading these values from userspace, as for
+ * the CPU loadavg code.
+ */
+static void spu_calc_load(unsigned long ticks)
+{
+	unsigned long active_tasks; /* fixed-point */
+	static int count = LOAD_FREQ;
+
+	count -= ticks;
+
+	if (unlikely(count < 0)) {
+		active_tasks = count_active_contexts() * FIXED_1;
+		do {
+			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
+			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
+			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
+			count += LOAD_FREQ;
+		} while (count < 0);
+	}
+}
+
 static void spusched_wake(unsigned long data)
 {
 	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
 	wake_up_process(spusched_task);
+	spu_calc_load(SPUSCHED_TICK);
 }
 
 static int spusched_thread(void *unused)
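FIXED_1, LOAD_FREQ, the EXP_* constants and the CALC_LOAD() macro all live in <linux/sched.h> and are not visible in this diff. As a reference, here is a minimal userspace sketch of the same fixed-point decay step, with the constants reproduced from the kernel headers of this era (assumed values, verify against your tree):

#include <stdio.h>

/* Reproduced from <linux/sched.h> (assumed, verify in your tree). */
#define FSHIFT	11			/* bits of fixed-point precision */
#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed-point */
#define EXP_1	1884			/* FIXED_1 * exp(-5s/1min)  */
#define EXP_5	2014			/* FIXED_1 * exp(-5s/5min)  */
#define EXP_15	2037			/* FIXED_1 * exp(-5s/15min) */

/* One decay step: load = load * e^(-5/T) + n * (1 - e^(-5/T)) */
#define CALC_LOAD(load, exp, n) \
	load *= exp; \
	load += n * (FIXED_1 - exp); \
	load >>= FSHIFT;

int main(void)
{
	/* One 5-second update interval with four active contexts. */
	unsigned long avg = 0, active = 4 * FIXED_1;

	CALC_LOAD(avg, EXP_1, active);

	/* Prints "0.31", i.e. 4 * (1 - exp(-5/60)), using the same
	 * integer/fraction split as LOAD_INT/LOAD_FRAC further down. */
	printf("%lu.%02lu\n", avg >> FSHIFT,
	       ((avg & (FIXED_1 - 1)) * 100) >> FSHIFT);
	return 0;
}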
@@ -598,13 +657,52 @@ static int spusched_thread(void *unused)
 	return 0;
 }
 
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
+
+static int show_spu_loadavg(struct seq_file *s, void *private)
+{
+	int a, b, c;
+
+	a = spu_avenrun[0] + (FIXED_1/200);
+	b = spu_avenrun[1] + (FIXED_1/200);
+	c = spu_avenrun[2] + (FIXED_1/200);
+
+	/*
+	 * Note that last_pid doesn't really make much sense for the
+	 * SPU loadavg (it even seems very odd on the CPU side..),
+	 * but we include it here to have a 100% compatible interface.
+	 */
+	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
+		LOAD_INT(a), LOAD_FRAC(a),
+		LOAD_INT(b), LOAD_FRAC(b),
+		LOAD_INT(c), LOAD_FRAC(c),
+		count_active_contexts(),
+		atomic_read(&nr_spu_contexts),
+		current->nsproxy->pid_ns->last_pid);
+	return 0;
+}
+
+static int spu_loadavg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, show_spu_loadavg, NULL);
+}
+
+static const struct file_operations spu_loadavg_fops = {
+	.open		= spu_loadavg_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 int __init spu_sched_init(void)
 {
-	int i;
+	struct proc_dir_entry *entry;
+	int err = -ENOMEM, i;
 
 	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
 	if (!spu_prio)
-		return -ENOMEM;
+		goto out;
 
 	for (i = 0; i < MAX_PRIO; i++) {
 		INIT_LIST_HEAD(&spu_prio->runq[i]);
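The FIXED_1/200 addend is 0.005 in fixed point, rounding the otherwise-truncating LOAD_FRAC() to the nearest hundredth. Since the proc entry is created with a NULL parent (see the init hunk below), the file appears as /proc/spu_loadavg and can be consumed exactly like /proc/loadavg; a hypothetical userspace reader, for illustration:

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/proc/spu_loadavg", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* e.g. "0.31 0.08 0.02 1/4 1523" */
	fclose(f);
	return 0;
}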
@@ -619,14 +717,25 @@ int __init spu_sched_init(void)
 
 	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
 	if (IS_ERR(spusched_task)) {
-		kfree(spu_prio);
-		return PTR_ERR(spusched_task);
+		err = PTR_ERR(spusched_task);
+		goto out_free_spu_prio;
 	}
 
+	entry = create_proc_entry("spu_loadavg", 0, NULL);
+	if (!entry)
+		goto out_stop_kthread;
+	entry->proc_fops = &spu_loadavg_fops;
+
 	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
 			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
 	return 0;
 
+out_stop_kthread:
+	kthread_stop(spusched_task);
+out_free_spu_prio:
+	kfree(spu_prio);
+out:
+	return err;
 }
 
 void __exit spu_sched_exit(void)
@@ -634,6 +743,8 @@ void __exit spu_sched_exit(void)
 	struct spu *spu, *tmp;
 	int node;
 
+	remove_proc_entry("spu_loadavg", NULL);
+
 	kthread_stop(spusched_task);
 
 	for (node = 0; node < MAX_NUMNODES; node++) {
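Teardown mirrors init in reverse order: the proc entry is removed first, so show_spu_loadavg() can no longer be entered while the rest of the scheduler state is being torn down.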