author     Glenn Elliott <gelliott@cs.unc.edu>  2014-02-04 20:07:27 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2014-02-04 20:07:27 -0500
commit     48e8cdc06c6bf3e3344ef8824843d6a83434cea5
tree       482885b85b7c16455bd5d13d8f47646026f41ca8  /litmus/litmus_proc.c
parent     1c37b770a1898b1d95468aee69c443cfc04d21e1
/proc/litmus: Export info. on CPU <-> cluster
This patch adds a framework by which plugins can
export information about the CPU <-> cluster (a.k.a.
scheduling domain) mapping, one file per cluster and per CPU.
/proc/litmus/domains/<domain#>: This file contains a CPU
mask describing the CPUs scheduled/managed by that domain.
Files are named by index. For example, the first scheduling
domain would be '0'.
/proc/litmus/cpus/<cpu#>: This file contains a domain
mask describing which domains manage this CPU. Normally,
only one bit will be set in this mask, but overlapping clusters
can also be expressed by setting multiple bits.
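For illustration, here is a minimal, hypothetical sketch of how a plugin might publish such a mapping for a machine with four CPUs split into two clusters of two, using the helpers added by this patch. The names struct domain_proc_info, struct cd_mapping (with its id and mask fields), init_domain_proc_info(), and activate_domain_proc() come from the patch below; the function name example_setup_domain_proc(), the variable example_map, and the cluster layout are made up for the example, and the declarations introduced by this patch are assumed to be in scope.

/*
 * Hypothetical example (not part of this patch): publish a 4-CPU,
 * 2-cluster mapping via /proc/litmus/{cpus,domains}.
 */
static struct domain_proc_info example_map;

static long example_setup_domain_proc(void)
{
	const int num_cpus = 4, num_domains = 2;
	int cpu, domain;
	long err;

	/* init_domain_proc_info() allocates the per-CPU and per-domain
	 * mask arrays; the caller fills in the counts and the mapping. */
	err = init_domain_proc_info(&example_map, num_cpus, num_domains);
	if (err)
		return err;
	example_map.num_cpus = num_cpus;
	example_map.num_domains = num_domains;

	for (cpu = 0; cpu < num_cpus; ++cpu) {
		domain = cpu / 2; /* CPUs {0,1} -> domain 0, {2,3} -> domain 1 */
		example_map.cpu_to_domains[cpu].id = cpu;
		cpumask_set_cpu(domain, example_map.cpu_to_domains[cpu].mask);
		example_map.domain_to_cpus[domain].id = domain;
		cpumask_set_cpu(cpu, example_map.domain_to_cpus[domain].mask);
	}

	/* Creates /proc/litmus/cpus/{0..3} and /proc/litmus/domains/{0,1}. */
	return activate_domain_proc(&example_map);
}

With this mapping, /proc/litmus/domains/0 would presumably show a CPU mask covering CPUs 0 and 1 (formatted by cpumask_scnprintf()), while /proc/litmus/cpus/2 would show a domain mask with only bit 1 set.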
Diffstat (limited to 'litmus/litmus_proc.c')
-rw-r--r--  litmus/litmus_proc.c  171
1 file changed, 170 insertions(+), 1 deletion(-)
diff --git a/litmus/litmus_proc.c b/litmus/litmus_proc.c
index 1ebf1277f5d3..4db3fe2a672d 100644
--- a/litmus/litmus_proc.c
+++ b/litmus/litmus_proc.c
@@ -3,6 +3,7 @@
  */
 
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/seq_file.h>
 
@@ -21,7 +22,10 @@ static struct proc_dir_entry *litmus_dir = NULL,
 #ifdef CONFIG_RELEASE_MASTER
 	*release_master_file = NULL,
 #endif
-	*plugs_file = NULL;
+	*plugs_file = NULL,
+	*domains_dir = NULL,
+	*cpus_dir = NULL;
+
 
 /* in litmus/sync.c */
 int count_tasks_waiting_for_release(void);
@@ -218,11 +222,32 @@ int __init init_litmus_proc(void)
 	plugs_file = proc_create("loaded", 0444, plugs_dir,
 				 &litmus_loaded_proc_fops);
 
+	domains_dir = proc_mkdir("domains", litmus_dir);
+	if (!domains_dir) {
+		printk(KERN_ERR "Could not allocate domains directory "
+				"procfs entry.\n");
+		return -ENOMEM;
+	}
+
+	cpus_dir = proc_mkdir("cpus", litmus_dir);
+	if (!cpus_dir) {
+		printk(KERN_ERR "Could not allocate cpus directory "
+				"procfs entry.\n");
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
 void exit_litmus_proc(void)
 {
+	if (cpus_dir || domains_dir) {
+		deactivate_domain_proc();
+		if (cpus_dir)
+			remove_proc_entry("cpus", litmus_dir);
+		if (domains_dir)
+			remove_proc_entry("domains", litmus_dir);
+	}
 	if (plugs_file)
 		remove_proc_entry("loaded", plugs_dir);
 	if (plugs_dir)
@@ -405,3 +430,147 @@ struct proc_dir_entry* create_cluster_file(struct proc_dir_entry* parent,
 	}
 	return cluster_file;
 }
+
+static struct domain_proc_info* active_mapping = NULL;
+
+static int litmus_mapping_proc_show(struct seq_file *m, void *v)
+{
+	struct cd_mapping *mapping = (struct cd_mapping*) m->private;
+	char buf[256];
+
+	if(!mapping)
+		return 0;
+
+	cpumask_scnprintf(buf, sizeof(buf), mapping->mask);
+	buf[255] = '\0'; /* just in case... */
+	seq_printf(m, "%s\n", buf);
+	return 0;
+}
+
+static int litmus_mapping_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, litmus_mapping_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations litmus_domain_proc_fops = {
+	.open = litmus_mapping_proc_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+long activate_domain_proc(struct domain_proc_info* map)
+{
+	int i;
+	char name[8];
+
+	if (!map)
+		return -EINVAL;
+	if (cpus_dir == NULL || domains_dir == NULL)
+		return -EINVAL;
+
+	if (active_mapping)
+		deactivate_domain_proc();
+
+	active_mapping = map;
+
+	for (i = 0; i < map->num_cpus; ++i) {
+		struct cd_mapping* m = &map->cpu_to_domains[i];
+		snprintf(name, sizeof(name), "%d", m->id);
+		m->proc_file = proc_create_data(name, 0444, cpus_dir,
+				&litmus_domain_proc_fops, (void*)m);
+	}
+
+	for (i = 0; i < map->num_domains; ++i) {
+		struct cd_mapping* m = &map->domain_to_cpus[i];
+		snprintf(name, sizeof(name), "%d", m->id);
+		m->proc_file = proc_create_data(name, 0444, domains_dir,
+				&litmus_domain_proc_fops, (void*)m);
+	}
+
+	return 0;
+}
+
+long deactivate_domain_proc()
+{
+	int i;
+	char name[65];
+
+	struct domain_proc_info* map = active_mapping;
+
+	if (!map)
+		return -EINVAL;
+
+	for (i = 0; i < map->num_cpus; ++i) {
+		struct cd_mapping* m = &map->cpu_to_domains[i];
+		snprintf(name, sizeof(name), "%d", m->id);
+		remove_proc_entry(name, cpus_dir);
+		m->proc_file = NULL;
+	}
+	for (i = 0; i < map->num_domains; ++i) {
+		struct cd_mapping* m = &map->domain_to_cpus[i];
+		snprintf(name, sizeof(name), "%d", m->id);
+		remove_proc_entry(name, domains_dir);
+		m->proc_file = NULL;
+	}
+
+	active_mapping = NULL;
+
+	return 0;
+}
+
+long init_domain_proc_info(struct domain_proc_info* m,
+		int num_cpus, int num_domains)
+{
+	int i;
+	int num_alloced_cpu_masks = 0;
+	int num_alloced_domain_masks = 0;
+
+	m->cpu_to_domains =
+		kmalloc(sizeof(*(m->cpu_to_domains))*num_cpus,
+			GFP_ATOMIC);
+	if(!m->cpu_to_domains)
+		goto failure;
+
+	m->domain_to_cpus =
+		kmalloc(sizeof(*(m->domain_to_cpus))*num_domains,
+			GFP_ATOMIC);
+	if(!m->domain_to_cpus)
+		goto failure;
+
+	for(i = 0; i < num_cpus; ++i) {
+		if(!zalloc_cpumask_var(&m->cpu_to_domains[i].mask, GFP_ATOMIC))
+			goto failure;
+		++num_alloced_cpu_masks;
+	}
+	for(i = 0; i < num_domains; ++i) {
+		if(!zalloc_cpumask_var(&m->domain_to_cpus[i].mask, GFP_ATOMIC))
+			goto failure;
+		++num_alloced_domain_masks;
+	}
+
+	return 0;
+
+failure:
+	for(i = 0; i < num_alloced_cpu_masks; ++i)
+		free_cpumask_var(m->cpu_to_domains[i].mask);
+	for(i = 0; i < num_alloced_domain_masks; ++i)
+		free_cpumask_var(m->domain_to_cpus[i].mask);
+	if(m->cpu_to_domains)
+		kfree(m->cpu_to_domains);
+	if(m->domain_to_cpus)
+		kfree(m->domain_to_cpus);
+	return -ENOMEM;
+}
+
+void destroy_domain_proc_info(struct domain_proc_info* m)
+{
+	int i;
+	for(i = 0; i < m->num_cpus; ++i)
+		free_cpumask_var(m->cpu_to_domains[i].mask);
+	for(i = 0; i < m->num_domains; ++i)
+		free_cpumask_var(m->domain_to_cpus[i].mask);
+	kfree(m->cpu_to_domains);
+	kfree(m->domain_to_cpus);
+	memset(m, 0, sizeof(*m));
+}
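For completeness, the teardown side of the hypothetical example above would presumably unpublish the entries and release the allocations with the other two helpers from this patch; a minimal sketch, continuing the made-up example_map:

/* Hypothetical teardown for the sketch above: remove the /proc entries,
 * then free the masks and arrays allocated by init_domain_proc_info(). */
static void example_teardown_domain_proc(void)
{
	deactivate_domain_proc();
	destroy_domain_proc_info(&example_map);
}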