author     Glenn Elliott <gelliott@cs.unc.edu>    2011-01-27 13:55:14 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>    2011-01-27 13:55:14 -0500
commit     178914fda62f345d45c0873f000f4760293b24ab
tree       7244c6d9c95c95f719468fba194df7e09f86de54
parent     b71d5bb985706bfb1e96a2527559a1753165db96
parent     d11808b5c6b032de4284281ed2ff77ae697a4ebd

Merge branch 'master' into wip-edzl-critique
-rw-r--r--  include/litmus/ftdev.h       15
-rw-r--r--  litmus/ftdev.c              144
-rw-r--r--  litmus/sched_cedf.c          70
-rw-r--r--  litmus/sched_task_trace.c    37
-rw-r--r--  litmus/trace.c               30
5 files changed, 215 insertions(+), 81 deletions(-)
diff --git a/include/litmus/ftdev.h b/include/litmus/ftdev.h
index 7697b4616699..348387e9adf9 100644
--- a/include/litmus/ftdev.h
+++ b/include/litmus/ftdev.h
@@ -6,8 +6,6 @@
 #include <linux/mutex.h>
 #include <linux/cdev.h>
 
-#define MAX_FTDEV_MINORS NR_CPUS
-
 #define FTDEV_ENABLE_CMD 0
 #define FTDEV_DISABLE_CMD 1
 
@@ -28,12 +26,15 @@ struct ftdev_minor {
 	struct mutex lock;
 	/* FIXME: filter for authorized events */
 	struct ftdev_event* events;
+	struct device* device;
 };
 
 struct ftdev {
+	dev_t major;
 	struct cdev cdev;
-	/* FIXME: don't waste memory, allocate dynamically */
-	struct ftdev_minor minor[MAX_FTDEV_MINORS];
+	struct class* class;
+	const char* name;
+	struct ftdev_minor* minor;
 	unsigned int minor_cnt;
 	ftdev_alloc_t alloc;
 	ftdev_free_t free;
@@ -43,7 +44,9 @@ struct ftdev {
 struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size);
 void free_ft_buffer(struct ft_buffer* buf);
 
-void ftdev_init(struct ftdev* ftdev, struct module* owner);
-int register_ftdev(struct ftdev* ftdev, const char* name, int major);
+int ftdev_init(struct ftdev* ftdev, struct module* owner,
+		const int minor_cnt, const char* name);
+void ftdev_exit(struct ftdev* ftdev);
+int register_ftdev(struct ftdev* ftdev);
 
 #endif
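
Note on the reworked ftdev API above: ftdev_init() now takes the minor count and device name up front, register_ftdev() no longer needs a name or a preassigned major (the major is always allocated dynamically), and ftdev_exit() provides the matching teardown. A minimal sketch of a hypothetical client of this interface — the device name "example_trace" and the single minor are illustrative only; the real callers are sched_task_trace.c and trace.c further down in this diff:

#include <linux/module.h>
#include <litmus/ftdev.h>

static struct ftdev example_dev;

static int __init example_init(void)
{
	int err;

	/* one minor, char device named "example_trace" */
	err = ftdev_init(&example_dev, THIS_MODULE, 1, "example_trace");
	if (err)
		return err;

	/* optional per-minor hooks (alloc/free/can_open) would be set here */

	err = register_ftdev(&example_dev); /* major assigned dynamically */
	if (err)
		ftdev_exit(&example_dev);   /* mirrors the err_dealloc paths below */
	return err;
}

static void __exit example_exit(void)
{
	ftdev_exit(&example_dev);
}

module_init(example_init);
module_exit(example_exit);

The same init -> register -> exit pattern appears essentially verbatim in the updated init_ft_overhead_trace() in litmus/trace.c below.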
diff --git a/litmus/ftdev.c b/litmus/ftdev.c
index 51dafaebf8a6..4a4b2e3e56c2 100644
--- a/litmus/ftdev.c
+++ b/litmus/ftdev.c
@@ -4,6 +4,7 @@
 #include <linux/cdev.h>
 #include <asm/uaccess.h>
 #include <linux/module.h>
+#include <linux/device.h>
 
 #include <litmus/litmus.h>
 #include <litmus/feather_trace.h>
@@ -309,52 +310,131 @@ struct file_operations ftdev_fops = {
 	.read = ftdev_read,
 };
 
-
-void ftdev_init(struct ftdev* ftdev, struct module* owner)
+int ftdev_init(struct ftdev* ftdev, struct module* owner,
+		const int minor_cnt, const char* name)
 {
-	int i;
+	int i, err;
+
+	BUG_ON(minor_cnt < 1);
+
 	cdev_init(&ftdev->cdev, &ftdev_fops);
+	ftdev->name = name;
+	ftdev->minor_cnt = minor_cnt;
 	ftdev->cdev.owner = owner;
 	ftdev->cdev.ops = &ftdev_fops;
-	ftdev->minor_cnt = 0;
-	for (i = 0; i < MAX_FTDEV_MINORS; i++) {
+	ftdev->alloc = NULL;
+	ftdev->free = NULL;
+	ftdev->can_open = NULL;
+
+	ftdev->minor = kcalloc(ftdev->minor_cnt, sizeof(*ftdev->minor),
+			GFP_KERNEL);
+	if (!ftdev->minor) {
+		printk(KERN_WARNING "ftdev(%s): Could not allocate memory\n",
+			ftdev->name);
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	for (i = 0; i < ftdev->minor_cnt; i++) {
 		mutex_init(&ftdev->minor[i].lock);
 		ftdev->minor[i].readers = 0;
 		ftdev->minor[i].buf = NULL;
 		ftdev->minor[i].events = NULL;
 	}
-	ftdev->alloc = NULL;
-	ftdev->free = NULL;
-	ftdev->can_open = NULL;
+
+	ftdev->class = class_create(owner, ftdev->name);
+	if (IS_ERR(ftdev->class)) {
+		err = PTR_ERR(ftdev->class);
+		printk(KERN_WARNING "ftdev(%s): "
+			"Could not create device class.\n", ftdev->name);
+		goto err_dealloc;
+	}
+
+	return 0;
+
+err_dealloc:
+	kfree(ftdev->minor);
+err_out:
+	return err;
 }
 
-int register_ftdev(struct ftdev* ftdev, const char* name, int major)
+/*
+ * Destroy minor devices up to, but not including, up_to.
+ */
+static void ftdev_device_destroy(struct ftdev* ftdev, unsigned int up_to)
 {
-	dev_t trace_dev;
-	int error = 0;
-
-	if(major) {
-		trace_dev = MKDEV(major, 0);
-		error = register_chrdev_region(trace_dev, ftdev->minor_cnt,
-				name);
-	} else {
-		error = alloc_chrdev_region(&trace_dev, 0, ftdev->minor_cnt,
-				name);
-		major = MAJOR(trace_dev);
-	}
-	if (error)
-	{
+	dev_t minor_cntr;
+
+	if (up_to < 1)
+		up_to = (ftdev->minor_cnt < 1) ? 0 : ftdev->minor_cnt;
+
+	for (minor_cntr = 0; minor_cntr < up_to; ++minor_cntr)
+		device_destroy(ftdev->class, MKDEV(ftdev->major, minor_cntr));
+}
+
+void ftdev_exit(struct ftdev* ftdev)
+{
+	printk("ftdev(%s): Exiting\n", ftdev->name);
+	ftdev_device_destroy(ftdev, -1);
+	cdev_del(&ftdev->cdev);
+	unregister_chrdev_region(MKDEV(ftdev->major, 0), ftdev->minor_cnt);
+	class_destroy(ftdev->class);
+	kfree(ftdev->minor);
+}
+
+int register_ftdev(struct ftdev* ftdev)
+{
+	struct device **device;
+	dev_t trace_dev_tmp, minor_cntr;
+	int err;
+
+	err = alloc_chrdev_region(&trace_dev_tmp, 0, ftdev->minor_cnt,
+			ftdev->name);
+	if (err) {
 		printk(KERN_WARNING "ftdev(%s): "
-			"Could not register major/minor number %d/%u\n",
-			name, major, ftdev->minor_cnt);
-		return error;
+			"Could not allocate char. device region (%d minors)\n",
+			ftdev->name, ftdev->minor_cnt);
+		goto err_out;
 	}
-	error = cdev_add(&ftdev->cdev, trace_dev, ftdev->minor_cnt);
-	if (error) {
+
+	ftdev->major = MAJOR(trace_dev_tmp);
+
+	err = cdev_add(&ftdev->cdev, trace_dev_tmp, ftdev->minor_cnt);
+	if (err) {
 		printk(KERN_WARNING "ftdev(%s): "
-			"Could not add cdev for major/minor = %d/%u.\n",
-			name, major, ftdev->minor_cnt);
-		return error;
+			"Could not add cdev for major %u with %u minor(s).\n",
+			ftdev->name, ftdev->major, ftdev->minor_cnt);
+		goto err_unregister;
 	}
-	return error;
+
+	/* create the minor device(s) */
+	for (minor_cntr = 0; minor_cntr < ftdev->minor_cnt; ++minor_cntr)
+	{
+		trace_dev_tmp = MKDEV(ftdev->major, minor_cntr);
+		device = &ftdev->minor[minor_cntr].device;
+
+		*device = device_create(ftdev->class, NULL, trace_dev_tmp, NULL,
+				"litmus/%s%d", ftdev->name, minor_cntr);
+		if (IS_ERR(*device)) {
+			err = PTR_ERR(*device);
+			printk(KERN_WARNING "ftdev(%s): "
+				"Could not create device major/minor number "
+				"%u/%u\n", ftdev->name, ftdev->major,
+				minor_cntr);
+			printk(KERN_WARNING "ftdev(%s): "
+				"will attempt deletion of allocated devices.\n",
+				ftdev->name);
+			goto err_minors;
+		}
+	}
+
+	return 0;
+
+err_minors:
+	ftdev_device_destroy(ftdev, minor_cntr);
+	cdev_del(&ftdev->cdev);
err_unregister:
+	unregister_chrdev_region(MKDEV(ftdev->major, 0), ftdev->minor_cnt);
+err_out:
+	return err;
 }
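
Worth noting: register_ftdev() now creates the device nodes itself via device_create() with the "litmus/%s%d" template, so on a system with udev/devtmpfs the minors typically appear as /dev/litmus/<name><minor> (for example /dev/litmus/sched_trace0); the exact path depends on the local udev setup. A user-space sketch that reads from such a node (the path is assumed as just described, and record parsing is omitted):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/litmus/sched_trace0", O_RDONLY); /* assumed udev path */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf)); /* served by ftdev_read() */
	if (n >= 0)
		printf("read %zd bytes of trace data\n", n);
	close(fd);
	return 0;
}

Also note that ftdev_exit() and the err_minors/err_unregister labels unwind in the reverse order of registration: device nodes first, then the cdev, then the chrdev region, and finally the class.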
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 8c9513d33f59..098a449c2490 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -45,14 +45,17 @@
 #include <litmus/litmus_proc.h>
 #include <linux/uaccess.h>
 
-/*
- * It makes sense only to cluster around L2 or L3, so if cluster_index = 2
- * (default) we cluster all the CPUs that shares a L2 cache, while
- * cluster_cache_index = 3 we cluster all CPs that shares a L3 cache
+/* Reference configuration variable. Determines which cache level is used to
+ * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that
+ * all CPUs form a single cluster (just like GSN-EDF).
  */
-int cluster_index = 2;
+static enum {
+	GLOBAL_CLUSTER = 0,
+	L1_CLUSTER     = 1,
+	L2_CLUSTER     = 2,
+	L3_CLUSTER     = 3
+} cluster_config = GLOBAL_CLUSTER;
 
-/* forward declaration... a funny thing with C ;) */
 struct clusterdomain;
 
 /* cpu_entry_t - maintain the linked and scheduled state
@@ -587,7 +590,9 @@ static void cedf_task_exit(struct task_struct * t)
 	raw_spin_lock_irqsave(&cluster->lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
-		cluster->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
+		cpu_entry_t *cpu;
+		cpu = &per_cpu(cedf_cpu_entries, tsk_rt(t)->scheduled_on);
+		cpu->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
 	raw_spin_unlock_irqrestore(&cluster->lock, flags);
@@ -647,26 +652,25 @@ static long cedf_activate_plugin(void)
 	/* de-allocate old clusters, if any */
 	cleanup_cedf();
 
-	printk(KERN_INFO "C-EDF: Activate Plugin, cache index = %d\n",
-			cluster_index);
+	printk(KERN_INFO "C-EDF: Activate Plugin, cluster configuration = %d\n",
+			cluster_config);
 
 	/* need to get cluster_size first */
 	if(!zalloc_cpumask_var(&mask, GFP_ATOMIC))
 		return -ENOMEM;
 
-	if (unlikely(cluster_index == num_online_cpus())) {
-
+	if (unlikely(cluster_config == GLOBAL_CLUSTER)) {
 		cluster_size = num_online_cpus();
 	} else {
-
-		chk = get_shared_cpu_map(mask, 0, cluster_index);
+		chk = get_shared_cpu_map(mask, 0, cluster_config);
 		if (chk) {
 			/* if chk != 0 then it is the max allowed index */
-			printk(KERN_INFO "C-EDF: Cannot support cache index = %d\n",
-					cluster_index);
-			printk(KERN_INFO "C-EDF: Using cache index = %d\n",
-					chk);
-			cluster_index = chk;
+			printk(KERN_INFO "C-EDF: Cluster configuration = %d "
+				"is not supported on this hardware.\n",
+				cluster_config);
+			/* User should notice that the configuration failed, so
+			 * let's bail out. */
+			return -EINVAL;
 		}
 
 		cluster_size = cpumask_weight(mask);
@@ -714,10 +718,10 @@ static long cedf_activate_plugin(void)
 
 		/* this cpu isn't in any cluster */
 		/* get the shared cpus */
-		if (unlikely(cluster_index == num_online_cpus()))
+		if (unlikely(cluster_config == GLOBAL_CLUSTER))
 			cpumask_copy(mask, cpu_online_mask);
 		else
-			get_shared_cpu_map(mask, cpu, cluster_index);
+			get_shared_cpu_map(mask, cpu, cluster_config);
 
 		cpumask_copy(cedf[i].cpu_map, mask);
 #ifdef VERBOSE_INIT
@@ -774,11 +778,21 @@ static int proc_read_cluster_size(char *page, char **start,
 				int *eof, void *data)
 {
 	int len;
-	if (cluster_index >= 1 && cluster_index <= 3)
-		len = snprintf(page, PAGE_SIZE, "L%d\n", cluster_index);
-	else
-		len = snprintf(page, PAGE_SIZE, "ALL\n");
-
+	switch (cluster_config) {
+	case GLOBAL_CLUSTER:
+		len = snprintf(page, PAGE_SIZE, "ALL\n");
+		break;
+	case L1_CLUSTER:
+	case L2_CLUSTER:
+	case L3_CLUSTER:
+		len = snprintf(page, PAGE_SIZE, "L%d\n", cluster_config);
+		break;
+	default:
+		/* This should be impossible, but let's be paranoid. */
+		len = snprintf(page, PAGE_SIZE, "INVALID (%d)\n",
+				cluster_config);
+		break;
+	}
 	return len;
 }
 
@@ -806,13 +820,13 @@ static int proc_write_cluster_size(struct file *file,
 
 	/* do a quick and dirty comparison to find the cluster size */
 	if (!strcmp(cache_name, "L2"))
-		cluster_index = 2;
+		cluster_config = L2_CLUSTER;
 	else if (!strcmp(cache_name, "L3"))
-		cluster_index = 3;
+		cluster_config = L3_CLUSTER;
 	else if (!strcmp(cache_name, "L1"))
-		cluster_index = 1;
+		cluster_config = L1_CLUSTER;
 	else if (!strcmp(cache_name, "ALL"))
-		cluster_index = num_online_cpus();
+		cluster_config = GLOBAL_CLUSTER;
 	else
 		printk(KERN_INFO "Cluster '%s' is unknown.\n", cache_name);
 
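
The behavioral change in cedf_activate_plugin() above: an unsupported cache level now fails plugin activation with -EINVAL instead of silently falling back to the largest index reported by get_shared_cpu_map(). The accepted strings on the write side remain "L1", "L2", "L3" and "ALL". A user-space sketch of switching to L3 clustering — the proc path is an assumption here; the actual entry is registered elsewhere in sched_cedf.c and may differ on a given tree:

#include <stdio.h>

int main(void)
{
	/* hypothetical path; the real proc entry is set up outside this hunk */
	FILE *f = fopen("/proc/litmus/plugins/C-EDF/cluster", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("L3", f); /* parsed by proc_write_cluster_size() */
	fclose(f);
	return 0;
}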
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index e5098ddb1ec9..a15b25d21a89 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -16,8 +16,6 @@
 #include <litmus/ftdev.h>
 
 
-/* set MAJOR to 0 to have it dynamically assigned */
-#define FT_TASK_TRACE_MAJOR 253
 #define NO_EVENTS (1 << CONFIG_SCHED_TASK_TRACE_SHIFT)
 
 #define now() litmus_clock()
@@ -40,12 +38,17 @@ static int st_dev_can_open(struct ftdev *dev, unsigned int cpu)
 static int __init init_sched_task_trace(void)
 {
 	struct local_buffer* buf;
-	int i, ok = 0;
+	int i, ok = 0, err;
 	printk("Allocated %u sched_trace_xxx() events per CPU "
 		"(buffer size: %d bytes)\n",
 		NO_EVENTS, (int) sizeof(struct local_buffer));
-	ftdev_init(&st_dev, THIS_MODULE);
-	for (i = 0; i < NR_CPUS; i++) {
+
+	err = ftdev_init(&st_dev, THIS_MODULE,
+			num_online_cpus(), "sched_trace");
+	if (err)
+		goto err_out;
+
+	for (i = 0; i < st_dev.minor_cnt; i++) {
 		buf = &per_cpu(st_event_buffer, i);
 		ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS,
 			sizeof(struct st_event_record),
@@ -53,16 +56,32 @@ static int __init init_sched_task_trace(void)
 			buf->record);
 		st_dev.minor[i].buf = &buf->ftbuf;
 	}
-	if (ok == NR_CPUS) {
-		st_dev.minor_cnt = NR_CPUS;
+	if (ok == st_dev.minor_cnt) {
 		st_dev.can_open = st_dev_can_open;
-		return register_ftdev(&st_dev, "sched_trace", FT_TASK_TRACE_MAJOR);
+		err = register_ftdev(&st_dev);
+		if (err)
+			goto err_dealloc;
 	} else {
-		return -EINVAL;
+		err = -EINVAL;
+		goto err_dealloc;
 	}
+
+	return 0;
+
+err_dealloc:
+	ftdev_exit(&st_dev);
+err_out:
+	printk(KERN_WARNING "Could not register sched_trace module\n");
+	return err;
+}
+
+static void __exit exit_sched_task_trace(void)
+{
+	ftdev_exit(&st_dev);
 }
 
 module_init(init_sched_task_trace);
+module_exit(exit_sched_task_trace);
 
 
 static inline struct st_event_record* get_record(u8 type, struct task_struct* t)
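
With ftdev_init(&st_dev, THIS_MODULE, num_online_cpus(), "sched_trace"), minor i carries the per-CPU event buffer of CPU i (st_dev.minor[i].buf = &buf->ftbuf in the loop above), so a reader usually opens one node per online CPU. A user-space sketch, again assuming the /dev/litmus/ naming produced by register_ftdev() plus a standard udev setup:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	long cpu, ncpus = sysconf(_SC_NPROCESSORS_ONLN);

	for (cpu = 0; cpu < ncpus; cpu++) {
		int fd;

		snprintf(path, sizeof(path), "/dev/litmus/sched_trace%ld", cpu);
		fd = open(path, O_RDONLY);
		if (fd < 0) {
			perror(path);
			continue;
		}
		/* ... read struct st_event_record entries here ... */
		close(fd);
	}
	return 0;
}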
diff --git a/litmus/trace.c b/litmus/trace.c
index b3a6b47aad6e..e7ea1c2ab3e4 100644
--- a/litmus/trace.c
+++ b/litmus/trace.c
@@ -71,9 +71,6 @@ feather_callback void save_timestamp_cpu(unsigned long event,
  */
 #define NO_TIMESTAMPS (2 << 11)
 
-/* set MAJOR to 0 to have it dynamically assigned */
-#define FT_TRACE_MAJOR 252
-
 static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
 {
 	unsigned int count = NO_TIMESTAMPS;
@@ -93,12 +90,33 @@ static void free_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
 
 static int __init init_ft_overhead_trace(void)
 {
+	int err;
+
 	printk("Initializing Feather-Trace overhead tracing device.\n");
-	ftdev_init(&overhead_dev, THIS_MODULE);
-	overhead_dev.minor_cnt = 1; /* only one buffer */
+	err = ftdev_init(&overhead_dev, THIS_MODULE, 1, "ft_trace");
+	if (err)
+		goto err_out;
+
 	overhead_dev.alloc = alloc_timestamp_buffer;
 	overhead_dev.free = free_timestamp_buffer;
-	return register_ftdev(&overhead_dev, "ft_trace", FT_TRACE_MAJOR);
+
+	err = register_ftdev(&overhead_dev);
+	if (err)
+		goto err_dealloc;
+
+	return 0;
+
+err_dealloc:
+	ftdev_exit(&overhead_dev);
+err_out:
+	printk(KERN_WARNING "Could not register ft_trace module.\n");
+	return err;
+}
+
+static void __exit exit_ft_overhead_trace(void)
+{
+	ftdev_exit(&overhead_dev);
 }
 
 module_init(init_ft_overhead_trace);
+module_exit(exit_ft_overhead_trace);
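
A final note on the per-minor hooks: trace.c manages its timestamp buffer through the alloc/free callbacks, while sched_task_trace.c pre-populates minor[i].buf and only sets can_open. Based on the signatures visible in this diff (alloc_timestamp_buffer/free_timestamp_buffer above and st_dev_can_open in sched_task_trace.c), a hypothetical client's hooks would look roughly like the sketch below; the buffer geometry is made up for illustration, and the hooks are called from ftdev.c outside the hunks shown here.

#include <linux/errno.h>
#include <litmus/ftdev.h>

/* called by the ftdev core when minor idx needs its buffer */
static int example_alloc(struct ftdev *ftdev, unsigned int idx)
{
	/* 4096 slots of 16 bytes each -- illustrative values only */
	ftdev->minor[idx].buf = alloc_ft_buffer(4096, 16);
	return ftdev->minor[idx].buf ? 0 : -ENOMEM;
}

static void example_free(struct ftdev *ftdev, unsigned int idx)
{
	free_ft_buffer(ftdev->minor[idx].buf);
	ftdev->minor[idx].buf = NULL;
}

/* wired up after ftdev_init(), before register_ftdev():
 *	example_dev.alloc = example_alloc;
 *	example_dev.free  = example_free;
 */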