author     Bjoern B. Brandenburg <bbb@cs.unc.edu>   2011-01-27 16:23:46 -0500
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>   2011-02-01 17:00:27 -0500
commit     343d4ead3b12992f494134114cf50e4f37c656c5
tree       16ccd57dfa39ba8c50b87ac776b13a2e3826fc01
parent     4ce37704ec0bedb28b5708d32964fca471e793d0
Litmus core: add generic clustering support
Inspired by the existing C-EDF code, this generic version will build
clusters of CPUs based on a given cache level.
 include/litmus/clustered.h |  22 +++
 litmus/Makefile            |   1 +
 litmus/clustered.c         | 111 ++++++++
 3 files changed, 134 insertions(+), 0 deletions(-)
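For context, a rough sketch of how a plugin might consume the new interface: size the clusters for the configured cache level, allocate one scheduling_cluster per cluster and one cluster_cpu per online CPU, and let assign_cpus_to_clusters() link them up. Everything below (the function name, the allocation strategy, the omitted error handling) is illustrative and not part of this patch.

/* Hypothetical plugin-side setup; names and error handling are illustrative. */
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <litmus/clustered.h>

static int my_plugin_setup_clusters(enum cache_level level)
{
        struct scheduling_cluster **clusters;
        struct cluster_cpu **cpus;
        int cluster_size, num_clusters, num_cpus, i;

        cluster_size = get_cluster_size(level);
        if (cluster_size <= 0)
                return cluster_size ? cluster_size : -EINVAL;

        num_cpus = num_online_cpus();
        /* assumes cluster_size evenly divides the number of online CPUs */
        num_clusters = num_cpus / cluster_size;

        clusters = kmalloc(num_clusters * sizeof(*clusters), GFP_KERNEL);
        cpus     = kmalloc(num_cpus * sizeof(*cpus), GFP_KERNEL);
        for (i = 0; i < num_clusters; i++)
                clusters[i] = kzalloc(sizeof(*clusters[i]), GFP_KERNEL);
        for (i = 0; i < num_cpus; i++)
                cpus[i] = kzalloc(sizeof(*cpus[i]), GFP_KERNEL);
        /* (allocation failures would need to be handled in real code) */

        return assign_cpus_to_clusters(level, clusters, num_clusters,
                                       cpus, num_cpus);
}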
diff --git a/include/litmus/clustered.h b/include/litmus/clustered.h
index cad12467b4ee..0c18dcb15e6c 100644
--- a/include/litmus/clustered.h
+++ b/include/litmus/clustered.h
@@ -19,4 +19,26 @@ const char* cache_level_name(enum cache_level level);
 struct proc_dir_entry* create_cluster_file(struct proc_dir_entry* parent,
                                            enum cache_level* level);
 
+
+
+struct scheduling_cluster {
+        unsigned int id;
+        /* list of CPUs that are part of this cluster */
+        struct list_head cpus;
+};
+
+struct cluster_cpu {
+        unsigned int id; /* which CPU is this? */
+        struct list_head cluster_list; /* List of the CPUs in this cluster. */
+        struct scheduling_cluster* cluster; /* The cluster that this CPU belongs to. */
+};
+
+int get_cluster_size(enum cache_level level);
+
+int assign_cpus_to_clusters(enum cache_level level,
+                            struct scheduling_cluster* clusters[],
+                            unsigned int num_clusters,
+                            struct cluster_cpu* cpus[],
+                            unsigned int num_cpus);
+
 #endif
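For orientation (not part of the patch): the two structs form a standard intrusive list, with each cluster_cpu linked into its cluster's cpus list through its cluster_list field, so a plugin can walk a cluster's CPUs with the regular list helpers. The function and variable names below are illustrative only.

/* Illustrative only: iterate over all CPUs assigned to one cluster. */
#include <linux/kernel.h>
#include <linux/list.h>
#include <litmus/clustered.h>

static void print_cluster_members(struct scheduling_cluster *cluster)
{
        struct cluster_cpu *entry;

        /* cluster_list is the link field embedded in each cluster_cpu */
        list_for_each_entry(entry, &cluster->cpus, cluster_list)
                printk(KERN_INFO "cluster %u contains CPU %u\n",
                       cluster->id, entry->id);
}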
diff --git a/litmus/Makefile b/litmus/Makefile
index 62a20e266eeb..ad9936e07b83 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -6,6 +6,7 @@ obj-y = sched_plugin.o litmus.o \
         preempt.o \
         litmus_proc.o \
         budget.o \
+        clustered.o \
         jobs.o \
         sync.o \
         rt_domain.o \
diff --git a/litmus/clustered.c b/litmus/clustered.c
new file mode 100644
index 000000000000..04450a8ad4fe
--- /dev/null
+++ b/litmus/clustered.c
@@ -0,0 +1,111 @@
+#include <linux/gfp.h>
+#include <linux/cpumask.h>
+#include <linux/list.h>
+
+#include <litmus/clustered.h>
+
+#ifndef CONFIG_X86
+/* fake get_shared_cpu_map() on non-x86 architectures */
+
+int get_shared_cpu_map(cpumask_var_t mask, unsigned int cpu, int index)
+{
+        if (index != 1)
+                return 1;
+        else {
+                /* Fake L1: CPU is all by itself. */
+                cpumask_clear(mask);
+                cpumask_set_cpu(cpu, mask);
+                return 0;
+        }
+}
+
+#endif
+
+int get_cluster_size(enum cache_level level)
+{
+        cpumask_var_t mask;
+        int ok;
+        int num_cpus;
+
+        if (level == GLOBAL_CLUSTER)
+                return num_online_cpus();
+        else {
+                if (!zalloc_cpumask_var(&mask, GFP_ATOMIC))
+                        return -ENOMEM;
+                /* assumes CPU 0 is representative of all CPUs */
+                ok = get_shared_cpu_map(mask, 0, level);
+                /* ok == 0 means we got the map; otherwise it's an invalid cache level */
+                if (ok == 0)
+                        num_cpus = cpumask_weight(mask);
+                free_cpumask_var(mask);
+
+                if (ok == 0)
+                        return num_cpus;
+                else
+                        return -EINVAL;
+        }
+}
+
+int assign_cpus_to_clusters(enum cache_level level,
+                            struct scheduling_cluster* clusters[],
+                            unsigned int num_clusters,
+                            struct cluster_cpu* cpus[],
+                            unsigned int num_cpus)
+{
+        cpumask_var_t mask;
+        unsigned int i, free_cluster = 0, low_cpu;
+        int err = 0;
+
+        if (!zalloc_cpumask_var(&mask, GFP_ATOMIC))
+                return -ENOMEM;
+
+        /* clear cluster pointers */
+        for (i = 0; i < num_cpus; i++) {
+                cpus[i]->id = i;
+                cpus[i]->cluster = NULL;
+        }
+
+        /* initialize clusters */
+        for (i = 0; i < num_clusters; i++) {
+                clusters[i]->id = i;
+                INIT_LIST_HEAD(&clusters[i]->cpus);
+        }
+
+        /* Assign each CPU. Two assumptions are made:
+         * 1) The index of a cpu in cpus corresponds to its processor id (i.e., the index in a cpu mask).
+         * 2) All cpus that belong to some cluster are online.
+         */
+        for_each_online_cpu(i) {
+                /* get lowest-id CPU in cluster */
+                if (level != GLOBAL_CLUSTER) {
+                        err = get_shared_cpu_map(mask, cpus[i]->id, level);
+                        if (err != 0) {
+                                /* ugh... wrong cache level? Either caller screwed up
+                                 * or the CPU topology is weird. */
+                                printk(KERN_ERR "Could not set up clusters for L%d sharing (max: L%d).\n",
+                                       level, err);
+                                err = -EINVAL;
+                                goto out;
+                        }
+                        low_cpu = cpumask_first(mask);
+                } else
+                        low_cpu = 0;
+                if (low_cpu == i) {
+                        /* caller must provide an appropriate number of clusters */
+                        BUG_ON(free_cluster >= num_clusters);
+
+                        /* create new cluster */
+                        cpus[i]->cluster = clusters[free_cluster++];
+                } else {
+                        /* low_cpu points to the right cluster
+                         * Assumption: low_cpu is actually online and was processed earlier. */
+                        cpus[i]->cluster = cpus[low_cpu]->cluster;
+                }
+                /* enqueue in cpus list */
+                list_add(&cpus[i]->cluster_list, &cpus[i]->cluster->cpus);
+                printk(KERN_INFO "Assigning CPU%u to cluster %u.\n", i, cpus[i]->cluster->id);
+        }
+out:
+        free_cpumask_var(mask);
+        return err;
+}