Diffstat (limited to 'litmus/clustered.c')
 litmus/clustered.c | 111 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 111 insertions(+), 0 deletions(-)
diff --git a/litmus/clustered.c b/litmus/clustered.c
new file mode 100644
index 000000000000..04450a8ad4fe
--- /dev/null
+++ b/litmus/clustered.c
@@ -0,0 +1,111 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/gfp.h>
#include <linux/cpumask.h>
#include <linux/list.h>

#include <litmus/clustered.h>

#ifndef CONFIG_X86
/* fake get_shared_cpu_map() on non-x86 architectures */

int get_shared_cpu_map(cpumask_var_t mask, unsigned int cpu, int index)
{
	if (index != 1)
		return 1;
	/* Fake L1: CPU is all by itself. */
	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);
	return 0;
}

#endif
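
/*
 * Example behaviour of the stub above: get_shared_cpu_map(mask, 2, 1)
 * yields mask = { CPU 2 } and returns 0, while any other level, e.g.
 * get_shared_cpu_map(mask, 2, 2), returns nonzero to signal an
 * unsupported cache level. On x86, the real implementation is expected
 * to fill in the set of CPUs that actually share the given cache level
 * with 'cpu'.
 */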

int get_cluster_size(enum cache_level level)
{
	cpumask_var_t mask;
	int ok;
	int num_cpus;

	if (level == GLOBAL_CLUSTER)
		return num_online_cpus();

	if (!zalloc_cpumask_var(&mask, GFP_ATOMIC))
		return -ENOMEM;
	/* assumes CPU 0 is representative of all CPUs */
	ok = get_shared_cpu_map(mask, 0, level);
	/* ok == 0 means we got the map; otherwise it's an invalid cache level */
	if (ok == 0)
		num_cpus = cpumask_weight(mask);
	free_cpumask_var(mask);

	if (ok == 0)
		return num_cpus;
	else
		return -EINVAL;
}
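
/*
 * Illustration (assuming litmus/clustered.h enumerates levels such as
 * L2_CLUSTER and L3_CLUSTER alongside GLOBAL_CLUSTER): on a four-CPU
 * machine where pairs of cores share an L2 and all cores share one L3,
 * get_cluster_size(GLOBAL_CLUSTER) == 4, get_cluster_size(L3_CLUSTER)
 * == 4, and get_cluster_size(L2_CLUSTER) == 2, always measured from
 * CPU 0's point of view.
 */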

int assign_cpus_to_clusters(enum cache_level level,
			    struct scheduling_cluster* clusters[],
			    unsigned int num_clusters,
			    struct cluster_cpu* cpus[],
			    unsigned int num_cpus)
{
	cpumask_var_t mask;
	unsigned int i, free_cluster = 0, low_cpu;
	int err = 0;

	if (!zalloc_cpumask_var(&mask, GFP_ATOMIC))
		return -ENOMEM;

	/* clear cluster pointers */
	for (i = 0; i < num_cpus; i++) {
		cpus[i]->id = i;
		cpus[i]->cluster = NULL;
	}

	/* initialize clusters */
	for (i = 0; i < num_clusters; i++) {
		clusters[i]->id = i;
		INIT_LIST_HEAD(&clusters[i]->cpus);
	}

	/* Assign each CPU. Two assumptions are made:
	 * 1) The index of a CPU in cpus corresponds to its processor id
	 *    (i.e., the index in a CPU mask).
	 * 2) All CPUs that belong to some cluster are online.
	 */
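	/* In effect, the lowest-numbered CPU in each shared-cache mask
	 * "founds" a new cluster, and every later CPU whose mask starts
	 * at that same CPU joins the cluster it founded. */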
	for_each_online_cpu(i) {
		/* get lowest-id CPU in cluster */
		if (level != GLOBAL_CLUSTER) {
			err = get_shared_cpu_map(mask, cpus[i]->id, level);
			if (err != 0) {
				/* ugh... wrong cache level? Either caller screwed up
				 * or the CPU topology is weird. */
				printk(KERN_ERR "Could not set up clusters for L%d sharing (max: L%d).\n",
				       level, err);
				err = -EINVAL;
				goto out;
			}
			low_cpu = cpumask_first(mask);
		} else {
			low_cpu = 0;
		}
		if (low_cpu == i) {
			/* caller must provide an appropriate number of clusters */
			BUG_ON(free_cluster >= num_clusters);

			/* create new cluster */
			cpus[i]->cluster = clusters[free_cluster++];
		} else {
			/* low_cpu points to the right cluster.
			 * Assumption: low_cpu is actually online and was processed earlier. */
			cpus[i]->cluster = cpus[low_cpu]->cluster;
		}
		/* enqueue in cpus list */
		list_add(&cpus[i]->cluster_list, &cpus[i]->cluster->cpus);
		printk(KERN_INFO "Assigning CPU%u to cluster %u.\n", i, cpus[i]->cluster->id);
	}
out:
	free_cpumask_var(mask);
	return err;
}
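
For context, a rough sketch of how a plugin might drive these two helpers follows; it is not part of the commit. The function name example_cluster_setup is hypothetical, the kcalloc/kzalloc allocation style (from linux/slab.h) is one choice among several, and real plugins would typically embed struct scheduling_cluster and struct cluster_cpu in their own per-cluster and per-CPU state rather than allocating them bare:

#include <linux/slab.h>

/* Hypothetical caller sketch: size the clusters for the requested
 * cache level, allocate one cluster per cache-sharing group, and let
 * assign_cpus_to_clusters() wire CPUs to clusters. */
static int example_cluster_setup(enum cache_level level)
{
	struct scheduling_cluster **clusters;
	struct cluster_cpu **cpus;
	unsigned int num_cpus = num_online_cpus();
	unsigned int num_clusters, i;
	int size, err;

	size = get_cluster_size(level);
	if (size <= 0)
		return size < 0 ? size : -EINVAL;

	/* Assumes the online CPUs divide evenly into cache-sharing groups. */
	num_clusters = num_cpus / size;

	clusters = kcalloc(num_clusters, sizeof(*clusters), GFP_KERNEL);
	cpus = kcalloc(num_cpus, sizeof(*cpus), GFP_KERNEL);
	if (!clusters || !cpus)
		goto oom;
	for (i = 0; i < num_clusters; i++) {
		clusters[i] = kzalloc(sizeof(*clusters[i]), GFP_KERNEL);
		if (!clusters[i])
			goto oom;
	}
	for (i = 0; i < num_cpus; i++) {
		cpus[i] = kzalloc(sizeof(*cpus[i]), GFP_KERNEL);
		if (!cpus[i])
			goto oom;
	}

	err = assign_cpus_to_clusters(level, clusters, num_clusters,
				      cpus, num_cpus);
	return err;
oom:
	/* freeing of partially allocated arrays elided for brevity */
	return -ENOMEM;
}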