author     Jonathan Herman <hermanjl@cs.unc.edu>   2013-01-22 10:38:37 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>   2013-01-22 10:38:37 -0500
commit     fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446
tree       a57612d1888735a2ec7972891b68c1ac5ec8faea /kernel/sched_cpupri.c
parent     8dea78da5cee153b8af9c07a2745f6c55057fe12

Diffstat (limited to 'kernel/sched_cpupri.c')

-rw-r--r--  kernel/sched_cpupri.c  204

1 file changed, 204 insertions, 0 deletions
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
new file mode 100644
index 00000000000..2722dc1b413
--- /dev/null
+++ b/kernel/sched_cpupri.c
@@ -0,0 +1,204 @@
+/*
+ * kernel/sched_cpupri.c
+ *
+ * CPU priority management
+ *
+ * Copyright (C) 2007-2008 Novell
+ *
+ * Author: Gregory Haskins <ghaskins@novell.com>
+ *
+ * This code tracks the priority of each CPU so that global migration
+ * decisions are easy to calculate. Each CPU can be in a state as follows:
+ *
+ *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
+ *
+ * going from the lowest priority to the highest. CPUs in the INVALID state
+ * are not eligible for routing. The system maintains this state with
+ * a 2 dimensional bitmap (the first for priority class, the second for cpus
+ * in that class). Therefore a typical application without affinity
+ * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
+ * searches). For tasks with affinity restrictions, the algorithm has a
+ * worst case complexity of O(min(102, nr_domcpus)), though the scenario that
+ * yields the worst case search is fairly contrived.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gfp.h>
+#include "sched_cpupri.h"
+
+/* Convert a 140-based task->prio value to our 102-based cpupri value */
+static int convert_prio(int prio)
+{
+	int cpupri;
+
+	if (prio == CPUPRI_INVALID)
+		cpupri = CPUPRI_INVALID;
+	else if (prio == MAX_PRIO)
+		cpupri = CPUPRI_IDLE;
+	else if (prio >= MAX_RT_PRIO)
+		cpupri = CPUPRI_NORMAL;
+	else
+		cpupri = MAX_RT_PRIO - prio + 1;
+
+	return cpupri;
+}
+
+#define for_each_cpupri_active(array, idx)                    \
+	for_each_set_bit(idx, array, CPUPRI_NR_PRIORITIES)
+
+/**
+ * cpupri_find - find the best (lowest-pri) CPU in the system
+ * @cp: The cpupri context
+ * @p: The task
+ * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
+ *
+ * Note: This function returns the recommended CPUs as calculated during the
+ * current invocation. By the time the call returns, the CPUs may have in
+ * fact changed priorities any number of times. While not ideal, it is not
+ * an issue of correctness since the normal rebalancer logic will correct
+ * any discrepancies created by racing against the uncertainty of the current
+ * priority configuration.
+ *
+ * Returns: 1 if suitable CPUs were found, 0 otherwise
+ */
+int cpupri_find(struct cpupri *cp, struct task_struct *p,
+		struct cpumask *lowest_mask)
+{
+	int idx = 0;
+	int task_pri = convert_prio(p->prio);
+
+	for_each_cpupri_active(cp->pri_active, idx) {
+		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
+
+		if (idx >= task_pri)
+			break;
+
+		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+			continue;
+
+		if (lowest_mask) {
+			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+
+			/*
+			 * We have to ensure that we have at least one bit
+			 * still set in the array, since the map could have
+			 * been concurrently emptied between the first and
+			 * second reads of vec->mask. If we hit this
+			 * condition, simply act as though we never hit this
+			 * priority level and continue on.
+			 */
+			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
+				continue;
+		}
+
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * cpupri_set - update the cpu priority setting
+ * @cp: The cpupri context
+ * @cpu: The target cpu
+ * @newpri: The priority (INVALID-RT99) to assign to this CPU
+ *
+ * Note: Assumes cpu_rq(cpu)->lock is locked
+ *
+ * Returns: (void)
+ */
+void cpupri_set(struct cpupri *cp, int cpu, int newpri)
+{
+	int *currpri = &cp->cpu_to_pri[cpu];
+	int oldpri = *currpri;
+	unsigned long flags;
+
+	newpri = convert_prio(newpri);
+
+	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);
+
+	if (newpri == oldpri)
+		return;
+
+	/*
+	 * If the cpu was currently mapped to a different value, we
+	 * need to map it to the new value then remove the old value.
+	 * Note, we must add the new value first, otherwise we risk the
+	 * cpu being cleared from pri_active, and this cpu could be
+	 * missed for a push or pull.
+	 */
+	if (likely(newpri != CPUPRI_INVALID)) {
+		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
+
+		raw_spin_lock_irqsave(&vec->lock, flags);
+
+		cpumask_set_cpu(cpu, vec->mask);
+		vec->count++;
+		if (vec->count == 1)
+			set_bit(newpri, cp->pri_active);
+
+		raw_spin_unlock_irqrestore(&vec->lock, flags);
+	}
+	if (likely(oldpri != CPUPRI_INVALID)) {
+		struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
+
+		raw_spin_lock_irqsave(&vec->lock, flags);
+
+		vec->count--;
+		if (!vec->count)
+			clear_bit(oldpri, cp->pri_active);
+		cpumask_clear_cpu(cpu, vec->mask);
+
+		raw_spin_unlock_irqrestore(&vec->lock, flags);
+	}
+
+	*currpri = newpri;
+}
+
+/**
+ * cpupri_init - initialize the cpupri structure
+ * @cp: The cpupri context
+ *
+ * Returns: -ENOMEM on allocation failure, 0 on success
+ */
+int cpupri_init(struct cpupri *cp)
+{
+	int i;
+
+	memset(cp, 0, sizeof(*cp));
+
+	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
+		struct cpupri_vec *vec = &cp->pri_to_cpu[i];
+
+		raw_spin_lock_init(&vec->lock);
+		vec->count = 0;
+		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
+			goto cleanup;
+	}
+
+	for_each_possible_cpu(i)
+		cp->cpu_to_pri[i] = CPUPRI_INVALID;
+	return 0;
+
+cleanup:
+	for (i--; i >= 0; i--)
+		free_cpumask_var(cp->pri_to_cpu[i].mask);
+	return -ENOMEM;
+}
+
+/**
+ * cpupri_cleanup - clean up the cpupri structure
+ * @cp: The cpupri context
+ */
+void cpupri_cleanup(struct cpupri *cp)
+{
+	int i;
+
+	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
+		free_cpumask_var(cp->pri_to_cpu[i].mask);
+}
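
The convert_prio() routine above compresses the kernel's 140-level task->prio scale into the 102 cpupri levels, inverting the ordering so that a higher cpupri index means a better push target. A standalone userspace sketch of that mapping follows; the constants mirror what sched.h and sched_cpupri.h of this era define (MAX_RT_PRIO = 100, MAX_PRIO = 140, CPUPRI_INVALID/IDLE/NORMAL = -1/0/1) and should be read as assumptions, not as text from this commit.

#include <stdio.h>

#define MAX_RT_PRIO	100			/* assumed, per sched.h of this era */
#define MAX_PRIO	(MAX_RT_PRIO + 40)	/* 140: the idle task's prio */

enum { CPUPRI_INVALID = -1, CPUPRI_IDLE = 0, CPUPRI_NORMAL = 1 };

/* Same logic as the kernel function above, minus the kernel context. */
static int convert_prio(int prio)
{
	if (prio == CPUPRI_INVALID)
		return CPUPRI_INVALID;
	if (prio == MAX_PRIO)
		return CPUPRI_IDLE;		/* idle CPU: cheapest target */
	if (prio >= MAX_RT_PRIO)
		return CPUPRI_NORMAL;		/* all SCHED_NORMAL tasks share one level */
	return MAX_RT_PRIO - prio + 1;		/* RT: lower prio value -> higher cpupri */
}

int main(void)
{
	printf("idle (prio 140)     -> cpupri %d\n", convert_prio(140));	/* 0 */
	printf("normal (prio 120)   -> cpupri %d\n", convert_prio(120));	/* 1 */
	printf("lowest RT (prio 99) -> cpupri %d\n", convert_prio(99));	/* 2 */
	printf("highest RT (prio 0) -> cpupri %d\n", convert_prio(0));	/* 101 */
	return 0;
}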
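
The header comment's "two bit searches" claim is easiest to see outside kernel context. Below is a minimal userspace sketch, assuming plain unsigned-long bitmaps in place of the kernel's cpumask machinery: one bitmap marks which priority levels are occupied, one mask per level records which CPUs sit there, and cpupri_find()'s loop reduces to "lowest occupied level below the task's own, intersected with its affinity".

#include <stdio.h>
#include <limits.h>

#define NR_PRI	102			/* CPUPRI_NR_PRIORITIES */
#define NR_CPUS	8			/* small demo machine */
#define BITS	(CHAR_BIT * sizeof(unsigned long))
#define WORDS	((NR_PRI + BITS - 1) / BITS)

static unsigned long pri_active[WORDS];	/* bit p set => some CPU is at level p */
static unsigned long pri_mask[NR_PRI];	/* CPUs currently sitting at level p */

static void set_level(int cpu, int pri)
{
	pri_mask[pri] |= 1UL << cpu;
	pri_active[pri / BITS] |= 1UL << (pri % BITS);
}

/* Miniature cpupri_find(): first hit below task_pri wins. */
static int find_cpu(int task_pri, unsigned long affinity)
{
	int pri, cpu;

	for (pri = 0; pri < task_pri; pri++) {
		unsigned long hit;

		if (!(pri_active[pri / BITS] & (1UL << (pri % BITS))))
			continue;	/* level empty (the kernel jumps straight
					 * to set bits via for_each_set_bit) */
		hit = pri_mask[pri] & affinity;
		if (!hit)
			continue;	/* occupied, but not by allowed CPUs */
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (hit & (1UL << cpu))
				return cpu;
	}
	return -1;			/* no lower-priority CPU available */
}

int main(void)
{
	set_level(0, 0);		/* cpu0 idle   (cpupri 0) */
	set_level(1, 1);		/* cpu1 normal (cpupri 1) */
	set_level(2, 50);		/* cpu2 mid RT (cpupri 50) */

	/* An RT task at cpupri 60, allowed on cpu1 and cpu2 only: cpu1 wins. */
	printf("push target: cpu%d\n", find_cpu(60, 0x6));
	return 0;
}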
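
Finally, a caller-side sketch of how the RT scheduler is expected to drive this API. The example_* wrappers are hypothetical names invented for illustration; cpupri_set(), cpupri_find(), and cpumask_first() are the real interfaces, and the locking note restates the requirement documented above.

#include "sched_cpupri.h"

/* Hypothetical: republish a runqueue's top priority whenever it changes. */
static void example_rq_prio_changed(struct cpupri *cp, int cpu, int top_prio)
{
	/* cpupri_set() assumes the caller holds cpu_rq(cpu)->lock. */
	cpupri_set(cp, cpu, top_prio);
}

/* Hypothetical: pick a CPU to push task p to, or -1 if none is better. */
static int example_pick_push_target(struct cpupri *cp, struct task_struct *p,
				    struct cpumask *scratch)
{
	if (!cpupri_find(cp, p, scratch))
		return -1;		/* every allowed CPU runs equal or higher prio work */
	return cpumask_first(scratch);	/* simplest policy: take the first candidate */
}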