Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched_cpupri.c | 62
-rw-r--r--  kernel/sched_cpupri.h |  5
2 files changed, 41 insertions(+), 26 deletions(-)
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 2722dc1b4138..7761a2669fff 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -47,9 +47,6 @@ static int convert_prio(int prio)
         return cpupri;
 }
 
-#define for_each_cpupri_active(array, idx)    \
-        for_each_set_bit(idx, array, CPUPRI_NR_PRIORITIES)
-
 /**
  * cpupri_find - find the best (lowest-pri) CPU in the system
  * @cp: The cpupri context
@@ -71,11 +68,33 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
         int idx = 0;
         int task_pri = convert_prio(p->prio);
 
-        for_each_cpupri_active(cp->pri_active, idx) {
+        if (task_pri >= MAX_RT_PRIO)
+                return 0;
+
+        for (idx = 0; idx < task_pri; idx++) {
                 struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
 
-                if (idx >= task_pri)
-                        break;
+                if (!atomic_read(&(vec)->count))
+                        continue;
+                /*
+                 * When looking at the vector, we need to read the counter,
+                 * do a memory barrier, then read the mask.
+                 *
+                 * Note: This is still all racy, but we can deal with it.
+                 * Ideally, we only want to look at masks that are set.
+                 *
+                 * If a mask is not set, then the only thing wrong is that we
+                 * did a little more work than necessary.
+                 *
+                 * If we read a zero count but the mask is set, because of the
+                 * memory barriers, that can only happen when the highest prio
+                 * task for a run queue has left the run queue, in which case,
+                 * it will be followed by a pull. If the task we are processing
+                 * fails to find a proper place to go, that pull request will
+                 * pull this task if the run queue is running at a lower
+                 * priority.
+                 */
+                smp_rmb();
 
                 if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
                         continue;
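
The loop above is the reader half of a publish/consume handshake: check the counter, issue a read barrier, then look at the mask. As an illustration only, here is a minimal userspace sketch of that reader side, using C11 atomics in place of the kernel's atomic_t, cpumask and smp_rmb(); struct vec, vec_has_cpu() and the plain unsigned-long mask are hypothetical stand-ins, not kernel API.

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical userspace stand-in for one cpupri_vec entry. */
    struct vec {
            atomic_int   count;     /* CPUs currently at this priority */
            atomic_ulong mask;      /* stand-in for cpumask_var_t */
    };

    /*
     * Reader side, mirroring cpupri_find(): read the counter, fence,
     * then read the mask. The acquire fence plays the role of smp_rmb().
     */
    static bool vec_has_cpu(struct vec *v, unsigned long allowed)
    {
            if (atomic_load_explicit(&v->count, memory_order_relaxed) == 0)
                    return false;   /* cheap skip of empty vectors */

            atomic_thread_fence(memory_order_acquire);      /* ~ smp_rmb() */

            /*
             * This load is ordered after the count load, so a nonzero
             * count implies the writer's mask update is visible here.
             */
            return (atomic_load_explicit(&v->mask, memory_order_relaxed)
                    & allowed) != 0;
    }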
@@ -115,7 +134,6 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 {
         int *currpri = &cp->cpu_to_pri[cpu];
         int  oldpri  = *currpri;
-        unsigned long flags;
 
         newpri = convert_prio(newpri);
 
@@ -134,26 +152,25 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
         if (likely(newpri != CPUPRI_INVALID)) {
                 struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
 
-                raw_spin_lock_irqsave(&vec->lock, flags);
-
                 cpumask_set_cpu(cpu, vec->mask);
-                vec->count++;
-                if (vec->count == 1)
-                        set_bit(newpri, cp->pri_active);
-
-                raw_spin_unlock_irqrestore(&vec->lock, flags);
+                /*
+                 * When adding a new vector, we update the mask first,
+                 * do a write memory barrier, and then update the count, to
+                 * make sure the vector is visible when count is set.
+                 */
+                smp_wmb();
+                atomic_inc(&(vec)->count);
         }
         if (likely(oldpri != CPUPRI_INVALID)) {
                 struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
 
-                raw_spin_lock_irqsave(&vec->lock, flags);
-
-                vec->count--;
-                if (!vec->count)
-                        clear_bit(oldpri, cp->pri_active);
+                /*
+                 * When removing from the vector, we decrement the counter
+                 * first, do a memory barrier, and then clear the mask.
+                 */
+                atomic_dec(&(vec)->count);
+                smp_wmb();
                 cpumask_clear_cpu(cpu, vec->mask);
-
-                raw_spin_unlock_irqrestore(&vec->lock, flags);
         }
 
         *currpri = newpri;
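
The two branches above are the writer half of the same handshake: on insert, publish the mask bit and only then raise the count; on removal, drop the count and only then clear the bit. A racing reader may therefore see a stale set bit (harmless extra work) but never a raised count without the bit. A hedged userspace sketch of the same ordering, continuing the hypothetical struct vec above, with release fences standing in for smp_wmb():

    /*
     * Writer side, mirroring cpupri_set(). The release fences stand in
     * for smp_wmb(): they order the first store before the second.
     */
    static void vec_add_cpu(struct vec *v, int cpu)
    {
            /* Publish the mask bit first... */
            atomic_fetch_or_explicit(&v->mask, 1UL << cpu,
                                     memory_order_relaxed);
            atomic_thread_fence(memory_order_release);      /* ~ smp_wmb() */
            /* ...so the bit is visible once count is seen nonzero. */
            atomic_fetch_add_explicit(&v->count, 1, memory_order_relaxed);
    }

    static void vec_del_cpu(struct vec *v, int cpu)
    {
            /* Drop the count first... */
            atomic_fetch_sub_explicit(&v->count, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_release);      /* ~ smp_wmb() */
            /* ...then retire the mask bit. */
            atomic_fetch_and_explicit(&v->mask, ~(1UL << cpu),
                                      memory_order_relaxed);
    }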
@@ -175,8 +192,7 @@ int cpupri_init(struct cpupri *cp)
         for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
                 struct cpupri_vec *vec = &cp->pri_to_cpu[i];
 
-                raw_spin_lock_init(&vec->lock);
-                vec->count = 0;
+                atomic_set(&vec->count, 0);
                 if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
                         goto cleanup;
         }
diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h
index 9fc7d386fea4..6b4cd17dead6 100644
--- a/kernel/sched_cpupri.h
+++ b/kernel/sched_cpupri.h
@@ -12,9 +12,8 @@
 /* values 2-101 are RT priorities 0-99 */
 
 struct cpupri_vec {
-        raw_spinlock_t lock;
-        int            count;
-        cpumask_var_t  mask;
+        atomic_t       count;
+        cpumask_var_t  mask;
 };
 
 struct cpupri {
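
Taken together, the ordering the new struct buys is: a reader that observes a nonzero count after its barrier is guaranteed to also observe the corresponding mask bit. A small single-threaded usage sketch of the hypothetical helpers above (a real concurrency check would need a multi-threaded litmus test):

    #include <assert.h>

    int main(void)
    {
            struct vec v;

            atomic_init(&v.count, 0);
            atomic_init(&v.mask, 0);

            vec_add_cpu(&v, 3);
            /*
             * Under the release/acquire pairing, a reader that sees
             * count == 1 must also see bit 3 set in mask.
             */
            assert(vec_has_cpu(&v, 1UL << 3));

            vec_del_cpu(&v, 3);
            assert(!vec_has_cpu(&v, 1UL << 3));
            return 0;
    }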