author	Rusty Russell <rusty@rustcorp.com.au>	2008-11-24 11:05:13 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-24 11:52:22 -0500
commit	68e74568fbe5854952355e942acca51f138096d9 (patch)
tree	6263627b3b3c7e249f685ba0fe4d76002e882cd0 /kernel/sched_cpupri.c
parent	4212823fb459eacc8098dd420bb68ebb9917989d (diff)
sched: convert struct cpupri_vec to cpumask_var_t.
Impact: stack usage reduction, (future) size reduction for large NR_CPUS.
Dynamically allocating the cpumasks (when CONFIG_CPUMASK_OFFSTACK is set)
saves space when CONFIG_NR_CPUS is large but the runtime nr_cpu_ids is small.
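
As a minimal illustration of the pattern (the struct and the init/free
helpers here are hypothetical, not part of this patch; only the cpumask
calls are the real API):

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	struct example {			/* hypothetical container */
		cpumask_var_t mask;
	};

	static int example_init(struct example *e)
	{
		/*
		 * With CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a
		 * struct cpumask * and this allocates nr_cpu_ids bits
		 * from the slab; otherwise it is a fixed NR_CPUS-bit
		 * array and the "allocation" always succeeds.
		 */
		if (!alloc_cpumask_var(&e->mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_clear(e->mask);	/* alloc_cpumask_var() does not zero */
		return 0;
	}

	static void example_free(struct example *e)
	{
		free_cpumask_var(e->mask);	/* no-op in the fixed-size case */
	}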
The fact that cpupri_init() is called both before and after the slab is
available makes for an ugly bootmem parameter, unfortunately.
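
The resulting call sites look roughly like this (a sketch only; the flag
is the sole difference between the two callers, and since
alloc_bootmem_cpumask_var() cannot fail, only the slab path can return
-ENOMEM):

	/* Early boot, before kmem caches exist: */
	cpupri_init(cp, true);		/* alloc_bootmem_cpumask_var() path */

	/* Runtime, e.g. when a new root domain is built: */
	if (cpupri_init(cp, false))	/* slab path; may fail with -ENOMEM */
		goto fail;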
We also use cpumask_any_and() to get rid of a temporary in cpupri_find().
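
In isolation, that transformation (taken from the first hunk below) is:

	/* Before: a full cpumask_t temporary on the stack. */
	cpumask_t mask;

	cpus_and(mask, p->cpus_allowed, vec->mask);
	if (cpus_empty(mask))
		continue;
	*lowest_mask = mask;

	/* After: cpumask_any_and() returns the first CPU set in both
	 * masks, or >= nr_cpu_ids if the intersection is empty, so
	 * emptiness can be tested without a temporary; the intersection
	 * is then written straight into the caller's buffer, and only
	 * on the success path. */
	if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
		continue;
	cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);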
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_cpupri.c')
-rw-r--r--	kernel/sched_cpupri.c	39
1 file changed, 28 insertions(+), 11 deletions(-)
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 52154fefab7e..018b7be1db2e 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -67,24 +67,21 @@ static int convert_prio(int prio)
  * Returns: (int)bool - CPUs were found
  */
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
-		cpumask_t *lowest_mask)
+		struct cpumask *lowest_mask)
 {
 	int idx = 0;
 	int task_pri = convert_prio(p->prio);
 
 	for_each_cpupri_active(cp->pri_active, idx) {
 		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
-		cpumask_t mask;
 
 		if (idx >= task_pri)
 			break;
 
-		cpus_and(mask, p->cpus_allowed, vec->mask);
-
-		if (cpus_empty(mask))
+		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 
-		*lowest_mask = mask;
+		cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
 		return 1;
 	}
 
@@ -126,7 +123,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 		vec->count--;
 		if (!vec->count)
 			clear_bit(oldpri, cp->pri_active);
-		cpu_clear(cpu, vec->mask);
+		cpumask_clear_cpu(cpu, vec->mask);
 
 		spin_unlock_irqrestore(&vec->lock, flags);
 	}
@@ -136,7 +133,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 
 		spin_lock_irqsave(&vec->lock, flags);
 
-		cpu_set(cpu, vec->mask);
+		cpumask_set_cpu(cpu, vec->mask);
 		vec->count++;
 		if (vec->count == 1)
 			set_bit(newpri, cp->pri_active);
@@ -150,10 +147,11 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 /**
  * cpupri_init - initialize the cpupri structure
  * @cp: The cpupri context
+ * @bootmem: true if allocations need to use bootmem
  *
- * Returns: (void)
+ * Returns: -ENOMEM if memory fails.
  */
-void cpupri_init(struct cpupri *cp)
+int cpupri_init(struct cpupri *cp, bool bootmem)
 {
 	int i;
 
@@ -164,11 +162,30 @@ void cpupri_init(struct cpupri *cp)
 
 		spin_lock_init(&vec->lock);
 		vec->count = 0;
-		cpus_clear(vec->mask);
+		if (bootmem)
+			alloc_bootmem_cpumask_var(&vec->mask);
+		else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL))
+			goto cleanup;
 	}
 
 	for_each_possible_cpu(i)
 		cp->cpu_to_pri[i] = CPUPRI_INVALID;
+	return 0;
+
+cleanup:
+	for (i--; i >= 0; i--)
+		free_cpumask_var(cp->pri_to_cpu[i].mask);
+	return -ENOMEM;
 }
 
+/**
+ * cpupri_cleanup - clean up the cpupri structure
+ * @cp: The cpupri context
+ */
+void cpupri_cleanup(struct cpupri *cp)
+{
+	int i;
 
+	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
+		free_cpumask_var(cp->pri_to_cpu[i].mask);
+}