about summary refs log tree commit diff stats
path: root/kernel/padata.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/padata.c')
-rw-r--r-- kernel/padata.c | 44
1 file changed, 14 insertions(+), 30 deletions(-)
diff --git a/kernel/padata.c b/kernel/padata.c
index b45259931512..6f10eb285ece 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -29,7 +29,6 @@
29#include <linux/sysfs.h> 29#include <linux/sysfs.h>
30#include <linux/rcupdate.h> 30#include <linux/rcupdate.h>
31 31
32#define MAX_SEQ_NR (INT_MAX - NR_CPUS)
33#define MAX_OBJ_NUM 1000 32#define MAX_OBJ_NUM 1000
34 33
35static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) 34static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
@@ -43,18 +42,19 @@ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
43 return target_cpu; 42 return target_cpu;
44} 43}
45 44
46static int padata_cpu_hash(struct padata_priv *padata) 45static int padata_cpu_hash(struct parallel_data *pd)
47{ 46{
48 int cpu_index; 47 int cpu_index;
49 struct parallel_data *pd;
50
51 pd = padata->pd;
52 48
53 /* 49 /*
54 * Hash the sequence numbers to the cpus by taking 50 * Hash the sequence numbers to the cpus by taking
55 * seq_nr mod. number of cpus in use. 51 * seq_nr mod. number of cpus in use.
56 */ 52 */
57 cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask.pcpu); 53
54 spin_lock(&pd->seq_lock);
55 cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu);
56 pd->seq_nr++;
57 spin_unlock(&pd->seq_lock);
58 58
59 return padata_index_to_cpu(pd, cpu_index); 59 return padata_index_to_cpu(pd, cpu_index);
60} 60}
@@ -132,12 +132,7 @@ int padata_do_parallel(struct padata_instance *pinst,
132 padata->pd = pd; 132 padata->pd = pd;
133 padata->cb_cpu = cb_cpu; 133 padata->cb_cpu = cb_cpu;
134 134
135 if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr)) 135 target_cpu = padata_cpu_hash(pd);
136 atomic_set(&pd->seq_nr, -1);
137
138 padata->seq_nr = atomic_inc_return(&pd->seq_nr);
139
140 target_cpu = padata_cpu_hash(padata);
141 queue = per_cpu_ptr(pd->pqueue, target_cpu); 136 queue = per_cpu_ptr(pd->pqueue, target_cpu);
142 137
143 spin_lock(&queue->parallel.lock); 138 spin_lock(&queue->parallel.lock);
@@ -173,7 +168,7 @@ EXPORT_SYMBOL(padata_do_parallel);
173static struct padata_priv *padata_get_next(struct parallel_data *pd) 168static struct padata_priv *padata_get_next(struct parallel_data *pd)
174{ 169{
175 int cpu, num_cpus; 170 int cpu, num_cpus;
176 int next_nr, next_index; 171 unsigned int next_nr, next_index;
177 struct padata_parallel_queue *queue, *next_queue; 172 struct padata_parallel_queue *queue, *next_queue;
178 struct padata_priv *padata; 173 struct padata_priv *padata;
179 struct padata_list *reorder; 174 struct padata_list *reorder;
@@ -189,14 +184,6 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
189 cpu = padata_index_to_cpu(pd, next_index); 184 cpu = padata_index_to_cpu(pd, next_index);
190 next_queue = per_cpu_ptr(pd->pqueue, cpu); 185 next_queue = per_cpu_ptr(pd->pqueue, cpu);
191 186
192 if (unlikely(next_nr > pd->max_seq_nr)) {
193 next_nr = next_nr - pd->max_seq_nr - 1;
194 next_index = next_nr % num_cpus;
195 cpu = padata_index_to_cpu(pd, next_index);
196 next_queue = per_cpu_ptr(pd->pqueue, cpu);
197 pd->processed = 0;
198 }
199
200 padata = NULL; 187 padata = NULL;
201 188
202 reorder = &next_queue->reorder; 189 reorder = &next_queue->reorder;
@@ -205,8 +192,6 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
205 padata = list_entry(reorder->list.next, 192 padata = list_entry(reorder->list.next,
206 struct padata_priv, list); 193 struct padata_priv, list);
207 194
208 BUG_ON(next_nr != padata->seq_nr);
209
210 spin_lock(&reorder->lock); 195 spin_lock(&reorder->lock);
211 list_del_init(&padata->list); 196 list_del_init(&padata->list);
212 atomic_dec(&pd->reorder_objects); 197 atomic_dec(&pd->reorder_objects);
@@ -230,6 +215,7 @@ out:
230 215
231static void padata_reorder(struct parallel_data *pd) 216static void padata_reorder(struct parallel_data *pd)
232{ 217{
218 int cb_cpu;
233 struct padata_priv *padata; 219 struct padata_priv *padata;
234 struct padata_serial_queue *squeue; 220 struct padata_serial_queue *squeue;
235 struct padata_instance *pinst = pd->pinst; 221 struct padata_instance *pinst = pd->pinst;
@@ -270,13 +256,14 @@ static void padata_reorder(struct parallel_data *pd)
270 return; 256 return;
271 } 257 }
272 258
273 squeue = per_cpu_ptr(pd->squeue, padata->cb_cpu); 259 cb_cpu = padata->cb_cpu;
260 squeue = per_cpu_ptr(pd->squeue, cb_cpu);
274 261
275 spin_lock(&squeue->serial.lock); 262 spin_lock(&squeue->serial.lock);
276 list_add_tail(&padata->list, &squeue->serial.list); 263 list_add_tail(&padata->list, &squeue->serial.list);
277 spin_unlock(&squeue->serial.lock); 264 spin_unlock(&squeue->serial.lock);
278 265
279 queue_work_on(padata->cb_cpu, pinst->wq, &squeue->work); 266 queue_work_on(cb_cpu, pinst->wq, &squeue->work);
280 } 267 }
281 268
282 spin_unlock_bh(&pd->lock); 269 spin_unlock_bh(&pd->lock);
@@ -400,7 +387,7 @@ static void padata_init_squeues(struct parallel_data *pd)
400/* Initialize all percpu queues used by parallel workers */ 387/* Initialize all percpu queues used by parallel workers */
401static void padata_init_pqueues(struct parallel_data *pd) 388static void padata_init_pqueues(struct parallel_data *pd)
402{ 389{
403 int cpu_index, num_cpus, cpu; 390 int cpu_index, cpu;
404 struct padata_parallel_queue *pqueue; 391 struct padata_parallel_queue *pqueue;
405 392
406 cpu_index = 0; 393 cpu_index = 0;
@@ -415,9 +402,6 @@ static void padata_init_pqueues(struct parallel_data *pd)
415 INIT_WORK(&pqueue->work, padata_parallel_worker); 402 INIT_WORK(&pqueue->work, padata_parallel_worker);
416 atomic_set(&pqueue->num_obj, 0); 403 atomic_set(&pqueue->num_obj, 0);
417 } 404 }
418
419 num_cpus = cpumask_weight(pd->cpumask.pcpu);
420 pd->max_seq_nr = num_cpus ? (MAX_SEQ_NR / num_cpus) * num_cpus - 1 : 0;
421} 405}
422 406
423/* Allocate and initialize the internal cpumask dependend resources. */ 407/* Allocate and initialize the internal cpumask dependend resources. */
@@ -444,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
444 padata_init_pqueues(pd); 428 padata_init_pqueues(pd);
445 padata_init_squeues(pd); 429 padata_init_squeues(pd);
446 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); 430 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
447 atomic_set(&pd->seq_nr, -1); 431 pd->seq_nr = 0;
448 atomic_set(&pd->reorder_objects, 0); 432 atomic_set(&pd->reorder_objects, 0);
449 atomic_set(&pd->refcnt, 0); 433 atomic_set(&pd->refcnt, 0);
450 pd->pinst = pinst; 434 pd->pinst = pinst;