Diffstat (limited to 'lib/percpu_ida.c')
-rw-r--r--	lib/percpu_ida.c	94
1 file changed, 74 insertions(+), 20 deletions(-)
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index bab1ba2a4c71..9d054bf91d0f 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -30,15 +30,6 @@
 #include <linux/spinlock.h>
 #include <linux/percpu_ida.h>
 
-/*
- * Number of tags we move between the percpu freelist and the global freelist at
- * a time
- */
-#define IDA_PCPU_BATCH_MOVE 32U
-
-/* Max size of percpu freelist, */
-#define IDA_PCPU_SIZE ((IDA_PCPU_BATCH_MOVE * 3) / 2)
-
 struct percpu_ida_cpu {
 	/*
 	 * Even though this is percpu, we need a lock for tag stealing by remote
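The two constants removed above do not simply disappear: they become per-pool
tunables. The matching header change is not part of this diff, but from the
accesses below (pool->percpu_max_size, pool->percpu_batch_size) it presumably
adds fields along these lines; a sketch, with field types assumed from how
they are used in this file:

/* Assumed shape of the struct percpu_ida additions in
 * include/linux/percpu_ida.h (not shown in this diff). */
struct percpu_ida {
	unsigned		nr_tags;		/* existing field */
	unsigned		percpu_max_size;	/* replaces IDA_PCPU_SIZE */
	unsigned		percpu_batch_size;	/* replaces IDA_PCPU_BATCH_MOVE */
	/* ... remaining fields unchanged ... */
};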
@@ -78,7 +69,7 @@ static inline void steal_tags(struct percpu_ida *pool,
 	struct percpu_ida_cpu *remote;
 
 	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
-	     cpus_have_tags * IDA_PCPU_SIZE > pool->nr_tags / 2;
+	     cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2;
 	     cpus_have_tags--) {
 		cpu = cpumask_next(cpu, &pool->cpus_have_tags);
 
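For scale: with nr_tags = 256 and the old fixed percpu size of 48, this loop
keeps stealing while cpus_have_tags * 48 > 128, i.e. while three or more cpus
are flagged as holding tags. With the size now a pool field, a smaller
percpu_max_size both caps how many tags each cpu can cache and makes this
stealing loop give up sooner.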
@@ -123,11 +114,10 @@ static inline void alloc_global_tags(struct percpu_ida *pool,
 {
 	move_tags(tags->freelist, &tags->nr_free,
 		  pool->freelist, &pool->nr_free,
-		  min(pool->nr_free, IDA_PCPU_BATCH_MOVE));
+		  min(pool->nr_free, pool->percpu_batch_size));
 }
 
-static inline unsigned alloc_local_tag(struct percpu_ida *pool,
-				       struct percpu_ida_cpu *tags)
+static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
 {
 	int tag = -ENOSPC;
 
@@ -168,7 +158,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
 	tags = this_cpu_ptr(pool->tag_cpu);
 
 	/* Fastpath */
-	tag = alloc_local_tag(pool, tags);
+	tag = alloc_local_tag(tags);
 	if (likely(tag >= 0)) {
 		local_irq_restore(flags);
 		return tag;
@@ -245,17 +235,17 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
 		wake_up(&pool->wait);
 	}
 
-	if (nr_free == IDA_PCPU_SIZE) {
+	if (nr_free == pool->percpu_max_size) {
 		spin_lock(&pool->lock);
 
 		/*
 		 * Global lock held and irqs disabled, don't need percpu
 		 * lock
 		 */
-		if (tags->nr_free == IDA_PCPU_SIZE) {
+		if (tags->nr_free == pool->percpu_max_size) {
 			move_tags(pool->freelist, &pool->nr_free,
 				  tags->freelist, &tags->nr_free,
-				  IDA_PCPU_BATCH_MOVE);
+				  pool->percpu_batch_size);
 
 			wake_up(&pool->wait);
 		}
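A consistency note grounded in the old defaults: a percpu freelist that fills
to IDA_PCPU_SIZE (48) tags flushed IDA_PCPU_BATCH_MOVE (32) of them back to
the global pool, leaving 16 behind so the freeing cpu's fastpath stays warm.
Callers of the new __percpu_ida_init() presumably want batch_size < max_size
for the same reason; the diff itself does not enforce that relationship.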
@@ -292,7 +282,8 @@ EXPORT_SYMBOL_GPL(percpu_ida_destroy);
  * Allocation is percpu, but sharding is limited by nr_tags - for best
  * performance, the workload should not span more cpus than nr_tags / 128.
  */
-int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
+int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
+	unsigned long max_size, unsigned long batch_size)
 {
 	unsigned i, cpu, order;
 
@@ -301,6 +292,8 @@ int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
 	init_waitqueue_head(&pool->wait);
 	spin_lock_init(&pool->lock);
 	pool->nr_tags = nr_tags;
+	pool->percpu_max_size = max_size;
+	pool->percpu_batch_size = batch_size;
 
 	/* Guard against overflow */
 	if (nr_tags > (unsigned) INT_MAX + 1) {
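Nothing in this file keeps existing callers of percpu_ida_init() building, so
presumably the header pairs the renamed __percpu_ida_init() with a wrapper
that supplies the old constants as defaults. A minimal sketch, assuming the
default macros simply move into include/linux/percpu_ida.h under new names:

/* Sketch only: macro names and placement are assumptions mirroring the
 * constants deleted from this file. */
#define IDA_DEFAULT_PCPU_BATCH_MOVE	32U
#define IDA_DEFAULT_PCPU_SIZE	((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)

static inline int percpu_ida_init(struct percpu_ida *pool,
				  unsigned long nr_tags)
{
	return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
				 IDA_DEFAULT_PCPU_BATCH_MOVE);
}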
@@ -319,7 +312,7 @@ int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
 	pool->nr_free = nr_tags;
 
 	pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
-				       IDA_PCPU_SIZE * sizeof(unsigned),
+				       pool->percpu_max_size * sizeof(unsigned),
 				       sizeof(unsigned));
 	if (!pool->tag_cpu)
 		goto err;
@@ -332,4 +325,65 @@ err:
 	percpu_ida_destroy(pool);
 	return -ENOMEM;
 }
-EXPORT_SYMBOL_GPL(percpu_ida_init);
+EXPORT_SYMBOL_GPL(__percpu_ida_init);
+
+/**
+ * percpu_ida_for_each_free - iterate free ids of a pool
+ * @pool: pool to iterate
+ * @fn: iteration callback function
+ * @data: parameter for @fn
+ *
+ * Note: this doesn't guarantee to visit every free id exactly once. Some
+ * free ids might be missed, some might be visited more than once, and some
+ * might no longer be free by the time @fn sees them.
+ */
+int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
+	void *data)
+{
+	unsigned long flags;
+	struct percpu_ida_cpu *remote;
+	unsigned cpu, i, err = 0;
+
+	local_irq_save(flags);
+	for_each_possible_cpu(cpu) {
+		remote = per_cpu_ptr(pool->tag_cpu, cpu);
+		spin_lock(&remote->lock);
+		for (i = 0; i < remote->nr_free; i++) {
+			err = fn(remote->freelist[i], data);
+			if (err)
+				break;
+		}
+		spin_unlock(&remote->lock);
+		if (err)
+			goto out;
+	}
+
+	spin_lock(&pool->lock);
+	for (i = 0; i < pool->nr_free; i++) {
+		err = fn(pool->freelist[i], data);
+		if (err)
+			break;
+	}
+	spin_unlock(&pool->lock);
+out:
+	local_irq_restore(flags);
+	return err;
+}
+EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
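For illustration, here is how a caller might use the new iterator. The
counting helpers are hypothetical, and the callback signature is assumed to
be int (*percpu_ida_cb)(unsigned, void *), matching how fn is invoked above:

/* Hypothetical caller: count the currently free tags. */
static int count_free_cb(unsigned id, void *data)
{
	unsigned *count = data;

	(*count)++;
	return 0;	/* returning non-zero would stop the walk early */
}

static unsigned count_free_tags(struct percpu_ida *pool)
{
	unsigned count = 0;

	percpu_ida_for_each_free(pool, count_free_cb, &count);
	return count;	/* a racy snapshot, per the kerneldoc above */
}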
+
+/**
+ * percpu_ida_free_tags - return the number of free tags of a specific cpu or the global pool
+ * @pool: pool to query
+ * @cpu: specific cpu, or the global pool if @cpu == nr_cpu_ids
+ *
+ * Note: this just returns a snapshot of the number of free tags.
+ */
+unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
+{
+	struct percpu_ida_cpu *remote;
+	if (cpu == nr_cpu_ids)
+		return pool->nr_free;
+	remote = per_cpu_ptr(pool->tag_cpu, cpu);
+	return remote->nr_free;
+}
+EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
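And a usage sketch for the snapshot helper, totalling the per-cpu freelists
plus the global one via the nr_cpu_ids sentinel documented above; the helper
name is hypothetical:

/* Hypothetical helper: sum per-cpu snapshots and the global freelist.
 * The total is itself only a snapshot; tags migrate concurrently. */
static unsigned total_free_tags(struct percpu_ida *pool)
{
	unsigned total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += percpu_ida_free_tags(pool, cpu);

	total += percpu_ida_free_tags(pool, nr_cpu_ids);	/* global pool */
	return total;
}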