author      Linus Torvalds <torvalds@linux-foundation.org>   2012-10-05 14:09:16 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>   2012-10-05 14:09:16 -0400
commit      11126c611e10abb18b6f1ed0300c0548c3906b54 (patch)
tree        246227ad730c1e68f5a9c03db57cd4592abe7687 /lib/genalloc.c
parent      67966e088b0c7dc926f4ce19565ebf208e18d058 (diff)
parent      33e2a4227ddff7c18921ac175fae3ab0e3ff8a76 (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge misc patches from Andrew Morton:
"The MM tree is rather stuck while I wait to find out what the heck is
happening with sched/numa. Probably I'll need to route around all the
code which was added to -next, sigh.
So this is "everything else", or at least most of it - other small
bits are still awaiting resolutions of various kinds."
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (180 commits)
lib/decompress.c add __init to decompress_method and data
kernel/resource.c: fix stack overflow in __reserve_region_with_split()
omfs: convert to use beXX_add_cpu()
taskstats: cgroupstats_user_cmd() may leak on error
aoe: update aoe-internal version number to 50
aoe: update documentation to better reflect aoe-plus-udev usage
aoe: remove unused code
aoe: make dynamic block minor numbers the default
aoe: update and specify AoE address guards and error messages
aoe: retain static block device numbers for backwards compatibility
aoe: support more AoE addresses with dynamic block device minor numbers
aoe: update documentation with new URL and VM settings reference
aoe: update copyright year in touched files
aoe: update internal version number to 49
aoe: remove unused code and add cosmetic improvements
aoe: increase net_device reference count while using it
aoe: associate frames with the AoE storage target
aoe: disallow unsupported AoE minor addresses
aoe: do revalidation steps in order
aoe: failover remote interface based on aoe_deadsecs parameter
...
Diffstat (limited to 'lib/genalloc.c')
-rw-r--r--   lib/genalloc.c   88
1 file changed, 84 insertions(+), 4 deletions(-)
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 6bc04aab6ec7..ca208a92628c 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -152,6 +152,8 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
 		spin_lock_init(&pool->lock);
 		INIT_LIST_HEAD(&pool->chunks);
 		pool->min_alloc_order = min_alloc_order;
+		pool->algo = gen_pool_first_fit;
+		pool->data = NULL;
 	}
 	return pool;
 }
@@ -255,8 +257,9 @@ EXPORT_SYMBOL(gen_pool_destroy);
  * @size: number of bytes to allocate from the pool
  *
  * Allocate the requested number of bytes from the specified pool.
- * Uses a first-fit algorithm. Can not be used in NMI handler on
- * architectures without NMI-safe cmpxchg implementation.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
  */
 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 {
@@ -280,8 +283,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 
 		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
 retry:
-		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
-						       start_bit, nbits, 0);
+		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
+				pool->data);
 		if (start_bit >= end_bit)
 			continue;
 		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -400,3 +403,80 @@ size_t gen_pool_size(struct gen_pool *pool)
 	return size;
 }
 EXPORT_SYMBOL_GPL(gen_pool_size);
+
+/**
+ * gen_pool_set_algo - set the allocation algorithm
+ * @pool: pool to change allocation algorithm
+ * @algo: custom algorithm function
+ * @data: additional data used by @algo
+ *
+ * Call @algo for each memory allocation in the pool.
+ * If @algo is NULL use gen_pool_first_fit as default
+ * memory allocation function.
+ */
+void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
+{
+	rcu_read_lock();
+
+	pool->algo = algo;
+	if (!pool->algo)
+		pool->algo = gen_pool_first_fit;
+
+	pool->data = data;
+
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(gen_pool_set_algo);
+
+/**
+ * gen_pool_first_fit - find the first available region
+ * of memory matching the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ */
+unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
+		unsigned long start, unsigned int nr, void *data)
+{
+	return bitmap_find_next_zero_area(map, size, start, nr, 0);
+}
+EXPORT_SYMBOL(gen_pool_first_fit);
+
+/**
+ * gen_pool_best_fit - find the best fitting region of memory
+ * macthing the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ *
+ * Iterate over the bitmap to find the smallest free region
+ * which we can allocate the memory.
+ */
+unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
+		unsigned long start, unsigned int nr, void *data)
+{
+	unsigned long start_bit = size;
+	unsigned long len = size + 1;
+	unsigned long index;
+
+	index = bitmap_find_next_zero_area(map, size, start, nr, 0);
+
+	while (index < size) {
+		int next_bit = find_next_bit(map, size, index + nr);
+		if ((next_bit - index) < len) {
+			len = next_bit - index;
+			start_bit = index;
+			if (len == nr)
+				return start_bit;
+		}
+		index = bitmap_find_next_zero_area(map, size,
+						   next_bit + 1, nr, 0);
+	}
+
+	return start_bit;
+}
+EXPORT_SYMBOL(gen_pool_best_fit);
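
For readers of this change, here is a minimal usage sketch of the interface added above. It is illustrative only: demo_pool, demo_algo, the region address/size and the allocation order are made up, and error handling is abbreviated; the genalloc calls themselves (gen_pool_create, gen_pool_set_algo, gen_pool_best_fit, gen_pool_first_fit, gen_pool_add, gen_pool_alloc, gen_pool_destroy) are the ones touched or introduced by this patch.

/*
 * Usage sketch, not part of the patch: demo_* names and sizes are
 * hypothetical; only the genalloc API is real.
 */
#include <linux/errno.h>
#include <linux/genalloc.h>

static struct gen_pool *demo_pool;

/* A caller-supplied allocator must match genpool_algo_t, the callback
 * type this series adds; this trivial one simply defers to first-fit. */
static unsigned long demo_algo(unsigned long *map, unsigned long size,
			       unsigned long start, unsigned int nr,
			       void *data)
{
	return gen_pool_first_fit(map, size, start, nr, data);
}

static int demo_pool_init(unsigned long region, size_t region_size)
{
	int ret;

	/* order 6: 64-byte minimum allocation unit, no NUMA preference */
	demo_pool = gen_pool_create(6, -1);
	if (!demo_pool)
		return -ENOMEM;

	/*
	 * gen_pool_create() now installs gen_pool_first_fit by default,
	 * so this call is only needed to pick a different policy, e.g.
	 * the new best-fit helper (or demo_algo above).
	 */
	gen_pool_set_algo(demo_pool, gen_pool_best_fit, NULL);

	ret = gen_pool_add(demo_pool, region, region_size, -1);
	if (ret) {
		gen_pool_destroy(demo_pool);
		return ret;
	}
	return 0;
}

static unsigned long demo_pool_grab(size_t len)
{
	/* gen_pool_alloc() now dispatches through pool->algo. */
	return gen_pool_alloc(demo_pool, len);
}

Note that gen_pool_alloc() reads pool->algo without taking the pool spinlock, which is why gen_pool_create() now initializes it to gen_pool_first_fit up front, and why gen_pool_set_algo() falls back to first-fit when passed a NULL algorithm.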