path: root/lib/genalloc.c
author: Dmitry Torokhov <dmitry.torokhov@gmail.com> 2015-02-10 14:35:36 -0500
committer: Dmitry Torokhov <dmitry.torokhov@gmail.com> 2015-02-10 14:35:36 -0500
commit: 4ba24fef3eb3b142197135223b90ced2f319cd53 (patch)
tree: a20c125b27740ec7b4c761b11d801108e1b316b2 /lib/genalloc.c
parent: 47c1ffb2b6b630894e9a16442611c056ab21c057 (diff)
parent: 98a4a59ee31a12105a2b84f5b8b515ac2cb208ef (diff)
Merge branch 'next' into for-linus
Prepare first round of input updates for 3.20.
Diffstat (limited to 'lib/genalloc.c')
-rw-r--r--  lib/genalloc.c  50
1 file changed, 50 insertions(+), 0 deletions(-)
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 38d2db82228c..2e65d206b01c 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -403,6 +403,35 @@ void gen_pool_for_each_chunk(struct gen_pool *pool,
 EXPORT_SYMBOL(gen_pool_for_each_chunk);
 
 /**
+ * addr_in_gen_pool - checks if an address falls within the range of a pool
+ * @pool:	the generic memory pool
+ * @start:	start address
+ * @size:	size of the region
+ *
+ * Check if the range of addresses falls within the specified pool. Returns
+ * true if the entire range is contained in the pool and false otherwise.
+ */
+bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+			size_t size)
+{
+	bool found = false;
+	unsigned long end = start + size;
+	struct gen_pool_chunk *chunk;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
+		if (start >= chunk->start_addr && start <= chunk->end_addr) {
+			if (end <= chunk->end_addr) {
+				found = true;
+				break;
+			}
+		}
+	}
+	rcu_read_unlock();
+	return found;
+}
+
+/**
  * gen_pool_avail - get available free space of the pool
  * @pool: pool to get available free space
  *
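The helper only returns true when the whole [start, start + size) range is covered by a single chunk, so callers can use it to decide whether an address was handed out by the pool at all. A minimal, hypothetical caller sketch (not part of this commit; my_pool, vaddr and size are illustrative names) could look like this:

	/* Free the buffer back to the pool only if it actually came from
	 * the pool; otherwise assume it was a plain page allocation. */
	if (addr_in_gen_pool(my_pool, (unsigned long)vaddr, size))
		gen_pool_free(my_pool, (unsigned long)vaddr, size);
	else
		free_pages((unsigned long)vaddr, get_order(size));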
@@ -481,6 +510,26 @@ unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
 EXPORT_SYMBOL(gen_pool_first_fit);
 
 /**
+ * gen_pool_first_fit_order_align - find the first available region
+ * of memory matching the size requirement. The region will be aligned
+ * to the order of the size specified.
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ */
+unsigned long gen_pool_first_fit_order_align(unsigned long *map,
+		unsigned long size, unsigned long start,
+		unsigned int nr, void *data)
+{
+	unsigned long align_mask = roundup_pow_of_two(nr) - 1;
+
+	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+}
+EXPORT_SYMBOL(gen_pool_first_fit_order_align);
+
+/**
  * gen_pool_best_fit - find the best fitting region of memory
  * macthing the size requirement (no alignment constraint)
  * @map: The address to base the search on
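gen_pool_first_fit_order_align is a genpool_algo_t, so it is not normally called directly; it is installed on a pool with gen_pool_set_algo() and then drives later allocations. A hedged usage sketch, assuming an already-created pool named pool (the SZ_4K request size is only an example), might be:

	unsigned long addr;

	/* Align each allocation to the power-of-two order of its size,
	 * e.g. a 4 KiB request comes back 4 KiB aligned. */
	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	addr = gen_pool_alloc(pool, SZ_4K);
	if (!addr)
		return -ENOMEM;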
@@ -549,6 +598,7 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
 
 	return pool;
 }
+EXPORT_SYMBOL(devm_gen_pool_create);
 
 /**
  * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
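Exporting devm_gen_pool_create() lets driver modules create managed pools directly instead of open-coding gen_pool_create() plus devres handling. A hypothetical probe-time sketch, assuming the three-argument form visible in the hunk header above (dev, min_alloc_order, and a NUMA node id as the remaining parameter), could be:

	/* Managed pool with 8-byte (order-3) allocation granularity; torn
	 * down automatically when the device is unbound. */
	struct gen_pool *pool = devm_gen_pool_create(dev, 3, dev_to_node(dev));

	if (!pool)
		return -ENOMEM;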