Diffstat (limited to 'lib')

 lib/genalloc.c | 51 +++++++++++++++++++++++++--------------------------
 1 file changed, 25 insertions(+), 26 deletions(-)
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 7e85d1e37a6e..770c769d7cb7 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -168,20 +168,21 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
 EXPORT_SYMBOL(gen_pool_create);
 
 /**
- * gen_pool_add_virt - add a new chunk of special memory to the pool
+ * gen_pool_add_owner- add a new chunk of special memory to the pool
  * @pool: pool to add new memory chunk to
  * @virt: virtual starting address of memory chunk to add to pool
  * @phys: physical starting address of memory chunk to add to pool
  * @size: size in bytes of the memory chunk to add to pool
  * @nid: node id of the node the chunk structure and bitmap should be
  *       allocated on, or -1
+ * @owner: private data the publisher would like to recall at alloc time
  *
  * Add a new chunk of special memory to the specified pool.
  *
  * Returns 0 on success or a -ve errno on failure.
  */
-int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
-		 size_t size, int nid)
+int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
+		 size_t size, int nid, void *owner)
 {
 	struct gen_pool_chunk *chunk;
 	int nbits = size >> pool->min_alloc_order;
@@ -195,6 +196,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
 	chunk->phys_addr = phys;
 	chunk->start_addr = virt;
 	chunk->end_addr = virt + size - 1;
+	chunk->owner = owner;
 	atomic_long_set(&chunk->avail, size);
 
 	spin_lock(&pool->lock);
@@ -203,7 +205,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
 
 	return 0;
 }
-EXPORT_SYMBOL(gen_pool_add_virt);
+EXPORT_SYMBOL(gen_pool_add_owner);
 
 /**
  * gen_pool_virt_to_phys - return the physical address of memory
@@ -260,35 +262,20 @@ void gen_pool_destroy(struct gen_pool *pool)
 EXPORT_SYMBOL(gen_pool_destroy);
 
 /**
- * gen_pool_alloc - allocate special memory from the pool
- * @pool: pool to allocate from
- * @size: number of bytes to allocate from the pool
- *
- * Allocate the requested number of bytes from the specified pool.
- * Uses the pool allocation function (with first-fit algorithm by default).
- * Can not be used in NMI handler on architectures without
- * NMI-safe cmpxchg implementation.
- */
-unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
-{
-	return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
-}
-EXPORT_SYMBOL(gen_pool_alloc);
-
-/**
- * gen_pool_alloc_algo - allocate special memory from the pool
+ * gen_pool_alloc_algo_owner - allocate special memory from the pool
  * @pool: pool to allocate from
  * @size: number of bytes to allocate from the pool
  * @algo: algorithm passed from caller
  * @data: data passed to algorithm
+ * @owner: optionally retrieve the chunk owner
  *
  * Allocate the requested number of bytes from the specified pool.
  * Uses the pool allocation function (with first-fit algorithm by default).
  * Can not be used in NMI handler on architectures without
  * NMI-safe cmpxchg implementation.
  */
-unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
-		genpool_algo_t algo, void *data)
+unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
+		genpool_algo_t algo, void *data, void **owner)
 {
 	struct gen_pool_chunk *chunk;
 	unsigned long addr = 0;
@@ -299,6 +286,9 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
 	BUG_ON(in_nmi());
 #endif
 
+	if (owner)
+		*owner = NULL;
+
 	if (size == 0)
 		return 0;
 
@@ -326,12 +316,14 @@ retry:
 		addr = chunk->start_addr + ((unsigned long)start_bit << order);
 		size = nbits << order;
 		atomic_long_sub(size, &chunk->avail);
+		if (owner)
+			*owner = chunk->owner;
 		break;
 	}
 	rcu_read_unlock();
 	return addr;
 }
-EXPORT_SYMBOL(gen_pool_alloc_algo);
+EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
 
 /**
  * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
@@ -367,12 +359,14 @@ EXPORT_SYMBOL(gen_pool_dma_alloc);
  * @pool: pool to free to
  * @addr: starting address of memory to free back to pool
  * @size: size in bytes of memory to free
+ * @owner: private data stashed at gen_pool_add() time
  *
  * Free previously allocated special memory back to the specified
  * pool. Can not be used in NMI handler on architectures without
  * NMI-safe cmpxchg implementation.
  */
-void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
+void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
+		void **owner)
 {
 	struct gen_pool_chunk *chunk;
 	int order = pool->min_alloc_order;
@@ -382,6 +376,9 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
 	BUG_ON(in_nmi());
 #endif
 
+	if (owner)
+		*owner = NULL;
+
 	nbits = (size + (1UL << order) - 1) >> order;
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
@@ -392,6 +389,8 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
 		BUG_ON(remain);
 		size = nbits << order;
 		atomic_long_add(size, &chunk->avail);
+		if (owner)
+			*owner = chunk->owner;
 		rcu_read_unlock();
 		return;
 	}
@@ -399,7 +398,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
 	rcu_read_unlock();
 	BUG();
 }
-EXPORT_SYMBOL(gen_pool_free);
+EXPORT_SYMBOL(gen_pool_free_owner);
 
 /**
  * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
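
The hunks above rename the core genalloc entry points to *_owner variants that can stash a per-chunk private pointer at publish time and report it back at allocation and free time. As an orientation aid only, here is a minimal usage sketch of the new interfaces; it is not part of the patch, and the identifiers struct my_region, my_setup(), my_alloc() and my_free() are invented for illustration. It assumes the function signatures exactly as they appear in this diff.

/*
 * Hypothetical driver-side sketch (not part of the patch): tag each chunk
 * published to the pool with a private pointer and recall it via the new
 * *_owner helpers.
 */
#include <linux/genalloc.h>

struct my_region {
	void *mapping;		/* driver-private data to recall later */
};

static int my_setup(struct gen_pool *pool, unsigned long vaddr,
		    phys_addr_t paddr, size_t size, struct my_region *region)
{
	/* Stash 'region' as the chunk owner; nid == -1 means no node preference. */
	return gen_pool_add_owner(pool, vaddr, paddr, size, -1, region);
}

static unsigned long my_alloc(struct gen_pool *pool, size_t size,
			      struct my_region **region)
{
	void *owner = NULL;
	unsigned long addr;

	/*
	 * Mirrors the body of the removed gen_pool_alloc(), but additionally
	 * reports which chunk (and therefore which owner) served the request.
	 * Returns 0 and leaves owner NULL on failure.
	 */
	addr = gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data,
					 &owner);
	*region = owner;
	return addr;
}

static void my_free(struct gen_pool *pool, unsigned long addr, size_t size,
		    struct my_region **region)
{
	void *owner = NULL;

	/* Frees the range and reports the owner of the chunk it came from. */
	gen_pool_free_owner(pool, addr, size, &owner);
	*region = owner;
}

Note that this diff only touches lib/genalloc.c; callers of the old gen_pool_add_virt(), gen_pool_alloc_algo() and gen_pool_free() names are presumably serviced by wrappers that pass a NULL owner in the matching include/linux/genalloc.h change, which is not shown here.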
