path: root/lib/genalloc.c
author		Stephen Bates <sbates@raithlin.com>	2017-11-17 18:28:16 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-17 19:10:02 -0500
commit		36a3d1dd4e16bcd0d2ddfb4a2ec7092f0ae0d931 (patch)
tree		751e5b94e03ed0001a4e0e7bfa9f1f1204f34270 /lib/genalloc.c
parent		e813a614007e3a8a7b53d976e86d9a20f21f81ad (diff)
lib/genalloc.c: make the avail variable an atomic_long_t
If the amount of resources allocated to a gen_pool exceeds 2^32, the avail atomic overflows, and this causes problems when clients try to borrow resources from the pool.  This is only expected to be an issue on 64-bit systems.

Add the <linux/atomic.h> header to pull in the atomic_long* operations, so that 32-bit systems continue to use 32-bit atomics while 64-bit systems can use atomic64_t.

Link: http://lkml.kernel.org/r/1509033843-25667-1-git-send-email-sbates@raithlin.com
Signed-off-by: Stephen Bates <sbates@raithlin.com>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Reviewed-by: Daniel Mentz <danielmentz@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
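To illustrate the overflow the message describes, here is a minimal user-space sketch (not part of the patch; the variable names are illustrative) of how a 32-bit counter wraps once more than 2^32 bytes are tracked, while a long-sized counter holds the full value on a 64-bit system:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical stand-ins for a 32-bit atomic_t counter and a
	 * machine-word atomic_long_t counter. */
	uint32_t avail32 = 0;
	unsigned long avail_long = 0;

	/* Track 5 GiB of resources, as a pool larger than 2^32 would. */
	uint64_t size = 5ULL << 30;

	avail32 += size;	/* truncated to 32 bits: wraps to 1 GiB */
	avail_long += size;	/* full value on a 64-bit system */

	printf("32-bit avail: %u\n", avail32);		/* 1073741824 */
	printf("long avail:   %lu\n", avail_long);	/* 5368709120 */
	return 0;
}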
Diffstat (limited to 'lib/genalloc.c')
-rw-r--r--	lib/genalloc.c	10
1 file changed, 5 insertions, 5 deletions
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 144fe6b1a03e..ca06adc4f445 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -194,7 +194,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
 	chunk->phys_addr = phys;
 	chunk->start_addr = virt;
 	chunk->end_addr = virt + size - 1;
-	atomic_set(&chunk->avail, size);
+	atomic_long_set(&chunk->avail, size);
 
 	spin_lock(&pool->lock);
 	list_add_rcu(&chunk->next_chunk, &pool->chunks);
@@ -304,7 +304,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
 	nbits = (size + (1UL << order) - 1) >> order;
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
-		if (size > atomic_read(&chunk->avail))
+		if (size > atomic_long_read(&chunk->avail))
 			continue;
 
 		start_bit = 0;
@@ -324,7 +324,7 @@ retry:
 
 		addr = chunk->start_addr + ((unsigned long)start_bit << order);
 		size = nbits << order;
-		atomic_sub(size, &chunk->avail);
+		atomic_long_sub(size, &chunk->avail);
 		break;
 	}
 	rcu_read_unlock();
@@ -390,7 +390,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
 			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
 			BUG_ON(remain);
 			size = nbits << order;
-			atomic_add(size, &chunk->avail);
+			atomic_long_add(size, &chunk->avail);
 			rcu_read_unlock();
 			return;
 		}
@@ -464,7 +464,7 @@ size_t gen_pool_avail(struct gen_pool *pool)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
-		avail += atomic_read(&chunk->avail);
+		avail += atomic_long_read(&chunk->avail);
 	rcu_read_unlock();
 	return avail;
 }
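For context, a hedged in-kernel sketch of how a client exercises the paths touched above through the public gen_pool API; demo_pool(), the region start address, and the 8 GiB size are hypothetical, but the gen_pool_* calls are the real interface:

#include <linux/genalloc.h>
#include <linux/sizes.h>

/* Assume an 8 GiB region already mapped at vstart; on a 64-bit system
 * its size exceeds 2^32, which is exactly what the old 32-bit avail
 * counter could not represent. */
static int demo_pool(unsigned long vstart)
{
	struct gen_pool *pool;
	unsigned long addr;

	pool = gen_pool_create(PAGE_SHIFT, -1);	/* page-sized granules, any node */
	if (!pool)
		return -ENOMEM;

	if (gen_pool_add(pool, vstart, 8ULL << 30, -1)) {	/* avail = 8 GiB */
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	addr = gen_pool_alloc(pool, SZ_1M);	/* avail -= 1 MiB */
	if (addr)
		gen_pool_free(pool, addr, SZ_1M);	/* avail += 1 MiB */

	pr_info("avail: %zu bytes\n", gen_pool_avail(pool));
	gen_pool_destroy(pool);
	return 0;
}

gen_pool_avail() sums chunk->avail across all chunks, so with the old atomic_t the 8 GiB figure above would have been silently truncated; after this patch it is reported correctly on 64-bit systems.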