about summary refs log tree commit diff stats
path: root/lib
diff options
context:
space:
mode:
authorJoonyoung Shim <jy0922.shim@samsung.com>2013-09-11 17:21:43 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-11 18:57:35 -0400
commit674470d97958a0ec72f72caf7f6451da40159cc7 (patch)
tree5085abf683ef3ac3f2dcf745b0d214dc70031582 /lib
parenteee87e1726af8c746f0e15ae6c57a97675f5e960 (diff)
lib/genalloc.c: fix overflow of ending address of memory chunk
In struct gen_pool_chunk, end_addr means the end address of the memory chunk (inclusive), but in the implementation it is treated as address + size of the memory chunk (exclusive), so it points to the address plus one instead of the correct ending address. The ending address of the memory chunk plus one will cause overflow on a memory chunk that includes the last address of the memory map, e.g. when the starting address is 0xFFF00000 and the size is 0x100000 on a 32-bit machine, the ending address will be 0x100000000. Use the correct ending address, i.e. starting address + size - 1. [akpm@linux-foundation.org: add comment to struct gen_pool_chunk:end_addr] Signed-off-by: Joonyoung Shim <jy0922.shim@samsung.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'lib')
-rw-r--r--lib/genalloc.c19
1 file changed, 12 insertions, 7 deletions
diff --git a/lib/genalloc.c b/lib/genalloc.c
index b35cfa9bc3d4..2a39bf62d8c1 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -37,6 +37,11 @@
37#include <linux/of_address.h> 37#include <linux/of_address.h>
38#include <linux/of_device.h> 38#include <linux/of_device.h>
39 39
40static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
41{
42 return chunk->end_addr - chunk->start_addr + 1;
43}
44
40static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set) 45static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
41{ 46{
42 unsigned long val, nval; 47 unsigned long val, nval;
@@ -188,7 +193,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
188 193
189 chunk->phys_addr = phys; 194 chunk->phys_addr = phys;
190 chunk->start_addr = virt; 195 chunk->start_addr = virt;
191 chunk->end_addr = virt + size; 196 chunk->end_addr = virt + size - 1;
192 atomic_set(&chunk->avail, size); 197 atomic_set(&chunk->avail, size);
193 198
194 spin_lock(&pool->lock); 199 spin_lock(&pool->lock);
@@ -213,7 +218,7 @@ phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
213 218
214 rcu_read_lock(); 219 rcu_read_lock();
215 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { 220 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
216 if (addr >= chunk->start_addr && addr < chunk->end_addr) { 221 if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
217 paddr = chunk->phys_addr + (addr - chunk->start_addr); 222 paddr = chunk->phys_addr + (addr - chunk->start_addr);
218 break; 223 break;
219 } 224 }
@@ -242,7 +247,7 @@ void gen_pool_destroy(struct gen_pool *pool)
242 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); 247 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
243 list_del(&chunk->next_chunk); 248 list_del(&chunk->next_chunk);
244 249
245 end_bit = (chunk->end_addr - chunk->start_addr) >> order; 250 end_bit = chunk_size(chunk) >> order;
246 bit = find_next_bit(chunk->bits, end_bit, 0); 251 bit = find_next_bit(chunk->bits, end_bit, 0);
247 BUG_ON(bit < end_bit); 252 BUG_ON(bit < end_bit);
248 253
@@ -283,7 +288,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
283 if (size > atomic_read(&chunk->avail)) 288 if (size > atomic_read(&chunk->avail))
284 continue; 289 continue;
285 290
286 end_bit = (chunk->end_addr - chunk->start_addr) >> order; 291 end_bit = chunk_size(chunk) >> order;
287retry: 292retry:
288 start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits, 293 start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
289 pool->data); 294 pool->data);
@@ -330,8 +335,8 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
330 nbits = (size + (1UL << order) - 1) >> order; 335 nbits = (size + (1UL << order) - 1) >> order;
331 rcu_read_lock(); 336 rcu_read_lock();
332 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { 337 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
333 if (addr >= chunk->start_addr && addr < chunk->end_addr) { 338 if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
334 BUG_ON(addr + size > chunk->end_addr); 339 BUG_ON(addr + size - 1 > chunk->end_addr);
335 start_bit = (addr - chunk->start_addr) >> order; 340 start_bit = (addr - chunk->start_addr) >> order;
336 remain = bitmap_clear_ll(chunk->bits, start_bit, nbits); 341 remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
337 BUG_ON(remain); 342 BUG_ON(remain);
@@ -400,7 +405,7 @@ size_t gen_pool_size(struct gen_pool *pool)
400 405
401 rcu_read_lock(); 406 rcu_read_lock();
402 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) 407 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
403 size += chunk->end_addr - chunk->start_addr; 408 size += chunk_size(chunk);
404 rcu_read_unlock(); 409 rcu_read_unlock();
405 return size; 410 return size;
406} 411}