author		Linus Torvalds <torvalds@linux-foundation.org>	2011-08-04 03:53:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-08-04 03:53:27 -0400
commit		c0c770e610cc4cdcd66c7e939bdf89cc3e72f79d (patch)
tree		7cf6807258fef2a85a2ff212f4f4eb6d9dc336c6 /lib
parent		a9e4e6e14c322e08d1c615afc8f504fb415f9613 (diff)
parent		d0e323b47057f4492b8fa22345f38d80a469bf8d (diff)
Merge branch 'apei-release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6
* 'apei-release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6:
ACPI, APEI, EINJ Param support is disabled by default
APEI GHES: 32-bit buildfix
ACPI: APEI build fix
ACPI, APEI, GHES: Add hardware memory error recovery support
HWPoison: add memory_failure_queue()
ACPI, APEI, GHES, Error records content based throttle
ACPI, APEI, GHES, printk support for recoverable error via NMI
lib, Make gen_pool memory allocator lockless
lib, Add lock-less NULL terminated single list
Add Kconfig option ARCH_HAVE_NMI_SAFE_CMPXCHG
ACPI, APEI, Add WHEA _OSC support
ACPI, APEI, Add APEI bit support in generic _OSC call
ACPI, APEI, GHES, Support disable GHES at boot time
ACPI, APEI, GHES, Prevent GHES to be built as module
ACPI, APEI, Use apei_exec_run_optional in APEI EINJ and ERST
ACPI, APEI, Add apei_exec_run_optional
ACPI, APEI, GHES, Do not ratelimit fatal error printk before panic
ACPI, APEI, ERST, Fix erst-dbg long record reading issue
ACPI, APEI, ERST, Prevent erst_dbg from loading if ERST is disabled
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig	|   3
-rw-r--r--	lib/Makefile	|   2
-rw-r--r--	lib/bitmap.c	|   2
-rw-r--r--	lib/genalloc.c	| 300
-rw-r--r--	lib/llist.c	| 129
5 files changed, 377 insertions(+), 59 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 32f3e5ae2be5..6c695ff9caba 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -276,4 +276,7 @@ config CORDIC
 	  so its calculations are in fixed point. Modules can select this
 	  when they require this function. Module will be called cordic.
 
+config LLIST
+	bool
+
 endmenu
diff --git a/lib/Makefile b/lib/Makefile
index 892f4e282ea1..6457af4a7caf 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -115,6 +115,8 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
 
 obj-$(CONFIG_CORDIC) += cordic.o
 
+obj-$(CONFIG_LLIST) += llist.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 37ef4b048795..2f4412e4d071 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -271,8 +271,6 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
 }
 EXPORT_SYMBOL(__bitmap_weight);
 
-#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
-
 void bitmap_set(unsigned long *map, int start, int nr)
 {
 	unsigned long *p = map + BIT_WORD(start);
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 577ddf805975..f352cc42f4f8 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -1,8 +1,26 @@
 /*
- * Basic general purpose allocator for managing special purpose memory
- * not managed by the regular kmalloc/kfree interface.
- * Uses for this includes on-device special memory, uncached memory
- * etc.
+ * Basic general purpose allocator for managing special purpose
+ * memory, for example, memory that is not managed by the regular
+ * kmalloc/kfree interface. Uses for this includes on-device special
+ * memory, uncached memory etc.
+ *
+ * It is safe to use the allocator in NMI handlers and other special
+ * unblockable contexts that could otherwise deadlock on locks. This
+ * is implemented by using atomic operations and retries on any
+ * conflicts. The disadvantage is that there may be livelocks in
+ * extreme cases. For better scalability, one allocator can be used
+ * for each CPU.
+ *
+ * The lockless operation only works if there is enough memory
+ * available. If new memory is added to the pool a lock has to be
+ * still taken. So any user relying on locklessness has to ensure
+ * that sufficient memory is preallocated.
+ *
+ * The basic atomic operation of this allocator is cmpxchg on long.
+ * On architectures that don't have NMI-safe cmpxchg implementation,
+ * the allocator can NOT be used in NMI handler. So code uses the
+ * allocator in NMI handler should depend on
+ * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
  *
  * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
  *
@@ -13,8 +31,109 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/bitmap.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
 #include <linux/genalloc.h>
 
+static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
+{
+	unsigned long val, nval;
+
+	nval = *addr;
+	do {
+		val = nval;
+		if (val & mask_to_set)
+			return -EBUSY;
+		cpu_relax();
+	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);
+
+	return 0;
+}
+
+static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
+{
+	unsigned long val, nval;
+
+	nval = *addr;
+	do {
+		val = nval;
+		if ((val & mask_to_clear) != mask_to_clear)
+			return -EBUSY;
+		cpu_relax();
+	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);
+
+	return 0;
+}
+
+/*
+ * bitmap_set_ll - set the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to set
+ *
+ * Set @nr bits start from @start in @map lock-lessly. Several users
+ * can set/clear the same bitmap simultaneously without lock. If two
+ * users set the same bit, one user will return remain bits, otherwise
+ * return 0.
+ */
+static int bitmap_set_ll(unsigned long *map, int start, int nr)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const int size = start + nr;
+	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+	while (nr - bits_to_set >= 0) {
+		if (set_bits_ll(p, mask_to_set))
+			return nr;
+		nr -= bits_to_set;
+		bits_to_set = BITS_PER_LONG;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (nr) {
+		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+		if (set_bits_ll(p, mask_to_set))
+			return nr;
+	}
+
+	return 0;
+}
+
+/*
+ * bitmap_clear_ll - clear the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to set
+ *
+ * Clear @nr bits start from @start in @map lock-lessly. Several users
+ * can set/clear the same bitmap simultaneously without lock. If two
+ * users clear the same bit, one user will return remain bits,
+ * otherwise return 0.
+ */
+static int bitmap_clear_ll(unsigned long *map, int start, int nr)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const int size = start + nr;
+	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+	while (nr - bits_to_clear >= 0) {
+		if (clear_bits_ll(p, mask_to_clear))
+			return nr;
+		nr -= bits_to_clear;
+		bits_to_clear = BITS_PER_LONG;
+		mask_to_clear = ~0UL;
+		p++;
+	}
+	if (nr) {
+		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+		if (clear_bits_ll(p, mask_to_clear))
+			return nr;
+	}
+
+	return 0;
+}
 
 /**
  * gen_pool_create - create a new special memory pool
@@ -30,7 +149,7 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
 
 	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
 	if (pool != NULL) {
-		rwlock_init(&pool->lock);
+		spin_lock_init(&pool->lock);
 		INIT_LIST_HEAD(&pool->chunks);
 		pool->min_alloc_order = min_alloc_order;
 	}
@@ -63,14 +182,14 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
 	if (unlikely(chunk == NULL))
 		return -ENOMEM;
 
-	spin_lock_init(&chunk->lock);
 	chunk->phys_addr = phys;
 	chunk->start_addr = virt;
 	chunk->end_addr = virt + size;
+	atomic_set(&chunk->avail, size);
 
-	write_lock(&pool->lock);
-	list_add(&chunk->next_chunk, &pool->chunks);
-	write_unlock(&pool->lock);
+	spin_lock(&pool->lock);
+	list_add_rcu(&chunk->next_chunk, &pool->chunks);
+	spin_unlock(&pool->lock);
 
 	return 0;
 }
@@ -85,19 +204,19 @@ EXPORT_SYMBOL(gen_pool_add_virt);
  */
 phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
 {
-	struct list_head *_chunk;
 	struct gen_pool_chunk *chunk;
+	phys_addr_t paddr = -1;
 
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
-
-		if (addr >= chunk->start_addr && addr < chunk->end_addr)
-			return chunk->phys_addr + addr - chunk->start_addr;
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+			paddr = chunk->phys_addr + (addr - chunk->start_addr);
+			break;
+		}
 	}
-	read_unlock(&pool->lock);
+	rcu_read_unlock();
 
-	return -1;
+	return paddr;
 }
 EXPORT_SYMBOL(gen_pool_virt_to_phys);
 
@@ -115,7 +234,6 @@ void gen_pool_destroy(struct gen_pool *pool)
 	int order = pool->min_alloc_order;
 	int bit, end_bit;
 
-
 	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
 		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
 		list_del(&chunk->next_chunk);
@@ -137,44 +255,50 @@ EXPORT_SYMBOL(gen_pool_destroy);
  * @size: number of bytes to allocate from the pool
  *
  * Allocate the requested number of bytes from the specified pool.
- * Uses a first-fit algorithm.
+ * Uses a first-fit algorithm. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
  */
 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 {
-	struct list_head *_chunk;
 	struct gen_pool_chunk *chunk;
-	unsigned long addr, flags;
+	unsigned long addr = 0;
 	int order = pool->min_alloc_order;
-	int nbits, start_bit, end_bit;
+	int nbits, start_bit = 0, end_bit, remain;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
 
 	if (size == 0)
 		return 0;
 
 	nbits = (size + (1UL << order) - 1) >> order;
-
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+		if (size > atomic_read(&chunk->avail))
+			continue;
 
 		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
-
-		spin_lock_irqsave(&chunk->lock, flags);
-		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
-						nbits, 0);
-		if (start_bit >= end_bit) {
-			spin_unlock_irqrestore(&chunk->lock, flags);
+retry:
+		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
+						       start_bit, nbits, 0);
+		if (start_bit >= end_bit)
 			continue;
+		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
+		if (remain) {
+			remain = bitmap_clear_ll(chunk->bits, start_bit,
+						 nbits - remain);
+			BUG_ON(remain);
+			goto retry;
 		}
 
 		addr = chunk->start_addr + ((unsigned long)start_bit << order);
-
-		bitmap_set(chunk->bits, start_bit, nbits);
-		spin_unlock_irqrestore(&chunk->lock, flags);
-		read_unlock(&pool->lock);
-		return addr;
+		size = nbits << order;
+		atomic_sub(size, &chunk->avail);
+		break;
 	}
-	read_unlock(&pool->lock);
-	return 0;
+	rcu_read_unlock();
+	return addr;
 }
 EXPORT_SYMBOL(gen_pool_alloc);
 
@@ -184,33 +308,95 @@ EXPORT_SYMBOL(gen_pool_alloc);
  * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
  *
- * Free previously allocated special memory back to the specified pool.
+ * Free previously allocated special memory back to the specified
+ * pool. Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
  */
 void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
 {
-	struct list_head *_chunk;
 	struct gen_pool_chunk *chunk;
-	unsigned long flags;
 	int order = pool->min_alloc_order;
-	int bit, nbits;
+	int start_bit, nbits, remain;
 
-	nbits = (size + (1UL << order) - 1) >> order;
-
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
 
+	nbits = (size + (1UL << order) - 1) >> order;
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
 		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
 			BUG_ON(addr + size > chunk->end_addr);
-			spin_lock_irqsave(&chunk->lock, flags);
-			bit = (addr - chunk->start_addr) >> order;
-			while (nbits--)
-				__clear_bit(bit++, chunk->bits);
-			spin_unlock_irqrestore(&chunk->lock, flags);
-			break;
+			start_bit = (addr - chunk->start_addr) >> order;
+			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
+			BUG_ON(remain);
+			size = nbits << order;
+			atomic_add(size, &chunk->avail);
+			rcu_read_unlock();
+			return;
 		}
 	}
-	BUG_ON(nbits > 0);
-	read_unlock(&pool->lock);
+	rcu_read_unlock();
+	BUG();
 }
 EXPORT_SYMBOL(gen_pool_free);
+
+/**
+ * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
+ * @pool: the generic memory pool
+ * @func: func to call
+ * @data: additional data used by @func
+ *
+ * Call @func for every chunk of generic memory pool. The @func is
+ * called with rcu_read_lock held.
+ */
+void gen_pool_for_each_chunk(struct gen_pool *pool,
+	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
+	void *data)
+{
+	struct gen_pool_chunk *chunk;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
+		func(pool, chunk, data);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(gen_pool_for_each_chunk);
+
+/**
+ * gen_pool_avail - get available free space of the pool
+ * @pool: pool to get available free space
+ *
+ * Return available free space of the specified pool.
+ */
+size_t gen_pool_avail(struct gen_pool *pool)
+{
+	struct gen_pool_chunk *chunk;
+	size_t avail = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
+		avail += atomic_read(&chunk->avail);
+	rcu_read_unlock();
+	return avail;
+}
+EXPORT_SYMBOL_GPL(gen_pool_avail);
+
+/**
+ * gen_pool_size - get size in bytes of memory managed by the pool
+ * @pool: pool to get size
+ *
+ * Return size in bytes of memory managed by the pool.
+ */
+size_t gen_pool_size(struct gen_pool *pool)
+{
+	struct gen_pool_chunk *chunk;
+	size_t size = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
+		size += chunk->end_addr - chunk->start_addr;
+	rcu_read_unlock();
+	return size;
+}
+EXPORT_SYMBOL_GPL(gen_pool_size);
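
For context, here is a minimal, hypothetical caller-side sketch of the lockless gen_pool pattern the new header comment describes: backing memory is added up front, where taking pool->lock is still allowed, so that later gen_pool_alloc()/gen_pool_free() calls from IRQ or NMI context only touch the chunk bitmap via cmpxchg. The names and sizes below are illustrative and not part of this merge; NMI use additionally requires ARCH_HAVE_NMI_SAFE_CMPXCHG.

/* Hypothetical example; not part of this merge. */
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/slab.h>

#define EXAMPLE_POOL_SIZE	4096	/* assumption: 4 KiB is enough */

static struct gen_pool *example_pool;

static int __init example_pool_init(void)
{
	unsigned long buf;
	int ret;

	/* min_alloc_order 3: allocations are rounded up to 8-byte units */
	example_pool = gen_pool_create(3, -1);
	if (!example_pool)
		return -ENOMEM;

	buf = (unsigned long)kmalloc(EXAMPLE_POOL_SIZE, GFP_KERNEL);
	if (!buf) {
		gen_pool_destroy(example_pool);
		return -ENOMEM;
	}

	/* Adding memory takes pool->lock, so it must happen here, not in NMI. */
	ret = gen_pool_add(example_pool, buf, EXAMPLE_POOL_SIZE, -1);
	if (ret) {
		kfree((void *)buf);
		gen_pool_destroy(example_pool);
	}
	return ret;
}

/* Called from an NMI or other unblockable context. */
static void *example_grab_record(size_t len)
{
	/* Lockless: cmpxchg on the chunk bitmap, retried on conflict. */
	return (void *)gen_pool_alloc(example_pool, len);
}

static void example_put_record(void *rec, size_t len)
{
	gen_pool_free(example_pool, (unsigned long)rec, len);
}

This mirrors how the APEI/GHES changes elsewhere in this merge use the allocator for error records queued from the NMI path.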
diff --git a/lib/llist.c b/lib/llist.c
new file mode 100644
index 000000000000..da445724fa1f
--- /dev/null
+++ b/lib/llist.c
@@ -0,0 +1,129 @@
+/*
+ * Lock-less NULL terminated single linked list
+ *
+ * The basic atomic operation of this list is cmpxchg on long. On
+ * architectures that don't have NMI-safe cmpxchg implementation, the
+ * list can NOT be used in NMI handler. So code uses the list in NMI
+ * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ *
+ * Copyright 2010,2011 Intel Corp.
+ *   Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/llist.h>
+
+#include <asm/system.h>
+
+/**
+ * llist_add - add a new entry
+ * @new: new entry to be added
+ * @head: the head for your lock-less list
+ */
+void llist_add(struct llist_node *new, struct llist_head *head)
+{
+	struct llist_node *entry, *old_entry;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
+
+	entry = head->first;
+	do {
+		old_entry = entry;
+		new->next = entry;
+		cpu_relax();
+	} while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry);
+}
+EXPORT_SYMBOL_GPL(llist_add);
+
+/**
+ * llist_add_batch - add several linked entries in batch
+ * @new_first: first entry in batch to be added
+ * @new_last: last entry in batch to be added
+ * @head: the head for your lock-less list
+ */
+void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+		     struct llist_head *head)
+{
+	struct llist_node *entry, *old_entry;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
+
+	entry = head->first;
+	do {
+		old_entry = entry;
+		new_last->next = entry;
+		cpu_relax();
+	} while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry);
+}
+EXPORT_SYMBOL_GPL(llist_add_batch);
+
+/**
+ * llist_del_first - delete the first entry of lock-less list
+ * @head: the head for your lock-less list
+ *
+ * If list is empty, return NULL, otherwise, return the first entry
+ * deleted, this is the newest added one.
+ *
+ * Only one llist_del_first user can be used simultaneously with
+ * multiple llist_add users without lock. Because otherwise
+ * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add,
+ * llist_add) sequence in another user may change @head->first->next,
+ * but keep @head->first. If multiple consumers are needed, please
+ * use llist_del_all or use lock between consumers.
+ */
+struct llist_node *llist_del_first(struct llist_head *head)
+{
+	struct llist_node *entry, *old_entry, *next;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
+
+	entry = head->first;
+	do {
+		if (entry == NULL)
+			return NULL;
+		old_entry = entry;
+		next = entry->next;
+		cpu_relax();
+	} while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry);
+
+	return entry;
+}
+EXPORT_SYMBOL_GPL(llist_del_first);
+
+/**
+ * llist_del_all - delete all entries from lock-less list
+ * @head: the head of lock-less list to delete all entries
+ *
+ * If list is empty, return NULL, otherwise, delete all entries and
+ * return the pointer to the first entry. The order of entries
+ * deleted is from the newest to the oldest added one.
+ */
+struct llist_node *llist_del_all(struct llist_head *head)
+{
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
+
+	return xchg(&head->first, NULL);
+}
+EXPORT_SYMBOL_GPL(llist_del_all);
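
And a small, hypothetical usage sketch for the lock-less list: producers push entries with llist_add() from IRQ/NMI context, while a single consumer drains everything at once with llist_del_all() and walks the returned chain, newest entry first. The struct and function names below are illustrative; LLIST_HEAD() and struct llist_node are assumed to come from the new include/linux/llist.h, which lies outside this lib-only diffstat, and the user must select LLIST in Kconfig.

/* Hypothetical example; not part of this merge. */
#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/slab.h>

struct example_event {
	int error_code;			/* hypothetical payload */
	struct llist_node llnode;	/* linkage used by the lock-less list */
};

static LLIST_HEAD(example_event_list);

/* Producer side: safe in NMI only if ARCH_HAVE_NMI_SAFE_CMPXCHG is set. */
static void example_report(struct example_event *ev)
{
	llist_add(&ev->llnode, &example_event_list);
}

/* Consumer side: e.g. a workqueue or softirq handler. */
static void example_drain(void)
{
	struct llist_node *node = llist_del_all(&example_event_list);

	while (node) {
		struct example_event *ev =
			container_of(node, struct example_event, llnode);

		node = node->next;	/* read the link before freeing the entry */
		pr_info("event %d\n", ev->error_code);
		kfree(ev);
	}
}

Draining with llist_del_all(), rather than repeated llist_del_first() calls from several places, sidesteps the multiple-consumer hazard described in the llist_del_first() comment above.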
