Diffstat (limited to 'lib/percpu_ida.c')
 -rw-r--r--   lib/percpu_ida.c   63
 1 file changed, 21 insertions(+), 42 deletions(-)
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 6016f1deb1f5..9bbd9c5d375a 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -112,18 +112,6 @@ static inline void alloc_global_tags(struct percpu_ida *pool,
 		  min(pool->nr_free, pool->percpu_batch_size));
 }
 
-static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
-{
-	int tag = -ENOSPC;
-
-	spin_lock(&tags->lock);
-	if (tags->nr_free)
-		tag = tags->freelist[--tags->nr_free];
-	spin_unlock(&tags->lock);
-
-	return tag;
-}
-
 /**
  * percpu_ida_alloc - allocate a tag
  * @pool: pool to allocate from
@@ -147,20 +135,22 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
 	DEFINE_WAIT(wait);
 	struct percpu_ida_cpu *tags;
 	unsigned long flags;
-	int tag;
+	int tag = -ENOSPC;
 
-	local_irq_save(flags);
-	tags = this_cpu_ptr(pool->tag_cpu);
+	tags = raw_cpu_ptr(pool->tag_cpu);
+	spin_lock_irqsave(&tags->lock, flags);
 
 	/* Fastpath */
-	tag = alloc_local_tag(tags);
-	if (likely(tag >= 0)) {
-		local_irq_restore(flags);
+	if (likely(tags->nr_free >= 0)) {
+		tag = tags->freelist[--tags->nr_free];
+		spin_unlock_irqrestore(&tags->lock, flags);
 		return tag;
 	}
+	spin_unlock_irqrestore(&tags->lock, flags);
 
 	while (1) {
-		spin_lock(&pool->lock);
+		spin_lock_irqsave(&pool->lock, flags);
+		tags = this_cpu_ptr(pool->tag_cpu);
 
 		/*
 		 * prepare_to_wait() must come before steal_tags(), in case
@@ -184,8 +174,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
 					&pool->cpus_have_tags);
 		}
 
-		spin_unlock(&pool->lock);
-		local_irq_restore(flags);
+		spin_unlock_irqrestore(&pool->lock, flags);
 
 		if (tag >= 0 || state == TASK_RUNNING)
 			break;
@@ -196,9 +185,6 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
 		}
 
 		schedule();
-
-		local_irq_save(flags);
-		tags = this_cpu_ptr(pool->tag_cpu);
 	}
 	if (state != TASK_RUNNING)
 		finish_wait(&pool->wait, &wait);
@@ -222,28 +208,24 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
 
 	BUG_ON(tag >= pool->nr_tags);
 
-	local_irq_save(flags);
-	tags = this_cpu_ptr(pool->tag_cpu);
+	tags = raw_cpu_ptr(pool->tag_cpu);
 
-	spin_lock(&tags->lock);
+	spin_lock_irqsave(&tags->lock, flags);
 	tags->freelist[tags->nr_free++] = tag;
 
 	nr_free = tags->nr_free;
-	spin_unlock(&tags->lock);
 
 	if (nr_free == 1) {
 		cpumask_set_cpu(smp_processor_id(),
 				&pool->cpus_have_tags);
 		wake_up(&pool->wait);
 	}
+	spin_unlock_irqrestore(&tags->lock, flags);
 
 	if (nr_free == pool->percpu_max_size) {
-		spin_lock(&pool->lock);
+		spin_lock_irqsave(&pool->lock, flags);
+		spin_lock(&tags->lock);
 
-		/*
-		 * Global lock held and irqs disabled, don't need percpu
-		 * lock
-		 */
 		if (tags->nr_free == pool->percpu_max_size) {
 			move_tags(pool->freelist, &pool->nr_free,
 				  tags->freelist, &tags->nr_free,
@@ -251,10 +233,9 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
 
 			wake_up(&pool->wait);
 		}
-		spin_unlock(&pool->lock);
+		spin_unlock(&tags->lock);
+		spin_unlock_irqrestore(&pool->lock, flags);
 	}
-
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(percpu_ida_free);
 
@@ -346,29 +327,27 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
 	struct percpu_ida_cpu *remote;
 	unsigned cpu, i, err = 0;
 
-	local_irq_save(flags);
 	for_each_possible_cpu(cpu) {
 		remote = per_cpu_ptr(pool->tag_cpu, cpu);
-		spin_lock(&remote->lock);
+		spin_lock_irqsave(&remote->lock, flags);
 		for (i = 0; i < remote->nr_free; i++) {
 			err = fn(remote->freelist[i], data);
 			if (err)
 				break;
 		}
-		spin_unlock(&remote->lock);
+		spin_unlock_irqrestore(&remote->lock, flags);
 		if (err)
 			goto out;
 	}
 
-	spin_lock(&pool->lock);
+	spin_lock_irqsave(&pool->lock, flags);
 	for (i = 0; i < pool->nr_free; i++) {
 		err = fn(pool->freelist[i], data);
 		if (err)
 			break;
 	}
-	spin_unlock(&pool->lock);
+	spin_unlock_irqrestore(&pool->lock, flags);
 out:
-	local_irq_restore(flags);
 	return err;
 }
 EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
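
For readers skimming the hunks above: the patch replaces each local_irq_save()/spin_lock() pair (and the matching unlock/restore) with a single spin_lock_irqsave()/spin_unlock_irqrestore() on the per-CPU tags->lock, the remote->lock and the pool->lock, and drops the alloc_local_tag() helper that existed only to wrap the old fastpath. A minimal kernel-style sketch of the before/after locking pattern is shown below; struct tag_list, pop_tag_old() and pop_tag_new() are hypothetical names used purely for illustration and are not part of percpu_ida.

#include <linux/spinlock.h>
#include <linux/errno.h>

/* Hypothetical per-CPU freelist, standing in for struct percpu_ida_cpu. */
struct tag_list {
	spinlock_t lock;
	unsigned nr_free;
	int freelist[16];
};

/* Old pattern: disable interrupts explicitly, then take the plain spinlock. */
static int pop_tag_old(struct tag_list *tl)
{
	unsigned long flags;
	int tag = -ENOSPC;

	local_irq_save(flags);
	spin_lock(&tl->lock);
	if (tl->nr_free)
		tag = tl->freelist[--tl->nr_free];
	spin_unlock(&tl->lock);
	local_irq_restore(flags);

	return tag;
}

/* New pattern: one call both disables interrupts and takes the lock. */
static int pop_tag_new(struct tag_list *tl)
{
	unsigned long flags;
	int tag = -ENOSPC;

	spin_lock_irqsave(&tl->lock, flags);
	if (tl->nr_free)
		tag = tl->freelist[--tl->nr_free];
	spin_unlock_irqrestore(&tl->lock, flags);

	return tag;
}

Both variants keep interrupts disabled for the duration of the critical section; the _irqsave form simply ties the saved IRQ state to the lock acquisition itself, which is the conversion the diff applies to percpu_ida_alloc(), percpu_ida_free() and percpu_ida_for_each_free().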
