diff options
Diffstat (limited to 'lib/percpu_ida.c')
-rw-r--r-- | lib/percpu_ida.c | 28 |
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 9d054bf91d0f..93d145e5539c 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -54,9 +54,7 @@ static inline void move_tags(unsigned *dst, unsigned *dst_nr, | |||
54 | /* | 54 | /* |
55 | * Try to steal tags from a remote cpu's percpu freelist. | 55 | * Try to steal tags from a remote cpu's percpu freelist. |
56 | * | 56 | * |
57 | * We first check how many percpu freelists have tags - we don't steal tags | 57 | * We first check how many percpu freelists have tags |
58 | * unless enough percpu freelists have tags on them that it's possible more than | ||
59 | * half the total tags could be stuck on remote percpu freelists. | ||
60 | * | 58 | * |
61 | * Then we iterate through the cpus until we find some tags - we don't attempt | 59 | * Then we iterate through the cpus until we find some tags - we don't attempt |
62 | * to find the "best" cpu to steal from, to keep cacheline bouncing to a | 60 | * to find the "best" cpu to steal from, to keep cacheline bouncing to a |
@@ -69,8 +67,7 @@ static inline void steal_tags(struct percpu_ida *pool, | |||
69 | struct percpu_ida_cpu *remote; | 67 | struct percpu_ida_cpu *remote; |
70 | 68 | ||
71 | for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); | 69 | for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); |
72 | cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2; | 70 | cpus_have_tags; cpus_have_tags--) { |
73 | cpus_have_tags--) { | ||
74 | cpu = cpumask_next(cpu, &pool->cpus_have_tags); | 71 | cpu = cpumask_next(cpu, &pool->cpus_have_tags); |
75 | 72 | ||
76 | if (cpu >= nr_cpu_ids) { | 73 | if (cpu >= nr_cpu_ids) { |
@@ -132,22 +129,22 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags) | |||
132 | /** | 129 | /** |
133 | * percpu_ida_alloc - allocate a tag | 130 | * percpu_ida_alloc - allocate a tag |
134 | * @pool: pool to allocate from | 131 | * @pool: pool to allocate from |
135 | * @gfp: gfp flags | 132 | * @state: task state for prepare_to_wait |
136 | * | 133 | * |
137 | * Returns a tag - an integer in the range [0..nr_tags) (passed to | 134 | * Returns a tag - an integer in the range [0..nr_tags) (passed to |
138 | * tag_pool_init()), or otherwise -ENOSPC on allocation failure. | 135 | * tag_pool_init()), or otherwise -ENOSPC on allocation failure. |
139 | * | 136 | * |
140 | * Safe to be called from interrupt context (assuming it isn't passed | 137 | * Safe to be called from interrupt context (assuming it isn't passed |
141 | * __GFP_WAIT, of course). | 138 | * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course). |
142 | * | 139 | * |
143 | * @gfp indicates whether or not to wait until a free id is available (it's not | 140 | * @gfp indicates whether or not to wait until a free id is available (it's not |
144 | * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep | 141 | * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep |
145 | * however long it takes until another thread frees an id (same semantics as a | 142 | * however long it takes until another thread frees an id (same semantics as a |
146 | * mempool). | 143 | * mempool). |
147 | * | 144 | * |
148 | * Will not fail if passed __GFP_WAIT. | 145 | * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE. |
149 | */ | 146 | */ |
150 | int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) | 147 | int percpu_ida_alloc(struct percpu_ida *pool, int state) |
151 | { | 148 | { |
152 | DEFINE_WAIT(wait); | 149 | DEFINE_WAIT(wait); |
153 | struct percpu_ida_cpu *tags; | 150 | struct percpu_ida_cpu *tags; |
@@ -174,7 +171,8 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) | |||
174 | * | 171 | * |
175 | * global lock held and irqs disabled, don't need percpu lock | 172 | * global lock held and irqs disabled, don't need percpu lock |
176 | */ | 173 | */ |
177 | prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE); | 174 | if (state != TASK_RUNNING) |
175 | prepare_to_wait(&pool->wait, &wait, state); | ||
178 | 176 | ||
179 | if (!tags->nr_free) | 177 | if (!tags->nr_free) |
180 | alloc_global_tags(pool, tags); | 178 | alloc_global_tags(pool, tags); |
@@ -191,16 +189,22 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) | |||
191 | spin_unlock(&pool->lock); | 189 | spin_unlock(&pool->lock); |
192 | local_irq_restore(flags); | 190 | local_irq_restore(flags); |
193 | 191 | ||
194 | if (tag >= 0 || !(gfp & __GFP_WAIT)) | 192 | if (tag >= 0 || state == TASK_RUNNING) |
195 | break; | 193 | break; |
196 | 194 | ||
195 | if (signal_pending_state(state, current)) { | ||
196 | tag = -ERESTARTSYS; | ||
197 | break; | ||
198 | } | ||
199 | |||
197 | schedule(); | 200 | schedule(); |
198 | 201 | ||
199 | local_irq_save(flags); | 202 | local_irq_save(flags); |
200 | tags = this_cpu_ptr(pool->tag_cpu); | 203 | tags = this_cpu_ptr(pool->tag_cpu); |
201 | } | 204 | } |
205 | if (state != TASK_RUNNING) | ||
206 | finish_wait(&pool->wait, &wait); | ||
202 | 207 | ||
203 | finish_wait(&pool->wait, &wait); | ||
204 | return tag; | 208 | return tag; |
205 | } | 209 | } |
206 | EXPORT_SYMBOL_GPL(percpu_ida_alloc); | 210 | EXPORT_SYMBOL_GPL(percpu_ida_alloc); |