author		Linus Torvalds <torvalds@linux-foundation.org>	2018-08-16 01:06:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-16 01:06:26 -0400
commit		72f02ba66bd83b54054da20eae550123de84da6f
tree		96a8360400e040aa2e38e7352594dbbc485461db /lib
parent		db06f826ec12bf0701ea7fc0a3c0aa00b84417c8
parent		51372570ac3c919b036e760f4ca449e81cf8e995
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly updates to the usual drivers: mpt3sas, lpfc, qla2xxx,
  hisi_sas, smartpqi, megaraid_sas, arcmsr. In addition, with the
  continuing absence of Nic we have target updates for tcmu and target
  core (all with reviews and acks).

  The biggest observable change is going to be that we're (again) trying
  to switch to multiqueue as the default (a user can still override the
  setting on the kernel command line). Other major core stuff is the
  removal of the remaining Microchannel drivers, an update of the
  internal timers and some reworks of completion and result handling"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (203 commits)
  scsi: core: use blk_mq_run_hw_queues in scsi_kick_queue
  scsi: ufs: remove unnecessary query(DM) UPIU trace
  scsi: qla2xxx: Fix issue reported by static checker for qla2x00_els_dcmd2_sp_done()
  scsi: aacraid: Spelling fix in comment
  scsi: mpt3sas: Fix calltrace observed while running IO & reset
  scsi: aic94xx: fix an error code in aic94xx_init()
  scsi: st: remove redundant pointer STbuffer
  scsi: qla2xxx: Update driver version to 10.00.00.08-k
  scsi: qla2xxx: Migrate NVME N2N handling into state machine
  scsi: qla2xxx: Save frame payload size from ICB
  scsi: qla2xxx: Fix stalled relogin
  scsi: qla2xxx: Fix race between switch cmd completion and timeout
  scsi: qla2xxx: Fix Management Server NPort handle reservation logic
  scsi: qla2xxx: Flush mailbox commands on chip reset
  scsi: qla2xxx: Fix unintended Logout
  scsi: qla2xxx: Fix session state stuck in Get Port DB
  scsi: qla2xxx: Fix redundant fc_rport registration
  scsi: qla2xxx: Silent erroneous message
  scsi: qla2xxx: Prevent sysfs access when chip is down
  scsi: qla2xxx: Add longer window for chip reset
  ...
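(The command-line override mentioned above is the SCSI midlayer's module
parameter. On kernels of this era it would look like the following boot
parameter, though the exact name is stated here from memory and should be
checked against the running kernel's documentation:

	scsi_mod.use_blk_mq=0

which forces the legacy single-queue I/O path even when multiqueue is the
compiled-in default.)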
Diffstat (limited to 'lib')
-rw-r--r--	lib/Makefile		|   2 +-
-rw-r--r--	lib/klist.c		|  10 ++++++----
-rw-r--r--	lib/percpu_ida.c	| 370 --------------------------------
3 files changed, 7 insertions(+), 375 deletions(-)
diff --git a/lib/Makefile b/lib/Makefile
index ff3a397bbb12..d95bb2525101 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -37,7 +37,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
+	 percpu-refcount.o rhashtable.o reciprocal_div.o \
 	 once.o refcount.o usercopy.o errseq.o bucket_locks.o
 obj-$(CONFIG_STRING_SELFTEST) += test_string.o
 obj-y += string_helpers.o
diff --git a/lib/klist.c b/lib/klist.c
index 0507fa5d84c5..f6b547812fe3 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i)
 	void (*put)(struct klist_node *) = i->i_klist->put;
 	struct klist_node *last = i->i_cur;
 	struct klist_node *prev;
+	unsigned long flags;
 
-	spin_lock(&i->i_klist->k_lock);
+	spin_lock_irqsave(&i->i_klist->k_lock, flags);
 
 	if (last) {
 		prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct klist_iter *i)
 		prev = to_klist_node(prev->n_node.prev);
 	}
 
-	spin_unlock(&i->i_klist->k_lock);
+	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
 
 	if (put && last)
 		put(last);
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i)
 	void (*put)(struct klist_node *) = i->i_klist->put;
 	struct klist_node *last = i->i_cur;
 	struct klist_node *next;
+	unsigned long flags;
 
-	spin_lock(&i->i_klist->k_lock);
+	spin_lock_irqsave(&i->i_klist->k_lock, flags);
 
 	if (last) {
 		next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@ struct klist_node *klist_next(struct klist_iter *i)
 		next = to_klist_node(next->n_node.next);
 	}
 
-	spin_unlock(&i->i_klist->k_lock);
+	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
 
 	if (put && last)
 		put(last);
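For reference, the klist change above applies the standard irqsave locking
pattern: save the caller's interrupt state, disable local interrupts, take
the lock, and restore the saved state on unlock, so the critical section is
safe even when the caller already runs with interrupts disabled. A minimal
sketch (the lock and counter here are hypothetical, not from this patch):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
	static unsigned long example_count;	/* hypothetical shared state */

	static void example_update(void)
	{
		unsigned long flags;

		/* Saves the current interrupt state into @flags, then disables IRQs. */
		spin_lock_irqsave(&example_lock, flags);
		example_count++;
		/* Restores the saved state; does not blindly re-enable IRQs. */
		spin_unlock_irqrestore(&example_lock, flags);
	}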
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
deleted file mode 100644
index beb14839b41a..000000000000
--- a/lib/percpu_ida.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Percpu IDA library
- *
- * Copyright (C) 2013 Datera, Inc. Kent Overstreet
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <linux/mm.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/bug.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/percpu.h>
-#include <linux/sched/signal.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/percpu_ida.h>
-
-struct percpu_ida_cpu {
-	/*
-	 * Even though this is percpu, we need a lock for tag stealing by remote
-	 * CPUs:
-	 */
-	spinlock_t lock;
-
-	/* nr_free/freelist form a stack of free IDs */
-	unsigned nr_free;
-	unsigned freelist[];
-};
-
-static inline void move_tags(unsigned *dst, unsigned *dst_nr,
-			     unsigned *src, unsigned *src_nr,
-			     unsigned nr)
-{
-	*src_nr -= nr;
-	memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr);
-	*dst_nr += nr;
-}
-
-/*
- * Try to steal tags from a remote cpu's percpu freelist.
- *
- * We first check how many percpu freelists have tags
- *
- * Then we iterate through the cpus until we find some tags - we don't attempt
- * to find the "best" cpu to steal from, to keep cacheline bouncing to a
- * minimum.
- */
-static inline void steal_tags(struct percpu_ida *pool,
-			      struct percpu_ida_cpu *tags)
-{
-	unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
-	struct percpu_ida_cpu *remote;
-
-	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
-	     cpus_have_tags; cpus_have_tags--) {
-		cpu = cpumask_next(cpu, &pool->cpus_have_tags);
-
-		if (cpu >= nr_cpu_ids) {
-			cpu = cpumask_first(&pool->cpus_have_tags);
-			if (cpu >= nr_cpu_ids)
-				BUG();
-		}
-
-		pool->cpu_last_stolen = cpu;
-		remote = per_cpu_ptr(pool->tag_cpu, cpu);
-
-		cpumask_clear_cpu(cpu, &pool->cpus_have_tags);
-
-		if (remote == tags)
-			continue;
-
-		spin_lock(&remote->lock);
-
-		if (remote->nr_free) {
-			memcpy(tags->freelist,
-			       remote->freelist,
-			       sizeof(unsigned) * remote->nr_free);
-
-			tags->nr_free = remote->nr_free;
-			remote->nr_free = 0;
-		}
-
-		spin_unlock(&remote->lock);
-
-		if (tags->nr_free)
-			break;
-	}
-}
-
-/*
- * Pop up to IDA_PCPU_BATCH_MOVE IDs off the global freelist, and push them onto
- * our percpu freelist:
- */
-static inline void alloc_global_tags(struct percpu_ida *pool,
-				     struct percpu_ida_cpu *tags)
-{
-	move_tags(tags->freelist, &tags->nr_free,
-		  pool->freelist, &pool->nr_free,
-		  min(pool->nr_free, pool->percpu_batch_size));
-}
-
-/**
- * percpu_ida_alloc - allocate a tag
- * @pool: pool to allocate from
- * @state: task state for prepare_to_wait
- *
- * Returns a tag - an integer in the range [0..nr_tags) (passed to
- * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
- *
- * Safe to be called from interrupt context (assuming it isn't passed
- * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
- *
- * @gfp indicates whether or not to wait until a free id is available (it's not
- * used for internal memory allocations); thus if passed __GFP_RECLAIM we may sleep
- * however long it takes until another thread frees an id (same semantics as a
- * mempool).
- *
- * Will not fail if passed TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE.
- */
-int percpu_ida_alloc(struct percpu_ida *pool, int state)
-{
-	DEFINE_WAIT(wait);
-	struct percpu_ida_cpu *tags;
-	unsigned long flags;
-	int tag = -ENOSPC;
-
-	tags = raw_cpu_ptr(pool->tag_cpu);
-	spin_lock_irqsave(&tags->lock, flags);
-
-	/* Fastpath */
-	if (likely(tags->nr_free)) {
-		tag = tags->freelist[--tags->nr_free];
-		spin_unlock_irqrestore(&tags->lock, flags);
-		return tag;
-	}
-	spin_unlock_irqrestore(&tags->lock, flags);
-
-	while (1) {
-		spin_lock_irqsave(&pool->lock, flags);
-		tags = this_cpu_ptr(pool->tag_cpu);
-
-		/*
-		 * prepare_to_wait() must come before steal_tags(), in case
-		 * percpu_ida_free() on another cpu flips a bit in
-		 * cpus_have_tags
-		 *
-		 * global lock held and irqs disabled, don't need percpu lock
-		 */
-		if (state != TASK_RUNNING)
-			prepare_to_wait(&pool->wait, &wait, state);
-
-		if (!tags->nr_free)
-			alloc_global_tags(pool, tags);
-		if (!tags->nr_free)
-			steal_tags(pool, tags);
-
-		if (tags->nr_free) {
-			tag = tags->freelist[--tags->nr_free];
-			if (tags->nr_free)
-				cpumask_set_cpu(smp_processor_id(),
-						&pool->cpus_have_tags);
-		}
-
-		spin_unlock_irqrestore(&pool->lock, flags);
-
-		if (tag >= 0 || state == TASK_RUNNING)
-			break;
-
-		if (signal_pending_state(state, current)) {
-			tag = -ERESTARTSYS;
-			break;
-		}
-
-		schedule();
-	}
-	if (state != TASK_RUNNING)
-		finish_wait(&pool->wait, &wait);
-
-	return tag;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_alloc);
-
-/**
- * percpu_ida_free - free a tag
- * @pool: pool @tag was allocated from
- * @tag: a tag previously allocated with percpu_ida_alloc()
- *
- * Safe to be called from interrupt context.
- */
-void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
-{
-	struct percpu_ida_cpu *tags;
-	unsigned long flags;
-	unsigned nr_free;
-
-	BUG_ON(tag >= pool->nr_tags);
-
-	tags = raw_cpu_ptr(pool->tag_cpu);
-
-	spin_lock_irqsave(&tags->lock, flags);
-	tags->freelist[tags->nr_free++] = tag;
-
-	nr_free = tags->nr_free;
-
-	if (nr_free == 1) {
-		cpumask_set_cpu(smp_processor_id(),
-				&pool->cpus_have_tags);
-		wake_up(&pool->wait);
-	}
-	spin_unlock_irqrestore(&tags->lock, flags);
-
-	if (nr_free == pool->percpu_max_size) {
-		spin_lock_irqsave(&pool->lock, flags);
-		spin_lock(&tags->lock);
-
-		if (tags->nr_free == pool->percpu_max_size) {
-			move_tags(pool->freelist, &pool->nr_free,
-				  tags->freelist, &tags->nr_free,
-				  pool->percpu_batch_size);
-
-			wake_up(&pool->wait);
-		}
-		spin_unlock(&tags->lock);
-		spin_unlock_irqrestore(&pool->lock, flags);
-	}
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free);
-
-/**
- * percpu_ida_destroy - release a tag pool's resources
- * @pool: pool to free
- *
- * Frees the resources allocated by percpu_ida_init().
- */
-void percpu_ida_destroy(struct percpu_ida *pool)
-{
-	free_percpu(pool->tag_cpu);
-	free_pages((unsigned long) pool->freelist,
-		   get_order(pool->nr_tags * sizeof(unsigned)));
-}
-EXPORT_SYMBOL_GPL(percpu_ida_destroy);
-
-/**
- * percpu_ida_init - initialize a percpu tag pool
- * @pool: pool to initialize
- * @nr_tags: number of tags that will be available for allocation
- *
- * Initializes @pool so that it can be used to allocate tags - integers in the
- * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
- * preallocated array of tag structures.
- *
- * Allocation is percpu, but sharding is limited by nr_tags - for best
- * performance, the workload should not span more cpus than nr_tags / 128.
- */
-int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
-		      unsigned long max_size, unsigned long batch_size)
-{
-	unsigned i, cpu, order;
-
-	memset(pool, 0, sizeof(*pool));
-
-	init_waitqueue_head(&pool->wait);
-	spin_lock_init(&pool->lock);
-	pool->nr_tags = nr_tags;
-	pool->percpu_max_size = max_size;
-	pool->percpu_batch_size = batch_size;
-
-	/* Guard against overflow */
-	if (nr_tags > (unsigned) INT_MAX + 1) {
-		pr_err("percpu_ida_init(): nr_tags too large\n");
-		return -EINVAL;
-	}
-
-	order = get_order(nr_tags * sizeof(unsigned));
-	pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
-	if (!pool->freelist)
-		return -ENOMEM;
-
-	for (i = 0; i < nr_tags; i++)
-		pool->freelist[i] = i;
-
-	pool->nr_free = nr_tags;
-
-	pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
-				       pool->percpu_max_size * sizeof(unsigned),
-				       sizeof(unsigned));
-	if (!pool->tag_cpu)
-		goto err;
-
-	for_each_possible_cpu(cpu)
-		spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);
-
-	return 0;
-err:
-	percpu_ida_destroy(pool);
-	return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(__percpu_ida_init);
-
-/**
- * percpu_ida_for_each_free - iterate free ids of a pool
- * @pool: pool to iterate
- * @fn: interate callback function
- * @data: parameter for @fn
- *
- * Note, this doesn't guarantee to iterate all free ids restrictly. Some free
- * ids might be missed, some might be iterated duplicated, and some might
- * be iterated and not free soon.
- */
-int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
-			     void *data)
-{
-	unsigned long flags;
-	struct percpu_ida_cpu *remote;
-	unsigned cpu, i, err = 0;
-
-	for_each_possible_cpu(cpu) {
-		remote = per_cpu_ptr(pool->tag_cpu, cpu);
-		spin_lock_irqsave(&remote->lock, flags);
-		for (i = 0; i < remote->nr_free; i++) {
-			err = fn(remote->freelist[i], data);
-			if (err)
-				break;
-		}
-		spin_unlock_irqrestore(&remote->lock, flags);
-		if (err)
-			goto out;
-	}
-
-	spin_lock_irqsave(&pool->lock, flags);
-	for (i = 0; i < pool->nr_free; i++) {
-		err = fn(pool->freelist[i], data);
-		if (err)
-			break;
-	}
-	spin_unlock_irqrestore(&pool->lock, flags);
-out:
-	return err;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
-
-/**
- * percpu_ida_free_tags - return free tags number of a specific cpu or global pool
- * @pool: pool related
- * @cpu: specific cpu or global pool if @cpu == nr_cpu_ids
- *
- * Note: this just returns a snapshot of free tags number.
- */
-unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
-{
-	struct percpu_ida_cpu *remote;
-	if (cpu == nr_cpu_ids)
-		return pool->nr_free;
-	remote = per_cpu_ptr(pool->tag_cpu, cpu);
-	return remote->nr_free;
-}
-EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
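For context on what the deleted library provided: a consumer embedded a
struct percpu_ida, initialized it with a tag count, then allocated and freed
small integer tags from hot paths. A minimal usage sketch reconstructed from
the kerneldoc above; percpu_ida_init() is the convenience wrapper that the
matching linux/percpu_ida.h header provided around __percpu_ida_init(), so
treat the exact names outside this file as assumptions:

	#include <linux/percpu_ida.h>
	#include <linux/sched.h>

	static struct percpu_ida tag_pool;

	static int example_setup(void)
	{
		/* Make tags 0..127 available; returns 0 or -errno. */
		return percpu_ida_init(&tag_pool, 128);
	}

	static void example_io(void)
	{
		/* TASK_RUNNING: return -ENOSPC instead of sleeping. */
		int tag = percpu_ida_alloc(&tag_pool, TASK_RUNNING);

		if (tag < 0)
			return;	/* pool exhausted */

		/* ... use @tag to index a preallocated array of requests ... */

		percpu_ida_free(&tag_pool, tag);
	}

	static void example_teardown(void)
	{
		percpu_ida_destroy(&tag_pool);
	}

The remaining in-kernel users were converted in this series to sbitmap,
which provides equivalent scalable tag allocation.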