author     Jens Axboe <axboe@kernel.dk>    2018-12-18 10:29:53 -0500
committer  Jens Axboe <axboe@kernel.dk>    2018-12-18 10:29:53 -0500
commit     4b9254328254bed12a4ac449cdff2c332e630837 (patch)
tree       90ef63c168b0e63e6f07f8736f18faa8a544406f /lib
parent     1a9430db2835c0c00acc87d915b573496998c1bf (diff)
parent     cd19181bf9ad4b7f40f2a4e0355d052109c76529 (diff)
Merge branch 'for-4.21/block' into for-4.21/aio
* for-4.21/block: (351 commits)
blk-mq: enable IO poll if .nr_queues of type poll > 0
blk-mq: change blk_mq_queue_busy() to blk_mq_queue_inflight()
blk-mq: skip zero-queue maps in blk_mq_map_swqueue
block: fix blk-iolatency accounting underflow
blk-mq: fix dispatch from sw queue
block: mq-deadline: Fix write completion handling
nvme-pci: don't share queue maps
blk-mq: only dispatch to non-defauly queue maps if they have queues
blk-mq: export hctx->type in debugfs instead of sysfs
blk-mq: fix allocation for queue mapping table
blk-wbt: export internal state via debugfs
blk-mq-debugfs: support rq_qos
block: update sysfs documentation
block: loop: check error using IS_ERR instead of IS_ERR_OR_NULL in loop_add()
aoe: add __exit annotation
block: clear REQ_HIPRI if polling is not supported
blk-mq: replace and kill blk_mq_request_issue_directly
blk-mq: issue directly with bypass 'false' in blk_mq_sched_insert_requests
blk-mq: refactor the code of issue request directly
block: remove the bio_integrity_advance export
...
Diffstat (limited to 'lib')
-rw-r--r--   lib/iov_iter.c |  19
-rw-r--r--   lib/sbitmap.c  | 148
2 files changed, 149 insertions(+), 18 deletions(-)
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 54c248526b55..1928009f506e 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -6,6 +6,7 @@
 #include <linux/vmalloc.h>
 #include <linux/splice.h>
 #include <net/checksum.h>
+#include <linux/scatterlist.h>
 
 #define PIPE_PARANOIA /* for now */
 
@@ -1464,10 +1465,11 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
 }
 EXPORT_SYMBOL(csum_and_copy_from_iter_full);
 
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
 			     struct iov_iter *i)
 {
 	const char *from = addr;
+	__wsum *csum = csump;
 	__wsum sum, next;
 	size_t off = 0;
 
@@ -1510,6 +1512,21 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
 }
 EXPORT_SYMBOL(csum_and_copy_to_iter);
 
+size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
+			     struct iov_iter *i)
+{
+	struct ahash_request *hash = hashp;
+	struct scatterlist sg;
+	size_t copied;
+
+	copied = copy_to_iter(addr, bytes, i);
+	sg_init_one(&sg, addr, copied);
+	ahash_request_set_crypt(hash, &sg, NULL, copied);
+	crypto_ahash_update(hash);
+	return copied;
+}
+EXPORT_SYMBOL(hash_and_copy_to_iter);
+
 int iov_iter_npages(const struct iov_iter *i, int maxpages)
 {
 	size_t size = i->count;
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index fdd1b8aa8ac6..5b3e56d68dab 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -20,6 +20,47 @@
 #include <linux/sbitmap.h>
 #include <linux/seq_file.h>
 
+/*
+ * See if we have deferred clears that we can batch move
+ */
+static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
+{
+	unsigned long mask, val;
+	unsigned long __maybe_unused flags;
+	bool ret = false;
+
+	/* Silence bogus lockdep warning */
+#if defined(CONFIG_LOCKDEP)
+	local_irq_save(flags);
+#endif
+	spin_lock(&sb->map[index].swap_lock);
+
+	if (!sb->map[index].cleared)
+		goto out_unlock;
+
+	/*
+	 * First get a stable cleared mask, setting the old mask to 0.
+	 */
+	do {
+		mask = sb->map[index].cleared;
+	} while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);
+
+	/*
+	 * Now clear the masked bits in our free word
+	 */
+	do {
+		val = sb->map[index].word;
+	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);
+
+	ret = true;
+out_unlock:
+	spin_unlock(&sb->map[index].swap_lock);
+#if defined(CONFIG_LOCKDEP)
+	local_irq_restore(flags);
+#endif
+	return ret;
+}
+
 int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
 		      gfp_t flags, int node)
 {
@@ -59,6 +100,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
 	for (i = 0; i < sb->map_nr; i++) {
 		sb->map[i].depth = min(depth, bits_per_word);
 		depth -= sb->map[i].depth;
+		spin_lock_init(&sb->map[i].swap_lock);
 	}
 	return 0;
 }
@@ -69,6 +111,9 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
 	unsigned int bits_per_word = 1U << sb->shift;
 	unsigned int i;
 
+	for (i = 0; i < sb->map_nr; i++)
+		sbitmap_deferred_clear(sb, i);
+
 	sb->depth = depth;
 	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
 
@@ -111,6 +156,24 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
 	return nr;
 }
 
+static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
+				     unsigned int alloc_hint, bool round_robin)
+{
+	int nr;
+
+	do {
+		nr = __sbitmap_get_word(&sb->map[index].word,
+					sb->map[index].depth, alloc_hint,
+					!round_robin);
+		if (nr != -1)
+			break;
+		if (!sbitmap_deferred_clear(sb, index))
+			break;
+	} while (1);
+
+	return nr;
+}
+
 int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
 {
 	unsigned int i, index;
@@ -118,24 +181,28 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
 
 	index = SB_NR_TO_INDEX(sb, alloc_hint);
 
+	/*
+	 * Unless we're doing round robin tag allocation, just use the
+	 * alloc_hint to find the right word index. No point in looping
+	 * twice in find_next_zero_bit() for that case.
+	 */
+	if (round_robin)
+		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
+	else
+		alloc_hint = 0;
+
 	for (i = 0; i < sb->map_nr; i++) {
-		nr = __sbitmap_get_word(&sb->map[index].word,
-					sb->map[index].depth,
-					SB_NR_TO_BIT(sb, alloc_hint),
-					!round_robin);
+		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
+						round_robin);
 		if (nr != -1) {
 			nr += index << sb->shift;
 			break;
 		}
 
 		/* Jump to next index. */
-		index++;
-		alloc_hint = index << sb->shift;
-
-		if (index >= sb->map_nr) {
+		alloc_hint = 0;
+		if (++index >= sb->map_nr)
 			index = 0;
-			alloc_hint = 0;
-		}
 	}
 
 	return nr;
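
For reference, the hint arithmetic in the hunk above splits a flat bit number into a word index plus a bit offset inside that word, and nr += index << sb->shift maps a word-local hit back to a flat bit number. A rough sketch of the two macros involved (they are defined in include/linux/sbitmap.h, not in this diff, so treat this as illustrative rather than verbatim):

/* Which sb->map[] word a flat bit number falls in. */
#define SB_NR_TO_INDEX(sb, bitnr)	((bitnr) >> (sb)->shift)
/* The offset of that bit inside its word. */
#define SB_NR_TO_BIT(sb, bitnr)		((bitnr) & ((1U << (sb)->shift) - 1U))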
@@ -151,6 +218,7 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
 	index = SB_NR_TO_INDEX(sb, alloc_hint);
 
 	for (i = 0; i < sb->map_nr; i++) {
+again:
 		nr = __sbitmap_get_word(&sb->map[index].word,
 					min(sb->map[index].depth, shallow_depth),
 					SB_NR_TO_BIT(sb, alloc_hint), true);
@@ -159,6 +227,9 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
 			break;
 		}
 
+		if (sbitmap_deferred_clear(sb, index))
+			goto again;
+
 		/* Jump to next index. */
 		index++;
 		alloc_hint = index << sb->shift;
@@ -178,7 +249,7 @@ bool sbitmap_any_bit_set(const struct sbitmap *sb)
 	unsigned int i;
 
 	for (i = 0; i < sb->map_nr; i++) {
-		if (sb->map[i].word)
+		if (sb->map[i].word & ~sb->map[i].cleared)
 			return true;
 	}
 	return false;
@@ -191,9 +262,10 @@ bool sbitmap_any_bit_clear(const struct sbitmap *sb)
 
 	for (i = 0; i < sb->map_nr; i++) {
 		const struct sbitmap_word *word = &sb->map[i];
+		unsigned long mask = word->word & ~word->cleared;
 		unsigned long ret;
 
-		ret = find_first_zero_bit(&word->word, word->depth);
+		ret = find_first_zero_bit(&mask, word->depth);
 		if (ret < word->depth)
 			return true;
 	}
@@ -201,23 +273,36 @@ bool sbitmap_any_bit_clear(const struct sbitmap *sb)
 }
 EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
 
-unsigned int sbitmap_weight(const struct sbitmap *sb)
+static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
 {
 	unsigned int i, weight = 0;
 
 	for (i = 0; i < sb->map_nr; i++) {
 		const struct sbitmap_word *word = &sb->map[i];
 
-		weight += bitmap_weight(&word->word, word->depth);
+		if (set)
+			weight += bitmap_weight(&word->word, word->depth);
+		else
+			weight += bitmap_weight(&word->cleared, word->depth);
 	}
 	return weight;
 }
-EXPORT_SYMBOL_GPL(sbitmap_weight);
+
+static unsigned int sbitmap_weight(const struct sbitmap *sb)
+{
+	return __sbitmap_weight(sb, true);
+}
+
+static unsigned int sbitmap_cleared(const struct sbitmap *sb)
+{
+	return __sbitmap_weight(sb, false);
+}
 
 void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
 {
 	seq_printf(m, "depth=%u\n", sb->depth);
-	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
+	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
+	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
 	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
 	seq_printf(m, "map_nr=%u\n", sb->map_nr);
 }
@@ -325,6 +410,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
 	sbq->min_shallow_depth = UINT_MAX;
 	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
 	atomic_set(&sbq->wake_index, 0);
+	atomic_set(&sbq->ws_active, 0);
 
 	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
 	if (!sbq->ws) {
@@ -440,6 +526,9 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
 {
 	int i, wake_index;
 
+	if (!atomic_read(&sbq->ws_active))
+		return NULL;
+
 	wake_index = atomic_read(&sbq->wake_index);
 	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
 		struct sbq_wait_state *ws = &sbq->ws[wake_index];
@@ -509,7 +598,8 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
 			 unsigned int cpu)
 {
-	sbitmap_clear_bit_unlock(&sbq->sb, nr);
+	sbitmap_deferred_clear_bit(&sbq->sb, nr);
+
 	/*
 	 * Pairs with the memory barrier in set_current_state() to ensure the
 	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
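
Note that sbitmap_queue_clear() now only records the freed bit rather than clearing it in place. The helper it calls is defined in include/linux/sbitmap.h and therefore falls outside the 'lib' diffstat shown here; roughly (a sketch, not part of this diff), it sets the bit in the word's deferred "cleared" mask so that sbitmap_deferred_clear() above can fold a whole batch back into the free word later:

static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb,
					      unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	/* Record the free; the word itself is updated lazily, in batches. */
	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}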
@@ -564,6 +654,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
 
 	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
 	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
+	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));
 
 	seq_puts(m, "ws={\n");
 	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
@@ -579,3 +670,26 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
 	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_show);
+
+void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
+			     struct sbq_wait_state *ws,
+			     struct sbq_wait *sbq_wait, int state)
+{
+	if (!sbq_wait->accounted) {
+		atomic_inc(&sbq->ws_active);
+		sbq_wait->accounted = 1;
+	}
+	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
+}
+EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
+
+void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
+			 struct sbq_wait *sbq_wait)
+{
+	finish_wait(&ws->wait, &sbq_wait->wait);
+	if (sbq_wait->accounted) {
+		atomic_dec(&sbq->ws_active);
+		sbq_wait->accounted = 0;
+	}
+}
+EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
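
The two new wait helpers bracket a blocking allocation so that ws_active counts how many waiters are actually queued, which is what lets sbq_wake_ptr() above bail out early when nobody is sleeping. A rough sketch of the intended calling pattern, loosely modeled on a blk-mq-style tag allocator and not taken from this merge (alloc_bit_blocking and wait_index are made-up names):

#include <linux/sbitmap.h>
#include <linux/sched.h>
#include <linux/wait.h>

static int alloc_bit_blocking(struct sbitmap_queue *sbq, atomic_t *wait_index)
{
	struct sbq_wait_state *ws;
	struct sbq_wait wait = { };	/* .accounted starts out as 0 */
	int nr;

	nr = __sbitmap_queue_get(sbq);
	if (nr >= 0)
		return nr;

	init_wait(&wait.wait);
	ws = sbq_wait_ptr(sbq, wait_index);
	do {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);

		/* Retry after queueing so a concurrent free is not missed. */
		nr = __sbitmap_queue_get(sbq);
		if (nr >= 0)
			break;

		io_schedule();

		sbitmap_finish_wait(sbq, ws, &wait);
		ws = sbq_wait_ptr(sbq, wait_index);
	} while (1);

	sbitmap_finish_wait(sbq, ws, &wait);
	return nr;
}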