author     Jens Axboe <axboe@fb.com>   2014-05-28 11:50:26 -0400
committer  Jens Axboe <axboe@fb.com>   2014-05-28 11:50:26 -0400
commit     6178976500ae61fa7b12ebb2d3de816dc9396388 (patch)
tree       143df1479f56458801b676d038e6a7157a472981 /block
parent     6314a108ec19aefa5160535b2bfe1ca9c38efe37 (diff)
parent     d852564f8c88b0604490234fdeeb6fb47e4bcc7a (diff)

Merge branch 'for-3.16/core' into for-3.16/drivers

mtip32xx uses blk_mq_alloc_reserved_request(), so pull in the core changes
so we have a properly merged end result.

Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r--   block/Makefile            7
-rw-r--r--   block/bio-integrity.c   657
-rw-r--r--   block/bio.c            2038
-rw-r--r--   block/blk-core.c         65
-rw-r--r--   block/blk-flush.c        16
-rw-r--r--   block/blk-iopoll.c        4
-rw-r--r--   block/blk-lib.c           4
-rw-r--r--   block/blk-mq-cpu.c       12
-rw-r--r--   block/blk-mq-cpumap.c    16
-rw-r--r--   block/blk-mq-sysfs.c     50
-rw-r--r--   block/blk-mq-tag.c      541
-rw-r--r--   block/blk-mq-tag.h       62
-rw-r--r--   block/blk-mq.c          993
-rw-r--r--   block/blk-mq.h           18
-rw-r--r--   block/blk-sysfs.c        47
-rw-r--r--   block/blk-throttle.c     10
-rw-r--r--   block/blk-timeout.c      47
-rw-r--r--   block/blk.h               9
-rw-r--r--   block/bounce.c          287
-rw-r--r--   block/cfq-iosched.c       2
-rw-r--r--   block/ioprio.c          241
21 files changed, 4631 insertions, 495 deletions
diff --git a/block/Makefile b/block/Makefile
index 20645e88fb57..a2ce6ac935ec 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -2,13 +2,15 @@
 # Makefile for the kernel block layer
 #
 
-obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
+obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
 			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
 			blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \
 			blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
-			genhd.o scsi_ioctl.o partition-generic.o partitions/
+			genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
+			partitions/
 
+obj-$(CONFIG_BOUNCE)	+= bounce.o
 obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
 obj-$(CONFIG_BLK_DEV_BSGLIB)	+= bsg-lib.o
 obj-$(CONFIG_BLK_CGROUP)	+= blk-cgroup.o
@@ -20,3 +22,4 @@ obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
 obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)	+= blk-integrity.o
 obj-$(CONFIG_BLK_CMDLINE_PARSER)	+= cmdline-parser.o
+obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
new file mode 100644
index 000000000000..9e241063a616
--- /dev/null
+++ b/block/bio-integrity.c
@@ -0,0 +1,657 @@
1/*
2 * bio-integrity.c - bio data integrity extensions
3 *
4 * Copyright (C) 2007, 2008, 2009 Oracle Corporation
5 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
19 * USA.
20 *
21 */
22
23#include <linux/blkdev.h>
24#include <linux/mempool.h>
25#include <linux/export.h>
26#include <linux/bio.h>
27#include <linux/workqueue.h>
28#include <linux/slab.h>
29
30#define BIP_INLINE_VECS 4
31
32static struct kmem_cache *bip_slab;
33static struct workqueue_struct *kintegrityd_wq;
34
35/**
36 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
37 * @bio: bio to attach integrity metadata to
38 * @gfp_mask: Memory allocation mask
39 * @nr_vecs: Number of integrity metadata scatter-gather elements
40 *
41 * Description: This function prepares a bio for attaching integrity
42 * metadata. nr_vecs specifies the maximum number of pages containing
43 * integrity metadata that can be attached.
44 */
45struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
46 gfp_t gfp_mask,
47 unsigned int nr_vecs)
48{
49 struct bio_integrity_payload *bip;
50 struct bio_set *bs = bio->bi_pool;
51 unsigned long idx = BIO_POOL_NONE;
52 unsigned inline_vecs;
53
54 if (!bs) {
55 bip = kmalloc(sizeof(struct bio_integrity_payload) +
56 sizeof(struct bio_vec) * nr_vecs, gfp_mask);
57 inline_vecs = nr_vecs;
58 } else {
59 bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
60 inline_vecs = BIP_INLINE_VECS;
61 }
62
63 if (unlikely(!bip))
64 return NULL;
65
66 memset(bip, 0, sizeof(*bip));
67
68 if (nr_vecs > inline_vecs) {
69 bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,
70 bs->bvec_integrity_pool);
71 if (!bip->bip_vec)
72 goto err;
73 } else {
74 bip->bip_vec = bip->bip_inline_vecs;
75 }
76
77 bip->bip_slab = idx;
78 bip->bip_bio = bio;
79 bio->bi_integrity = bip;
80
81 return bip;
82err:
83 mempool_free(bip, bs->bio_integrity_pool);
84 return NULL;
85}
86EXPORT_SYMBOL(bio_integrity_alloc);
87
88/**
89 * bio_integrity_free - Free bio integrity payload
90 * @bio: bio containing bip to be freed
91 *
92 * Description: Used to free the integrity portion of a bio. Usually
93 * called from bio_free().
94 */
95void bio_integrity_free(struct bio *bio)
96{
97 struct bio_integrity_payload *bip = bio->bi_integrity;
98 struct bio_set *bs = bio->bi_pool;
99
100 if (bip->bip_owns_buf)
101 kfree(bip->bip_buf);
102
103 if (bs) {
104 if (bip->bip_slab != BIO_POOL_NONE)
105 bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
106 bip->bip_slab);
107
108 mempool_free(bip, bs->bio_integrity_pool);
109 } else {
110 kfree(bip);
111 }
112
113 bio->bi_integrity = NULL;
114}
115EXPORT_SYMBOL(bio_integrity_free);
116
117static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip)
118{
119 if (bip->bip_slab == BIO_POOL_NONE)
120 return BIP_INLINE_VECS;
121
122 return bvec_nr_vecs(bip->bip_slab);
123}
124
125/**
126 * bio_integrity_add_page - Attach integrity metadata
127 * @bio: bio to update
128 * @page: page containing integrity metadata
129 * @len: number of bytes of integrity metadata in page
130 * @offset: start offset within page
131 *
132 * Description: Attach a page containing integrity metadata to bio.
133 */
134int bio_integrity_add_page(struct bio *bio, struct page *page,
135 unsigned int len, unsigned int offset)
136{
137 struct bio_integrity_payload *bip = bio->bi_integrity;
138 struct bio_vec *iv;
139
140 if (bip->bip_vcnt >= bip_integrity_vecs(bip)) {
141 printk(KERN_ERR "%s: bip_vec full\n", __func__);
142 return 0;
143 }
144
145 iv = bip->bip_vec + bip->bip_vcnt;
146
147 iv->bv_page = page;
148 iv->bv_len = len;
149 iv->bv_offset = offset;
150 bip->bip_vcnt++;
151
152 return len;
153}
154EXPORT_SYMBOL(bio_integrity_add_page);
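
/*
 * Illustrative sketch (not part of this patch): how a caller that manages
 * its own protection buffer might use the two helpers above. The function
 * name and the assumption that @prot_page holds one page of pre-generated
 * protection data are hypothetical; the <linux/bio.h> context above is
 * assumed to be included.
 */
static int example_attach_integrity(struct bio *bio, struct page *prot_page,
				    unsigned int prot_len)
{
	struct bio_integrity_payload *bip;

	/* Room for a single integrity vector is enough for one page. */
	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (!bip)
		return -ENOMEM;

	/* bio_integrity_add_page() returns the number of bytes accepted. */
	if (bio_integrity_add_page(bio, prot_page, prot_len, 0) < prot_len)
		return -EIO;

	return 0;
}
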
155
156static int bdev_integrity_enabled(struct block_device *bdev, int rw)
157{
158 struct blk_integrity *bi = bdev_get_integrity(bdev);
159
160 if (bi == NULL)
161 return 0;
162
163 if (rw == READ && bi->verify_fn != NULL &&
164 (bi->flags & INTEGRITY_FLAG_READ))
165 return 1;
166
167 if (rw == WRITE && bi->generate_fn != NULL &&
168 (bi->flags & INTEGRITY_FLAG_WRITE))
169 return 1;
170
171 return 0;
172}
173
174/**
175 * bio_integrity_enabled - Check whether integrity can be passed
176 * @bio: bio to check
177 *
178 * Description: Determines whether bio_integrity_prep() can be called
179 * on this bio or not. bio data direction and target device must be
180 * set prior to calling. The function honors the write_generate and
181 * read_verify flags in sysfs.
182 */
183int bio_integrity_enabled(struct bio *bio)
184{
185 if (!bio_is_rw(bio))
186 return 0;
187
188 /* Already protected? */
189 if (bio_integrity(bio))
190 return 0;
191
192 return bdev_integrity_enabled(bio->bi_bdev, bio_data_dir(bio));
193}
194EXPORT_SYMBOL(bio_integrity_enabled);
195
196/**
197 * bio_integrity_hw_sectors - Convert 512b sectors to hardware ditto
198 * @bi: blk_integrity profile for device
199 * @sectors: Number of 512 sectors to convert
200 *
201 * Description: The block layer calculates everything in 512 byte
202 * sectors but integrity metadata is done in terms of the hardware
203 * sector size of the storage device. Convert the block layer sectors
204 * to physical sectors.
205 */
206static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi,
207 unsigned int sectors)
208{
209 /* At this point there are only 512b or 4096b DIF/EPP devices */
210 if (bi->sector_size == 4096)
211 return sectors >>= 3;
212
213 return sectors;
214}
215
216static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
217 unsigned int sectors)
218{
219 return bio_integrity_hw_sectors(bi, sectors) * bi->tuple_size;
220}
221
222/**
223 * bio_integrity_tag_size - Retrieve integrity tag space
224 * @bio: bio to inspect
225 *
226 * Description: Returns the maximum number of tag bytes that can be
227 * attached to this bio. Filesystems can use this to determine how
228 * much metadata to attach to an I/O.
229 */
230unsigned int bio_integrity_tag_size(struct bio *bio)
231{
232 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
233
234 BUG_ON(bio->bi_iter.bi_size == 0);
235
236 return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size);
237}
238EXPORT_SYMBOL(bio_integrity_tag_size);
239
240static int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len,
241 int set)
242{
243 struct bio_integrity_payload *bip = bio->bi_integrity;
244 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
245 unsigned int nr_sectors;
246
247 BUG_ON(bip->bip_buf == NULL);
248
249 if (bi->tag_size == 0)
250 return -1;
251
252 nr_sectors = bio_integrity_hw_sectors(bi,
253 DIV_ROUND_UP(len, bi->tag_size));
254
255 if (nr_sectors * bi->tuple_size > bip->bip_iter.bi_size) {
256 printk(KERN_ERR "%s: tag too big for bio: %u > %u\n", __func__,
257 nr_sectors * bi->tuple_size, bip->bip_iter.bi_size);
258 return -1;
259 }
260
261 if (set)
262 bi->set_tag_fn(bip->bip_buf, tag_buf, nr_sectors);
263 else
264 bi->get_tag_fn(bip->bip_buf, tag_buf, nr_sectors);
265
266 return 0;
267}
268
269/**
270 * bio_integrity_set_tag - Attach a tag buffer to a bio
271 * @bio: bio to attach buffer to
272 * @tag_buf: Pointer to a buffer containing tag data
273 * @len: Length of the included buffer
274 *
275 * Description: Use this function to tag a bio by leveraging the extra
276 * space provided by devices formatted with integrity protection. The
277 * size of the integrity buffer must be <= the size reported by
278 * bio_integrity_tag_size().
279 */
280int bio_integrity_set_tag(struct bio *bio, void *tag_buf, unsigned int len)
281{
282 BUG_ON(bio_data_dir(bio) != WRITE);
283
284 return bio_integrity_tag(bio, tag_buf, len, 1);
285}
286EXPORT_SYMBOL(bio_integrity_set_tag);
287
288/**
289 * bio_integrity_get_tag - Retrieve a tag buffer from a bio
290 * @bio: bio to retrieve buffer from
291 * @tag_buf: Pointer to a buffer for the tag data
292 * @len: Length of the target buffer
293 *
294 * Description: Use this function to retrieve the tag buffer from a
295 * completed I/O. The size of the integrity buffer must be <= the
296 * size reported by bio_integrity_tag_size().
297 */
298int bio_integrity_get_tag(struct bio *bio, void *tag_buf, unsigned int len)
299{
300 BUG_ON(bio_data_dir(bio) != READ);
301
302 return bio_integrity_tag(bio, tag_buf, len, 0);
303}
304EXPORT_SYMBOL(bio_integrity_get_tag);
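
/*
 * Illustrative sketch (not part of this patch): tagging a WRITE bio with
 * caller metadata and reading the tag back from a completed READ. The tag
 * buffer and the wrapper names are hypothetical; the size check mirrors the
 * bio_integrity_tag_size() contract documented above.
 */
static int example_tag_write(struct bio *bio, void *tag, unsigned int len)
{
	if (len > bio_integrity_tag_size(bio))
		return -ENOSPC;

	return bio_integrity_set_tag(bio, tag, len);
}

static int example_tag_read_completion(struct bio *bio, void *tag,
				       unsigned int len)
{
	if (len > bio_integrity_tag_size(bio))
		return -ENOSPC;

	return bio_integrity_get_tag(bio, tag, len);
}
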
305
306/**
307 * bio_integrity_generate_verify - Generate/verify integrity metadata for a bio
308 * @bio: bio to generate/verify integrity metadata for
309 * @operate: operation to perform, 1 to generate, 0 to verify
310 */
311static int bio_integrity_generate_verify(struct bio *bio, int operate)
312{
313 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
314 struct blk_integrity_exchg bix;
315 struct bio_vec *bv;
316 sector_t sector;
317 unsigned int sectors, ret = 0, i;
318 void *prot_buf = bio->bi_integrity->bip_buf;
319
320 if (operate)
321 sector = bio->bi_iter.bi_sector;
322 else
323 sector = bio->bi_integrity->bip_iter.bi_sector;
324
325 bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
326 bix.sector_size = bi->sector_size;
327
328 bio_for_each_segment_all(bv, bio, i) {
329 void *kaddr = kmap_atomic(bv->bv_page);
330 bix.data_buf = kaddr + bv->bv_offset;
331 bix.data_size = bv->bv_len;
332 bix.prot_buf = prot_buf;
333 bix.sector = sector;
334
335 if (operate)
336 bi->generate_fn(&bix);
337 else {
338 ret = bi->verify_fn(&bix);
339 if (ret) {
340 kunmap_atomic(kaddr);
341 return ret;
342 }
343 }
344
345 sectors = bv->bv_len / bi->sector_size;
346 sector += sectors;
347 prot_buf += sectors * bi->tuple_size;
348
349 kunmap_atomic(kaddr);
350 }
351 return ret;
352}
353
354/**
355 * bio_integrity_generate - Generate integrity metadata for a bio
356 * @bio: bio to generate integrity metadata for
357 *
358 * Description: Generates integrity metadata for a bio by calling the
359 * block device's generation callback function. The bio must have a
360 * bip attached with enough room to accommodate the generated
361 * integrity metadata.
362 */
363static void bio_integrity_generate(struct bio *bio)
364{
365 bio_integrity_generate_verify(bio, 1);
366}
367
368static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
369{
370 if (bi)
371 return bi->tuple_size;
372
373 return 0;
374}
375
376/**
377 * bio_integrity_prep - Prepare bio for integrity I/O
378 * @bio: bio to prepare
379 *
380 * Description: Allocates a buffer for integrity metadata, maps the
381 * pages and attaches them to a bio. The bio must have data
382 * direction, target device and start sector set prior to calling. In
383 * the WRITE case, integrity metadata will be generated using the
384 * block device's integrity function. In the READ case, the buffer
385 * will be prepared for DMA and a suitable end_io handler set up.
386 */
387int bio_integrity_prep(struct bio *bio)
388{
389 struct bio_integrity_payload *bip;
390 struct blk_integrity *bi;
391 struct request_queue *q;
392 void *buf;
393 unsigned long start, end;
394 unsigned int len, nr_pages;
395 unsigned int bytes, offset, i;
396 unsigned int sectors;
397
398 bi = bdev_get_integrity(bio->bi_bdev);
399 q = bdev_get_queue(bio->bi_bdev);
400 BUG_ON(bi == NULL);
401 BUG_ON(bio_integrity(bio));
402
403 sectors = bio_integrity_hw_sectors(bi, bio_sectors(bio));
404
405 /* Allocate kernel buffer for protection data */
406 len = sectors * blk_integrity_tuple_size(bi);
407 buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
408 if (unlikely(buf == NULL)) {
409 printk(KERN_ERR "could not allocate integrity buffer\n");
410 return -ENOMEM;
411 }
412
413 end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
414 start = ((unsigned long) buf) >> PAGE_SHIFT;
415 nr_pages = end - start;
416
417 /* Allocate bio integrity payload and integrity vectors */
418 bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
419 if (unlikely(bip == NULL)) {
420 printk(KERN_ERR "could not allocate data integrity bioset\n");
421 kfree(buf);
422 return -EIO;
423 }
424
425 bip->bip_owns_buf = 1;
426 bip->bip_buf = buf;
427 bip->bip_iter.bi_size = len;
428 bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
429
430 /* Map it */
431 offset = offset_in_page(buf);
432 for (i = 0 ; i < nr_pages ; i++) {
433 int ret;
434 bytes = PAGE_SIZE - offset;
435
436 if (len <= 0)
437 break;
438
439 if (bytes > len)
440 bytes = len;
441
442 ret = bio_integrity_add_page(bio, virt_to_page(buf),
443 bytes, offset);
444
445 if (ret == 0)
446 return 0;
447
448 if (ret < bytes)
449 break;
450
451 buf += bytes;
452 len -= bytes;
453 offset = 0;
454 }
455
456 /* Install custom I/O completion handler if read verify is enabled */
457 if (bio_data_dir(bio) == READ) {
458 bip->bip_end_io = bio->bi_end_io;
459 bio->bi_end_io = bio_integrity_endio;
460 }
461
462 /* Auto-generate integrity metadata if this is a write */
463 if (bio_data_dir(bio) == WRITE)
464 bio_integrity_generate(bio);
465
466 return 0;
467}
468EXPORT_SYMBOL(bio_integrity_prep);
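
/*
 * Illustrative sketch (not part of this patch): the submission-side pairing
 * of bio_integrity_enabled() and bio_integrity_prep(), roughly as a block
 * layer caller would use it before queueing the bio. The wrapper name is
 * hypothetical.
 */
static int example_prep_for_submit(struct bio *bio)
{
	/* Only rw bios to integrity-capable devices need a payload. */
	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		/* Payload allocation failed; fail the bio. */
		bio_endio(bio, -EIO);
		return -EIO;
	}
	return 0;
}
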
469
470/**
471 * bio_integrity_verify - Verify integrity metadata for a bio
472 * @bio: bio to verify
473 *
474 * Description: This function is called to verify the integrity of a
475 * bio. The data in the bio io_vec is compared to the integrity
476 * metadata returned by the HBA.
477 */
478static int bio_integrity_verify(struct bio *bio)
479{
480 return bio_integrity_generate_verify(bio, 0);
481}
482
483/**
484 * bio_integrity_verify_fn - Integrity I/O completion worker
485 * @work: Work struct stored in bio to be verified
486 *
487 * Description: This workqueue function is called to complete a READ
488 * request. The function verifies the transferred integrity metadata
489 * and then calls the original bio end_io function.
490 */
491static void bio_integrity_verify_fn(struct work_struct *work)
492{
493 struct bio_integrity_payload *bip =
494 container_of(work, struct bio_integrity_payload, bip_work);
495 struct bio *bio = bip->bip_bio;
496 int error;
497
498 error = bio_integrity_verify(bio);
499
500 /* Restore original bio completion handler */
501 bio->bi_end_io = bip->bip_end_io;
502 bio_endio_nodec(bio, error);
503}
504
505/**
506 * bio_integrity_endio - Integrity I/O completion function
507 * @bio: Protected bio
508 * @error: Pointer to errno
509 *
510 * Description: Completion for integrity I/O
511 *
512 * Normally I/O completion is done in interrupt context. However,
513 * verifying I/O integrity is a time-consuming task which must be run
514 * in process context. This function postpones completion
515 * accordingly.
516 */
517void bio_integrity_endio(struct bio *bio, int error)
518{
519 struct bio_integrity_payload *bip = bio->bi_integrity;
520
521 BUG_ON(bip->bip_bio != bio);
522
523 /* In case of an I/O error there is no point in verifying the
524 * integrity metadata. Restore original bio end_io handler
525 * and run it.
526 */
527 if (error) {
528 bio->bi_end_io = bip->bip_end_io;
529 bio_endio(bio, error);
530
531 return;
532 }
533
534 INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
535 queue_work(kintegrityd_wq, &bip->bip_work);
536}
537EXPORT_SYMBOL(bio_integrity_endio);
538
539/**
540 * bio_integrity_advance - Advance integrity vector
541 * @bio: bio whose integrity vector to update
542 * @bytes_done: number of data bytes that have been completed
543 *
544 * Description: This function calculates how many integrity bytes the
545 * number of completed data bytes correspond to and advances the
546 * integrity vector accordingly.
547 */
548void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
549{
550 struct bio_integrity_payload *bip = bio->bi_integrity;
551 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
552 unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
553
554 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
555}
556EXPORT_SYMBOL(bio_integrity_advance);
557
558/**
559 * bio_integrity_trim - Trim integrity vector
560 * @bio: bio whose integrity vector to update
561 * @offset: offset to first data sector
562 * @sectors: number of data sectors
563 *
564 * Description: Used to trim the integrity vector in a cloned bio.
565 * The ivec will be advanced corresponding to 'offset' data sectors
566 * and the length will be truncated corresponding to 'len' data
567 * sectors.
568 */
569void bio_integrity_trim(struct bio *bio, unsigned int offset,
570 unsigned int sectors)
571{
572 struct bio_integrity_payload *bip = bio->bi_integrity;
573 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
574
575 bio_integrity_advance(bio, offset << 9);
576 bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors);
577}
578EXPORT_SYMBOL(bio_integrity_trim);
579
580/**
581 * bio_integrity_clone - Callback for cloning bios with integrity metadata
582 * @bio: New bio
583 * @bio_src: Original bio
584 * @gfp_mask: Memory allocation mask
585 *
586 * Description: Called to allocate a bip when cloning a bio
587 */
588int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
589 gfp_t gfp_mask)
590{
591 struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
592 struct bio_integrity_payload *bip;
593
594 BUG_ON(bip_src == NULL);
595
596 bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
597
598 if (bip == NULL)
599 return -EIO;
600
601 memcpy(bip->bip_vec, bip_src->bip_vec,
602 bip_src->bip_vcnt * sizeof(struct bio_vec));
603
604 bip->bip_vcnt = bip_src->bip_vcnt;
605 bip->bip_iter = bip_src->bip_iter;
606
607 return 0;
608}
609EXPORT_SYMBOL(bio_integrity_clone);
610
611int bioset_integrity_create(struct bio_set *bs, int pool_size)
612{
613 if (bs->bio_integrity_pool)
614 return 0;
615
616 bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab);
617 if (!bs->bio_integrity_pool)
618 return -1;
619
620 bs->bvec_integrity_pool = biovec_create_pool(pool_size);
621 if (!bs->bvec_integrity_pool) {
622 mempool_destroy(bs->bio_integrity_pool);
623 return -1;
624 }
625
626 return 0;
627}
628EXPORT_SYMBOL(bioset_integrity_create);
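
/*
 * Illustrative sketch (not part of this patch): a driver pairing a private
 * bio_set with integrity pools. The pool size of 64 and the front_pad of 0
 * are example values, not recommendations.
 */
static struct bio_set *example_create_bioset(void)
{
	struct bio_set *bs;

	bs = bioset_create(64, 0);
	if (!bs)
		return NULL;

	if (bioset_integrity_create(bs, 64)) {
		bioset_free(bs);
		return NULL;
	}
	return bs;
}
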
629
630void bioset_integrity_free(struct bio_set *bs)
631{
632 if (bs->bio_integrity_pool)
633 mempool_destroy(bs->bio_integrity_pool);
634
635 if (bs->bvec_integrity_pool)
636 mempool_destroy(bs->bvec_integrity_pool);
637}
638EXPORT_SYMBOL(bioset_integrity_free);
639
640void __init bio_integrity_init(void)
641{
642 /*
643 * kintegrityd won't block much but may burn a lot of CPU cycles.
644 * Make it highpri CPU intensive wq with max concurrency of 1.
645 */
646 kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
647 WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
648 if (!kintegrityd_wq)
649 panic("Failed to create kintegrityd\n");
650
651 bip_slab = kmem_cache_create("bio_integrity_payload",
652 sizeof(struct bio_integrity_payload) +
653 sizeof(struct bio_vec) * BIP_INLINE_VECS,
654 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
655 if (!bip_slab)
656 panic("Failed to create slab\n");
657}
diff --git a/block/bio.c b/block/bio.c
new file mode 100644
index 000000000000..96d28eee8a1e
--- /dev/null
+++ b/block/bio.c
@@ -0,0 +1,2038 @@
1/*
2 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
16 *
17 */
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/bio.h>
21#include <linux/blkdev.h>
22#include <linux/uio.h>
23#include <linux/iocontext.h>
24#include <linux/slab.h>
25#include <linux/init.h>
26#include <linux/kernel.h>
27#include <linux/export.h>
28#include <linux/mempool.h>
29#include <linux/workqueue.h>
30#include <linux/cgroup.h>
31#include <scsi/sg.h> /* for struct sg_iovec */
32
33#include <trace/events/block.h>
34
35/*
36 * Test patch to inline a certain number of bi_io_vec's inside the bio
37 * itself, to shrink a bio data allocation from two mempool calls to one
38 */
39#define BIO_INLINE_VECS 4
40
41/*
42 * if you change this list, also change bvec_alloc or things will
43 * break badly! cannot be bigger than what you can fit into an
44 * unsigned short
45 */
46#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
47static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
48 BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
49};
50#undef BV
51
52/*
53 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
54 * IO code that does not need private memory pools.
55 */
56struct bio_set *fs_bio_set;
57EXPORT_SYMBOL(fs_bio_set);
58
59/*
60 * Our slab pool management
61 */
62struct bio_slab {
63 struct kmem_cache *slab;
64 unsigned int slab_ref;
65 unsigned int slab_size;
66 char name[8];
67};
68static DEFINE_MUTEX(bio_slab_lock);
69static struct bio_slab *bio_slabs;
70static unsigned int bio_slab_nr, bio_slab_max;
71
72static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
73{
74 unsigned int sz = sizeof(struct bio) + extra_size;
75 struct kmem_cache *slab = NULL;
76 struct bio_slab *bslab, *new_bio_slabs;
77 unsigned int new_bio_slab_max;
78 unsigned int i, entry = -1;
79
80 mutex_lock(&bio_slab_lock);
81
82 i = 0;
83 while (i < bio_slab_nr) {
84 bslab = &bio_slabs[i];
85
86 if (!bslab->slab && entry == -1)
87 entry = i;
88 else if (bslab->slab_size == sz) {
89 slab = bslab->slab;
90 bslab->slab_ref++;
91 break;
92 }
93 i++;
94 }
95
96 if (slab)
97 goto out_unlock;
98
99 if (bio_slab_nr == bio_slab_max && entry == -1) {
100 new_bio_slab_max = bio_slab_max << 1;
101 new_bio_slabs = krealloc(bio_slabs,
102 new_bio_slab_max * sizeof(struct bio_slab),
103 GFP_KERNEL);
104 if (!new_bio_slabs)
105 goto out_unlock;
106 bio_slab_max = new_bio_slab_max;
107 bio_slabs = new_bio_slabs;
108 }
109 if (entry == -1)
110 entry = bio_slab_nr++;
111
112 bslab = &bio_slabs[entry];
113
114 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
115 slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
116 if (!slab)
117 goto out_unlock;
118
119 bslab->slab = slab;
120 bslab->slab_ref = 1;
121 bslab->slab_size = sz;
122out_unlock:
123 mutex_unlock(&bio_slab_lock);
124 return slab;
125}
126
127static void bio_put_slab(struct bio_set *bs)
128{
129 struct bio_slab *bslab = NULL;
130 unsigned int i;
131
132 mutex_lock(&bio_slab_lock);
133
134 for (i = 0; i < bio_slab_nr; i++) {
135 if (bs->bio_slab == bio_slabs[i].slab) {
136 bslab = &bio_slabs[i];
137 break;
138 }
139 }
140
141 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
142 goto out;
143
144 WARN_ON(!bslab->slab_ref);
145
146 if (--bslab->slab_ref)
147 goto out;
148
149 kmem_cache_destroy(bslab->slab);
150 bslab->slab = NULL;
151
152out:
153 mutex_unlock(&bio_slab_lock);
154}
155
156unsigned int bvec_nr_vecs(unsigned short idx)
157{
158 return bvec_slabs[idx].nr_vecs;
159}
160
161void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
162{
163 BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
164
165 if (idx == BIOVEC_MAX_IDX)
166 mempool_free(bv, pool);
167 else {
168 struct biovec_slab *bvs = bvec_slabs + idx;
169
170 kmem_cache_free(bvs->slab, bv);
171 }
172}
173
174struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
175 mempool_t *pool)
176{
177 struct bio_vec *bvl;
178
179 /*
180 * see comment near bvec_array define!
181 */
182 switch (nr) {
183 case 1:
184 *idx = 0;
185 break;
186 case 2 ... 4:
187 *idx = 1;
188 break;
189 case 5 ... 16:
190 *idx = 2;
191 break;
192 case 17 ... 64:
193 *idx = 3;
194 break;
195 case 65 ... 128:
196 *idx = 4;
197 break;
198 case 129 ... BIO_MAX_PAGES:
199 *idx = 5;
200 break;
201 default:
202 return NULL;
203 }
204
205 /*
206 * idx now points to the pool we want to allocate from. only the
207 * 1-vec entry pool is mempool backed.
208 */
209 if (*idx == BIOVEC_MAX_IDX) {
210fallback:
211 bvl = mempool_alloc(pool, gfp_mask);
212 } else {
213 struct biovec_slab *bvs = bvec_slabs + *idx;
214 gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
215
216 /*
217 * Make this allocation restricted and don't dump info on
218 * allocation failures, since we'll fallback to the mempool
219 * in case of failure.
220 */
221 __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
222
223 /*
224 * Try a slab allocation. If this fails and __GFP_WAIT
225 * is set, retry with the 1-entry mempool
226 */
227 bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
228 if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
229 *idx = BIOVEC_MAX_IDX;
230 goto fallback;
231 }
232 }
233
234 return bvl;
235}
236
237static void __bio_free(struct bio *bio)
238{
239 bio_disassociate_task(bio);
240
241 if (bio_integrity(bio))
242 bio_integrity_free(bio);
243}
244
245static void bio_free(struct bio *bio)
246{
247 struct bio_set *bs = bio->bi_pool;
248 void *p;
249
250 __bio_free(bio);
251
252 if (bs) {
253 if (bio_flagged(bio, BIO_OWNS_VEC))
254 bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));
255
256 /*
257 * If we have front padding, adjust the bio pointer before freeing
258 */
259 p = bio;
260 p -= bs->front_pad;
261
262 mempool_free(p, bs->bio_pool);
263 } else {
264 /* Bio was allocated by bio_kmalloc() */
265 kfree(bio);
266 }
267}
268
269void bio_init(struct bio *bio)
270{
271 memset(bio, 0, sizeof(*bio));
272 bio->bi_flags = 1 << BIO_UPTODATE;
273 atomic_set(&bio->bi_remaining, 1);
274 atomic_set(&bio->bi_cnt, 1);
275}
276EXPORT_SYMBOL(bio_init);
277
278/**
279 * bio_reset - reinitialize a bio
280 * @bio: bio to reset
281 *
282 * Description:
283 * After calling bio_reset(), @bio will be in the same state as a freshly
284 * allocated bio returned by bio_alloc_bioset() - the only fields that are
285 * preserved are the ones that are initialized by bio_alloc_bioset(). See
286 * comment in struct bio.
287 */
288void bio_reset(struct bio *bio)
289{
290 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
291
292 __bio_free(bio);
293
294 memset(bio, 0, BIO_RESET_BYTES);
295 bio->bi_flags = flags|(1 << BIO_UPTODATE);
296 atomic_set(&bio->bi_remaining, 1);
297}
298EXPORT_SYMBOL(bio_reset);
299
300static void bio_chain_endio(struct bio *bio, int error)
301{
302 bio_endio(bio->bi_private, error);
303 bio_put(bio);
304}
305
306/**
307 * bio_chain - chain bio completions
308 * @bio: the target bio
309 * @parent: the @bio's parent bio
310 *
311 * The caller won't have a bi_end_io called when @bio completes - instead,
312 * @parent's bi_end_io won't be called until both @parent and @bio have
313 * completed; the chained bio will also be freed when it completes.
314 *
315 * The caller must not set bi_private or bi_end_io in @bio.
316 */
317void bio_chain(struct bio *bio, struct bio *parent)
318{
319 BUG_ON(bio->bi_private || bio->bi_end_io);
320
321 bio->bi_private = parent;
322 bio->bi_end_io = bio_chain_endio;
323 atomic_inc(&parent->bi_remaining);
324}
325EXPORT_SYMBOL(bio_chain);
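
/*
 * Illustrative sketch (not part of this patch): issuing an extra child bio
 * whose completion is folded into @parent via bio_chain(). The child's
 * device/sector setup is elided and the function name is hypothetical.
 */
static void example_issue_chained(struct bio *parent, struct bio *child, int rw)
{
	/* Must be called before child's bi_end_io/bi_private are used. */
	bio_chain(child, parent);
	submit_bio(rw, child);
	/* @parent's bi_end_io now runs only after both bios complete. */
}
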
326
327static void bio_alloc_rescue(struct work_struct *work)
328{
329 struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
330 struct bio *bio;
331
332 while (1) {
333 spin_lock(&bs->rescue_lock);
334 bio = bio_list_pop(&bs->rescue_list);
335 spin_unlock(&bs->rescue_lock);
336
337 if (!bio)
338 break;
339
340 generic_make_request(bio);
341 }
342}
343
344static void punt_bios_to_rescuer(struct bio_set *bs)
345{
346 struct bio_list punt, nopunt;
347 struct bio *bio;
348
349 /*
350 * In order to guarantee forward progress we must punt only bios that
351 * were allocated from this bio_set; otherwise, if there was a bio on
352 * there for a stacking driver higher up in the stack, processing it
353 * could require allocating bios from this bio_set, and doing that from
354 * our own rescuer would be bad.
355 *
356 * Since bio lists are singly linked, pop them all instead of trying to
357 * remove from the middle of the list:
358 */
359
360 bio_list_init(&punt);
361 bio_list_init(&nopunt);
362
363 while ((bio = bio_list_pop(current->bio_list)))
364 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
365
366 *current->bio_list = nopunt;
367
368 spin_lock(&bs->rescue_lock);
369 bio_list_merge(&bs->rescue_list, &punt);
370 spin_unlock(&bs->rescue_lock);
371
372 queue_work(bs->rescue_workqueue, &bs->rescue_work);
373}
374
375/**
376 * bio_alloc_bioset - allocate a bio for I/O
377 * @gfp_mask: the GFP_ mask given to the slab allocator
378 * @nr_iovecs: number of iovecs to pre-allocate
379 * @bs: the bio_set to allocate from.
380 *
381 * Description:
382 * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
383 * backed by the @bs's mempool.
384 *
385 * When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
386 * able to allocate a bio. This is due to the mempool guarantees. To make this
387 * work, callers must never allocate more than 1 bio at a time from this pool.
388 * Callers that need to allocate more than 1 bio must always submit the
389 * previously allocated bio for IO before attempting to allocate a new one.
390 * Failure to do so can cause deadlocks under memory pressure.
391 *
392 * Note that when running under generic_make_request() (i.e. any block
393 * driver), bios are not submitted until after you return - see the code in
394 * generic_make_request() that converts recursion into iteration, to prevent
395 * stack overflows.
396 *
397 * This would normally mean allocating multiple bios under
398 * generic_make_request() would be susceptible to deadlocks, but we have
399 * deadlock avoidance code that resubmits any blocked bios from a rescuer
400 * thread.
401 *
402 * However, we do not guarantee forward progress for allocations from other
403 * mempools. Doing multiple allocations from the same mempool under
404 * generic_make_request() should be avoided - instead, use bio_set's front_pad
405 * for per bio allocations.
406 *
407 * RETURNS:
408 * Pointer to new bio on success, NULL on failure.
409 */
410struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
411{
412 gfp_t saved_gfp = gfp_mask;
413 unsigned front_pad;
414 unsigned inline_vecs;
415 unsigned long idx = BIO_POOL_NONE;
416 struct bio_vec *bvl = NULL;
417 struct bio *bio;
418 void *p;
419
420 if (!bs) {
421 if (nr_iovecs > UIO_MAXIOV)
422 return NULL;
423
424 p = kmalloc(sizeof(struct bio) +
425 nr_iovecs * sizeof(struct bio_vec),
426 gfp_mask);
427 front_pad = 0;
428 inline_vecs = nr_iovecs;
429 } else {
430 /*
431 * generic_make_request() converts recursion to iteration; this
432 * means if we're running beneath it, any bios we allocate and
433 * submit will not be submitted (and thus freed) until after we
434 * return.
435 *
436 * This exposes us to a potential deadlock if we allocate
437 * multiple bios from the same bio_set() while running
438 * underneath generic_make_request(). If we were to allocate
439 * multiple bios (say a stacking block driver that was splitting
440 * bios), we would deadlock if we exhausted the mempool's
441 * reserve.
442 *
443 * We solve this, and guarantee forward progress, with a rescuer
444 * workqueue per bio_set. If we go to allocate and there are
445 * bios on current->bio_list, we first try the allocation
446 * without __GFP_WAIT; if that fails, we punt those bios we
447 * would be blocking to the rescuer workqueue before we retry
448 * with the original gfp_flags.
449 */
450
451 if (current->bio_list && !bio_list_empty(current->bio_list))
452 gfp_mask &= ~__GFP_WAIT;
453
454 p = mempool_alloc(bs->bio_pool, gfp_mask);
455 if (!p && gfp_mask != saved_gfp) {
456 punt_bios_to_rescuer(bs);
457 gfp_mask = saved_gfp;
458 p = mempool_alloc(bs->bio_pool, gfp_mask);
459 }
460
461 front_pad = bs->front_pad;
462 inline_vecs = BIO_INLINE_VECS;
463 }
464
465 if (unlikely(!p))
466 return NULL;
467
468 bio = p + front_pad;
469 bio_init(bio);
470
471 if (nr_iovecs > inline_vecs) {
472 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
473 if (!bvl && gfp_mask != saved_gfp) {
474 punt_bios_to_rescuer(bs);
475 gfp_mask = saved_gfp;
476 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
477 }
478
479 if (unlikely(!bvl))
480 goto err_free;
481
482 bio->bi_flags |= 1 << BIO_OWNS_VEC;
483 } else if (nr_iovecs) {
484 bvl = bio->bi_inline_vecs;
485 }
486
487 bio->bi_pool = bs;
488 bio->bi_flags |= idx << BIO_POOL_OFFSET;
489 bio->bi_max_vecs = nr_iovecs;
490 bio->bi_io_vec = bvl;
491 return bio;
492
493err_free:
494 mempool_free(p, bs->bio_pool);
495 return NULL;
496}
497EXPORT_SYMBOL(bio_alloc_bioset);
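
/*
 * Illustrative sketch (not part of this patch): respecting the mempool rule
 * documented above - submit each bio allocated from @bs before allocating
 * the next one from the same bio_set, so the __GFP_WAIT guarantee holds.
 * Names and the single-page payload are hypothetical.
 */
static void example_alloc_and_submit(struct bio_set *bs,
				     struct block_device *bdev,
				     sector_t sector, struct page *page)
{
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
	if (!bio)
		return;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	/* Submit before any further allocation from the same bio_set. */
	submit_bio(WRITE, bio);
}
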
498
499void zero_fill_bio(struct bio *bio)
500{
501 unsigned long flags;
502 struct bio_vec bv;
503 struct bvec_iter iter;
504
505 bio_for_each_segment(bv, bio, iter) {
506 char *data = bvec_kmap_irq(&bv, &flags);
507 memset(data, 0, bv.bv_len);
508 flush_dcache_page(bv.bv_page);
509 bvec_kunmap_irq(data, &flags);
510 }
511}
512EXPORT_SYMBOL(zero_fill_bio);
513
514/**
515 * bio_put - release a reference to a bio
516 * @bio: bio to release reference to
517 *
518 * Description:
519 * Put a reference to a &struct bio, either one you have gotten with
520 * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
521 **/
522void bio_put(struct bio *bio)
523{
524 BIO_BUG_ON(!atomic_read(&bio->bi_cnt));
525
526 /*
527 * last put frees it
528 */
529 if (atomic_dec_and_test(&bio->bi_cnt))
530 bio_free(bio);
531}
532EXPORT_SYMBOL(bio_put);
533
534inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
535{
536 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
537 blk_recount_segments(q, bio);
538
539 return bio->bi_phys_segments;
540}
541EXPORT_SYMBOL(bio_phys_segments);
542
543/**
544 * __bio_clone_fast - clone a bio that shares the original bio's biovec
545 * @bio: destination bio
546 * @bio_src: bio to clone
547 *
548 * Clone a &bio. Caller will own the returned bio, but not
549 * the actual data it points to. Reference count of returned
550 * bio will be one.
551 *
552 * Caller must ensure that @bio_src is not freed before @bio.
553 */
554void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
555{
556 BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);
557
558 /*
559 * most users will be overriding ->bi_bdev with a new target,
560 * so we don't set nor calculate new physical/hw segment counts here
561 */
562 bio->bi_bdev = bio_src->bi_bdev;
563 bio->bi_flags |= 1 << BIO_CLONED;
564 bio->bi_rw = bio_src->bi_rw;
565 bio->bi_iter = bio_src->bi_iter;
566 bio->bi_io_vec = bio_src->bi_io_vec;
567}
568EXPORT_SYMBOL(__bio_clone_fast);
569
570/**
571 * bio_clone_fast - clone a bio that shares the original bio's biovec
572 * @bio: bio to clone
573 * @gfp_mask: allocation priority
574 * @bs: bio_set to allocate from
575 *
576 * Like __bio_clone_fast, only also allocates the returned bio
577 */
578struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
579{
580 struct bio *b;
581
582 b = bio_alloc_bioset(gfp_mask, 0, bs);
583 if (!b)
584 return NULL;
585
586 __bio_clone_fast(b, bio);
587
588 if (bio_integrity(bio)) {
589 int ret;
590
591 ret = bio_integrity_clone(b, bio, gfp_mask);
592
593 if (ret < 0) {
594 bio_put(b);
595 return NULL;
596 }
597 }
598
599 return b;
600}
601EXPORT_SYMBOL(bio_clone_fast);
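
/*
 * Illustrative sketch (not part of this patch): the typical stacking-driver
 * use of bio_clone_fast() - clone, retarget, and remap the start sector
 * before resubmitting. The remapping arithmetic and names are hypothetical.
 */
static struct bio *example_remap_clone(struct bio *bio, struct bio_set *bs,
				       struct block_device *target,
				       sector_t offset)
{
	struct bio *clone;

	clone = bio_clone_fast(bio, GFP_NOIO, bs);
	if (!clone)
		return NULL;

	clone->bi_bdev = target;
	clone->bi_iter.bi_sector += offset;
	return clone;
}
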
602
603/**
604 * bio_clone_bioset - clone a bio
605 * @bio_src: bio to clone
606 * @gfp_mask: allocation priority
607 * @bs: bio_set to allocate from
608 *
609 * Clone bio. Caller will own the returned bio, but not the actual data it
610 * points to. Reference count of returned bio will be one.
611 */
612struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
613 struct bio_set *bs)
614{
615 struct bvec_iter iter;
616 struct bio_vec bv;
617 struct bio *bio;
618
619 /*
620 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
621 * bio_src->bi_io_vec to bio->bi_io_vec.
622 *
623 * We can't do that anymore, because:
624 *
625 * - The point of cloning the biovec is to produce a bio with a biovec
626 * the caller can modify: bi_idx and bi_bvec_done should be 0.
627 *
628 * - The original bio could've had more than BIO_MAX_PAGES biovecs; if
629 * we tried to clone the whole thing bio_alloc_bioset() would fail.
630 * But the clone should succeed as long as the number of biovecs we
631 * actually need to allocate is fewer than BIO_MAX_PAGES.
632 *
633 * - Lastly, bi_vcnt should not be looked at or relied upon by code
634 * that does not own the bio - reason being drivers don't use it for
635 * iterating over the biovec anymore, so expecting it to be kept up
636 * to date (i.e. for clones that share the parent biovec) is just
637 * asking for trouble and would force extra work on
638 * __bio_clone_fast() anyways.
639 */
640
641 bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
642 if (!bio)
643 return NULL;
644
645 bio->bi_bdev = bio_src->bi_bdev;
646 bio->bi_rw = bio_src->bi_rw;
647 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
648 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
649
650 if (bio->bi_rw & REQ_DISCARD)
651 goto integrity_clone;
652
653 if (bio->bi_rw & REQ_WRITE_SAME) {
654 bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
655 goto integrity_clone;
656 }
657
658 bio_for_each_segment(bv, bio_src, iter)
659 bio->bi_io_vec[bio->bi_vcnt++] = bv;
660
661integrity_clone:
662 if (bio_integrity(bio_src)) {
663 int ret;
664
665 ret = bio_integrity_clone(bio, bio_src, gfp_mask);
666 if (ret < 0) {
667 bio_put(bio);
668 return NULL;
669 }
670 }
671
672 return bio;
673}
674EXPORT_SYMBOL(bio_clone_bioset);
675
676/**
677 * bio_get_nr_vecs - return approx number of vecs
678 * @bdev: I/O target
679 *
680 * Return the approximate number of pages we can send to this target.
681 * There's no guarantee that you will be able to fit this number of pages
682 * into a bio, it does not account for dynamic restrictions that vary
683 * on offset.
684 */
685int bio_get_nr_vecs(struct block_device *bdev)
686{
687 struct request_queue *q = bdev_get_queue(bdev);
688 int nr_pages;
689
690 nr_pages = min_t(unsigned,
691 queue_max_segments(q),
692 queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
693
694 return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
695
696}
697EXPORT_SYMBOL(bio_get_nr_vecs);
698
699static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
700 *page, unsigned int len, unsigned int offset,
701 unsigned int max_sectors)
702{
703 int retried_segments = 0;
704 struct bio_vec *bvec;
705
706 /*
707 * cloned bio must not modify vec list
708 */
709 if (unlikely(bio_flagged(bio, BIO_CLONED)))
710 return 0;
711
712 if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
713 return 0;
714
715 /*
716 * For filesystems with a blocksize smaller than the pagesize
717 * we will often be called with the same page as last time and
718 * a consecutive offset. Optimize this special case.
719 */
720 if (bio->bi_vcnt > 0) {
721 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
722
723 if (page == prev->bv_page &&
724 offset == prev->bv_offset + prev->bv_len) {
725 unsigned int prev_bv_len = prev->bv_len;
726 prev->bv_len += len;
727
728 if (q->merge_bvec_fn) {
729 struct bvec_merge_data bvm = {
730 /* prev_bvec is already charged in
731 bi_size, discharge it in order to
732 simulate merging updated prev_bvec
733 as new bvec. */
734 .bi_bdev = bio->bi_bdev,
735 .bi_sector = bio->bi_iter.bi_sector,
736 .bi_size = bio->bi_iter.bi_size -
737 prev_bv_len,
738 .bi_rw = bio->bi_rw,
739 };
740
741 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
742 prev->bv_len -= len;
743 return 0;
744 }
745 }
746
747 goto done;
748 }
749 }
750
751 if (bio->bi_vcnt >= bio->bi_max_vecs)
752 return 0;
753
754 /*
755 * we might lose a segment or two here, but rather that than
756 * make this too complex.
757 */
758
759 while (bio->bi_phys_segments >= queue_max_segments(q)) {
760
761 if (retried_segments)
762 return 0;
763
764 retried_segments = 1;
765 blk_recount_segments(q, bio);
766 }
767
768 /*
769 * setup the new entry, we might clear it again later if we
770 * cannot add the page
771 */
772 bvec = &bio->bi_io_vec[bio->bi_vcnt];
773 bvec->bv_page = page;
774 bvec->bv_len = len;
775 bvec->bv_offset = offset;
776
777 /*
778 * if queue has other restrictions (eg varying max sector size
779 * depending on offset), it can specify a merge_bvec_fn in the
780 * queue to get further control
781 */
782 if (q->merge_bvec_fn) {
783 struct bvec_merge_data bvm = {
784 .bi_bdev = bio->bi_bdev,
785 .bi_sector = bio->bi_iter.bi_sector,
786 .bi_size = bio->bi_iter.bi_size,
787 .bi_rw = bio->bi_rw,
788 };
789
790 /*
791 * merge_bvec_fn() returns number of bytes it can accept
792 * at this offset
793 */
794 if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
795 bvec->bv_page = NULL;
796 bvec->bv_len = 0;
797 bvec->bv_offset = 0;
798 return 0;
799 }
800 }
801
802 /* If we may be able to merge these biovecs, force a recount */
803 if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
804 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
805
806 bio->bi_vcnt++;
807 bio->bi_phys_segments++;
808 done:
809 bio->bi_iter.bi_size += len;
810 return len;
811}
812
813/**
814 * bio_add_pc_page - attempt to add page to bio
815 * @q: the target queue
816 * @bio: destination bio
817 * @page: page to add
818 * @len: vec entry length
819 * @offset: vec entry offset
820 *
821 * Attempt to add a page to the bio_vec maplist. This can fail for a
822 * number of reasons, such as the bio being full or target block device
823 * limitations. The target block device must allow bio's up to PAGE_SIZE,
824 * so it is always possible to add a single page to an empty bio.
825 *
826 * This should only be used by REQ_PC bios.
827 */
828int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
829 unsigned int len, unsigned int offset)
830{
831 return __bio_add_page(q, bio, page, len, offset,
832 queue_max_hw_sectors(q));
833}
834EXPORT_SYMBOL(bio_add_pc_page);
835
836/**
837 * bio_add_page - attempt to add page to bio
838 * @bio: destination bio
839 * @page: page to add
840 * @len: vec entry length
841 * @offset: vec entry offset
842 *
843 * Attempt to add a page to the bio_vec maplist. This can fail for a
844 * number of reasons, such as the bio being full or target block device
845 * limitations. The target block device must allow bio's up to PAGE_SIZE,
846 * so it is always possible to add a single page to an empty bio.
847 */
848int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
849 unsigned int offset)
850{
851 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
852 return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
853}
854EXPORT_SYMBOL(bio_add_page);
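
/*
 * Illustrative sketch (not part of this patch): filling a bio page by page
 * and stopping on a short bio_add_page() return, which signals that the bio
 * or the device limits are full. @pages/@nr are hypothetical caller state.
 */
static int example_fill_bio(struct bio *bio, struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
			break;

	return i;	/* number of pages actually added */
}
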
855
856struct submit_bio_ret {
857 struct completion event;
858 int error;
859};
860
861static void submit_bio_wait_endio(struct bio *bio, int error)
862{
863 struct submit_bio_ret *ret = bio->bi_private;
864
865 ret->error = error;
866 complete(&ret->event);
867}
868
869/**
870 * submit_bio_wait - submit a bio, and wait until it completes
871 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
872 * @bio: The &struct bio which describes the I/O
873 *
874 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
875 * bio_endio() on failure.
876 */
877int submit_bio_wait(int rw, struct bio *bio)
878{
879 struct submit_bio_ret ret;
880
881 rw |= REQ_SYNC;
882 init_completion(&ret.event);
883 bio->bi_private = &ret;
884 bio->bi_end_io = submit_bio_wait_endio;
885 submit_bio(rw, bio);
886 wait_for_completion(&ret.event);
887
888 return ret.error;
889}
890EXPORT_SYMBOL(submit_bio_wait);
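
/*
 * Illustrative sketch (not part of this patch): a synchronous single-page
 * read using submit_bio_wait(); the caller sleeps until the I/O completes
 * and gets the bio_endio() error code back. The helper name is hypothetical.
 */
static int example_sync_read_page(struct block_device *bdev, sector_t sector,
				  struct page *page)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(READ, bio);
	bio_put(bio);
	return ret;
}
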
891
892/**
893 * bio_advance - increment/complete a bio by some number of bytes
894 * @bio: bio to advance
895 * @bytes: number of bytes to complete
896 *
897 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
898 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
899 * be updated on the last bvec as well.
900 *
901 * @bio will then represent the remaining, uncompleted portion of the io.
902 */
903void bio_advance(struct bio *bio, unsigned bytes)
904{
905 if (bio_integrity(bio))
906 bio_integrity_advance(bio, bytes);
907
908 bio_advance_iter(bio, &bio->bi_iter, bytes);
909}
910EXPORT_SYMBOL(bio_advance);
911
912/**
913 * bio_alloc_pages - allocates a single page for each bvec in a bio
914 * @bio: bio to allocate pages for
915 * @gfp_mask: flags for allocation
916 *
917 * Allocates pages up to @bio->bi_vcnt.
918 *
919 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
920 * freed.
921 */
922int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
923{
924 int i;
925 struct bio_vec *bv;
926
927 bio_for_each_segment_all(bv, bio, i) {
928 bv->bv_page = alloc_page(gfp_mask);
929 if (!bv->bv_page) {
930 while (--bv >= bio->bi_io_vec)
931 __free_page(bv->bv_page);
932 return -ENOMEM;
933 }
934 }
935
936 return 0;
937}
938EXPORT_SYMBOL(bio_alloc_pages);
939
940/**
941 * bio_copy_data - copy contents of data buffers from one chain of bios to
942 * another
943 * @src: source bio list
944 * @dst: destination bio list
945 *
946 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
947 * @src and @dst as linked lists of bios.
948 *
949 * Stops when it reaches the end of either @src or @dst - that is, copies
950 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
951 */
952void bio_copy_data(struct bio *dst, struct bio *src)
953{
954 struct bvec_iter src_iter, dst_iter;
955 struct bio_vec src_bv, dst_bv;
956 void *src_p, *dst_p;
957 unsigned bytes;
958
959 src_iter = src->bi_iter;
960 dst_iter = dst->bi_iter;
961
962 while (1) {
963 if (!src_iter.bi_size) {
964 src = src->bi_next;
965 if (!src)
966 break;
967
968 src_iter = src->bi_iter;
969 }
970
971 if (!dst_iter.bi_size) {
972 dst = dst->bi_next;
973 if (!dst)
974 break;
975
976 dst_iter = dst->bi_iter;
977 }
978
979 src_bv = bio_iter_iovec(src, src_iter);
980 dst_bv = bio_iter_iovec(dst, dst_iter);
981
982 bytes = min(src_bv.bv_len, dst_bv.bv_len);
983
984 src_p = kmap_atomic(src_bv.bv_page);
985 dst_p = kmap_atomic(dst_bv.bv_page);
986
987 memcpy(dst_p + dst_bv.bv_offset,
988 src_p + src_bv.bv_offset,
989 bytes);
990
991 kunmap_atomic(dst_p);
992 kunmap_atomic(src_p);
993
994 bio_advance_iter(src, &src_iter, bytes);
995 bio_advance_iter(dst, &dst_iter, bytes);
996 }
997}
998EXPORT_SYMBOL(bio_copy_data);
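
/*
 * Illustrative sketch (not part of this patch): a bounce-style use of
 * bio_alloc_pages() and bio_copy_data() - build a private copy of a WRITE
 * bio's payload before handing it to another device. The helper name and
 * the choice of bio_set are hypothetical.
 */
static struct bio *example_bounce_write(struct bio *orig, struct bio_set *bs)
{
	struct bio *copy;

	copy = bio_clone_bioset(orig, GFP_NOIO, bs);
	if (!copy)
		return NULL;

	/* Give the clone its own pages instead of sharing the originals. */
	if (bio_alloc_pages(copy, GFP_NOIO)) {
		bio_put(copy);
		return NULL;
	}

	/* Copy the payload out of the original into the private pages. */
	bio_copy_data(copy, orig);
	return copy;
}
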
999
1000struct bio_map_data {
1001 int nr_sgvecs;
1002 int is_our_pages;
1003 struct sg_iovec sgvecs[];
1004};
1005
1006static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
1007 const struct sg_iovec *iov, int iov_count,
1008 int is_our_pages)
1009{
1010 memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
1011 bmd->nr_sgvecs = iov_count;
1012 bmd->is_our_pages = is_our_pages;
1013 bio->bi_private = bmd;
1014}
1015
1016static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
1017 gfp_t gfp_mask)
1018{
1019 if (iov_count > UIO_MAXIOV)
1020 return NULL;
1021
1022 return kmalloc(sizeof(struct bio_map_data) +
1023 sizeof(struct sg_iovec) * iov_count, gfp_mask);
1024}
1025
1026static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count,
1027 int to_user, int from_user, int do_free_page)
1028{
1029 int ret = 0, i;
1030 struct bio_vec *bvec;
1031 int iov_idx = 0;
1032 unsigned int iov_off = 0;
1033
1034 bio_for_each_segment_all(bvec, bio, i) {
1035 char *bv_addr = page_address(bvec->bv_page);
1036 unsigned int bv_len = bvec->bv_len;
1037
1038 while (bv_len && iov_idx < iov_count) {
1039 unsigned int bytes;
1040 char __user *iov_addr;
1041
1042 bytes = min_t(unsigned int,
1043 iov[iov_idx].iov_len - iov_off, bv_len);
1044 iov_addr = iov[iov_idx].iov_base + iov_off;
1045
1046 if (!ret) {
1047 if (to_user)
1048 ret = copy_to_user(iov_addr, bv_addr,
1049 bytes);
1050
1051 if (from_user)
1052 ret = copy_from_user(bv_addr, iov_addr,
1053 bytes);
1054
1055 if (ret)
1056 ret = -EFAULT;
1057 }
1058
1059 bv_len -= bytes;
1060 bv_addr += bytes;
1061 iov_addr += bytes;
1062 iov_off += bytes;
1063
1064 if (iov[iov_idx].iov_len == iov_off) {
1065 iov_idx++;
1066 iov_off = 0;
1067 }
1068 }
1069
1070 if (do_free_page)
1071 __free_page(bvec->bv_page);
1072 }
1073
1074 return ret;
1075}
1076
1077/**
1078 * bio_uncopy_user - finish previously mapped bio
1079 * @bio: bio being terminated
1080 *
1081 * Free pages allocated from bio_copy_user() and write back data
1082 * to user space in case of a read.
1083 */
1084int bio_uncopy_user(struct bio *bio)
1085{
1086 struct bio_map_data *bmd = bio->bi_private;
1087 struct bio_vec *bvec;
1088 int ret = 0, i;
1089
1090 if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1091 /*
1092 * if we're in a workqueue, the request is orphaned, so
1093 * don't copy into a random user address space, just free.
1094 */
1095 if (current->mm)
1096 ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs,
1097 bio_data_dir(bio) == READ,
1098 0, bmd->is_our_pages);
1099 else if (bmd->is_our_pages)
1100 bio_for_each_segment_all(bvec, bio, i)
1101 __free_page(bvec->bv_page);
1102 }
1103 kfree(bmd);
1104 bio_put(bio);
1105 return ret;
1106}
1107EXPORT_SYMBOL(bio_uncopy_user);
1108
1109/**
1110 * bio_copy_user_iov - copy user data to bio
1111 * @q: destination block queue
1112 * @map_data: pointer to the rq_map_data holding pages (if necessary)
1113 * @iov: the iovec.
1114 * @iov_count: number of elements in the iovec
1115 * @write_to_vm: bool indicating writing to pages or not
1116 * @gfp_mask: memory allocation flags
1117 *
1118 * Prepares and returns a bio for indirect user io, bouncing data
1119 * to/from kernel pages as necessary. Must be paired with
1120 * call bio_uncopy_user() on io completion.
1121 */
1122struct bio *bio_copy_user_iov(struct request_queue *q,
1123 struct rq_map_data *map_data,
1124 const struct sg_iovec *iov, int iov_count,
1125 int write_to_vm, gfp_t gfp_mask)
1126{
1127 struct bio_map_data *bmd;
1128 struct bio_vec *bvec;
1129 struct page *page;
1130 struct bio *bio;
1131 int i, ret;
1132 int nr_pages = 0;
1133 unsigned int len = 0;
1134 unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
1135
1136 for (i = 0; i < iov_count; i++) {
1137 unsigned long uaddr;
1138 unsigned long end;
1139 unsigned long start;
1140
1141 uaddr = (unsigned long)iov[i].iov_base;
1142 end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1143 start = uaddr >> PAGE_SHIFT;
1144
1145 /*
1146 * Overflow, abort
1147 */
1148 if (end < start)
1149 return ERR_PTR(-EINVAL);
1150
1151 nr_pages += end - start;
1152 len += iov[i].iov_len;
1153 }
1154
1155 if (offset)
1156 nr_pages++;
1157
1158 bmd = bio_alloc_map_data(iov_count, gfp_mask);
1159 if (!bmd)
1160 return ERR_PTR(-ENOMEM);
1161
1162 ret = -ENOMEM;
1163 bio = bio_kmalloc(gfp_mask, nr_pages);
1164 if (!bio)
1165 goto out_bmd;
1166
1167 if (!write_to_vm)
1168 bio->bi_rw |= REQ_WRITE;
1169
1170 ret = 0;
1171
1172 if (map_data) {
1173 nr_pages = 1 << map_data->page_order;
1174 i = map_data->offset / PAGE_SIZE;
1175 }
1176 while (len) {
1177 unsigned int bytes = PAGE_SIZE;
1178
1179 bytes -= offset;
1180
1181 if (bytes > len)
1182 bytes = len;
1183
1184 if (map_data) {
1185 if (i == map_data->nr_entries * nr_pages) {
1186 ret = -ENOMEM;
1187 break;
1188 }
1189
1190 page = map_data->pages[i / nr_pages];
1191 page += (i % nr_pages);
1192
1193 i++;
1194 } else {
1195 page = alloc_page(q->bounce_gfp | gfp_mask);
1196 if (!page) {
1197 ret = -ENOMEM;
1198 break;
1199 }
1200 }
1201
1202 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1203 break;
1204
1205 len -= bytes;
1206 offset = 0;
1207 }
1208
1209 if (ret)
1210 goto cleanup;
1211
1212 /*
1213 * success
1214 */
1215 if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
1216 (map_data && map_data->from_user)) {
1217 ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0);
1218 if (ret)
1219 goto cleanup;
1220 }
1221
1222 bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
1223 return bio;
1224cleanup:
1225 if (!map_data)
1226 bio_for_each_segment_all(bvec, bio, i)
1227 __free_page(bvec->bv_page);
1228
1229 bio_put(bio);
1230out_bmd:
1231 kfree(bmd);
1232 return ERR_PTR(ret);
1233}
1234
1235/**
1236 * bio_copy_user - copy user data to bio
1237 * @q: destination block queue
1238 * @map_data: pointer to the rq_map_data holding pages (if necessary)
1239 * @uaddr: start of user address
1240 * @len: length in bytes
1241 * @write_to_vm: bool indicating writing to pages or not
1242 * @gfp_mask: memory allocation flags
1243 *
1244 * Prepares and returns a bio for indirect user io, bouncing data
1245 * to/from kernel pages as necessary. Must be paired with
1246 * a call to bio_uncopy_user() on io completion.
1247 */
1248struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
1249 unsigned long uaddr, unsigned int len,
1250 int write_to_vm, gfp_t gfp_mask)
1251{
1252 struct sg_iovec iov;
1253
1254 iov.iov_base = (void __user *)uaddr;
1255 iov.iov_len = len;
1256
1257 return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
1258}
1259EXPORT_SYMBOL(bio_copy_user);
1260
1261static struct bio *__bio_map_user_iov(struct request_queue *q,
1262 struct block_device *bdev,
1263 const struct sg_iovec *iov, int iov_count,
1264 int write_to_vm, gfp_t gfp_mask)
1265{
1266 int i, j;
1267 int nr_pages = 0;
1268 struct page **pages;
1269 struct bio *bio;
1270 int cur_page = 0;
1271 int ret, offset;
1272
1273 for (i = 0; i < iov_count; i++) {
1274 unsigned long uaddr = (unsigned long)iov[i].iov_base;
1275 unsigned long len = iov[i].iov_len;
1276 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1277 unsigned long start = uaddr >> PAGE_SHIFT;
1278
1279 /*
1280 * Overflow, abort
1281 */
1282 if (end < start)
1283 return ERR_PTR(-EINVAL);
1284
1285 nr_pages += end - start;
1286 /*
1287 * buffer must be aligned to at least hardsector size for now
1288 */
1289 if (uaddr & queue_dma_alignment(q))
1290 return ERR_PTR(-EINVAL);
1291 }
1292
1293 if (!nr_pages)
1294 return ERR_PTR(-EINVAL);
1295
1296 bio = bio_kmalloc(gfp_mask, nr_pages);
1297 if (!bio)
1298 return ERR_PTR(-ENOMEM);
1299
1300 ret = -ENOMEM;
1301 pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
1302 if (!pages)
1303 goto out;
1304
1305 for (i = 0; i < iov_count; i++) {
1306 unsigned long uaddr = (unsigned long)iov[i].iov_base;
1307 unsigned long len = iov[i].iov_len;
1308 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1309 unsigned long start = uaddr >> PAGE_SHIFT;
1310 const int local_nr_pages = end - start;
1311 const int page_limit = cur_page + local_nr_pages;
1312
1313 ret = get_user_pages_fast(uaddr, local_nr_pages,
1314 write_to_vm, &pages[cur_page]);
1315 if (ret < local_nr_pages) {
1316 ret = -EFAULT;
1317 goto out_unmap;
1318 }
1319
1320 offset = uaddr & ~PAGE_MASK;
1321 for (j = cur_page; j < page_limit; j++) {
1322 unsigned int bytes = PAGE_SIZE - offset;
1323
1324 if (len <= 0)
1325 break;
1326
1327 if (bytes > len)
1328 bytes = len;
1329
1330 /*
1331 * sorry...
1332 */
1333 if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
1334 bytes)
1335 break;
1336
1337 len -= bytes;
1338 offset = 0;
1339 }
1340
1341 cur_page = j;
1342 /*
1343 * release the pages we didn't map into the bio, if any
1344 */
1345 while (j < page_limit)
1346 page_cache_release(pages[j++]);
1347 }
1348
1349 kfree(pages);
1350
1351 /*
1352 * set data direction, and check if mapped pages need bouncing
1353 */
1354 if (!write_to_vm)
1355 bio->bi_rw |= REQ_WRITE;
1356
1357 bio->bi_bdev = bdev;
1358 bio->bi_flags |= (1 << BIO_USER_MAPPED);
1359 return bio;
1360
1361 out_unmap:
1362 for (i = 0; i < nr_pages; i++) {
1363 if(!pages[i])
1364 break;
1365 page_cache_release(pages[i]);
1366 }
1367 out:
1368 kfree(pages);
1369 bio_put(bio);
1370 return ERR_PTR(ret);
1371}
1372
1373/**
1374 * bio_map_user - map user address into bio
1375 * @q: the struct request_queue for the bio
1376 * @bdev: destination block device
1377 * @uaddr: start of user address
1378 * @len: length in bytes
1379 * @write_to_vm: bool indicating writing to pages or not
1380 * @gfp_mask: memory allocation flags
1381 *
1382 * Map the user space address into a bio suitable for io to a block
1383 * device. Returns an error pointer in case of error.
1384 */
1385struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
1386 unsigned long uaddr, unsigned int len, int write_to_vm,
1387 gfp_t gfp_mask)
1388{
1389 struct sg_iovec iov;
1390
1391 iov.iov_base = (void __user *)uaddr;
1392 iov.iov_len = len;
1393
1394 return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
1395}
1396EXPORT_SYMBOL(bio_map_user);
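For context, a minimal sketch (not part of this patch) of how a caller might use bio_map_user() with the 3.16-era interfaces above. The function name and the synchronous submit are illustrative only, and the user buffer is assumed to already satisfy queue_dma_alignment():

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

/* Hypothetical zero-copy read into a user buffer. */
static int example_read_user(struct request_queue *q, struct block_device *bdev,
			     unsigned long uaddr, unsigned int len,
			     sector_t sector)
{
	struct bio *bio;
	int ret;

	/* write_to_vm == 1: the device writes into the user pages */
	bio = bio_map_user(q, bdev, uaddr, len, 1, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = sector;
	ret = submit_bio_wait(READ, bio);	/* bio_map_user() installs no end_io of its own */
	bio_unmap_user(bio);			/* re-dirties (for reads) and releases the pinned pages */
	return ret;
}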
1397
1398/**
1399 * bio_map_user_iov - map user sg_iovec table into bio
1400 * @q: the struct request_queue for the bio
1401 * @bdev: destination block device
1402 * @iov: the iovec.
1403 * @iov_count: number of elements in the iovec
1404 * @write_to_vm: bool indicating writing to pages or not
1405 * @gfp_mask: memory allocation flags
1406 *
1407 * Map the user space address into a bio suitable for io to a block
1408 * device. Returns an error pointer in case of error.
1409 */
1410struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
1411 const struct sg_iovec *iov, int iov_count,
1412 int write_to_vm, gfp_t gfp_mask)
1413{
1414 struct bio *bio;
1415
1416 bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
1417 gfp_mask);
1418 if (IS_ERR(bio))
1419 return bio;
1420
1421 /*
1422 * subtle -- if __bio_map_user() ended up bouncing a bio,
1423 * it would normally disappear when its bi_end_io is run.
1424 * however, we need it for the unmap, so grab an extra
1425 * reference to it
1426 */
1427 bio_get(bio);
1428
1429 return bio;
1430}
1431
1432static void __bio_unmap_user(struct bio *bio)
1433{
1434 struct bio_vec *bvec;
1435 int i;
1436
1437 /*
1438 * make sure we dirty pages we wrote to
1439 */
1440 bio_for_each_segment_all(bvec, bio, i) {
1441 if (bio_data_dir(bio) == READ)
1442 set_page_dirty_lock(bvec->bv_page);
1443
1444 page_cache_release(bvec->bv_page);
1445 }
1446
1447 bio_put(bio);
1448}
1449
1450/**
1451 * bio_unmap_user - unmap a bio
1452 * @bio: the bio being unmapped
1453 *
1454 * Unmap a bio previously mapped by bio_map_user(). Must be called with
1455 * a process context.
1456 *
1457 * bio_unmap_user() may sleep.
1458 */
1459void bio_unmap_user(struct bio *bio)
1460{
1461 __bio_unmap_user(bio);
1462 bio_put(bio);
1463}
1464EXPORT_SYMBOL(bio_unmap_user);
1465
1466static void bio_map_kern_endio(struct bio *bio, int err)
1467{
1468 bio_put(bio);
1469}
1470
1471static struct bio *__bio_map_kern(struct request_queue *q, void *data,
1472 unsigned int len, gfp_t gfp_mask)
1473{
1474 unsigned long kaddr = (unsigned long)data;
1475 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1476 unsigned long start = kaddr >> PAGE_SHIFT;
1477 const int nr_pages = end - start;
1478 int offset, i;
1479 struct bio *bio;
1480
1481 bio = bio_kmalloc(gfp_mask, nr_pages);
1482 if (!bio)
1483 return ERR_PTR(-ENOMEM);
1484
1485 offset = offset_in_page(kaddr);
1486 for (i = 0; i < nr_pages; i++) {
1487 unsigned int bytes = PAGE_SIZE - offset;
1488
1489 if (len <= 0)
1490 break;
1491
1492 if (bytes > len)
1493 bytes = len;
1494
1495 if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
1496 offset) < bytes)
1497 break;
1498
1499 data += bytes;
1500 len -= bytes;
1501 offset = 0;
1502 }
1503
1504 bio->bi_end_io = bio_map_kern_endio;
1505 return bio;
1506}
1507
1508/**
1509 * bio_map_kern - map kernel address into bio
1510 * @q: the struct request_queue for the bio
1511 * @data: pointer to buffer to map
1512 * @len: length in bytes
1513 * @gfp_mask: allocation flags for bio allocation
1514 *
1515 * Map the kernel address into a bio suitable for io to a block
1516 * device. Returns an error pointer in case of error.
1517 */
1518struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1519 gfp_t gfp_mask)
1520{
1521 struct bio *bio;
1522
1523 bio = __bio_map_kern(q, data, len, gfp_mask);
1524 if (IS_ERR(bio))
1525 return bio;
1526
1527 if (bio->bi_iter.bi_size == len)
1528 return bio;
1529
1530 /*
1531 * Don't support partial mappings.
1532 */
1533 bio_put(bio);
1534 return ERR_PTR(-EINVAL);
1535}
1536EXPORT_SYMBOL(bio_map_kern);
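Again purely as an illustration (assumed usage, not taken from the patch): a driver holding a physically contiguous kernel buffer could wrap it with bio_map_kern() roughly like this; example_read_buffer() and its arguments are made-up names:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

static int example_read_buffer(struct request_queue *q, struct block_device *bdev,
			       void *buf, unsigned int len, sector_t sector)
{
	struct bio *bio;
	int ret;

	/* buf must be kmalloc()-style memory, so virt_to_page() works on it */
	bio = bio_map_kern(q, buf, len, GFP_KERNEL);	/* refuses partial mappings */
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;

	/* submit_bio_wait() replaces bio_map_kern_endio(), so drop the ref here */
	ret = submit_bio_wait(READ, bio);
	bio_put(bio);
	return ret;
}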
1537
1538static void bio_copy_kern_endio(struct bio *bio, int err)
1539{
1540 struct bio_vec *bvec;
1541 const int read = bio_data_dir(bio) == READ;
1542 struct bio_map_data *bmd = bio->bi_private;
1543 int i;
1544 char *p = bmd->sgvecs[0].iov_base;
1545
1546 bio_for_each_segment_all(bvec, bio, i) {
1547 char *addr = page_address(bvec->bv_page);
1548
1549 if (read)
1550 memcpy(p, addr, bvec->bv_len);
1551
1552 __free_page(bvec->bv_page);
1553 p += bvec->bv_len;
1554 }
1555
1556 kfree(bmd);
1557 bio_put(bio);
1558}
1559
1560/**
1561 * bio_copy_kern - copy kernel address into bio
1562 * @q: the struct request_queue for the bio
1563 * @data: pointer to buffer to copy
1564 * @len: length in bytes
1565 * @gfp_mask: allocation flags for bio and page allocation
1566 * @reading: data direction is READ
1567 *
1568 * copy the kernel address into a bio suitable for io to a block
1569 * device. Returns an error pointer in case of error.
1570 */
1571struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1572 gfp_t gfp_mask, int reading)
1573{
1574 struct bio *bio;
1575 struct bio_vec *bvec;
1576 int i;
1577
1578 bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
1579 if (IS_ERR(bio))
1580 return bio;
1581
1582 if (!reading) {
1583 void *p = data;
1584
1585 bio_for_each_segment_all(bvec, bio, i) {
1586 char *addr = page_address(bvec->bv_page);
1587
1588 memcpy(addr, p, bvec->bv_len);
1589 p += bvec->bv_len;
1590 }
1591 }
1592
1593 bio->bi_end_io = bio_copy_kern_endio;
1594
1595 return bio;
1596}
1597EXPORT_SYMBOL(bio_copy_kern);
1598
1599/*
1600 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1601 * for performing direct-IO in BIOs.
1602 *
1603 * The problem is that we cannot run set_page_dirty() from interrupt context
1604 * because the required locks are not interrupt-safe. So what we can do is to
1605 * mark the pages dirty _before_ performing IO. And in interrupt context,
1606 * check that the pages are still dirty. If so, fine. If not, redirty them
1607 * in process context.
1608 *
1609 * We special-case compound pages here: normally this means reads into hugetlb
1610 * pages. The logic in here doesn't really work right for compound pages
1611 * because the VM does not uniformly chase down the head page in all cases.
1612 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1613 * handle them at all. So we skip compound pages here at an early stage.
1614 *
1615 * Note that this code is very hard to test under normal circumstances because
1616 * direct-io pins the pages with get_user_pages(). This makes
1617 * is_page_cache_freeable return false, and the VM will not clean the pages.
1618 * But other code (eg, flusher threads) could clean the pages if they are mapped
1619 * pagecache.
1620 *
1621 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1622 * deferred bio dirtying paths.
1623 */
1624
1625/*
1626 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1627 */
1628void bio_set_pages_dirty(struct bio *bio)
1629{
1630 struct bio_vec *bvec;
1631 int i;
1632
1633 bio_for_each_segment_all(bvec, bio, i) {
1634 struct page *page = bvec->bv_page;
1635
1636 if (page && !PageCompound(page))
1637 set_page_dirty_lock(page);
1638 }
1639}
1640
1641static void bio_release_pages(struct bio *bio)
1642{
1643 struct bio_vec *bvec;
1644 int i;
1645
1646 bio_for_each_segment_all(bvec, bio, i) {
1647 struct page *page = bvec->bv_page;
1648
1649 if (page)
1650 put_page(page);
1651 }
1652}
1653
1654/*
1655 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1656 * If they are, then fine. If, however, some pages are clean then they must
1657 * have been written out during the direct-IO read. So we take another ref on
1658 * the BIO and the offending pages and re-dirty the pages in process context.
1659 *
1660 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1661 * here on. It will run one page_cache_release() against each page and will
1662 * run one bio_put() against the BIO.
1663 */
1664
1665static void bio_dirty_fn(struct work_struct *work);
1666
1667static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1668static DEFINE_SPINLOCK(bio_dirty_lock);
1669static struct bio *bio_dirty_list;
1670
1671/*
1672 * This runs in process context
1673 */
1674static void bio_dirty_fn(struct work_struct *work)
1675{
1676 unsigned long flags;
1677 struct bio *bio;
1678
1679 spin_lock_irqsave(&bio_dirty_lock, flags);
1680 bio = bio_dirty_list;
1681 bio_dirty_list = NULL;
1682 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1683
1684 while (bio) {
1685 struct bio *next = bio->bi_private;
1686
1687 bio_set_pages_dirty(bio);
1688 bio_release_pages(bio);
1689 bio_put(bio);
1690 bio = next;
1691 }
1692}
1693
1694void bio_check_pages_dirty(struct bio *bio)
1695{
1696 struct bio_vec *bvec;
1697 int nr_clean_pages = 0;
1698 int i;
1699
1700 bio_for_each_segment_all(bvec, bio, i) {
1701 struct page *page = bvec->bv_page;
1702
1703 if (PageDirty(page) || PageCompound(page)) {
1704 page_cache_release(page);
1705 bvec->bv_page = NULL;
1706 } else {
1707 nr_clean_pages++;
1708 }
1709 }
1710
1711 if (nr_clean_pages) {
1712 unsigned long flags;
1713
1714 spin_lock_irqsave(&bio_dirty_lock, flags);
1715 bio->bi_private = bio_dirty_list;
1716 bio_dirty_list = bio;
1717 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1718 schedule_work(&bio_dirty_work);
1719 } else {
1720 bio_put(bio);
1721 }
1722}
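To make the deferred-dirtying scheme above concrete, a hedged sketch of the calling pattern a direct-IO style user would follow for a READ into a bio built from pages pinned with get_user_pages_fast(); the names are hypothetical and error handling is omitted:

#include <linux/bio.h>
#include <linux/fs.h>

/* Completion runs in interrupt context, so it cannot call set_page_dirty()
 * itself; bio_check_pages_dirty() re-dirties any cleaned pages from process
 * context and takes over releasing the pages and the bio. */
static void example_dio_end_io(struct bio *bio, int error)
{
	bio_check_pages_dirty(bio);
}

static void example_submit_dio_read(struct bio *bio)
{
	bio->bi_end_io = example_dio_end_io;
	bio_set_pages_dirty(bio);	/* dirty up front, in process context */
	submit_bio(READ, bio);
}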
1723
1724#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1725void bio_flush_dcache_pages(struct bio *bi)
1726{
1727 struct bio_vec bvec;
1728 struct bvec_iter iter;
1729
1730 bio_for_each_segment(bvec, bi, iter)
1731 flush_dcache_page(bvec.bv_page);
1732}
1733EXPORT_SYMBOL(bio_flush_dcache_pages);
1734#endif
1735
1736/**
1737 * bio_endio - end I/O on a bio
1738 * @bio: bio
1739 * @error: error, if any
1740 *
1741 * Description:
1742 * bio_endio() will end I/O on the whole bio. bio_endio() is the
1743 * preferred way to end I/O on a bio, it takes care of clearing
1744 * BIO_UPTODATE on error. @error is 0 on success, and one of the
1745 * established -Exxxx (-EIO, for instance) error values in case
1746 * something went wrong. No one should call bi_end_io() directly on a
1747 * bio unless they own it and thus know that it has an end_io
1748 * function.
1749 **/
1750void bio_endio(struct bio *bio, int error)
1751{
1752 while (bio) {
1753 BUG_ON(atomic_read(&bio->bi_remaining) <= 0);
1754
1755 if (error)
1756 clear_bit(BIO_UPTODATE, &bio->bi_flags);
1757 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1758 error = -EIO;
1759
1760 if (!atomic_dec_and_test(&bio->bi_remaining))
1761 return;
1762
1763 /*
1764 * Need to have a real endio function for chained bios,
1765 * otherwise various corner cases will break (like stacking
1766 * block devices that save/restore bi_end_io) - however, we want
1767 * to avoid unbounded recursion and blowing the stack. Tail call
1768 * optimization would handle this, but compiling with frame
1769 * pointers also disables gcc's sibling call optimization.
1770 */
1771 if (bio->bi_end_io == bio_chain_endio) {
1772 struct bio *parent = bio->bi_private;
1773 bio_put(bio);
1774 bio = parent;
1775 } else {
1776 if (bio->bi_end_io)
1777 bio->bi_end_io(bio, error);
1778 bio = NULL;
1779 }
1780 }
1781}
1782EXPORT_SYMBOL(bio_endio);
1783
1784/**
1785 * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
1786 * @bio: bio
1787 * @error: error, if any
1788 *
1789 * For code that has saved and restored bi_end_io; think hard before using this
1790 * function, probably you should've cloned the entire bio.
1791 **/
1792void bio_endio_nodec(struct bio *bio, int error)
1793{
1794 atomic_inc(&bio->bi_remaining);
1795 bio_endio(bio, error);
1796}
1797EXPORT_SYMBOL(bio_endio_nodec);
1798
1799/**
1800 * bio_split - split a bio
1801 * @bio: bio to split
1802 * @sectors: number of sectors to split from the front of @bio
1803 * @gfp: gfp mask
1804 * @bs: bio set to allocate from
1805 *
1806 * Allocates and returns a new bio which represents @sectors from the start of
1807 * @bio, and updates @bio to represent the remaining sectors.
1808 *
1809 * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
1810 * responsibility to ensure that @bio is not freed before the split.
1811 */
1812struct bio *bio_split(struct bio *bio, int sectors,
1813 gfp_t gfp, struct bio_set *bs)
1814{
1815 struct bio *split = NULL;
1816
1817 BUG_ON(sectors <= 0);
1818 BUG_ON(sectors >= bio_sectors(bio));
1819
1820 split = bio_clone_fast(bio, gfp, bs);
1821 if (!split)
1822 return NULL;
1823
1824 split->bi_iter.bi_size = sectors << 9;
1825
1826 if (bio_integrity(split))
1827 bio_integrity_trim(split, 0, sectors);
1828
1829 bio_advance(bio, split->bi_iter.bi_size);
1830
1831 return split;
1832}
1833EXPORT_SYMBOL(bio_split);
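As a usage illustration (assumed, not from the patch itself), a make_request-style driver could combine bio_split() with bio_chain() to carve an oversized bio into device-sized pieces; max_sectors is a hypothetical, non-zero per-device limit:

#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_submit_split(struct bio *bio, unsigned int max_sectors)
{
	while (bio_sectors(bio) > max_sectors) {
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
					      fs_bio_set);

		/* bio (the remainder) completes only after split does, which
		 * also keeps bio and its shared bi_io_vec alive, as the
		 * bio_split() comment requires. */
		bio_chain(split, bio);
		generic_make_request(split);
	}
	generic_make_request(bio);
}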
1834
1835/**
1836 * bio_trim - trim a bio
1837 * @bio: bio to trim
1838 * @offset: number of sectors to trim from the front of @bio
1839 * @size: size we want to trim @bio to, in sectors
1840 */
1841void bio_trim(struct bio *bio, int offset, int size)
1842{
1843 /* 'bio' is a cloned bio which we need to trim to match
1844 * the given offset and size.
1845 */
1846
1847 size <<= 9;
1848 if (offset == 0 && size == bio->bi_iter.bi_size)
1849 return;
1850
1851 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1852
1853 bio_advance(bio, offset << 9);
1854
1855 bio->bi_iter.bi_size = size;
1856}
1857EXPORT_SYMBOL_GPL(bio_trim);
1858
1859/*
1860 * create memory pools for biovec's in a bio_set.
1861 * use the global biovec slabs created for general use.
1862 */
1863mempool_t *biovec_create_pool(int pool_entries)
1864{
1865 struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
1866
1867 return mempool_create_slab_pool(pool_entries, bp->slab);
1868}
1869
1870void bioset_free(struct bio_set *bs)
1871{
1872 if (bs->rescue_workqueue)
1873 destroy_workqueue(bs->rescue_workqueue);
1874
1875 if (bs->bio_pool)
1876 mempool_destroy(bs->bio_pool);
1877
1878 if (bs->bvec_pool)
1879 mempool_destroy(bs->bvec_pool);
1880
1881 bioset_integrity_free(bs);
1882 bio_put_slab(bs);
1883
1884 kfree(bs);
1885}
1886EXPORT_SYMBOL(bioset_free);
1887
1888/**
1889 * bioset_create - Create a bio_set
1890 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1891 * @front_pad: Number of bytes to allocate in front of the returned bio
1892 *
1893 * Description:
1894 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1895 * to ask for a number of bytes to be allocated in front of the bio.
1896 * Front pad allocation is useful for embedding the bio inside
1897 * another structure, to avoid allocating extra data to go with the bio.
1898 * Note that the bio must be embedded at the END of that structure always,
1899 * or things will break badly.
1900 */
1901struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
1902{
1903 unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1904 struct bio_set *bs;
1905
1906 bs = kzalloc(sizeof(*bs), GFP_KERNEL);
1907 if (!bs)
1908 return NULL;
1909
1910 bs->front_pad = front_pad;
1911
1912 spin_lock_init(&bs->rescue_lock);
1913 bio_list_init(&bs->rescue_list);
1914 INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1915
1916 bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1917 if (!bs->bio_slab) {
1918 kfree(bs);
1919 return NULL;
1920 }
1921
1922 bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
1923 if (!bs->bio_pool)
1924 goto bad;
1925
1926 bs->bvec_pool = biovec_create_pool(pool_size);
1927 if (!bs->bvec_pool)
1928 goto bad;
1929
1930 bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1931 if (!bs->rescue_workqueue)
1932 goto bad;
1933
1934 return bs;
1935bad:
1936 bioset_free(bs);
1937 return NULL;
1938}
1939EXPORT_SYMBOL(bioset_create);
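A hedged example of the front_pad embedding the comment describes (structure and names invented for illustration): the bio must be the last member, and front_pad is simply the offset of that member:

#include <linux/bio.h>
#include <linux/kernel.h>

struct example_io {
	void			*driver_private;
	sector_t		orig_sector;
	struct bio		bio;	/* must stay the last member */
};

static struct bio_set *example_bs;

static int example_setup(void)
{
	example_bs = bioset_create(64, offsetof(struct example_io, bio));
	return example_bs ? 0 : -ENOMEM;
}

static struct example_io *example_alloc_io(void)
{
	/* the returned bio sits front_pad bytes into the slab object, so
	 * container_of() recovers the wrapper structure */
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, example_bs);

	return bio ? container_of(bio, struct example_io, bio) : NULL;
}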
1940
1941#ifdef CONFIG_BLK_CGROUP
1942/**
1943 * bio_associate_current - associate a bio with %current
1944 * @bio: target bio
1945 *
1946 * Associate @bio with %current if it hasn't been associated yet. Block
1947 * layer will treat @bio as if it were issued by %current no matter which
1948 * task actually issues it.
1949 *
1950 * This function takes an extra reference of @task's io_context and blkcg
1951 * which will be put when @bio is released. The caller must own @bio,
1952 * ensure %current->io_context exists, and is responsible for synchronizing
1953 * calls to this function.
1954 */
1955int bio_associate_current(struct bio *bio)
1956{
1957 struct io_context *ioc;
1958 struct cgroup_subsys_state *css;
1959
1960 if (bio->bi_ioc)
1961 return -EBUSY;
1962
1963 ioc = current->io_context;
1964 if (!ioc)
1965 return -ENOENT;
1966
1967 /* acquire active ref on @ioc and associate */
1968 get_io_context_active(ioc);
1969 bio->bi_ioc = ioc;
1970
1971 /* associate blkcg if exists */
1972 rcu_read_lock();
1973 css = task_css(current, blkio_cgrp_id);
1974 if (css && css_tryget(css))
1975 bio->bi_css = css;
1976 rcu_read_unlock();
1977
1978 return 0;
1979}
1980
1981/**
1982 * bio_disassociate_task - undo bio_associate_current()
1983 * @bio: target bio
1984 */
1985void bio_disassociate_task(struct bio *bio)
1986{
1987 if (bio->bi_ioc) {
1988 put_io_context(bio->bi_ioc);
1989 bio->bi_ioc = NULL;
1990 }
1991 if (bio->bi_css) {
1992 css_put(bio->bi_css);
1993 bio->bi_css = NULL;
1994 }
1995}
1996
1997#endif /* CONFIG_BLK_CGROUP */
1998
1999static void __init biovec_init_slabs(void)
2000{
2001 int i;
2002
2003 for (i = 0; i < BIOVEC_NR_POOLS; i++) {
2004 int size;
2005 struct biovec_slab *bvs = bvec_slabs + i;
2006
2007 if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2008 bvs->slab = NULL;
2009 continue;
2010 }
2011
2012 size = bvs->nr_vecs * sizeof(struct bio_vec);
2013 bvs->slab = kmem_cache_create(bvs->name, size, 0,
2014 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2015 }
2016}
2017
2018static int __init init_bio(void)
2019{
2020 bio_slab_max = 2;
2021 bio_slab_nr = 0;
2022 bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
2023 if (!bio_slabs)
2024 panic("bio: can't allocate bios\n");
2025
2026 bio_integrity_init();
2027 biovec_init_slabs();
2028
2029 fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
2030 if (!fs_bio_set)
2031 panic("bio: can't allocate bios\n");
2032
2033 if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
2034 panic("bio: can't create integrity pool\n");
2035
2036 return 0;
2037}
2038subsys_initcall(init_bio);
diff --git a/block/blk-core.c b/block/blk-core.c
index c4269701cb4f..d87be5b4e554 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -576,12 +576,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
576 if (!q) 576 if (!q)
577 return NULL; 577 return NULL;
578 578
579 if (percpu_counter_init(&q->mq_usage_counter, 0))
580 goto fail_q;
581
582 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); 579 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
583 if (q->id < 0) 580 if (q->id < 0)
584 goto fail_c; 581 goto fail_q;
585 582
586 q->backing_dev_info.ra_pages = 583 q->backing_dev_info.ra_pages =
587 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 584 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
@@ -639,8 +636,6 @@ fail_bdi:
639 bdi_destroy(&q->backing_dev_info); 636 bdi_destroy(&q->backing_dev_info);
640fail_id: 637fail_id:
641 ida_simple_remove(&blk_queue_ida, q->id); 638 ida_simple_remove(&blk_queue_ida, q->id);
642fail_c:
643 percpu_counter_destroy(&q->mq_usage_counter);
644fail_q: 639fail_q:
645 kmem_cache_free(blk_requestq_cachep, q); 640 kmem_cache_free(blk_requestq_cachep, q);
646 return NULL; 641 return NULL;
@@ -848,6 +843,47 @@ static void freed_request(struct request_list *rl, unsigned int flags)
848 __freed_request(rl, sync ^ 1); 843 __freed_request(rl, sync ^ 1);
849} 844}
850 845
846int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
847{
848 struct request_list *rl;
849
850 spin_lock_irq(q->queue_lock);
851 q->nr_requests = nr;
852 blk_queue_congestion_threshold(q);
853
854 /* congestion isn't cgroup aware and follows root blkcg for now */
855 rl = &q->root_rl;
856
857 if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
858 blk_set_queue_congested(q, BLK_RW_SYNC);
859 else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
860 blk_clear_queue_congested(q, BLK_RW_SYNC);
861
862 if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
863 blk_set_queue_congested(q, BLK_RW_ASYNC);
864 else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
865 blk_clear_queue_congested(q, BLK_RW_ASYNC);
866
867 blk_queue_for_each_rl(rl, q) {
868 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
869 blk_set_rl_full(rl, BLK_RW_SYNC);
870 } else {
871 blk_clear_rl_full(rl, BLK_RW_SYNC);
872 wake_up(&rl->wait[BLK_RW_SYNC]);
873 }
874
875 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
876 blk_set_rl_full(rl, BLK_RW_ASYNC);
877 } else {
878 blk_clear_rl_full(rl, BLK_RW_ASYNC);
879 wake_up(&rl->wait[BLK_RW_ASYNC]);
880 }
881 }
882
883 spin_unlock_irq(q->queue_lock);
884 return 0;
885}
886
851/* 887/*
852 * Determine if elevator data should be initialized when allocating the 888 * Determine if elevator data should be initialized when allocating the
853 * request associated with @bio. 889 * request associated with @bio.
@@ -1137,7 +1173,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
1137struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) 1173struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1138{ 1174{
1139 if (q->mq_ops) 1175 if (q->mq_ops)
1140 return blk_mq_alloc_request(q, rw, gfp_mask); 1176 return blk_mq_alloc_request(q, rw, gfp_mask, false);
1141 else 1177 else
1142 return blk_old_get_request(q, rw, gfp_mask); 1178 return blk_old_get_request(q, rw, gfp_mask);
1143} 1179}
@@ -1233,12 +1269,15 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
1233static void part_round_stats_single(int cpu, struct hd_struct *part, 1269static void part_round_stats_single(int cpu, struct hd_struct *part,
1234 unsigned long now) 1270 unsigned long now)
1235{ 1271{
1272 int inflight;
1273
1236 if (now == part->stamp) 1274 if (now == part->stamp)
1237 return; 1275 return;
1238 1276
1239 if (part_in_flight(part)) { 1277 inflight = part_in_flight(part);
1278 if (inflight) {
1240 __part_stat_add(cpu, part, time_in_queue, 1279 __part_stat_add(cpu, part, time_in_queue,
1241 part_in_flight(part) * (now - part->stamp)); 1280 inflight * (now - part->stamp));
1242 __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); 1281 __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1243 } 1282 }
1244 part->stamp = now; 1283 part->stamp = now;
@@ -1427,6 +1466,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1427 * added on the elevator at this point. In addition, we don't have 1466 * added on the elevator at this point. In addition, we don't have
1428 * reliable access to the elevator outside queue lock. Only check basic 1467 * reliable access to the elevator outside queue lock. Only check basic
1429 * merging parameters without querying the elevator. 1468 * merging parameters without querying the elevator.
1469 *
1470 * Caller must ensure !blk_queue_nomerges(q) beforehand.
1430 */ 1471 */
1431bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, 1472bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1432 unsigned int *request_count) 1473 unsigned int *request_count)
@@ -1436,9 +1477,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1436 bool ret = false; 1477 bool ret = false;
1437 struct list_head *plug_list; 1478 struct list_head *plug_list;
1438 1479
1439 if (blk_queue_nomerges(q))
1440 goto out;
1441
1442 plug = current->plug; 1480 plug = current->plug;
1443 if (!plug) 1481 if (!plug)
1444 goto out; 1482 goto out;
@@ -1517,7 +1555,8 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
1517 * Check if we can merge with the plugged list before grabbing 1555 * Check if we can merge with the plugged list before grabbing
1518 * any locks. 1556 * any locks.
1519 */ 1557 */
1520 if (blk_attempt_plug_merge(q, bio, &request_count)) 1558 if (!blk_queue_nomerges(q) &&
1559 blk_attempt_plug_merge(q, bio, &request_count))
1521 return; 1560 return;
1522 1561
1523 spin_lock_irq(q->queue_lock); 1562 spin_lock_irq(q->queue_lock);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ec7a224d6733..ef608b35d9be 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,21 +130,13 @@ static void blk_flush_restore_request(struct request *rq)
130 blk_clear_rq_complete(rq); 130 blk_clear_rq_complete(rq);
131} 131}
132 132
133static void mq_flush_run(struct work_struct *work)
134{
135 struct request *rq;
136
137 rq = container_of(work, struct request, requeue_work);
138
139 memset(&rq->csd, 0, sizeof(rq->csd));
140 blk_mq_insert_request(rq, false, true, false);
141}
142
143static bool blk_flush_queue_rq(struct request *rq, bool add_front) 133static bool blk_flush_queue_rq(struct request *rq, bool add_front)
144{ 134{
145 if (rq->q->mq_ops) { 135 if (rq->q->mq_ops) {
146 INIT_WORK(&rq->requeue_work, mq_flush_run); 136 struct request_queue *q = rq->q;
147 kblockd_schedule_work(&rq->requeue_work); 137
138 blk_mq_add_to_requeue_list(rq, add_front);
139 blk_mq_kick_requeue_list(q);
148 return false; 140 return false;
149 } else { 141 } else {
150 if (add_front) 142 if (add_front)
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index c11d24e379e2..d828b44a404b 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -64,12 +64,12 @@ EXPORT_SYMBOL(__blk_iopoll_complete);
64 * iopoll handler will not be invoked again before blk_iopoll_sched_prep() 64 * iopoll handler will not be invoked again before blk_iopoll_sched_prep()
65 * is called. 65 * is called.
66 **/ 66 **/
67void blk_iopoll_complete(struct blk_iopoll *iopoll) 67void blk_iopoll_complete(struct blk_iopoll *iop)
68{ 68{
69 unsigned long flags; 69 unsigned long flags;
70 70
71 local_irq_save(flags); 71 local_irq_save(flags);
72 __blk_iopoll_complete(iopoll); 72 __blk_iopoll_complete(iop);
73 local_irq_restore(flags); 73 local_irq_restore(flags);
74} 74}
75EXPORT_SYMBOL(blk_iopoll_complete); 75EXPORT_SYMBOL(blk_iopoll_complete);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 97a733cf3d5f..8411be3c19d3 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -226,8 +226,8 @@ EXPORT_SYMBOL(blkdev_issue_write_same);
226 * Generate and issue number of bios with zerofiled pages. 226 * Generate and issue number of bios with zerofiled pages.
227 */ 227 */
228 228
229int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, 229static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
230 sector_t nr_sects, gfp_t gfp_mask) 230 sector_t nr_sects, gfp_t gfp_mask)
231{ 231{
232 int ret; 232 int ret;
233 struct bio *bio; 233 struct bio *bio;
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 136ef8643bba..d2c253f71b86 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -18,14 +18,18 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
18{ 18{
19 unsigned int cpu = (unsigned long) hcpu; 19 unsigned int cpu = (unsigned long) hcpu;
20 struct blk_mq_cpu_notifier *notify; 20 struct blk_mq_cpu_notifier *notify;
21 int ret = NOTIFY_OK;
21 22
22 raw_spin_lock(&blk_mq_cpu_notify_lock); 23 raw_spin_lock(&blk_mq_cpu_notify_lock);
23 24
24 list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) 25 list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
25 notify->notify(notify->data, action, cpu); 26 ret = notify->notify(notify->data, action, cpu);
27 if (ret != NOTIFY_OK)
28 break;
29 }
26 30
27 raw_spin_unlock(&blk_mq_cpu_notify_lock); 31 raw_spin_unlock(&blk_mq_cpu_notify_lock);
28 return NOTIFY_OK; 32 return ret;
29} 33}
30 34
31void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier) 35void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
@@ -45,7 +49,7 @@ void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
45} 49}
46 50
47void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, 51void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
48 void (*fn)(void *, unsigned long, unsigned int), 52 int (*fn)(void *, unsigned long, unsigned int),
49 void *data) 53 void *data)
50{ 54{
51 notifier->notify = fn; 55 notifier->notify = fn;
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 5d0f93cf358c..0daacb927be1 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -96,3 +96,19 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
96 kfree(map); 96 kfree(map);
97 return NULL; 97 return NULL;
98} 98}
99
100/*
101 * We have no quick way of doing reverse lookups. This is only used at
102 * queue init time, so runtime isn't important.
103 */
104int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
105{
106 int i;
107
108 for_each_possible_cpu(i) {
109 if (index == mq_map[i])
110 return cpu_to_node(i);
111 }
112
113 return NUMA_NO_NODE;
114}
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 9176a6984857..99a60a829e69 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -203,45 +203,14 @@ static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
203 return ret; 203 return ret;
204} 204}
205 205
206static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page) 206static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
207{
208 ssize_t ret;
209
210 spin_lock(&hctx->lock);
211 ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI));
212 spin_unlock(&hctx->lock);
213
214 return ret;
215}
216
217static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx,
218 const char *page, size_t len)
219{ 207{
220 struct blk_mq_ctx *ctx; 208 return blk_mq_tag_sysfs_show(hctx->tags, page);
221 unsigned long ret;
222 unsigned int i;
223
224 if (kstrtoul(page, 10, &ret)) {
225 pr_err("blk-mq-sysfs: invalid input '%s'\n", page);
226 return -EINVAL;
227 }
228
229 spin_lock(&hctx->lock);
230 if (ret)
231 hctx->flags |= BLK_MQ_F_SHOULD_IPI;
232 else
233 hctx->flags &= ~BLK_MQ_F_SHOULD_IPI;
234 spin_unlock(&hctx->lock);
235
236 hctx_for_each_ctx(hctx, ctx, i)
237 ctx->ipi_redirect = !!ret;
238
239 return len;
240} 209}
241 210
242static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) 211static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
243{ 212{
244 return blk_mq_tag_sysfs_show(hctx->tags, page); 213 return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
245} 214}
246 215
247static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) 216static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
@@ -303,15 +272,14 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
303 .attr = {.name = "dispatched", .mode = S_IRUGO }, 272 .attr = {.name = "dispatched", .mode = S_IRUGO },
304 .show = blk_mq_hw_sysfs_dispatched_show, 273 .show = blk_mq_hw_sysfs_dispatched_show,
305}; 274};
275static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
276 .attr = {.name = "active", .mode = S_IRUGO },
277 .show = blk_mq_hw_sysfs_active_show,
278};
306static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = { 279static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
307 .attr = {.name = "pending", .mode = S_IRUGO }, 280 .attr = {.name = "pending", .mode = S_IRUGO },
308 .show = blk_mq_hw_sysfs_rq_list_show, 281 .show = blk_mq_hw_sysfs_rq_list_show,
309}; 282};
310static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = {
311 .attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR},
312 .show = blk_mq_hw_sysfs_ipi_show,
313 .store = blk_mq_hw_sysfs_ipi_store,
314};
315static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = { 283static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
316 .attr = {.name = "tags", .mode = S_IRUGO }, 284 .attr = {.name = "tags", .mode = S_IRUGO },
317 .show = blk_mq_hw_sysfs_tags_show, 285 .show = blk_mq_hw_sysfs_tags_show,
@@ -326,9 +294,9 @@ static struct attribute *default_hw_ctx_attrs[] = {
326 &blk_mq_hw_sysfs_run.attr, 294 &blk_mq_hw_sysfs_run.attr,
327 &blk_mq_hw_sysfs_dispatched.attr, 295 &blk_mq_hw_sysfs_dispatched.attr,
328 &blk_mq_hw_sysfs_pending.attr, 296 &blk_mq_hw_sysfs_pending.attr,
329 &blk_mq_hw_sysfs_ipi.attr,
330 &blk_mq_hw_sysfs_tags.attr, 297 &blk_mq_hw_sysfs_tags.attr,
331 &blk_mq_hw_sysfs_cpus.attr, 298 &blk_mq_hw_sysfs_cpus.attr,
299 &blk_mq_hw_sysfs_active.attr,
332 NULL, 300 NULL,
333}; 301};
334 302
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 7a799c46c32d..0d0640d38a06 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -1,64 +1,333 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/module.h> 2#include <linux/module.h>
3#include <linux/random.h>
3 4
4#include <linux/blk-mq.h> 5#include <linux/blk-mq.h>
5#include "blk.h" 6#include "blk.h"
6#include "blk-mq.h" 7#include "blk-mq.h"
7#include "blk-mq-tag.h" 8#include "blk-mq-tag.h"
8 9
9void blk_mq_wait_for_tags(struct blk_mq_tags *tags) 10static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
10{ 11{
11 int tag = blk_mq_get_tag(tags, __GFP_WAIT, false); 12 int i;
12 blk_mq_put_tag(tags, tag); 13
14 for (i = 0; i < bt->map_nr; i++) {
15 struct blk_align_bitmap *bm = &bt->map[i];
16 int ret;
17
18 ret = find_first_zero_bit(&bm->word, bm->depth);
19 if (ret < bm->depth)
20 return true;
21 }
22
23 return false;
13} 24}
14 25
15bool blk_mq_has_free_tags(struct blk_mq_tags *tags) 26bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
16{ 27{
17 return !tags || 28 if (!tags)
18 percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0; 29 return true;
30
31 return bt_has_free_tags(&tags->bitmap_tags);
32}
33
34static inline void bt_index_inc(unsigned int *index)
35{
36 *index = (*index + 1) & (BT_WAIT_QUEUES - 1);
37}
38
39/*
40 * If a previously inactive queue goes active, bump the active user count.
41 */
42bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
43{
44 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
45 !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
46 atomic_inc(&hctx->tags->active_queues);
47
48 return true;
49}
50
51/*
52 * Wake up all waiters potentially sleeping on normal (non-reserved) tags
53 */
54static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
55{
56 struct blk_mq_bitmap_tags *bt;
57 int i, wake_index;
58
59 bt = &tags->bitmap_tags;
60 wake_index = bt->wake_index;
61 for (i = 0; i < BT_WAIT_QUEUES; i++) {
62 struct bt_wait_state *bs = &bt->bs[wake_index];
63
64 if (waitqueue_active(&bs->wait))
65 wake_up(&bs->wait);
66
67 bt_index_inc(&wake_index);
68 }
19} 69}
20 70
21static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp) 71/*
72 * If a previously busy queue goes inactive, potential waiters could now
73 * be allowed to queue. Wake them up and check.
74 */
75void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
22{ 76{
77 struct blk_mq_tags *tags = hctx->tags;
78
79 if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
80 return;
81
82 atomic_dec(&tags->active_queues);
83
84 blk_mq_tag_wakeup_all(tags);
85}
86
87/*
88 * For shared tag users, we track the number of currently active users
89 * and attempt to provide a fair share of the tag depth for each of them.
90 */
91static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
92 struct blk_mq_bitmap_tags *bt)
93{
94 unsigned int depth, users;
95
96 if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
97 return true;
98 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
99 return true;
100
101 /*
102 * Don't try dividing an ant
103 */
104 if (bt->depth == 1)
105 return true;
106
107 users = atomic_read(&hctx->tags->active_queues);
108 if (!users)
109 return true;
110
111 /*
112 * Allow at least some tags
113 */
114 depth = max((bt->depth + users - 1) / users, 4U);
115 return atomic_read(&hctx->nr_active) < depth;
116}
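To put numbers on the fair-share logic above (an illustrative calculation, not from the patch): with a shared tag map of depth 256 and 5 active queues, depth = max((256 + 5 - 1) / 5, 4U) = 52, so each busy hctx may have at most 52 tags in flight; with 128 active queues the division yields 2, and the max(..., 4U) clamp still guarantees every queue at least 4 tags.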
117
118static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
119{
120 int tag, org_last_tag, end;
121
122 org_last_tag = last_tag;
123 end = bm->depth;
124 do {
125restart:
126 tag = find_next_zero_bit(&bm->word, end, last_tag);
127 if (unlikely(tag >= end)) {
128 /*
129 * We started with an offset, start from 0 to
130 * exhaust the map.
131 */
132 if (org_last_tag && last_tag) {
133 end = last_tag;
134 last_tag = 0;
135 goto restart;
136 }
137 return -1;
138 }
139 last_tag = tag + 1;
140 } while (test_and_set_bit_lock(tag, &bm->word));
141
142 return tag;
143}
144
145/*
146 * Straightforward bitmap tag implementation, where each bit is a tag
147 * (cleared == free, and set == busy). The small twist is using per-cpu
148 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
149 * contexts. This enables us to drastically limit the space searched,
150 * without dirtying an extra shared cacheline like we would if we stored
151 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
152 * of that, each word of tags is in a separate cacheline. This means that
153 * multiple users will tend to stick to different cachelines, at least
154 * until the map is exhausted.
155 */
156static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
157 unsigned int *tag_cache)
158{
159 unsigned int last_tag, org_last_tag;
160 int index, i, tag;
161
162 if (!hctx_may_queue(hctx, bt))
163 return -1;
164
165 last_tag = org_last_tag = *tag_cache;
166 index = TAG_TO_INDEX(bt, last_tag);
167
168 for (i = 0; i < bt->map_nr; i++) {
169 tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
170 if (tag != -1) {
171 tag += (index << bt->bits_per_word);
172 goto done;
173 }
174
175 last_tag = 0;
176 if (++index >= bt->map_nr)
177 index = 0;
178 }
179
180 *tag_cache = 0;
181 return -1;
182
183 /*
184 * Only update the cache from the allocation path, if we ended
185 * up using the specific cached tag.
186 */
187done:
188 if (tag == org_last_tag) {
189 last_tag = tag + 1;
190 if (last_tag >= bt->depth - 1)
191 last_tag = 0;
192
193 *tag_cache = last_tag;
194 }
195
196 return tag;
197}
198
199static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
200 struct blk_mq_hw_ctx *hctx)
201{
202 struct bt_wait_state *bs;
203
204 if (!hctx)
205 return &bt->bs[0];
206
207 bs = &bt->bs[hctx->wait_index];
208 bt_index_inc(&hctx->wait_index);
209 return bs;
210}
211
212static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
213 unsigned int *last_tag, gfp_t gfp)
214{
215 struct bt_wait_state *bs;
216 DEFINE_WAIT(wait);
23 int tag; 217 int tag;
24 218
25 tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ? 219 tag = __bt_get(hctx, bt, last_tag);
26 TASK_UNINTERRUPTIBLE : TASK_RUNNING); 220 if (tag != -1)
27 if (tag < 0) 221 return tag;
28 return BLK_MQ_TAG_FAIL; 222
29 return tag + tags->nr_reserved_tags; 223 if (!(gfp & __GFP_WAIT))
224 return -1;
225
226 bs = bt_wait_ptr(bt, hctx);
227 do {
228 bool was_empty;
229
230 was_empty = list_empty(&wait.task_list);
231 prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
232
233 tag = __bt_get(hctx, bt, last_tag);
234 if (tag != -1)
235 break;
236
237 if (was_empty)
238 atomic_set(&bs->wait_cnt, bt->wake_cnt);
239
240 io_schedule();
241 } while (1);
242
243 finish_wait(&bs->wait, &wait);
244 return tag;
245}
246
247static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags,
248 struct blk_mq_hw_ctx *hctx,
249 unsigned int *last_tag, gfp_t gfp)
250{
251 int tag;
252
253 tag = bt_get(&tags->bitmap_tags, hctx, last_tag, gfp);
254 if (tag >= 0)
255 return tag + tags->nr_reserved_tags;
256
257 return BLK_MQ_TAG_FAIL;
30} 258}
31 259
32static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags, 260static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
33 gfp_t gfp) 261 gfp_t gfp)
34{ 262{
35 int tag; 263 int tag, zero = 0;
36 264
37 if (unlikely(!tags->nr_reserved_tags)) { 265 if (unlikely(!tags->nr_reserved_tags)) {
38 WARN_ON_ONCE(1); 266 WARN_ON_ONCE(1);
39 return BLK_MQ_TAG_FAIL; 267 return BLK_MQ_TAG_FAIL;
40 } 268 }
41 269
42 tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ? 270 tag = bt_get(&tags->breserved_tags, NULL, &zero, gfp);
43 TASK_UNINTERRUPTIBLE : TASK_RUNNING);
44 if (tag < 0) 271 if (tag < 0)
45 return BLK_MQ_TAG_FAIL; 272 return BLK_MQ_TAG_FAIL;
273
46 return tag; 274 return tag;
47} 275}
48 276
49unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved) 277unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
278 gfp_t gfp, bool reserved)
50{ 279{
51 if (!reserved) 280 if (!reserved)
52 return __blk_mq_get_tag(tags, gfp); 281 return __blk_mq_get_tag(hctx->tags, hctx, last_tag, gfp);
53 282
54 return __blk_mq_get_reserved_tag(tags, gfp); 283 return __blk_mq_get_reserved_tag(hctx->tags, gfp);
284}
285
286static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
287{
288 int i, wake_index;
289
290 wake_index = bt->wake_index;
291 for (i = 0; i < BT_WAIT_QUEUES; i++) {
292 struct bt_wait_state *bs = &bt->bs[wake_index];
293
294 if (waitqueue_active(&bs->wait)) {
295 if (wake_index != bt->wake_index)
296 bt->wake_index = wake_index;
297
298 return bs;
299 }
300
301 bt_index_inc(&wake_index);
302 }
303
304 return NULL;
305}
306
307static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
308{
309 const int index = TAG_TO_INDEX(bt, tag);
310 struct bt_wait_state *bs;
311
312 /*
313 * The unlock memory barrier needs to order access to req in the free
314 * path against the clearing of the tag bit
315 */
316 clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);
317
318 bs = bt_wake_ptr(bt);
319 if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
320 atomic_set(&bs->wait_cnt, bt->wake_cnt);
321 bt_index_inc(&bt->wake_index);
322 wake_up(&bs->wait);
323 }
55} 324}
56 325
57static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag) 326static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
58{ 327{
59 BUG_ON(tag >= tags->nr_tags); 328 BUG_ON(tag >= tags->nr_tags);
60 329
61 percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags); 330 bt_clear_tag(&tags->bitmap_tags, tag);
62} 331}
63 332
64static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags, 333static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
@@ -66,22 +335,43 @@ static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
66{ 335{
67 BUG_ON(tag >= tags->nr_reserved_tags); 336 BUG_ON(tag >= tags->nr_reserved_tags);
68 337
69 percpu_ida_free(&tags->reserved_tags, tag); 338 bt_clear_tag(&tags->breserved_tags, tag);
70} 339}
71 340
72void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag) 341void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
342 unsigned int *last_tag)
73{ 343{
74 if (tag >= tags->nr_reserved_tags) 344 struct blk_mq_tags *tags = hctx->tags;
75 __blk_mq_put_tag(tags, tag); 345
76 else 346 if (tag >= tags->nr_reserved_tags) {
347 const int real_tag = tag - tags->nr_reserved_tags;
348
349 __blk_mq_put_tag(tags, real_tag);
350 *last_tag = real_tag;
351 } else
77 __blk_mq_put_reserved_tag(tags, tag); 352 __blk_mq_put_reserved_tag(tags, tag);
78} 353}
79 354
80static int __blk_mq_tag_iter(unsigned id, void *data) 355static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
356 unsigned long *free_map, unsigned int off)
81{ 357{
82 unsigned long *tag_map = data; 358 int i;
83 __set_bit(id, tag_map); 359
84 return 0; 360 for (i = 0; i < bt->map_nr; i++) {
361 struct blk_align_bitmap *bm = &bt->map[i];
362 int bit = 0;
363
364 do {
365 bit = find_next_zero_bit(&bm->word, bm->depth, bit);
366 if (bit >= bm->depth)
367 break;
368
369 __set_bit(bit + off, free_map);
370 bit++;
371 } while (1);
372
373 off += (1 << bt->bits_per_word);
374 }
85} 375}
86 376
87void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, 377void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
@@ -95,21 +385,128 @@ void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
95 if (!tag_map) 385 if (!tag_map)
96 return; 386 return;
97 387
98 percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map); 388 bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
99 if (tags->nr_reserved_tags) 389 if (tags->nr_reserved_tags)
100 percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter, 390 bt_for_each_free(&tags->breserved_tags, tag_map, 0);
101 tag_map);
102 391
103 fn(data, tag_map); 392 fn(data, tag_map);
104 kfree(tag_map); 393 kfree(tag_map);
105} 394}
395EXPORT_SYMBOL(blk_mq_tag_busy_iter);
396
397static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
398{
399 unsigned int i, used;
400
401 for (i = 0, used = 0; i < bt->map_nr; i++) {
402 struct blk_align_bitmap *bm = &bt->map[i];
403
404 used += bitmap_weight(&bm->word, bm->depth);
405 }
406
407 return bt->depth - used;
408}
409
410static void bt_update_count(struct blk_mq_bitmap_tags *bt,
411 unsigned int depth)
412{
413 unsigned int tags_per_word = 1U << bt->bits_per_word;
414 unsigned int map_depth = depth;
415
416 if (depth) {
417 int i;
418
419 for (i = 0; i < bt->map_nr; i++) {
420 bt->map[i].depth = min(map_depth, tags_per_word);
421 map_depth -= bt->map[i].depth;
422 }
423 }
424
425 bt->wake_cnt = BT_WAIT_BATCH;
426 if (bt->wake_cnt > depth / 4)
427 bt->wake_cnt = max(1U, depth / 4);
428
429 bt->depth = depth;
430}
431
432static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
433 int node, bool reserved)
434{
435 int i;
436
437 bt->bits_per_word = ilog2(BITS_PER_LONG);
438
439 /*
440 * Depth can be zero for reserved tags; that's not a failure
441 * condition.
442 */
443 if (depth) {
444 unsigned int nr, tags_per_word;
445
446 tags_per_word = (1 << bt->bits_per_word);
447
448 /*
449 * If the tag space is small, shrink the number of tags
450 * per word so we spread over a few cachelines, at least.
451 * If less than 4 tags, just forget about it, it's not
452 * going to work optimally anyway.
453 */
454 if (depth >= 4) {
455 while (tags_per_word * 4 > depth) {
456 bt->bits_per_word--;
457 tags_per_word = (1 << bt->bits_per_word);
458 }
459 }
460
461 nr = ALIGN(depth, tags_per_word) / tags_per_word;
462 bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
463 GFP_KERNEL, node);
464 if (!bt->map)
465 return -ENOMEM;
466
467 bt->map_nr = nr;
468 }
469
470 bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
471 if (!bt->bs) {
472 kfree(bt->map);
473 return -ENOMEM;
474 }
475
476 for (i = 0; i < BT_WAIT_QUEUES; i++)
477 init_waitqueue_head(&bt->bs[i].wait);
478
479 bt_update_count(bt, depth);
480 return 0;
481}
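A worked example of the sizing above (illustrative only): for depth = 48 on a 64-bit kernel, bits_per_word starts at ilog2(64) = 6, i.e. 64 tags per word; the shrink loop halves that while tags_per_word * 4 > 48, ending at 8 tags per word (bits_per_word = 3), so the map becomes ALIGN(48, 8) / 8 = 6 blk_align_bitmap words, each in its own cacheline as the earlier comment describes.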
482
483static void bt_free(struct blk_mq_bitmap_tags *bt)
484{
485 kfree(bt->map);
486 kfree(bt->bs);
487}
488
489static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
490 int node)
491{
492 unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
493
494 if (bt_alloc(&tags->bitmap_tags, depth, node, false))
495 goto enomem;
496 if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
497 goto enomem;
498
499 return tags;
500enomem:
501 bt_free(&tags->bitmap_tags);
502 kfree(tags);
503 return NULL;
504}
106 505
107struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, 506struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
108 unsigned int reserved_tags, int node) 507 unsigned int reserved_tags, int node)
109{ 508{
110 unsigned int nr_tags, nr_cache;
111 struct blk_mq_tags *tags; 509 struct blk_mq_tags *tags;
112 int ret;
113 510
114 if (total_tags > BLK_MQ_TAG_MAX) { 511 if (total_tags > BLK_MQ_TAG_MAX) {
115 pr_err("blk-mq: tag depth too large\n"); 512 pr_err("blk-mq: tag depth too large\n");
@@ -120,73 +517,59 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
120 if (!tags) 517 if (!tags)
121 return NULL; 518 return NULL;
122 519
123 nr_tags = total_tags - reserved_tags;
124 nr_cache = nr_tags / num_possible_cpus();
125
126 if (nr_cache < BLK_MQ_TAG_CACHE_MIN)
127 nr_cache = BLK_MQ_TAG_CACHE_MIN;
128 else if (nr_cache > BLK_MQ_TAG_CACHE_MAX)
129 nr_cache = BLK_MQ_TAG_CACHE_MAX;
130
131 tags->nr_tags = total_tags; 520 tags->nr_tags = total_tags;
132 tags->nr_reserved_tags = reserved_tags; 521 tags->nr_reserved_tags = reserved_tags;
133 tags->nr_max_cache = nr_cache;
134 tags->nr_batch_move = max(1u, nr_cache / 2);
135 522
136 ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags - 523 return blk_mq_init_bitmap_tags(tags, node);
137 tags->nr_reserved_tags, 524}
138 tags->nr_max_cache,
139 tags->nr_batch_move);
140 if (ret)
141 goto err_free_tags;
142 525
143 if (reserved_tags) { 526void blk_mq_free_tags(struct blk_mq_tags *tags)
144 /* 527{
145 * With max_cahe and batch set to 1, the allocator fallbacks to 528 bt_free(&tags->bitmap_tags);
146 * no cached. It's fine reserved tags allocation is slow. 529 bt_free(&tags->breserved_tags);
147 */ 530 kfree(tags);
148 ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags, 531}
149 1, 1);
150 if (ret)
151 goto err_reserved_tags;
152 }
153 532
154 return tags; 533void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
534{
535 unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
155 536
156err_reserved_tags: 537 *tag = prandom_u32() % depth;
157 percpu_ida_destroy(&tags->free_tags);
158err_free_tags:
159 kfree(tags);
160 return NULL;
161} 538}
162 539
163void blk_mq_free_tags(struct blk_mq_tags *tags) 540int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
164{ 541{
165 percpu_ida_destroy(&tags->free_tags); 542 tdepth -= tags->nr_reserved_tags;
166 percpu_ida_destroy(&tags->reserved_tags); 543 if (tdepth > tags->nr_tags)
167 kfree(tags); 544 return -EINVAL;
545
546 /*
547 * Don't need (or can't) update reserved tags here, they remain
548 * static and should never need resizing.
549 */
550 bt_update_count(&tags->bitmap_tags, tdepth);
551 blk_mq_tag_wakeup_all(tags);
552 return 0;
168} 553}
169 554
170ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) 555ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
171{ 556{
172 char *orig_page = page; 557 char *orig_page = page;
173 unsigned int cpu; 558 unsigned int free, res;
174 559
175 if (!tags) 560 if (!tags)
176 return 0; 561 return 0;
177 562
178 page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u," 563 page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
179 " max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags, 564 "bits_per_word=%u\n",
180 tags->nr_batch_move, tags->nr_max_cache); 565 tags->nr_tags, tags->nr_reserved_tags,
566 tags->bitmap_tags.bits_per_word);
181 567
182 page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", 568 free = bt_unused_tags(&tags->bitmap_tags);
183 percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids), 569 res = bt_unused_tags(&tags->breserved_tags);
184 percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids));
185 570
186 for_each_possible_cpu(cpu) { 571 page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
187 page += sprintf(page, " cpu%02u: nr_free=%u\n", cpu, 572 page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
188 percpu_ida_free_tags(&tags->free_tags, cpu));
189 }
190 573
191 return page - orig_page; 574 return page - orig_page;
192} 575}
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index b602e3fa66ea..c959de58d2a5 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -1,7 +1,32 @@
1#ifndef INT_BLK_MQ_TAG_H 1#ifndef INT_BLK_MQ_TAG_H
2#define INT_BLK_MQ_TAG_H 2#define INT_BLK_MQ_TAG_H
3 3
4#include <linux/percpu_ida.h> 4#include "blk-mq.h"
5
6enum {
7 BT_WAIT_QUEUES = 8,
8 BT_WAIT_BATCH = 8,
9};
10
11struct bt_wait_state {
12 atomic_t wait_cnt;
13 wait_queue_head_t wait;
14} ____cacheline_aligned_in_smp;
15
16#define TAG_TO_INDEX(bt, tag) ((tag) >> (bt)->bits_per_word)
17#define TAG_TO_BIT(bt, tag) ((tag) & ((1 << (bt)->bits_per_word) - 1))
18
19struct blk_mq_bitmap_tags {
20 unsigned int depth;
21 unsigned int wake_cnt;
22 unsigned int bits_per_word;
23
24 unsigned int map_nr;
25 struct blk_align_bitmap *map;
26
27 unsigned int wake_index;
28 struct bt_wait_state *bs;
29};
5 30
6/* 31/*
7 * Tag address space map. 32 * Tag address space map.
@@ -9,11 +34,11 @@
9struct blk_mq_tags { 34struct blk_mq_tags {
10 unsigned int nr_tags; 35 unsigned int nr_tags;
11 unsigned int nr_reserved_tags; 36 unsigned int nr_reserved_tags;
12 unsigned int nr_batch_move;
13 unsigned int nr_max_cache;
14 37
15 struct percpu_ida free_tags; 38 atomic_t active_queues;
16 struct percpu_ida reserved_tags; 39
40 struct blk_mq_bitmap_tags bitmap_tags;
41 struct blk_mq_bitmap_tags breserved_tags;
17 42
18 struct request **rqs; 43 struct request **rqs;
19 struct list_head page_list; 44 struct list_head page_list;
@@ -23,12 +48,12 @@ struct blk_mq_tags {
23extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node); 48extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node);
24extern void blk_mq_free_tags(struct blk_mq_tags *tags); 49extern void blk_mq_free_tags(struct blk_mq_tags *tags);
25 50
26extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved); 51extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved);
27extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags); 52extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
28extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag);
29extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
30extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); 53extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
31extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); 54extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
55extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
56extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
32 57
33enum { 58enum {
34 BLK_MQ_TAG_CACHE_MIN = 1, 59 BLK_MQ_TAG_CACHE_MIN = 1,
@@ -41,4 +66,23 @@ enum {
41 BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1, 66 BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1,
42}; 67};
43 68
69extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
70extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
71
72static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
73{
74 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
75 return false;
76
77 return __blk_mq_tag_busy(hctx);
78}
79
80static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
81{
82 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
83 return;
84
85 __blk_mq_tag_idle(hctx);
86}
87
44#endif 88#endif
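
The TAG_TO_INDEX()/TAG_TO_BIT() macros added to this header use bits_per_word as a shift count, so consecutive tags map into the same blk_align_bitmap word until 2^bits_per_word of them have been used. A standalone sketch of that decomposition; the bits_per_word value of 5 (32 tags per word) is assumed purely for illustration:

#include <stdio.h>

/* Mirror of TAG_TO_INDEX()/TAG_TO_BIT() from blk-mq-tag.h, with
 * bits_per_word passed in explicitly. */
static unsigned int tag_to_index(unsigned int tag, unsigned int bits_per_word)
{
	return tag >> bits_per_word;              /* which bitmap word */
}

static unsigned int tag_to_bit(unsigned int tag, unsigned int bits_per_word)
{
	return tag & ((1U << bits_per_word) - 1); /* bit inside that word */
}

int main(void)
{
	unsigned int bits_per_word = 5;           /* assumed: 32 tags per word */
	unsigned int tags[] = { 0, 31, 32, 100 };

	for (unsigned int i = 0; i < sizeof(tags) / sizeof(tags[0]); i++)
		printf("tag %3u -> word %u, bit %u\n", tags[i],
		       tag_to_index(tags[i], bits_per_word),
		       tag_to_bit(tags[i], bits_per_word));
	return 0;
}
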
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ee225cc312b8..ae14749b530c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -56,39 +56,40 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
56{ 56{
57 unsigned int i; 57 unsigned int i;
58 58
59 for (i = 0; i < hctx->nr_ctx_map; i++) 59 for (i = 0; i < hctx->ctx_map.map_size; i++)
60 if (hctx->ctx_map[i]) 60 if (hctx->ctx_map.map[i].word)
61 return true; 61 return true;
62 62
63 return false; 63 return false;
64} 64}
65 65
66static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
67 struct blk_mq_ctx *ctx)
68{
69 return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
70}
71
72#define CTX_TO_BIT(hctx, ctx) \
73 ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
74
66/* 75/*
67 * Mark this ctx as having pending work in this hardware queue 76 * Mark this ctx as having pending work in this hardware queue
68 */ 77 */
69static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, 78static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
70 struct blk_mq_ctx *ctx) 79 struct blk_mq_ctx *ctx)
71{ 80{
72 if (!test_bit(ctx->index_hw, hctx->ctx_map)) 81 struct blk_align_bitmap *bm = get_bm(hctx, ctx);
73 set_bit(ctx->index_hw, hctx->ctx_map); 82
83 if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
84 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
74} 85}
75 86
76static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, 87static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
77 gfp_t gfp, bool reserved) 88 struct blk_mq_ctx *ctx)
78{ 89{
79 struct request *rq; 90 struct blk_align_bitmap *bm = get_bm(hctx, ctx);
80 unsigned int tag;
81
82 tag = blk_mq_get_tag(hctx->tags, gfp, reserved);
83 if (tag != BLK_MQ_TAG_FAIL) {
84 rq = hctx->tags->rqs[tag];
85 blk_rq_init(hctx->queue, rq);
86 rq->tag = tag;
87
88 return rq;
89 }
90 91
91 return NULL; 92 clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
92} 93}
93 94
94static int blk_mq_queue_enter(struct request_queue *q) 95static int blk_mq_queue_enter(struct request_queue *q)
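
get_bm() and CTX_TO_BIT() above locate a software queue's pending bit by dividing index_hw by ctx_map.bits_per_word for the word and masking with bits_per_word - 1 for the bit. A minimal userspace sketch of marking and clearing a bit with that layout; bits_per_word of 8 matches the bpw chosen by blk_mq_alloc_bitmap() later in this patch, and a plain unsigned long array stands in for the cacheline-aligned blk_align_bitmap words:

#include <stdio.h>

#define BITS_PER_WORD 8U   /* matches bpw = 8 in blk_mq_alloc_bitmap() */

/* Same index/bit split as get_bm() and CTX_TO_BIT() in the patch. */
static void mark_pending(unsigned long *map, unsigned int index_hw)
{
	map[index_hw / BITS_PER_WORD] |= 1UL << (index_hw & (BITS_PER_WORD - 1));
}

static void clear_pending(unsigned long *map, unsigned int index_hw)
{
	map[index_hw / BITS_PER_WORD] &= ~(1UL << (index_hw & (BITS_PER_WORD - 1)));
}

int main(void)
{
	unsigned long map[4] = { 0 };   /* enough words for 32 software queues */

	mark_pending(map, 3);           /* word 0, bit 3 */
	mark_pending(map, 9);           /* word 1, bit 1 */
	clear_pending(map, 3);
	printf("word0=%lx word1=%lx\n", map[0], map[1]);  /* word0=0 word1=2 */
	return 0;
}
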
@@ -187,70 +188,109 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
187 if (blk_queue_io_stat(q)) 188 if (blk_queue_io_stat(q))
188 rw_flags |= REQ_IO_STAT; 189 rw_flags |= REQ_IO_STAT;
189 190
191 INIT_LIST_HEAD(&rq->queuelist);
192 /* csd/requeue_work/fifo_time is initialized before use */
193 rq->q = q;
190 rq->mq_ctx = ctx; 194 rq->mq_ctx = ctx;
191 rq->cmd_flags = rw_flags; 195 rq->cmd_flags |= rw_flags;
196 rq->cmd_type = 0;
197 /* do not touch atomic flags, it needs atomic ops against the timer */
198 rq->cpu = -1;
199 rq->__data_len = 0;
200 rq->__sector = (sector_t) -1;
201 rq->bio = NULL;
202 rq->biotail = NULL;
203 INIT_HLIST_NODE(&rq->hash);
204 RB_CLEAR_NODE(&rq->rb_node);
205 memset(&rq->flush, 0, max(sizeof(rq->flush), sizeof(rq->elv)));
206 rq->rq_disk = NULL;
207 rq->part = NULL;
192 rq->start_time = jiffies; 208 rq->start_time = jiffies;
209#ifdef CONFIG_BLK_CGROUP
210 rq->rl = NULL;
193 set_start_time_ns(rq); 211 set_start_time_ns(rq);
212 rq->io_start_time_ns = 0;
213#endif
214 rq->nr_phys_segments = 0;
215#if defined(CONFIG_BLK_DEV_INTEGRITY)
216 rq->nr_integrity_segments = 0;
217#endif
218 rq->ioprio = 0;
219 rq->special = NULL;
220 /* tag was already set */
221 rq->errors = 0;
222 memset(rq->__cmd, 0, sizeof(rq->__cmd));
223 rq->cmd = rq->__cmd;
224 rq->cmd_len = BLK_MAX_CDB;
225
226 rq->extra_len = 0;
227 rq->sense_len = 0;
228 rq->resid_len = 0;
229 rq->sense = NULL;
230
231 rq->deadline = 0;
232 INIT_LIST_HEAD(&rq->timeout_list);
233 rq->timeout = 0;
234 rq->retries = 0;
235 rq->end_io = NULL;
236 rq->end_io_data = NULL;
237 rq->next_rq = NULL;
238
194 ctx->rq_dispatched[rw_is_sync(rw_flags)]++; 239 ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
195} 240}
196 241
197static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, 242static struct request *
198 int rw, gfp_t gfp, 243__blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
199 bool reserved) 244 struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved)
200{ 245{
201 struct request *rq; 246 struct request *rq;
247 unsigned int tag;
202 248
203 do { 249 tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
204 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); 250 if (tag != BLK_MQ_TAG_FAIL) {
205 struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); 251 rq = hctx->tags->rqs[tag];
206
207 rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
208 if (rq) {
209 blk_mq_rq_ctx_init(q, ctx, rq, rw);
210 break;
211 }
212 252
213 if (gfp & __GFP_WAIT) { 253 rq->cmd_flags = 0;
214 __blk_mq_run_hw_queue(hctx); 254 if (blk_mq_tag_busy(hctx)) {
215 blk_mq_put_ctx(ctx); 255 rq->cmd_flags = REQ_MQ_INFLIGHT;
216 } else { 256 atomic_inc(&hctx->nr_active);
217 blk_mq_put_ctx(ctx);
218 break;
219 } 257 }
220 258
221 blk_mq_wait_for_tags(hctx->tags); 259 rq->tag = tag;
222 } while (1); 260 blk_mq_rq_ctx_init(q, ctx, rq, rw);
261 return rq;
262 }
223 263
224 return rq; 264 return NULL;
225} 265}
226 266
227struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp) 267struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
268 bool reserved)
228{ 269{
270 struct blk_mq_ctx *ctx;
271 struct blk_mq_hw_ctx *hctx;
229 struct request *rq; 272 struct request *rq;
230 273
231 if (blk_mq_queue_enter(q)) 274 if (blk_mq_queue_enter(q))
232 return NULL; 275 return NULL;
233 276
234 rq = blk_mq_alloc_request_pinned(q, rw, gfp, false); 277 ctx = blk_mq_get_ctx(q);
235 if (rq) 278 hctx = q->mq_ops->map_queue(q, ctx->cpu);
236 blk_mq_put_ctx(rq->mq_ctx);
237 return rq;
238}
239
240struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
241 gfp_t gfp)
242{
243 struct request *rq;
244 279
245 if (blk_mq_queue_enter(q)) 280 rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
246 return NULL; 281 reserved);
282 if (!rq && (gfp & __GFP_WAIT)) {
283 __blk_mq_run_hw_queue(hctx);
284 blk_mq_put_ctx(ctx);
247 285
248 rq = blk_mq_alloc_request_pinned(q, rw, gfp, true); 286 ctx = blk_mq_get_ctx(q);
249 if (rq) 287 hctx = q->mq_ops->map_queue(q, ctx->cpu);
250 blk_mq_put_ctx(rq->mq_ctx); 288 rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp, reserved);
289 }
290 blk_mq_put_ctx(ctx);
251 return rq; 291 return rq;
252} 292}
253EXPORT_SYMBOL(blk_mq_alloc_reserved_request); 293EXPORT_SYMBOL(blk_mq_alloc_request);
254 294
255static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, 295static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
256 struct blk_mq_ctx *ctx, struct request *rq) 296 struct blk_mq_ctx *ctx, struct request *rq)
@@ -258,7 +298,11 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
258 const int tag = rq->tag; 298 const int tag = rq->tag;
259 struct request_queue *q = rq->q; 299 struct request_queue *q = rq->q;
260 300
261 blk_mq_put_tag(hctx->tags, tag); 301 if (rq->cmd_flags & REQ_MQ_INFLIGHT)
302 atomic_dec(&hctx->nr_active);
303
304 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
305 blk_mq_put_tag(hctx, tag, &ctx->last_tag);
262 blk_mq_queue_exit(q); 306 blk_mq_queue_exit(q);
263} 307}
264 308
@@ -326,15 +370,19 @@ static void __blk_mq_complete_request_remote(void *data)
326void __blk_mq_complete_request(struct request *rq) 370void __blk_mq_complete_request(struct request *rq)
327{ 371{
328 struct blk_mq_ctx *ctx = rq->mq_ctx; 372 struct blk_mq_ctx *ctx = rq->mq_ctx;
373 bool shared = false;
329 int cpu; 374 int cpu;
330 375
331 if (!ctx->ipi_redirect) { 376 if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
332 rq->q->softirq_done_fn(rq); 377 rq->q->softirq_done_fn(rq);
333 return; 378 return;
334 } 379 }
335 380
336 cpu = get_cpu(); 381 cpu = get_cpu();
337 if (cpu != ctx->cpu && cpu_online(ctx->cpu)) { 382 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
383 shared = cpus_share_cache(cpu, ctx->cpu);
384
385 if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
338 rq->csd.func = __blk_mq_complete_request_remote; 386 rq->csd.func = __blk_mq_complete_request_remote;
339 rq->csd.info = rq; 387 rq->csd.info = rq;
340 rq->csd.flags = 0; 388 rq->csd.flags = 0;
@@ -355,10 +403,16 @@ void __blk_mq_complete_request(struct request *rq)
355 **/ 403 **/
356void blk_mq_complete_request(struct request *rq) 404void blk_mq_complete_request(struct request *rq)
357{ 405{
358 if (unlikely(blk_should_fake_timeout(rq->q))) 406 struct request_queue *q = rq->q;
407
408 if (unlikely(blk_should_fake_timeout(q)))
359 return; 409 return;
360 if (!blk_mark_rq_complete(rq)) 410 if (!blk_mark_rq_complete(rq)) {
361 __blk_mq_complete_request(rq); 411 if (q->softirq_done_fn)
412 __blk_mq_complete_request(rq);
413 else
414 blk_mq_end_io(rq, rq->errors);
415 }
362} 416}
363EXPORT_SYMBOL(blk_mq_complete_request); 417EXPORT_SYMBOL(blk_mq_complete_request);
364 418
@@ -375,10 +429,22 @@ static void blk_mq_start_request(struct request *rq, bool last)
375 /* 429 /*
376 * Just mark start time and set the started bit. Due to memory 430 * Just mark start time and set the started bit. Due to memory
377 * ordering, we know we'll see the correct deadline as long as 431 * ordering, we know we'll see the correct deadline as long as
 378 * REQ_ATOM_STARTED is seen. 432 * REQ_ATOM_STARTED is seen. Use the default queue timeout,
433 * unless one has been set in the request.
434 */
435 if (!rq->timeout)
436 rq->deadline = jiffies + q->rq_timeout;
437 else
438 rq->deadline = jiffies + rq->timeout;
439
440 /*
441 * Mark us as started and clear complete. Complete might have been
442 * set if requeue raced with timeout, which then marked it as
443 * complete. So be sure to clear complete again when we start
444 * the request, otherwise we'll ignore the completion event.
379 */ 445 */
380 rq->deadline = jiffies + q->rq_timeout;
381 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); 446 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
447 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
382 448
383 if (q->dma_drain_size && blk_rq_bytes(rq)) { 449 if (q->dma_drain_size && blk_rq_bytes(rq)) {
384 /* 450 /*
@@ -415,18 +481,72 @@ static void __blk_mq_requeue_request(struct request *rq)
415 481
416void blk_mq_requeue_request(struct request *rq) 482void blk_mq_requeue_request(struct request *rq)
417{ 483{
418 struct request_queue *q = rq->q;
419
420 __blk_mq_requeue_request(rq); 484 __blk_mq_requeue_request(rq);
421 blk_clear_rq_complete(rq); 485 blk_clear_rq_complete(rq);
422 486
423 trace_block_rq_requeue(q, rq);
424
425 BUG_ON(blk_queued_rq(rq)); 487 BUG_ON(blk_queued_rq(rq));
426 blk_mq_insert_request(rq, true, true, false); 488 blk_mq_add_to_requeue_list(rq, true);
427} 489}
428EXPORT_SYMBOL(blk_mq_requeue_request); 490EXPORT_SYMBOL(blk_mq_requeue_request);
429 491
492static void blk_mq_requeue_work(struct work_struct *work)
493{
494 struct request_queue *q =
495 container_of(work, struct request_queue, requeue_work);
496 LIST_HEAD(rq_list);
497 struct request *rq, *next;
498 unsigned long flags;
499
500 spin_lock_irqsave(&q->requeue_lock, flags);
501 list_splice_init(&q->requeue_list, &rq_list);
502 spin_unlock_irqrestore(&q->requeue_lock, flags);
503
504 list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
505 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
506 continue;
507
508 rq->cmd_flags &= ~REQ_SOFTBARRIER;
509 list_del_init(&rq->queuelist);
510 blk_mq_insert_request(rq, true, false, false);
511 }
512
513 while (!list_empty(&rq_list)) {
514 rq = list_entry(rq_list.next, struct request, queuelist);
515 list_del_init(&rq->queuelist);
516 blk_mq_insert_request(rq, false, false, false);
517 }
518
519 blk_mq_run_queues(q, false);
520}
521
522void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
523{
524 struct request_queue *q = rq->q;
525 unsigned long flags;
526
527 /*
528 * We abuse this flag that is otherwise used by the I/O scheduler to
529 * request head insertation from the workqueue.
530 */
531 BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
532
533 spin_lock_irqsave(&q->requeue_lock, flags);
534 if (at_head) {
535 rq->cmd_flags |= REQ_SOFTBARRIER;
536 list_add(&rq->queuelist, &q->requeue_list);
537 } else {
538 list_add_tail(&rq->queuelist, &q->requeue_list);
539 }
540 spin_unlock_irqrestore(&q->requeue_lock, flags);
541}
542EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
543
544void blk_mq_kick_requeue_list(struct request_queue *q)
545{
546 kblockd_schedule_work(&q->requeue_work);
547}
548EXPORT_SYMBOL(blk_mq_kick_requeue_list);
549
430struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) 550struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
431{ 551{
432 return tags->rqs[tag]; 552 return tags->rqs[tag];
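
The requeue machinery added above gives drivers a two-step interface: blk_mq_requeue_request() takes a request back from the driver and parks it on q->requeue_list (head-inserted and marked with REQ_SOFTBARRIER), and blk_mq_kick_requeue_list() schedules the work item that re-inserts everything and reruns the queues. A hedged sketch of how a driver's completion path might use the pair; the driver function and its -EAGAIN condition are hypothetical, only the exported calls come from this patch, and as kernel-side code it is only buildable inside a kernel tree:

#include <linux/blk-mq.h>

/* Hypothetical driver completion handler: retry transient failures by
 * handing the request back to blk-mq, complete everything else.
 * Only blk_mq_requeue_request(), blk_mq_kick_requeue_list() and
 * blk_mq_end_io() are taken from the code in this patch. */
static void mydrv_complete(struct request *rq, int error)
{
	if (error == -EAGAIN) {                  /* transient: give it back */
		blk_mq_requeue_request(rq);      /* park on q->requeue_list */
		blk_mq_kick_requeue_list(rq->q); /* run the requeue work */
		return;
	}
	blk_mq_end_io(rq, error);                /* terminal completion */
}
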
@@ -485,6 +605,28 @@ static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
485 blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data); 605 blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
486} 606}
487 607
608static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
609{
610 struct request_queue *q = rq->q;
611
612 /*
613 * We know that complete is set at this point. If STARTED isn't set
614 * anymore, then the request isn't active and the "timeout" should
615 * just be ignored. This can happen due to the bitflag ordering.
616 * Timeout first checks if STARTED is set, and if it is, assumes
617 * the request is active. But if we race with completion, then
 618 * both flags will get cleared. So check here again, and ignore
619 * a timeout event with a request that isn't active.
620 */
621 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
622 return BLK_EH_NOT_HANDLED;
623
624 if (!q->mq_ops->timeout)
625 return BLK_EH_RESET_TIMER;
626
627 return q->mq_ops->timeout(rq);
628}
629
488static void blk_mq_rq_timer(unsigned long data) 630static void blk_mq_rq_timer(unsigned long data)
489{ 631{
490 struct request_queue *q = (struct request_queue *) data; 632 struct request_queue *q = (struct request_queue *) data;
@@ -492,11 +634,24 @@ static void blk_mq_rq_timer(unsigned long data)
492 unsigned long next = 0; 634 unsigned long next = 0;
493 int i, next_set = 0; 635 int i, next_set = 0;
494 636
495 queue_for_each_hw_ctx(q, hctx, i) 637 queue_for_each_hw_ctx(q, hctx, i) {
638 /*
 639 * If no software queues are currently mapped to this
640 * hardware queue, there's nothing to check
641 */
642 if (!hctx->nr_ctx || !hctx->tags)
643 continue;
644
496 blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set); 645 blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
646 }
497 647
498 if (next_set) 648 if (next_set) {
499 mod_timer(&q->timeout, round_jiffies_up(next)); 649 next = blk_rq_timeout(round_jiffies_up(next));
650 mod_timer(&q->timeout, next);
651 } else {
652 queue_for_each_hw_ctx(q, hctx, i)
653 blk_mq_tag_idle(hctx);
654 }
500} 655}
501 656
502/* 657/*
@@ -538,9 +693,38 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
538 return false; 693 return false;
539} 694}
540 695
541void blk_mq_add_timer(struct request *rq) 696/*
697 * Process software queues that have been marked busy, splicing them
 698 * to the for-dispatch list.
699 */
700static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
542{ 701{
543 __blk_add_timer(rq, NULL); 702 struct blk_mq_ctx *ctx;
703 int i;
704
705 for (i = 0; i < hctx->ctx_map.map_size; i++) {
706 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
707 unsigned int off, bit;
708
709 if (!bm->word)
710 continue;
711
712 bit = 0;
713 off = i * hctx->ctx_map.bits_per_word;
714 do {
715 bit = find_next_bit(&bm->word, bm->depth, bit);
716 if (bit >= bm->depth)
717 break;
718
719 ctx = hctx->ctxs[bit + off];
720 clear_bit(bit, &bm->word);
721 spin_lock(&ctx->lock);
722 list_splice_tail_init(&ctx->rq_list, list);
723 spin_unlock(&ctx->lock);
724
725 bit++;
726 } while (1);
727 }
544} 728}
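
flush_busy_ctxs() above walks each ctx_map word with find_next_bit(), clears every set bit, and splices the matching software queue's rq_list onto the dispatch list. A standalone sketch of just the bit-walking part, with __builtin_ctzl() standing in for find_next_bit() and made-up word contents:

#include <stdio.h>

#define BITS_PER_WORD 8U

/* Walk a sparse map the way flush_busy_ctxs() does: per word, find each
 * set bit, translate it back to a software-queue index, and clear it. */
static void flush_busy(unsigned long *map, unsigned int map_nr)
{
	for (unsigned int i = 0; i < map_nr; i++) {
		unsigned int off = i * BITS_PER_WORD;

		while (map[i]) {
			unsigned int bit = __builtin_ctzl(map[i]); /* lowest set bit */

			printf("flush software queue %u\n", off + bit);
			map[i] &= ~(1UL << bit);                   /* clear_bit() */
		}
	}
}

int main(void)
{
	unsigned long map[3] = { 0x05, 0x00, 0x81 };   /* example pending bits */

	flush_busy(map, 3);   /* prints queues 0, 2, 16 and 23 */
	return 0;
}
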
545 729
546/* 730/*
@@ -552,10 +736,9 @@ void blk_mq_add_timer(struct request *rq)
552static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) 736static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
553{ 737{
554 struct request_queue *q = hctx->queue; 738 struct request_queue *q = hctx->queue;
555 struct blk_mq_ctx *ctx;
556 struct request *rq; 739 struct request *rq;
557 LIST_HEAD(rq_list); 740 LIST_HEAD(rq_list);
558 int bit, queued; 741 int queued;
559 742
560 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)); 743 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
561 744
@@ -567,15 +750,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
567 /* 750 /*
568 * Touch any software queue that has pending entries. 751 * Touch any software queue that has pending entries.
569 */ 752 */
570 for_each_set_bit(bit, hctx->ctx_map, hctx->nr_ctx) { 753 flush_busy_ctxs(hctx, &rq_list);
571 clear_bit(bit, hctx->ctx_map);
572 ctx = hctx->ctxs[bit];
573 BUG_ON(bit != ctx->index_hw);
574
575 spin_lock(&ctx->lock);
576 list_splice_tail_init(&ctx->rq_list, &rq_list);
577 spin_unlock(&ctx->lock);
578 }
579 754
580 /* 755 /*
581 * If we have previous entries on our dispatch list, grab them 756 * If we have previous entries on our dispatch list, grab them
@@ -589,13 +764,9 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
589 } 764 }
590 765
591 /* 766 /*
592 * Delete and return all entries from our dispatch list
593 */
594 queued = 0;
595
596 /*
597 * Now process all the entries, sending them to the driver. 767 * Now process all the entries, sending them to the driver.
598 */ 768 */
769 queued = 0;
599 while (!list_empty(&rq_list)) { 770 while (!list_empty(&rq_list)) {
600 int ret; 771 int ret;
601 772
@@ -610,11 +781,6 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
610 queued++; 781 queued++;
611 continue; 782 continue;
612 case BLK_MQ_RQ_QUEUE_BUSY: 783 case BLK_MQ_RQ_QUEUE_BUSY:
613 /*
614 * FIXME: we should have a mechanism to stop the queue
615 * like blk_stop_queue, otherwise we will waste cpu
616 * time
617 */
618 list_add(&rq->queuelist, &rq_list); 784 list_add(&rq->queuelist, &rq_list);
619 __blk_mq_requeue_request(rq); 785 __blk_mq_requeue_request(rq);
620 break; 786 break;
@@ -646,6 +812,30 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
646 } 812 }
647} 813}
648 814
815/*
816 * It'd be great if the workqueue API had a way to pass
817 * in a mask and had some smarts for more clever placement.
 818 * For now we just round-robin here, switching after every
819 * BLK_MQ_CPU_WORK_BATCH queued items.
820 */
821static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
822{
823 int cpu = hctx->next_cpu;
824
825 if (--hctx->next_cpu_batch <= 0) {
826 int next_cpu;
827
828 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
829 if (next_cpu >= nr_cpu_ids)
830 next_cpu = cpumask_first(hctx->cpumask);
831
832 hctx->next_cpu = next_cpu;
833 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
834 }
835
836 return cpu;
837}
838
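
blk_mq_hctx_next_cpu() above always hands out the current next_cpu and only steps to the next CPU in the hardware queue's mask once next_cpu_batch selections have been consumed. A standalone sketch of the same batched round-robin over a plain array of CPU ids; the batch size of 8 is an assumed stand-in for BLK_MQ_CPU_WORK_BATCH, whose value is defined elsewhere:

#include <stdio.h>

#define WORK_BATCH 8   /* stand-in for BLK_MQ_CPU_WORK_BATCH (assumed value) */

struct hctx_sketch {
	const int *cpus;     /* CPUs mapped to this hardware queue */
	int nr_cpus;
	int next_idx;        /* index of hctx->next_cpu in cpus[] */
	int next_batch;      /* hctx->next_cpu_batch */
};

/* Same shape as blk_mq_hctx_next_cpu(): hand out the current CPU, and
 * only move to the next one after WORK_BATCH selections. */
static int next_cpu(struct hctx_sketch *h)
{
	int cpu = h->cpus[h->next_idx];

	if (--h->next_batch <= 0) {
		h->next_idx = (h->next_idx + 1) % h->nr_cpus;  /* wrap like cpumask_first() */
		h->next_batch = WORK_BATCH;
	}
	return cpu;
}

int main(void)
{
	static const int cpus[] = { 0, 2, 4 };
	struct hctx_sketch h = { cpus, 3, 0, WORK_BATCH };

	for (int i = 0; i < 20; i++)
		printf("%d ", next_cpu(&h));   /* eight 0s, eight 2s, then 4s */
	printf("\n");
	return 0;
}
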
649void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 839void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
650{ 840{
651 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) 841 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
@@ -658,13 +848,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
658 else { 848 else {
659 unsigned int cpu; 849 unsigned int cpu;
660 850
661 /* 851 cpu = blk_mq_hctx_next_cpu(hctx);
662 * It'd be great if the workqueue API had a way to pass
663 * in a mask and had some smarts for more clever placement
664 * than the first CPU. Or we could round-robin here. For now,
665 * just queue on the first CPU.
666 */
667 cpu = cpumask_first(hctx->cpumask);
668 kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0); 852 kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
669 } 853 }
670} 854}
@@ -771,13 +955,7 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
771 else { 955 else {
772 unsigned int cpu; 956 unsigned int cpu;
773 957
774 /* 958 cpu = blk_mq_hctx_next_cpu(hctx);
775 * It'd be great if the workqueue API had a way to pass
776 * in a mask and had some smarts for more clever placement
777 * than the first CPU. Or we could round-robin here. For now,
778 * just queue on the first CPU.
779 */
780 cpu = cpumask_first(hctx->cpumask);
781 kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo); 959 kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
782 } 960 }
783} 961}
@@ -794,12 +972,13 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
794 list_add(&rq->queuelist, &ctx->rq_list); 972 list_add(&rq->queuelist, &ctx->rq_list);
795 else 973 else
796 list_add_tail(&rq->queuelist, &ctx->rq_list); 974 list_add_tail(&rq->queuelist, &ctx->rq_list);
975
797 blk_mq_hctx_mark_pending(hctx, ctx); 976 blk_mq_hctx_mark_pending(hctx, ctx);
798 977
799 /* 978 /*
800 * We do this early, to ensure we are on the right CPU. 979 * We do this early, to ensure we are on the right CPU.
801 */ 980 */
802 blk_mq_add_timer(rq); 981 blk_add_timer(rq);
803} 982}
804 983
805void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, 984void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
@@ -930,21 +1109,161 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
930 blk_account_io_start(rq, 1); 1109 blk_account_io_start(rq, 1);
931} 1110}
932 1111
933static void blk_mq_make_request(struct request_queue *q, struct bio *bio) 1112static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1113 struct blk_mq_ctx *ctx,
1114 struct request *rq, struct bio *bio)
1115{
1116 struct request_queue *q = hctx->queue;
1117
1118 if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
1119 blk_mq_bio_to_request(rq, bio);
1120 spin_lock(&ctx->lock);
1121insert_rq:
1122 __blk_mq_insert_request(hctx, rq, false);
1123 spin_unlock(&ctx->lock);
1124 return false;
1125 } else {
1126 spin_lock(&ctx->lock);
1127 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1128 blk_mq_bio_to_request(rq, bio);
1129 goto insert_rq;
1130 }
1131
1132 spin_unlock(&ctx->lock);
1133 __blk_mq_free_request(hctx, ctx, rq);
1134 return true;
1135 }
1136}
1137
1138struct blk_map_ctx {
1139 struct blk_mq_hw_ctx *hctx;
1140 struct blk_mq_ctx *ctx;
1141};
1142
1143static struct request *blk_mq_map_request(struct request_queue *q,
1144 struct bio *bio,
1145 struct blk_map_ctx *data)
934{ 1146{
935 struct blk_mq_hw_ctx *hctx; 1147 struct blk_mq_hw_ctx *hctx;
936 struct blk_mq_ctx *ctx; 1148 struct blk_mq_ctx *ctx;
1149 struct request *rq;
1150 int rw = bio_data_dir(bio);
1151
1152 if (unlikely(blk_mq_queue_enter(q))) {
1153 bio_endio(bio, -EIO);
1154 return NULL;
1155 }
1156
1157 ctx = blk_mq_get_ctx(q);
1158 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1159
1160 if (rw_is_sync(bio->bi_rw))
1161 rw |= REQ_SYNC;
1162
1163 trace_block_getrq(q, bio, rw);
1164 rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
1165 if (unlikely(!rq)) {
1166 __blk_mq_run_hw_queue(hctx);
1167 blk_mq_put_ctx(ctx);
1168 trace_block_sleeprq(q, bio, rw);
1169
1170 ctx = blk_mq_get_ctx(q);
1171 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1172 rq = __blk_mq_alloc_request(q, hctx, ctx, rw,
1173 __GFP_WAIT|GFP_ATOMIC, false);
1174 }
1175
1176 hctx->queued++;
1177 data->hctx = hctx;
1178 data->ctx = ctx;
1179 return rq;
1180}
1181
1182/*
1183 * Multiple hardware queue variant. This will not use per-process plugs,
1184 * but will attempt to bypass the hctx queueing if we can go straight to
1185 * hardware for SYNC IO.
1186 */
1187static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1188{
937 const int is_sync = rw_is_sync(bio->bi_rw); 1189 const int is_sync = rw_is_sync(bio->bi_rw);
938 const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); 1190 const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
939 int rw = bio_data_dir(bio); 1191 struct blk_map_ctx data;
940 struct request *rq; 1192 struct request *rq;
1193
1194 blk_queue_bounce(q, &bio);
1195
1196 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1197 bio_endio(bio, -EIO);
1198 return;
1199 }
1200
1201 rq = blk_mq_map_request(q, bio, &data);
1202 if (unlikely(!rq))
1203 return;
1204
1205 if (unlikely(is_flush_fua)) {
1206 blk_mq_bio_to_request(rq, bio);
1207 blk_insert_flush(rq);
1208 goto run_queue;
1209 }
1210
1211 if (is_sync) {
1212 int ret;
1213
1214 blk_mq_bio_to_request(rq, bio);
1215 blk_mq_start_request(rq, true);
1216
1217 /*
1218 * For OK queue, we are done. For error, kill it. Any other
1219 * error (busy), just add it to our list as we previously
1220 * would have done
1221 */
1222 ret = q->mq_ops->queue_rq(data.hctx, rq);
1223 if (ret == BLK_MQ_RQ_QUEUE_OK)
1224 goto done;
1225 else {
1226 __blk_mq_requeue_request(rq);
1227
1228 if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1229 rq->errors = -EIO;
1230 blk_mq_end_io(rq, rq->errors);
1231 goto done;
1232 }
1233 }
1234 }
1235
1236 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1237 /*
1238 * For a SYNC request, send it to the hardware immediately. For
1239 * an ASYNC request, just ensure that we run it later on. The
1240 * latter allows for merging opportunities and more efficient
1241 * dispatching.
1242 */
1243run_queue:
1244 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1245 }
1246done:
1247 blk_mq_put_ctx(data.ctx);
1248}
1249
1250/*
1251 * Single hardware queue variant. This will attempt to use any per-process
1252 * plug for merging and IO deferral.
1253 */
1254static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1255{
1256 const int is_sync = rw_is_sync(bio->bi_rw);
1257 const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
941 unsigned int use_plug, request_count = 0; 1258 unsigned int use_plug, request_count = 0;
1259 struct blk_map_ctx data;
1260 struct request *rq;
942 1261
943 /* 1262 /*
944 * If we have multiple hardware queues, just go directly to 1263 * If we have multiple hardware queues, just go directly to
945 * one of those for sync IO. 1264 * one of those for sync IO.
946 */ 1265 */
947 use_plug = !is_flush_fua && ((q->nr_hw_queues == 1) || !is_sync); 1266 use_plug = !is_flush_fua && !is_sync;
948 1267
949 blk_queue_bounce(q, &bio); 1268 blk_queue_bounce(q, &bio);
950 1269
@@ -953,33 +1272,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
953 return; 1272 return;
954 } 1273 }
955 1274
956 if (use_plug && blk_attempt_plug_merge(q, bio, &request_count)) 1275 if (use_plug && !blk_queue_nomerges(q) &&
1276 blk_attempt_plug_merge(q, bio, &request_count))
957 return; 1277 return;
958 1278
959 if (blk_mq_queue_enter(q)) { 1279 rq = blk_mq_map_request(q, bio, &data);
960 bio_endio(bio, -EIO);
961 return;
962 }
963
964 ctx = blk_mq_get_ctx(q);
965 hctx = q->mq_ops->map_queue(q, ctx->cpu);
966
967 if (is_sync)
968 rw |= REQ_SYNC;
969 trace_block_getrq(q, bio, rw);
970 rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
971 if (likely(rq))
972 blk_mq_rq_ctx_init(q, ctx, rq, rw);
973 else {
974 blk_mq_put_ctx(ctx);
975 trace_block_sleeprq(q, bio, rw);
976 rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
977 false);
978 ctx = rq->mq_ctx;
979 hctx = q->mq_ops->map_queue(q, ctx->cpu);
980 }
981
982 hctx->queued++;
983 1280
984 if (unlikely(is_flush_fua)) { 1281 if (unlikely(is_flush_fua)) {
985 blk_mq_bio_to_request(rq, bio); 1282 blk_mq_bio_to_request(rq, bio);
@@ -1004,31 +1301,23 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1004 trace_block_plug(q); 1301 trace_block_plug(q);
1005 } 1302 }
1006 list_add_tail(&rq->queuelist, &plug->mq_list); 1303 list_add_tail(&rq->queuelist, &plug->mq_list);
1007 blk_mq_put_ctx(ctx); 1304 blk_mq_put_ctx(data.ctx);
1008 return; 1305 return;
1009 } 1306 }
1010 } 1307 }
1011 1308
1012 spin_lock(&ctx->lock); 1309 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1013 1310 /*
1014 if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) && 1311 * For a SYNC request, send it to the hardware immediately. For
1015 blk_mq_attempt_merge(q, ctx, bio)) 1312 * an ASYNC request, just ensure that we run it later on. The
1016 __blk_mq_free_request(hctx, ctx, rq); 1313 * latter allows for merging opportunities and more efficient
1017 else { 1314 * dispatching.
1018 blk_mq_bio_to_request(rq, bio); 1315 */
1019 __blk_mq_insert_request(hctx, rq, false); 1316run_queue:
1317 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1020 } 1318 }
1021 1319
1022 spin_unlock(&ctx->lock); 1320 blk_mq_put_ctx(data.ctx);
1023
1024 /*
1025 * For a SYNC request, send it to the hardware immediately. For an
1026 * ASYNC request, just ensure that we run it later on. The latter
1027 * allows for merging opportunities and more efficient dispatching.
1028 */
1029run_queue:
1030 blk_mq_run_hw_queue(hctx, !is_sync || is_flush_fua);
1031 blk_mq_put_ctx(ctx);
1032} 1321}
1033 1322
1034/* 1323/*
@@ -1041,10 +1330,10 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1041EXPORT_SYMBOL(blk_mq_map_queue); 1330EXPORT_SYMBOL(blk_mq_map_queue);
1042 1331
1043struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set, 1332struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
1044 unsigned int hctx_index) 1333 unsigned int hctx_index,
1334 int node)
1045{ 1335{
1046 return kmalloc_node(sizeof(struct blk_mq_hw_ctx), 1336 return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
1047 GFP_KERNEL | __GFP_ZERO, set->numa_node);
1048} 1337}
1049EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue); 1338EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
1050 1339
@@ -1055,52 +1344,6 @@ void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
1055} 1344}
1056EXPORT_SYMBOL(blk_mq_free_single_hw_queue); 1345EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
1057 1346
1058static void blk_mq_hctx_notify(void *data, unsigned long action,
1059 unsigned int cpu)
1060{
1061 struct blk_mq_hw_ctx *hctx = data;
1062 struct request_queue *q = hctx->queue;
1063 struct blk_mq_ctx *ctx;
1064 LIST_HEAD(tmp);
1065
1066 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1067 return;
1068
1069 /*
1070 * Move ctx entries to new CPU, if this one is going away.
1071 */
1072 ctx = __blk_mq_get_ctx(q, cpu);
1073
1074 spin_lock(&ctx->lock);
1075 if (!list_empty(&ctx->rq_list)) {
1076 list_splice_init(&ctx->rq_list, &tmp);
1077 clear_bit(ctx->index_hw, hctx->ctx_map);
1078 }
1079 spin_unlock(&ctx->lock);
1080
1081 if (list_empty(&tmp))
1082 return;
1083
1084 ctx = blk_mq_get_ctx(q);
1085 spin_lock(&ctx->lock);
1086
1087 while (!list_empty(&tmp)) {
1088 struct request *rq;
1089
1090 rq = list_first_entry(&tmp, struct request, queuelist);
1091 rq->mq_ctx = ctx;
1092 list_move_tail(&rq->queuelist, &ctx->rq_list);
1093 }
1094
1095 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1096 blk_mq_hctx_mark_pending(hctx, ctx);
1097
1098 spin_unlock(&ctx->lock);
1099
1100 blk_mq_run_hw_queue(hctx, true);
1101 blk_mq_put_ctx(ctx);
1102}
1103
1104static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, 1347static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1105 struct blk_mq_tags *tags, unsigned int hctx_idx) 1348 struct blk_mq_tags *tags, unsigned int hctx_idx)
1106{ 1349{
@@ -1130,12 +1373,7 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1130 1373
1131static size_t order_to_size(unsigned int order) 1374static size_t order_to_size(unsigned int order)
1132{ 1375{
1133 size_t ret = PAGE_SIZE; 1376 return (size_t)PAGE_SIZE << order;
1134
1135 while (order--)
1136 ret *= 2;
1137
1138 return ret;
1139} 1377}
1140 1378
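
The rewritten order_to_size() above replaces the doubling loop with a single shift; both forms compute PAGE_SIZE * 2^order. A quick standalone check of the equivalence, with a PAGE_SIZE of 4096 assumed for the demo:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL   /* assumed page size for the demo */

static size_t order_to_size_loop(unsigned int order)
{
	size_t ret = PAGE_SIZE;      /* old implementation: repeated doubling */

	while (order--)
		ret *= 2;
	return ret;
}

static size_t order_to_size_shift(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;   /* new implementation: one shift */
}

int main(void)
{
	for (unsigned int order = 0; order <= 4; order++)
		printf("order %u: loop=%zu shift=%zu\n", order,
		       order_to_size_loop(order), order_to_size_shift(order));
	return 0;
}
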
1141static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, 1379static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
@@ -1219,17 +1457,147 @@ fail:
1219 return NULL; 1457 return NULL;
1220} 1458}
1221 1459
1460static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1461{
1462 kfree(bitmap->map);
1463}
1464
1465static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1466{
1467 unsigned int bpw = 8, total, num_maps, i;
1468
1469 bitmap->bits_per_word = bpw;
1470
1471 num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1472 bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1473 GFP_KERNEL, node);
1474 if (!bitmap->map)
1475 return -ENOMEM;
1476
1477 bitmap->map_size = num_maps;
1478
1479 total = nr_cpu_ids;
1480 for (i = 0; i < num_maps; i++) {
1481 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1482 total -= bitmap->map[i].depth;
1483 }
1484
1485 return 0;
1486}
1487
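
blk_mq_alloc_bitmap() above rounds nr_cpu_ids up to a multiple of bits_per_word (8), allocates that many map words, and gives each a depth of min(total, bits_per_word) so the final word only covers the leftover CPUs. A standalone sketch of that depth distribution for a hypothetical 20-CPU machine:

#include <stdio.h>

#define BPW 8U   /* bits_per_word chosen by blk_mq_alloc_bitmap() */

/* Round up to a multiple of BPW, as ALIGN(nr_cpu_ids, bpw) / bpw does. */
static unsigned int num_maps(unsigned int nr_cpu_ids)
{
	return (nr_cpu_ids + BPW - 1) / BPW;
}

int main(void)
{
	unsigned int nr_cpu_ids = 20;   /* hypothetical CPU count */
	unsigned int maps = num_maps(nr_cpu_ids);
	unsigned int total = nr_cpu_ids;

	for (unsigned int i = 0; i < maps; i++) {
		unsigned int depth = total < BPW ? total : BPW;  /* min(total, bpw) */

		printf("map[%u].depth = %u\n", i, depth);        /* 8, 8, 4 */
		total -= depth;
	}
	return 0;
}
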
1488static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1489{
1490 struct request_queue *q = hctx->queue;
1491 struct blk_mq_ctx *ctx;
1492 LIST_HEAD(tmp);
1493
1494 /*
1495 * Move ctx entries to new CPU, if this one is going away.
1496 */
1497 ctx = __blk_mq_get_ctx(q, cpu);
1498
1499 spin_lock(&ctx->lock);
1500 if (!list_empty(&ctx->rq_list)) {
1501 list_splice_init(&ctx->rq_list, &tmp);
1502 blk_mq_hctx_clear_pending(hctx, ctx);
1503 }
1504 spin_unlock(&ctx->lock);
1505
1506 if (list_empty(&tmp))
1507 return NOTIFY_OK;
1508
1509 ctx = blk_mq_get_ctx(q);
1510 spin_lock(&ctx->lock);
1511
1512 while (!list_empty(&tmp)) {
1513 struct request *rq;
1514
1515 rq = list_first_entry(&tmp, struct request, queuelist);
1516 rq->mq_ctx = ctx;
1517 list_move_tail(&rq->queuelist, &ctx->rq_list);
1518 }
1519
1520 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1521 blk_mq_hctx_mark_pending(hctx, ctx);
1522
1523 spin_unlock(&ctx->lock);
1524
1525 blk_mq_run_hw_queue(hctx, true);
1526 blk_mq_put_ctx(ctx);
1527 return NOTIFY_OK;
1528}
1529
1530static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1531{
1532 struct request_queue *q = hctx->queue;
1533 struct blk_mq_tag_set *set = q->tag_set;
1534
1535 if (set->tags[hctx->queue_num])
1536 return NOTIFY_OK;
1537
1538 set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1539 if (!set->tags[hctx->queue_num])
1540 return NOTIFY_STOP;
1541
1542 hctx->tags = set->tags[hctx->queue_num];
1543 return NOTIFY_OK;
1544}
1545
1546static int blk_mq_hctx_notify(void *data, unsigned long action,
1547 unsigned int cpu)
1548{
1549 struct blk_mq_hw_ctx *hctx = data;
1550
1551 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1552 return blk_mq_hctx_cpu_offline(hctx, cpu);
1553 else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1554 return blk_mq_hctx_cpu_online(hctx, cpu);
1555
1556 return NOTIFY_OK;
1557}
1558
1559static void blk_mq_exit_hw_queues(struct request_queue *q,
1560 struct blk_mq_tag_set *set, int nr_queue)
1561{
1562 struct blk_mq_hw_ctx *hctx;
1563 unsigned int i;
1564
1565 queue_for_each_hw_ctx(q, hctx, i) {
1566 if (i == nr_queue)
1567 break;
1568
1569 if (set->ops->exit_hctx)
1570 set->ops->exit_hctx(hctx, i);
1571
1572 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1573 kfree(hctx->ctxs);
1574 blk_mq_free_bitmap(&hctx->ctx_map);
1575 }
1576
1577}
1578
1579static void blk_mq_free_hw_queues(struct request_queue *q,
1580 struct blk_mq_tag_set *set)
1581{
1582 struct blk_mq_hw_ctx *hctx;
1583 unsigned int i;
1584
1585 queue_for_each_hw_ctx(q, hctx, i) {
1586 free_cpumask_var(hctx->cpumask);
1587 set->ops->free_hctx(hctx, i);
1588 }
1589}
1590
1222static int blk_mq_init_hw_queues(struct request_queue *q, 1591static int blk_mq_init_hw_queues(struct request_queue *q,
1223 struct blk_mq_tag_set *set) 1592 struct blk_mq_tag_set *set)
1224{ 1593{
1225 struct blk_mq_hw_ctx *hctx; 1594 struct blk_mq_hw_ctx *hctx;
1226 unsigned int i, j; 1595 unsigned int i;
1227 1596
1228 /* 1597 /*
1229 * Initialize hardware queues 1598 * Initialize hardware queues
1230 */ 1599 */
1231 queue_for_each_hw_ctx(q, hctx, i) { 1600 queue_for_each_hw_ctx(q, hctx, i) {
1232 unsigned int num_maps;
1233 int node; 1601 int node;
1234 1602
1235 node = hctx->numa_node; 1603 node = hctx->numa_node;
@@ -1260,13 +1628,9 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
1260 if (!hctx->ctxs) 1628 if (!hctx->ctxs)
1261 break; 1629 break;
1262 1630
1263 num_maps = ALIGN(nr_cpu_ids, BITS_PER_LONG) / BITS_PER_LONG; 1631 if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1264 hctx->ctx_map = kzalloc_node(num_maps * sizeof(unsigned long),
1265 GFP_KERNEL, node);
1266 if (!hctx->ctx_map)
1267 break; 1632 break;
1268 1633
1269 hctx->nr_ctx_map = num_maps;
1270 hctx->nr_ctx = 0; 1634 hctx->nr_ctx = 0;
1271 1635
1272 if (set->ops->init_hctx && 1636 if (set->ops->init_hctx &&
@@ -1280,16 +1644,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
1280 /* 1644 /*
1281 * Init failed 1645 * Init failed
1282 */ 1646 */
1283 queue_for_each_hw_ctx(q, hctx, j) { 1647 blk_mq_exit_hw_queues(q, set, i);
1284 if (i == j)
1285 break;
1286
1287 if (set->ops->exit_hctx)
1288 set->ops->exit_hctx(hctx, j);
1289
1290 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1291 kfree(hctx->ctxs);
1292 }
1293 1648
1294 return 1; 1649 return 1;
1295} 1650}
@@ -1350,6 +1705,79 @@ static void blk_mq_map_swqueue(struct request_queue *q)
1350 ctx->index_hw = hctx->nr_ctx; 1705 ctx->index_hw = hctx->nr_ctx;
1351 hctx->ctxs[hctx->nr_ctx++] = ctx; 1706 hctx->ctxs[hctx->nr_ctx++] = ctx;
1352 } 1707 }
1708
1709 queue_for_each_hw_ctx(q, hctx, i) {
1710 /*
 1711 * If no software queues are mapped to this hardware queue,
1712 * disable it and free the request entries
1713 */
1714 if (!hctx->nr_ctx) {
1715 struct blk_mq_tag_set *set = q->tag_set;
1716
1717 if (set->tags[i]) {
1718 blk_mq_free_rq_map(set, set->tags[i], i);
1719 set->tags[i] = NULL;
1720 hctx->tags = NULL;
1721 }
1722 continue;
1723 }
1724
1725 /*
1726 * Initialize batch roundrobin counts
1727 */
1728 hctx->next_cpu = cpumask_first(hctx->cpumask);
1729 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1730 }
1731}
1732
1733static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1734{
1735 struct blk_mq_hw_ctx *hctx;
1736 struct request_queue *q;
1737 bool shared;
1738 int i;
1739
1740 if (set->tag_list.next == set->tag_list.prev)
1741 shared = false;
1742 else
1743 shared = true;
1744
1745 list_for_each_entry(q, &set->tag_list, tag_set_list) {
1746 blk_mq_freeze_queue(q);
1747
1748 queue_for_each_hw_ctx(q, hctx, i) {
1749 if (shared)
1750 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1751 else
1752 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1753 }
1754 blk_mq_unfreeze_queue(q);
1755 }
1756}
1757
1758static void blk_mq_del_queue_tag_set(struct request_queue *q)
1759{
1760 struct blk_mq_tag_set *set = q->tag_set;
1761
1762 blk_mq_freeze_queue(q);
1763
1764 mutex_lock(&set->tag_list_lock);
1765 list_del_init(&q->tag_set_list);
1766 blk_mq_update_tag_set_depth(set);
1767 mutex_unlock(&set->tag_list_lock);
1768
1769 blk_mq_unfreeze_queue(q);
1770}
1771
1772static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1773 struct request_queue *q)
1774{
1775 q->tag_set = set;
1776
1777 mutex_lock(&set->tag_list_lock);
1778 list_add_tail(&q->tag_set_list, &set->tag_list);
1779 blk_mq_update_tag_set_depth(set);
1780 mutex_unlock(&set->tag_list_lock);
1353} 1781}
1354 1782
1355struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) 1783struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
@@ -1357,6 +1785,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1357 struct blk_mq_hw_ctx **hctxs; 1785 struct blk_mq_hw_ctx **hctxs;
1358 struct blk_mq_ctx *ctx; 1786 struct blk_mq_ctx *ctx;
1359 struct request_queue *q; 1787 struct request_queue *q;
1788 unsigned int *map;
1360 int i; 1789 int i;
1361 1790
1362 ctx = alloc_percpu(struct blk_mq_ctx); 1791 ctx = alloc_percpu(struct blk_mq_ctx);
@@ -1369,15 +1798,22 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1369 if (!hctxs) 1798 if (!hctxs)
1370 goto err_percpu; 1799 goto err_percpu;
1371 1800
1801 map = blk_mq_make_queue_map(set);
1802 if (!map)
1803 goto err_map;
1804
1372 for (i = 0; i < set->nr_hw_queues; i++) { 1805 for (i = 0; i < set->nr_hw_queues; i++) {
1373 hctxs[i] = set->ops->alloc_hctx(set, i); 1806 int node = blk_mq_hw_queue_to_node(map, i);
1807
1808 hctxs[i] = set->ops->alloc_hctx(set, i, node);
1374 if (!hctxs[i]) 1809 if (!hctxs[i])
1375 goto err_hctxs; 1810 goto err_hctxs;
1376 1811
1377 if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL)) 1812 if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
1378 goto err_hctxs; 1813 goto err_hctxs;
1379 1814
1380 hctxs[i]->numa_node = NUMA_NO_NODE; 1815 atomic_set(&hctxs[i]->nr_active, 0);
1816 hctxs[i]->numa_node = node;
1381 hctxs[i]->queue_num = i; 1817 hctxs[i]->queue_num = i;
1382 } 1818 }
1383 1819
@@ -1385,8 +1821,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1385 if (!q) 1821 if (!q)
1386 goto err_hctxs; 1822 goto err_hctxs;
1387 1823
1388 q->mq_map = blk_mq_make_queue_map(set); 1824 if (percpu_counter_init(&q->mq_usage_counter, 0))
1389 if (!q->mq_map)
1390 goto err_map; 1825 goto err_map;
1391 1826
1392 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); 1827 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
@@ -1394,6 +1829,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1394 1829
1395 q->nr_queues = nr_cpu_ids; 1830 q->nr_queues = nr_cpu_ids;
1396 q->nr_hw_queues = set->nr_hw_queues; 1831 q->nr_hw_queues = set->nr_hw_queues;
1832 q->mq_map = map;
1397 1833
1398 q->queue_ctx = ctx; 1834 q->queue_ctx = ctx;
1399 q->queue_hw_ctx = hctxs; 1835 q->queue_hw_ctx = hctxs;
@@ -1403,11 +1839,24 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1403 1839
1404 q->sg_reserved_size = INT_MAX; 1840 q->sg_reserved_size = INT_MAX;
1405 1841
1406 blk_queue_make_request(q, blk_mq_make_request); 1842 INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1407 blk_queue_rq_timed_out(q, set->ops->timeout); 1843 INIT_LIST_HEAD(&q->requeue_list);
1844 spin_lock_init(&q->requeue_lock);
1845
1846 if (q->nr_hw_queues > 1)
1847 blk_queue_make_request(q, blk_mq_make_request);
1848 else
1849 blk_queue_make_request(q, blk_sq_make_request);
1850
1851 blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
1408 if (set->timeout) 1852 if (set->timeout)
1409 blk_queue_rq_timeout(q, set->timeout); 1853 blk_queue_rq_timeout(q, set->timeout);
1410 1854
1855 /*
1856 * Do this after blk_queue_make_request() overrides it...
1857 */
1858 q->nr_requests = set->queue_depth;
1859
1411 if (set->ops->complete) 1860 if (set->ops->complete)
1412 blk_queue_softirq_done(q, set->ops->complete); 1861 blk_queue_softirq_done(q, set->ops->complete);
1413 1862
@@ -1423,27 +1872,29 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1423 if (blk_mq_init_hw_queues(q, set)) 1872 if (blk_mq_init_hw_queues(q, set))
1424 goto err_flush_rq; 1873 goto err_flush_rq;
1425 1874
1426 blk_mq_map_swqueue(q);
1427
1428 mutex_lock(&all_q_mutex); 1875 mutex_lock(&all_q_mutex);
1429 list_add_tail(&q->all_q_node, &all_q_list); 1876 list_add_tail(&q->all_q_node, &all_q_list);
1430 mutex_unlock(&all_q_mutex); 1877 mutex_unlock(&all_q_mutex);
1431 1878
1879 blk_mq_add_queue_tag_set(set, q);
1880
1881 blk_mq_map_swqueue(q);
1882
1432 return q; 1883 return q;
1433 1884
1434err_flush_rq: 1885err_flush_rq:
1435 kfree(q->flush_rq); 1886 kfree(q->flush_rq);
1436err_hw: 1887err_hw:
1437 kfree(q->mq_map);
1438err_map:
1439 blk_cleanup_queue(q); 1888 blk_cleanup_queue(q);
1440err_hctxs: 1889err_hctxs:
1890 kfree(map);
1441 for (i = 0; i < set->nr_hw_queues; i++) { 1891 for (i = 0; i < set->nr_hw_queues; i++) {
1442 if (!hctxs[i]) 1892 if (!hctxs[i])
1443 break; 1893 break;
1444 free_cpumask_var(hctxs[i]->cpumask); 1894 free_cpumask_var(hctxs[i]->cpumask);
1445 set->ops->free_hctx(hctxs[i], i); 1895 set->ops->free_hctx(hctxs[i], i);
1446 } 1896 }
1897err_map:
1447 kfree(hctxs); 1898 kfree(hctxs);
1448err_percpu: 1899err_percpu:
1449 free_percpu(ctx); 1900 free_percpu(ctx);
@@ -1453,18 +1904,14 @@ EXPORT_SYMBOL(blk_mq_init_queue);
1453 1904
1454void blk_mq_free_queue(struct request_queue *q) 1905void blk_mq_free_queue(struct request_queue *q)
1455{ 1906{
1456 struct blk_mq_hw_ctx *hctx; 1907 struct blk_mq_tag_set *set = q->tag_set;
1457 int i;
1458 1908
1459 queue_for_each_hw_ctx(q, hctx, i) { 1909 blk_mq_del_queue_tag_set(q);
1460 kfree(hctx->ctx_map); 1910
1461 kfree(hctx->ctxs); 1911 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1462 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); 1912 blk_mq_free_hw_queues(q, set);
1463 if (q->mq_ops->exit_hctx) 1913
1464 q->mq_ops->exit_hctx(hctx, i); 1914 percpu_counter_destroy(&q->mq_usage_counter);
1465 free_cpumask_var(hctx->cpumask);
1466 q->mq_ops->free_hctx(hctx, i);
1467 }
1468 1915
1469 free_percpu(q->queue_ctx); 1916 free_percpu(q->queue_ctx);
1470 kfree(q->queue_hw_ctx); 1917 kfree(q->queue_hw_ctx);
@@ -1503,10 +1950,10 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1503 struct request_queue *q; 1950 struct request_queue *q;
1504 1951
1505 /* 1952 /*
1506 * Before new mapping is established, hotadded cpu might already start 1953 * Before new mappings are established, hotadded cpu might already
1507 * handling requests. This doesn't break anything as we map offline 1954 * start handling requests. This doesn't break anything as we map
1508 * CPUs to first hardware queue. We will re-init queue below to get 1955 * offline CPUs to first hardware queue. We will re-init the queue
1509 * optimal settings. 1956 * below to get optimal settings.
1510 */ 1957 */
1511 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN && 1958 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1512 action != CPU_ONLINE && action != CPU_ONLINE_FROZEN) 1959 action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
@@ -1536,7 +1983,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
1536 return -EINVAL; 1983 return -EINVAL;
1537 1984
1538 1985
1539 set->tags = kmalloc_node(set->nr_hw_queues * sizeof(struct blk_mq_tags), 1986 set->tags = kmalloc_node(set->nr_hw_queues *
1987 sizeof(struct blk_mq_tags *),
1540 GFP_KERNEL, set->numa_node); 1988 GFP_KERNEL, set->numa_node);
1541 if (!set->tags) 1989 if (!set->tags)
1542 goto out; 1990 goto out;
@@ -1547,6 +1995,9 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
1547 goto out_unwind; 1995 goto out_unwind;
1548 } 1996 }
1549 1997
1998 mutex_init(&set->tag_list_lock);
1999 INIT_LIST_HEAD(&set->tag_list);
2000
1550 return 0; 2001 return 0;
1551 2002
1552out_unwind: 2003out_unwind:
@@ -1561,11 +2012,37 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
1561{ 2012{
1562 int i; 2013 int i;
1563 2014
1564 for (i = 0; i < set->nr_hw_queues; i++) 2015 for (i = 0; i < set->nr_hw_queues; i++) {
1565 blk_mq_free_rq_map(set, set->tags[i], i); 2016 if (set->tags[i])
2017 blk_mq_free_rq_map(set, set->tags[i], i);
2018 }
2019
2020 kfree(set->tags);
1566} 2021}
1567EXPORT_SYMBOL(blk_mq_free_tag_set); 2022EXPORT_SYMBOL(blk_mq_free_tag_set);
1568 2023
2024int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2025{
2026 struct blk_mq_tag_set *set = q->tag_set;
2027 struct blk_mq_hw_ctx *hctx;
2028 int i, ret;
2029
2030 if (!set || nr > set->queue_depth)
2031 return -EINVAL;
2032
2033 ret = 0;
2034 queue_for_each_hw_ctx(q, hctx, i) {
2035 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2036 if (ret)
2037 break;
2038 }
2039
2040 if (!ret)
2041 q->nr_requests = nr;
2042
2043 return ret;
2044}
2045
1569void blk_mq_disable_hotplug(void) 2046void blk_mq_disable_hotplug(void)
1570{ 2047{
1571 mutex_lock(&all_q_mutex); 2048 mutex_lock(&all_q_mutex);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5fa14f19f752..ff5e6bf0f691 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -11,7 +11,8 @@ struct blk_mq_ctx {
11 11
12 unsigned int cpu; 12 unsigned int cpu;
13 unsigned int index_hw; 13 unsigned int index_hw;
14 unsigned int ipi_redirect; 14
15 unsigned int last_tag ____cacheline_aligned_in_smp;
15 16
16 /* incremented at dispatch time */ 17 /* incremented at dispatch time */
17 unsigned long rq_dispatched[2]; 18 unsigned long rq_dispatched[2];
@@ -22,7 +23,7 @@ struct blk_mq_ctx {
22 23
23 struct request_queue *queue; 24 struct request_queue *queue;
24 struct kobject kobj; 25 struct kobject kobj;
25}; 26} ____cacheline_aligned_in_smp;
26 27
27void __blk_mq_complete_request(struct request *rq); 28void __blk_mq_complete_request(struct request *rq);
28void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); 29void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
@@ -31,13 +32,14 @@ void blk_mq_drain_queue(struct request_queue *q);
31void blk_mq_free_queue(struct request_queue *q); 32void blk_mq_free_queue(struct request_queue *q);
32void blk_mq_clone_flush_request(struct request *flush_rq, 33void blk_mq_clone_flush_request(struct request *flush_rq,
33 struct request *orig_rq); 34 struct request *orig_rq);
35int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
34 36
35/* 37/*
36 * CPU hotplug helpers 38 * CPU hotplug helpers
37 */ 39 */
38struct blk_mq_cpu_notifier; 40struct blk_mq_cpu_notifier;
39void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, 41void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
40 void (*fn)(void *, unsigned long, unsigned int), 42 int (*fn)(void *, unsigned long, unsigned int),
41 void *data); 43 void *data);
42void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier); 44void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
43void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier); 45void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
@@ -50,7 +52,15 @@ void blk_mq_disable_hotplug(void);
50 */ 52 */
51extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); 53extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
52extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); 54extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
55extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
53 56
54void blk_mq_add_timer(struct request *rq); 57/*
58 * Basic implementation of sparser bitmap, allowing the user to spread
59 * the bits over more cachelines.
60 */
61struct blk_align_bitmap {
62 unsigned long word;
63 unsigned long depth;
64} ____cacheline_aligned_in_smp;
55 65
56#endif 66#endif
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 7500f876dae4..23321fbab293 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -48,11 +48,10 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page)
48static ssize_t 48static ssize_t
49queue_requests_store(struct request_queue *q, const char *page, size_t count) 49queue_requests_store(struct request_queue *q, const char *page, size_t count)
50{ 50{
51 struct request_list *rl;
52 unsigned long nr; 51 unsigned long nr;
53 int ret; 52 int ret, err;
54 53
55 if (!q->request_fn) 54 if (!q->request_fn && !q->mq_ops)
56 return -EINVAL; 55 return -EINVAL;
57 56
58 ret = queue_var_store(&nr, page, count); 57 ret = queue_var_store(&nr, page, count);
@@ -62,40 +61,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
62 if (nr < BLKDEV_MIN_RQ) 61 if (nr < BLKDEV_MIN_RQ)
63 nr = BLKDEV_MIN_RQ; 62 nr = BLKDEV_MIN_RQ;
64 63
65 spin_lock_irq(q->queue_lock); 64 if (q->request_fn)
66 q->nr_requests = nr; 65 err = blk_update_nr_requests(q, nr);
67 blk_queue_congestion_threshold(q); 66 else
68 67 err = blk_mq_update_nr_requests(q, nr);
69 /* congestion isn't cgroup aware and follows root blkcg for now */ 68
70 rl = &q->root_rl; 69 if (err)
71 70 return err;
72 if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
73 blk_set_queue_congested(q, BLK_RW_SYNC);
74 else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
75 blk_clear_queue_congested(q, BLK_RW_SYNC);
76
77 if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
78 blk_set_queue_congested(q, BLK_RW_ASYNC);
79 else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
80 blk_clear_queue_congested(q, BLK_RW_ASYNC);
81
82 blk_queue_for_each_rl(rl, q) {
83 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
84 blk_set_rl_full(rl, BLK_RW_SYNC);
85 } else {
86 blk_clear_rl_full(rl, BLK_RW_SYNC);
87 wake_up(&rl->wait[BLK_RW_SYNC]);
88 }
89
90 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
91 blk_set_rl_full(rl, BLK_RW_ASYNC);
92 } else {
93 blk_clear_rl_full(rl, BLK_RW_ASYNC);
94 wake_up(&rl->wait[BLK_RW_ASYNC]);
95 }
96 }
97 71
98 spin_unlock_irq(q->queue_lock);
99 return ret; 72 return ret;
100} 73}
101 74
@@ -544,8 +517,6 @@ static void blk_release_queue(struct kobject *kobj)
544 if (q->queue_tags) 517 if (q->queue_tags)
545 __blk_queue_free_tags(q); 518 __blk_queue_free_tags(q);
546 519
547 percpu_counter_destroy(&q->mq_usage_counter);
548
549 if (q->mq_ops) 520 if (q->mq_ops)
550 blk_mq_free_queue(q); 521 blk_mq_free_queue(q);
551 522
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 033745cd7fba..9353b4683359 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -744,7 +744,7 @@ static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
744static bool throtl_slice_used(struct throtl_grp *tg, bool rw) 744static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
745{ 745{
746 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) 746 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
747 return 0; 747 return false;
748 748
749 return 1; 749 return 1;
750} 750}
@@ -842,7 +842,7 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
842 if (tg->io_disp[rw] + 1 <= io_allowed) { 842 if (tg->io_disp[rw] + 1 <= io_allowed) {
843 if (wait) 843 if (wait)
844 *wait = 0; 844 *wait = 0;
845 return 1; 845 return true;
846 } 846 }
847 847
848 /* Calc approx time to dispatch */ 848 /* Calc approx time to dispatch */
@@ -880,7 +880,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
880 if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { 880 if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
881 if (wait) 881 if (wait)
882 *wait = 0; 882 *wait = 0;
883 return 1; 883 return true;
884 } 884 }
885 885
886 /* Calc approx time to dispatch */ 886 /* Calc approx time to dispatch */
@@ -923,7 +923,7 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
923 if (tg->bps[rw] == -1 && tg->iops[rw] == -1) { 923 if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
924 if (wait) 924 if (wait)
925 *wait = 0; 925 *wait = 0;
926 return 1; 926 return true;
927 } 927 }
928 928
929 /* 929 /*
@@ -1258,7 +1258,7 @@ out_unlock:
1258 * of throtl_data->service_queue. Those bio's are ready and issued by this 1258 * of throtl_data->service_queue. Those bio's are ready and issued by this
1259 * function. 1259 * function.
1260 */ 1260 */
1261void blk_throtl_dispatch_work_fn(struct work_struct *work) 1261static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1262{ 1262{
1263 struct throtl_data *td = container_of(work, struct throtl_data, 1263 struct throtl_data *td = container_of(work, struct throtl_data,
1264 dispatch_work); 1264 dispatch_work);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index a09e8af8186c..43e8b515806f 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -96,11 +96,7 @@ static void blk_rq_timed_out(struct request *req)
96 __blk_complete_request(req); 96 __blk_complete_request(req);
97 break; 97 break;
98 case BLK_EH_RESET_TIMER: 98 case BLK_EH_RESET_TIMER:
99 if (q->mq_ops) 99 blk_add_timer(req);
100 blk_mq_add_timer(req);
101 else
102 blk_add_timer(req);
103
104 blk_clear_rq_complete(req); 100 blk_clear_rq_complete(req);
105 break; 101 break;
106 case BLK_EH_NOT_HANDLED: 102 case BLK_EH_NOT_HANDLED:
@@ -170,7 +166,26 @@ void blk_abort_request(struct request *req)
170} 166}
171EXPORT_SYMBOL_GPL(blk_abort_request); 167EXPORT_SYMBOL_GPL(blk_abort_request);
172 168
173void __blk_add_timer(struct request *req, struct list_head *timeout_list) 169unsigned long blk_rq_timeout(unsigned long timeout)
170{
171 unsigned long maxt;
172
173 maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
174 if (time_after(timeout, maxt))
175 timeout = maxt;
176
177 return timeout;
178}
179
180/**
181 * blk_add_timer - Start timeout timer for a single request
182 * @req: request that is about to start running.
183 *
184 * Notes:
185 * Each request has its own timer, and as it is added to the queue, we
186 * set up the timer. When the request completes, we cancel the timer.
187 */
188void blk_add_timer(struct request *req)
174{ 189{
175 struct request_queue *q = req->q; 190 struct request_queue *q = req->q;
176 unsigned long expiry; 191 unsigned long expiry;
@@ -188,15 +203,15 @@ void __blk_add_timer(struct request *req, struct list_head *timeout_list)
188 req->timeout = q->rq_timeout; 203 req->timeout = q->rq_timeout;
189 204
190 req->deadline = jiffies + req->timeout; 205 req->deadline = jiffies + req->timeout;
191 if (timeout_list) 206 if (!q->mq_ops)
192 list_add_tail(&req->timeout_list, timeout_list); 207 list_add_tail(&req->timeout_list, &req->q->timeout_list);
193 208
194 /* 209 /*
195 * If the timer isn't already pending or this timeout is earlier 210 * If the timer isn't already pending or this timeout is earlier
196 * than an existing one, modify the timer. Round up to next nearest 211 * than an existing one, modify the timer. Round up to next nearest
197 * second. 212 * second.
198 */ 213 */
199 expiry = round_jiffies_up(req->deadline); 214 expiry = blk_rq_timeout(round_jiffies_up(req->deadline));
200 215
201 if (!timer_pending(&q->timeout) || 216 if (!timer_pending(&q->timeout) ||
202 time_before(expiry, q->timeout.expires)) { 217 time_before(expiry, q->timeout.expires)) {
@@ -214,17 +229,3 @@ void __blk_add_timer(struct request *req, struct list_head *timeout_list)
214 } 229 }
215 230
216} 231}
217
218/**
219 * blk_add_timer - Start timeout timer for a single request
220 * @req: request that is about to start running.
221 *
222 * Notes:
223 * Each request has its own timer, and as it is added to the queue, we
224 * set up the timer. When the request completes, we cancel the timer.
225 */
226void blk_add_timer(struct request *req)
227{
228 __blk_add_timer(req, &req->q->timeout_list);
229}
230
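
With the hunk above, blk_add_timer() clamps how far into the future the queue timer may be pushed: the per-request deadline is rounded up to the next second and then capped by blk_rq_timeout() at BLK_MAX_TIMEOUT jiffies (5 * HZ, added to block/blk.h below). The following standalone sketch models only that clamping; round_up_to_second() is a simplified stand-in for the kernel's round_jiffies_up(), and "now" is passed in explicitly instead of reading jiffies.

/*
 * Model of expiry = blk_rq_timeout(round_jiffies_up(deadline)),
 * capped at BLK_MAX_TIMEOUT ahead of "now".  Illustrative only.
 */
#include <stdio.h>

#define HZ 1000
#define BLK_MAX_TIMEOUT (5 * HZ)

static unsigned long round_up_to_second(unsigned long j)
{
        return ((j + HZ - 1) / HZ) * HZ;
}

static unsigned long blk_rq_timeout_model(unsigned long now, unsigned long timeout)
{
        unsigned long maxt = round_up_to_second(now + BLK_MAX_TIMEOUT);

        return timeout > maxt ? maxt : timeout;
}

int main(void)
{
        unsigned long now = 100000;             /* pretend jiffies value */
        unsigned long rq_timeout = 30 * HZ;     /* request asked for 30s */
        unsigned long deadline = now + rq_timeout;
        unsigned long expiry;

        expiry = blk_rq_timeout_model(now, round_up_to_second(deadline));
        printf("deadline=%lu, timer armed for %lu (capped at now+%u)\n",
               deadline, expiry, BLK_MAX_TIMEOUT);
        return 0;
}

The cap only limits how far out the queue's timer is armed; the per-request deadline itself is untouched, so longer timeouts are simply re-checked each time the timer fires.
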
diff --git a/block/blk.h b/block/blk.h
index 1d880f1f957f..45385e9abf6f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -9,6 +9,9 @@
9/* Number of requests a "batching" process may submit */ 9/* Number of requests a "batching" process may submit */
10#define BLK_BATCH_REQ 32 10#define BLK_BATCH_REQ 32
11 11
12/* Max future timer expiry for timeouts */
13#define BLK_MAX_TIMEOUT (5 * HZ)
14
12extern struct kmem_cache *blk_requestq_cachep; 15extern struct kmem_cache *blk_requestq_cachep;
13extern struct kmem_cache *request_cachep; 16extern struct kmem_cache *request_cachep;
14extern struct kobj_type blk_queue_ktype; 17extern struct kobj_type blk_queue_ktype;
@@ -37,9 +40,9 @@ bool __blk_end_bidi_request(struct request *rq, int error,
37void blk_rq_timed_out_timer(unsigned long data); 40void blk_rq_timed_out_timer(unsigned long data);
38void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout, 41void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
39 unsigned int *next_set); 42 unsigned int *next_set);
40void __blk_add_timer(struct request *req, struct list_head *timeout_list); 43unsigned long blk_rq_timeout(unsigned long timeout);
44void blk_add_timer(struct request *req);
41void blk_delete_timer(struct request *); 45void blk_delete_timer(struct request *);
42void blk_add_timer(struct request *);
43 46
44 47
45bool bio_attempt_front_merge(struct request_queue *q, struct request *req, 48bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
@@ -185,6 +188,8 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
185 return q->nr_congestion_off; 188 return q->nr_congestion_off;
186} 189}
187 190
191extern int blk_update_nr_requests(struct request_queue *, unsigned int);
192
188/* 193/*
189 * Contribute to IO statistics IFF: 194 * Contribute to IO statistics IFF:
190 * 195 *
diff --git a/block/bounce.c b/block/bounce.c
new file mode 100644
index 000000000000..523918b8c6dc
--- /dev/null
+++ b/block/bounce.c
@@ -0,0 +1,287 @@
1/* bounce buffer handling for block devices
2 *
3 * - Split from highmem.c
4 */
5
6#include <linux/mm.h>
7#include <linux/export.h>
8#include <linux/swap.h>
9#include <linux/gfp.h>
10#include <linux/bio.h>
11#include <linux/pagemap.h>
12#include <linux/mempool.h>
13#include <linux/blkdev.h>
14#include <linux/init.h>
15#include <linux/hash.h>
16#include <linux/highmem.h>
17#include <linux/bootmem.h>
18#include <asm/tlbflush.h>
19
20#include <trace/events/block.h>
21
22#define POOL_SIZE 64
23#define ISA_POOL_SIZE 16
24
25static mempool_t *page_pool, *isa_page_pool;
26
27#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
28static __init int init_emergency_pool(void)
29{
30#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
31 if (max_pfn <= max_low_pfn)
32 return 0;
33#endif
34
35 page_pool = mempool_create_page_pool(POOL_SIZE, 0);
36 BUG_ON(!page_pool);
37 printk("bounce pool size: %d pages\n", POOL_SIZE);
38
39 return 0;
40}
41
42__initcall(init_emergency_pool);
43#endif
44
45#ifdef CONFIG_HIGHMEM
46/*
47 * highmem version, map in to vec
48 */
49static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
50{
51 unsigned long flags;
52 unsigned char *vto;
53
54 local_irq_save(flags);
55 vto = kmap_atomic(to->bv_page);
56 memcpy(vto + to->bv_offset, vfrom, to->bv_len);
57 kunmap_atomic(vto);
58 local_irq_restore(flags);
59}
60
61#else /* CONFIG_HIGHMEM */
62
63#define bounce_copy_vec(to, vfrom) \
64 memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
65
66#endif /* CONFIG_HIGHMEM */
67
68/*
69 * allocate pages in the DMA region for the ISA pool
70 */
71static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
72{
73 return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
74}
75
76/*
77 * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
78 * as the max address, so check if the pool has already been created.
79 */
80int init_emergency_isa_pool(void)
81{
82 if (isa_page_pool)
83 return 0;
84
85 isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
86 mempool_free_pages, (void *) 0);
87 BUG_ON(!isa_page_pool);
88
89 printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
90 return 0;
91}
92
93/*
94 * Simple bounce buffer support for highmem pages. Depending on the
95 * queue gfp mask set, *to may or may not be a highmem page. kmap it
96 * always, it will do the Right Thing
97 */
98static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
99{
100 unsigned char *vfrom;
101 struct bio_vec tovec, *fromvec = from->bi_io_vec;
102 struct bvec_iter iter;
103
104 bio_for_each_segment(tovec, to, iter) {
105 if (tovec.bv_page != fromvec->bv_page) {
106 /*
107 * fromvec->bv_offset and fromvec->bv_len might have
108 * been modified by the block layer, so use the original
109 * copy, bounce_copy_vec already uses tovec->bv_len
110 */
111 vfrom = page_address(fromvec->bv_page) +
112 tovec.bv_offset;
113
114 bounce_copy_vec(&tovec, vfrom);
115 flush_dcache_page(tovec.bv_page);
116 }
117
118 fromvec++;
119 }
120}
121
122static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
123{
124 struct bio *bio_orig = bio->bi_private;
125 struct bio_vec *bvec, *org_vec;
126 int i;
127
128 if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
129 set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);
130
131 /*
132 * free up bounce indirect pages used
133 */
134 bio_for_each_segment_all(bvec, bio, i) {
135 org_vec = bio_orig->bi_io_vec + i;
136 if (bvec->bv_page == org_vec->bv_page)
137 continue;
138
139 dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
140 mempool_free(bvec->bv_page, pool);
141 }
142
143 bio_endio(bio_orig, err);
144 bio_put(bio);
145}
146
147static void bounce_end_io_write(struct bio *bio, int err)
148{
149 bounce_end_io(bio, page_pool, err);
150}
151
152static void bounce_end_io_write_isa(struct bio *bio, int err)
153{
154
155 bounce_end_io(bio, isa_page_pool, err);
156}
157
158static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
159{
160 struct bio *bio_orig = bio->bi_private;
161
162 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
163 copy_to_high_bio_irq(bio_orig, bio);
164
165 bounce_end_io(bio, pool, err);
166}
167
168static void bounce_end_io_read(struct bio *bio, int err)
169{
170 __bounce_end_io_read(bio, page_pool, err);
171}
172
173static void bounce_end_io_read_isa(struct bio *bio, int err)
174{
175 __bounce_end_io_read(bio, isa_page_pool, err);
176}
177
178#ifdef CONFIG_NEED_BOUNCE_POOL
179static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
180{
181 if (bio_data_dir(bio) != WRITE)
182 return 0;
183
184 if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
185 return 0;
186
187 return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
188}
189#else
190static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
191{
192 return 0;
193}
194#endif /* CONFIG_NEED_BOUNCE_POOL */
195
196static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
197 mempool_t *pool, int force)
198{
199 struct bio *bio;
200 int rw = bio_data_dir(*bio_orig);
201 struct bio_vec *to, from;
202 struct bvec_iter iter;
203 unsigned i;
204
205 if (force)
206 goto bounce;
207 bio_for_each_segment(from, *bio_orig, iter)
208 if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
209 goto bounce;
210
211 return;
212bounce:
213 bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
214
215 bio_for_each_segment_all(to, bio, i) {
216 struct page *page = to->bv_page;
217
218 if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
219 continue;
220
221 inc_zone_page_state(to->bv_page, NR_BOUNCE);
222 to->bv_page = mempool_alloc(pool, q->bounce_gfp);
223
224 if (rw == WRITE) {
225 char *vto, *vfrom;
226
227 flush_dcache_page(page);
228
229 vto = page_address(to->bv_page) + to->bv_offset;
230 vfrom = kmap_atomic(page) + to->bv_offset;
231 memcpy(vto, vfrom, to->bv_len);
232 kunmap_atomic(vfrom);
233 }
234 }
235
236 trace_block_bio_bounce(q, *bio_orig);
237
238 bio->bi_flags |= (1 << BIO_BOUNCED);
239
240 if (pool == page_pool) {
241 bio->bi_end_io = bounce_end_io_write;
242 if (rw == READ)
243 bio->bi_end_io = bounce_end_io_read;
244 } else {
245 bio->bi_end_io = bounce_end_io_write_isa;
246 if (rw == READ)
247 bio->bi_end_io = bounce_end_io_read_isa;
248 }
249
250 bio->bi_private = *bio_orig;
251 *bio_orig = bio;
252}
253
254void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
255{
256 int must_bounce;
257 mempool_t *pool;
258
259 /*
260 * Data-less bio, nothing to bounce
261 */
262 if (!bio_has_data(*bio_orig))
263 return;
264
265 must_bounce = must_snapshot_stable_pages(q, *bio_orig);
266
267 /*
268 * for non-isa bounce case, just check if the bounce pfn is equal
269 * to or bigger than the highest pfn in the system -- in that case,
270 * don't waste time iterating over bio segments
271 */
272 if (!(q->bounce_gfp & GFP_DMA)) {
273 if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
274 return;
275 pool = page_pool;
276 } else {
277 BUG_ON(!isa_page_pool);
278 pool = isa_page_pool;
279 }
280
281 /*
282 * slow path
283 */
284 __blk_queue_bounce(q, bio_orig, pool, must_bounce);
285}
286
287EXPORT_SYMBOL(blk_queue_bounce);
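
blk_queue_bounce() above only walks the bio when it might have to: data-less bios are skipped, the non-ISA case bails out early when the queue's bounce pfn covers every page in the system and no stable-page snapshot is required, and otherwise __blk_queue_bounce() clones the bio and substitutes pool pages for any segment above queue_bounce_pfn(q). The sketch below models just that gatekeeping decision; all types and values are illustrative placeholders, not kernel structures.

/*
 * Decision-only model of the gatekeeping in blk_queue_bounce():
 * when is a clone-and-copy bounce actually attempted?
 */
#include <stdbool.h>
#include <stdio.h>

struct model_queue {
        bool isa_dma;                   /* bounce_gfp includes GFP_DMA */
        unsigned long bounce_pfn;       /* queue_bounce_pfn(q) */
};

static bool needs_bounce(const struct model_queue *q,
                         bool bio_has_data,
                         bool must_snapshot_stable_pages,
                         const unsigned long *segment_pfns, int nr_segs,
                         unsigned long max_pfn_in_system)
{
        int i;

        if (!bio_has_data)
                return false;           /* data-less bio: nothing to bounce */

        if (!q->isa_dma &&
            q->bounce_pfn >= max_pfn_in_system &&
            !must_snapshot_stable_pages)
                return false;           /* fast path: no segment can be too high */

        if (must_snapshot_stable_pages)
                return true;            /* forced bounce for stable-page writes */

        for (i = 0; i < nr_segs; i++)
                if (segment_pfns[i] > q->bounce_pfn)
                        return true;    /* segment above the bounce limit */

        return false;
}

int main(void)
{
        struct model_queue q = { .isa_dma = false, .bounce_pfn = 0x100000 };
        unsigned long pfns[] = { 0x80000, 0x120000 };

        printf("bounce needed: %d\n",
               needs_bounce(&q, true, false, pfns, 2, 0x200000));
        return 0;
}
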
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5063a0bd831a..22dffebc7c73 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -4460,7 +4460,7 @@ out_free:
4460static ssize_t 4460static ssize_t
4461cfq_var_show(unsigned int var, char *page) 4461cfq_var_show(unsigned int var, char *page)
4462{ 4462{
4463 return sprintf(page, "%d\n", var); 4463 return sprintf(page, "%u\n", var);
4464} 4464}
4465 4465
4466static ssize_t 4466static ssize_t
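
The cfq_var_show() change above swaps "%d" for "%u" because the tunables it prints are unsigned: a value above INT_MAX would otherwise render as negative in sysfs. A tiny illustration, with a hypothetical value:

/* Why "%u" matters for an unsigned variable.  Illustrative only. */
#include <stdio.h>

int main(void)
{
        unsigned int var = 3000000000u;

        printf("as %%d: %d\n", (int)var);       /* misleading: prints negative */
        printf("as %%u: %u\n", var);            /* correct: 3000000000 */
        return 0;
}
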
diff --git a/block/ioprio.c b/block/ioprio.c
new file mode 100644
index 000000000000..e50170ca7c33
--- /dev/null
+++ b/block/ioprio.c
@@ -0,0 +1,241 @@
1/*
2 * fs/ioprio.c
3 *
4 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
5 *
6 * Helper functions for setting/querying io priorities of processes. The
7 * system calls closely mimic getpriority/setpriority, see the man page for
8 * those. The prio argument is a composite of prio class and prio data, where
9 * the data argument has meaning within that class. The standard scheduling
10 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
11 * being the lowest.
12 *
13 * IOW, setting BE scheduling class with prio 2 is done ala:
14 *
15 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
16 *
17 * ioprio_set(PRIO_PROCESS, pid, prio);
18 *
19 * See also Documentation/block/ioprio.txt
20 *
21 */
22#include <linux/gfp.h>
23#include <linux/kernel.h>
24#include <linux/export.h>
25#include <linux/ioprio.h>
26#include <linux/blkdev.h>
27#include <linux/capability.h>
28#include <linux/syscalls.h>
29#include <linux/security.h>
30#include <linux/pid_namespace.h>
31
32int set_task_ioprio(struct task_struct *task, int ioprio)
33{
34 int err;
35 struct io_context *ioc;
36 const struct cred *cred = current_cred(), *tcred;
37
38 rcu_read_lock();
39 tcred = __task_cred(task);
40 if (!uid_eq(tcred->uid, cred->euid) &&
41 !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
42 rcu_read_unlock();
43 return -EPERM;
44 }
45 rcu_read_unlock();
46
47 err = security_task_setioprio(task, ioprio);
48 if (err)
49 return err;
50
51 ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
52 if (ioc) {
53 ioc->ioprio = ioprio;
54 put_io_context(ioc);
55 }
56
57 return err;
58}
59EXPORT_SYMBOL_GPL(set_task_ioprio);
60
61SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
62{
63 int class = IOPRIO_PRIO_CLASS(ioprio);
64 int data = IOPRIO_PRIO_DATA(ioprio);
65 struct task_struct *p, *g;
66 struct user_struct *user;
67 struct pid *pgrp;
68 kuid_t uid;
69 int ret;
70
71 switch (class) {
72 case IOPRIO_CLASS_RT:
73 if (!capable(CAP_SYS_ADMIN))
74 return -EPERM;
75 /* fall through, rt has prio field too */
76 case IOPRIO_CLASS_BE:
77 if (data >= IOPRIO_BE_NR || data < 0)
78 return -EINVAL;
79
80 break;
81 case IOPRIO_CLASS_IDLE:
82 break;
83 case IOPRIO_CLASS_NONE:
84 if (data)
85 return -EINVAL;
86 break;
87 default:
88 return -EINVAL;
89 }
90
91 ret = -ESRCH;
92 rcu_read_lock();
93 switch (which) {
94 case IOPRIO_WHO_PROCESS:
95 if (!who)
96 p = current;
97 else
98 p = find_task_by_vpid(who);
99 if (p)
100 ret = set_task_ioprio(p, ioprio);
101 break;
102 case IOPRIO_WHO_PGRP:
103 if (!who)
104 pgrp = task_pgrp(current);
105 else
106 pgrp = find_vpid(who);
107 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
108 ret = set_task_ioprio(p, ioprio);
109 if (ret)
110 break;
111 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
112 break;
113 case IOPRIO_WHO_USER:
114 uid = make_kuid(current_user_ns(), who);
115 if (!uid_valid(uid))
116 break;
117 if (!who)
118 user = current_user();
119 else
120 user = find_user(uid);
121
122 if (!user)
123 break;
124
125 do_each_thread(g, p) {
126 if (!uid_eq(task_uid(p), uid))
127 continue;
128 ret = set_task_ioprio(p, ioprio);
129 if (ret)
130 goto free_uid;
131 } while_each_thread(g, p);
132free_uid:
133 if (who)
134 free_uid(user);
135 break;
136 default:
137 ret = -EINVAL;
138 }
139
140 rcu_read_unlock();
141 return ret;
142}
143
144static int get_task_ioprio(struct task_struct *p)
145{
146 int ret;
147
148 ret = security_task_getioprio(p);
149 if (ret)
150 goto out;
151 ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
152 if (p->io_context)
153 ret = p->io_context->ioprio;
154out:
155 return ret;
156}
157
158int ioprio_best(unsigned short aprio, unsigned short bprio)
159{
160 unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
161 unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
162
163 if (aclass == IOPRIO_CLASS_NONE)
164 aclass = IOPRIO_CLASS_BE;
165 if (bclass == IOPRIO_CLASS_NONE)
166 bclass = IOPRIO_CLASS_BE;
167
168 if (aclass == bclass)
169 return min(aprio, bprio);
170 if (aclass > bclass)
171 return bprio;
172 else
173 return aprio;
174}
175
176SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
177{
178 struct task_struct *g, *p;
179 struct user_struct *user;
180 struct pid *pgrp;
181 kuid_t uid;
182 int ret = -ESRCH;
183 int tmpio;
184
185 rcu_read_lock();
186 switch (which) {
187 case IOPRIO_WHO_PROCESS:
188 if (!who)
189 p = current;
190 else
191 p = find_task_by_vpid(who);
192 if (p)
193 ret = get_task_ioprio(p);
194 break;
195 case IOPRIO_WHO_PGRP:
196 if (!who)
197 pgrp = task_pgrp(current);
198 else
199 pgrp = find_vpid(who);
200 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
201 tmpio = get_task_ioprio(p);
202 if (tmpio < 0)
203 continue;
204 if (ret == -ESRCH)
205 ret = tmpio;
206 else
207 ret = ioprio_best(ret, tmpio);
208 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
209 break;
210 case IOPRIO_WHO_USER:
211 uid = make_kuid(current_user_ns(), who);
212 if (!who)
213 user = current_user();
214 else
215 user = find_user(uid);
216
217 if (!user)
218 break;
219
220 do_each_thread(g, p) {
221 if (!uid_eq(task_uid(p), user->uid))
222 continue;
223 tmpio = get_task_ioprio(p);
224 if (tmpio < 0)
225 continue;
226 if (ret == -ESRCH)
227 ret = tmpio;
228 else
229 ret = ioprio_best(ret, tmpio);
230 } while_each_thread(g, p);
231
232 if (who)
233 free_uid(user);
234 break;
235 default:
236 ret = -EINVAL;
237 }
238
239 rcu_read_unlock();
240 return ret;
241}
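
The header comment in block/ioprio.c above already spells out how a priority value is composed. For completeness, a userspace caller might look like the sketch below; glibc provides no wrappers for these system calls, so raw syscall(2) is used, and the constants are copied here to mirror the kernel's include/linux/ioprio.h definitions since that header is not exported to userspace in this era.

/*
 * Set the calling process to best-effort priority 2, then read the
 * value back.  Userspace example, not part of this patch.
 */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_CLASS_SHIFT      13
#define IOPRIO_PRIO_VALUE(class, data)  (((class) << IOPRIO_CLASS_SHIFT) | (data))

#define IOPRIO_CLASS_BE         2
#define IOPRIO_WHO_PROCESS      1

int main(void)
{
        int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 2);

        if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0) {
                perror("ioprio_set");
                return 1;
        }

        prio = syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0);
        if (prio < 0) {
                perror("ioprio_get");
                return 1;
        }

        printf("class=%d data=%d\n",
               prio >> IOPRIO_CLASS_SHIFT,
               prio & ((1 << IOPRIO_CLASS_SHIFT) - 1));
        return 0;
}

The ionice(1) utility from util-linux performs the same two calls from the command line.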