Diffstat (limited to 'block')
-rw-r--r--  block/Makefile                 7
-rw-r--r--  block/bio-integrity.c        657
-rw-r--r--  block/bio.c                 2038
-rw-r--r--  block/blk-cgroup.c            30
-rw-r--r--  block/blk-cgroup.h            24
-rw-r--r--  block/blk-core.c             119
-rw-r--r--  block/blk-flush.c             40
-rw-r--r--  block/blk-ioc.c                2
-rw-r--r--  block/blk-iopoll.c            11
-rw-r--r--  block/blk-lib.c                4
-rw-r--r--  block/blk-map.c                7
-rw-r--r--  block/blk-merge.c             28
-rw-r--r--  block/blk-mq-cpu.c            17
-rw-r--r--  block/blk-mq-cpumap.c         37
-rw-r--r--  block/blk-mq-sysfs.c         167
-rw-r--r--  block/blk-mq-tag.c           581
-rw-r--r--  block/blk-mq-tag.h            71
-rw-r--r--  block/blk-mq.c              1438
-rw-r--r--  block/blk-mq.h                79
-rw-r--r--  block/blk-softirq.c           14
-rw-r--r--  block/blk-sysfs.c             47
-rw-r--r--  block/blk-throttle.c          46
-rw-r--r--  block/blk-timeout.c           60
-rw-r--r--  block/blk.h                   11
-rw-r--r--  block/bounce.c               290
-rw-r--r--  block/bsg.c                    2
-rw-r--r--  block/cfq-iosched.c           43
-rw-r--r--  block/deadline-iosched.c       8
-rw-r--r--  block/elevator.c               2
-rw-r--r--  block/ioprio.c               241
-rw-r--r--  block/partitions/atari.h       4
-rw-r--r--  block/partitions/efi.h         9
-rw-r--r--  block/partitions/karma.c       3
-rw-r--r--  block/scsi_ioctl.c             4
34 files changed, 5306 insertions, 835 deletions
diff --git a/block/Makefile b/block/Makefile
index 20645e88fb57..a2ce6ac935ec 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -2,13 +2,15 @@
 # Makefile for the kernel block layer
 #
 
-obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
+obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
 			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
 			blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \
 			blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
-			genhd.o scsi_ioctl.o partition-generic.o partitions/
+			genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
+			partitions/
 
+obj-$(CONFIG_BOUNCE)	+= bounce.o
 obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
 obj-$(CONFIG_BLK_DEV_BSGLIB)	+= bsg-lib.o
 obj-$(CONFIG_BLK_CGROUP)	+= blk-cgroup.o
@@ -20,3 +22,4 @@ obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
 obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)	+= blk-integrity.o
 obj-$(CONFIG_BLK_CMDLINE_PARSER)	+= cmdline-parser.o
+obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
new file mode 100644
index 000000000000..9e241063a616
--- /dev/null
+++ b/block/bio-integrity.c
@@ -0,0 +1,657 @@
1/*
2 * bio-integrity.c - bio data integrity extensions
3 *
4 * Copyright (C) 2007, 2008, 2009 Oracle Corporation
5 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
19 * USA.
20 *
21 */
22
23#include <linux/blkdev.h>
24#include <linux/mempool.h>
25#include <linux/export.h>
26#include <linux/bio.h>
27#include <linux/workqueue.h>
28#include <linux/slab.h>
29
30#define BIP_INLINE_VECS 4
31
32static struct kmem_cache *bip_slab;
33static struct workqueue_struct *kintegrityd_wq;
34
35/**
36 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
37 * @bio: bio to attach integrity metadata to
38 * @gfp_mask: Memory allocation mask
39 * @nr_vecs: Number of integrity metadata scatter-gather elements
40 *
41 * Description: This function prepares a bio for attaching integrity
42 * metadata. nr_vecs specifies the maximum number of pages containing
43 * integrity metadata that can be attached.
44 */
45struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
46 gfp_t gfp_mask,
47 unsigned int nr_vecs)
48{
49 struct bio_integrity_payload *bip;
50 struct bio_set *bs = bio->bi_pool;
51 unsigned long idx = BIO_POOL_NONE;
52 unsigned inline_vecs;
53
54 if (!bs) {
55 bip = kmalloc(sizeof(struct bio_integrity_payload) +
56 sizeof(struct bio_vec) * nr_vecs, gfp_mask);
57 inline_vecs = nr_vecs;
58 } else {
59 bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
60 inline_vecs = BIP_INLINE_VECS;
61 }
62
63 if (unlikely(!bip))
64 return NULL;
65
66 memset(bip, 0, sizeof(*bip));
67
68 if (nr_vecs > inline_vecs) {
69 bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,
70 bs->bvec_integrity_pool);
71 if (!bip->bip_vec)
72 goto err;
73 } else {
74 bip->bip_vec = bip->bip_inline_vecs;
75 }
76
77 bip->bip_slab = idx;
78 bip->bip_bio = bio;
79 bio->bi_integrity = bip;
80
81 return bip;
82err:
83 mempool_free(bip, bs->bio_integrity_pool);
84 return NULL;
85}
86EXPORT_SYMBOL(bio_integrity_alloc);
87
88/**
89 * bio_integrity_free - Free bio integrity payload
90 * @bio: bio containing bip to be freed
91 *
92 * Description: Used to free the integrity portion of a bio. Usually
93 * called from bio_free().
94 */
95void bio_integrity_free(struct bio *bio)
96{
97 struct bio_integrity_payload *bip = bio->bi_integrity;
98 struct bio_set *bs = bio->bi_pool;
99
100 if (bip->bip_owns_buf)
101 kfree(bip->bip_buf);
102
103 if (bs) {
104 if (bip->bip_slab != BIO_POOL_NONE)
105 bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
106 bip->bip_slab);
107
108 mempool_free(bip, bs->bio_integrity_pool);
109 } else {
110 kfree(bip);
111 }
112
113 bio->bi_integrity = NULL;
114}
115EXPORT_SYMBOL(bio_integrity_free);
116
117static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip)
118{
119 if (bip->bip_slab == BIO_POOL_NONE)
120 return BIP_INLINE_VECS;
121
122 return bvec_nr_vecs(bip->bip_slab);
123}
124
125/**
126 * bio_integrity_add_page - Attach integrity metadata
127 * @bio: bio to update
128 * @page: page containing integrity metadata
129 * @len: number of bytes of integrity metadata in page
130 * @offset: start offset within page
131 *
132 * Description: Attach a page containing integrity metadata to bio.
133 */
134int bio_integrity_add_page(struct bio *bio, struct page *page,
135 unsigned int len, unsigned int offset)
136{
137 struct bio_integrity_payload *bip = bio->bi_integrity;
138 struct bio_vec *iv;
139
140 if (bip->bip_vcnt >= bip_integrity_vecs(bip)) {
141 printk(KERN_ERR "%s: bip_vec full\n", __func__);
142 return 0;
143 }
144
145 iv = bip->bip_vec + bip->bip_vcnt;
146
147 iv->bv_page = page;
148 iv->bv_len = len;
149 iv->bv_offset = offset;
150 bip->bip_vcnt++;
151
152 return len;
153}
154EXPORT_SYMBOL(bio_integrity_add_page);
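/*
 * Illustrative sketch (not part of this patch): how a caller that manages
 * its own protection buffer might pair bio_integrity_alloc() with
 * bio_integrity_add_page(). The helper name and the single-page layout
 * are assumptions made for brevity.
 */
static int example_attach_pi(struct bio *bio, struct page *prot_page,
			     unsigned int prot_len)
{
	struct bio_integrity_payload *bip;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (!bip)
		return -ENOMEM;

	if (bio_integrity_add_page(bio, prot_page, prot_len, 0) < prot_len)
		return -EIO;

	bip->bip_iter.bi_size = prot_len;
	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
	return 0;
}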
155
156static int bdev_integrity_enabled(struct block_device *bdev, int rw)
157{
158 struct blk_integrity *bi = bdev_get_integrity(bdev);
159
160 if (bi == NULL)
161 return 0;
162
163 if (rw == READ && bi->verify_fn != NULL &&
164 (bi->flags & INTEGRITY_FLAG_READ))
165 return 1;
166
167 if (rw == WRITE && bi->generate_fn != NULL &&
168 (bi->flags & INTEGRITY_FLAG_WRITE))
169 return 1;
170
171 return 0;
172}
173
174/**
175 * bio_integrity_enabled - Check whether integrity can be passed
176 * @bio: bio to check
177 *
178 * Description: Determines whether bio_integrity_prep() can be called
179 * on this bio or not. bio data direction and target device must be
180 * set prior to calling. The function honors the write_generate and
181 * read_verify flags in sysfs.
182 */
183int bio_integrity_enabled(struct bio *bio)
184{
185 if (!bio_is_rw(bio))
186 return 0;
187
188 /* Already protected? */
189 if (bio_integrity(bio))
190 return 0;
191
192 return bdev_integrity_enabled(bio->bi_bdev, bio_data_dir(bio));
193}
194EXPORT_SYMBOL(bio_integrity_enabled);
195
196/**
197 * bio_integrity_hw_sectors - Convert 512b sectors to hardware sectors
198 * @bi: blk_integrity profile for device
199 * @sectors: Number of 512-byte sectors to convert
200 *
201 * Description: The block layer calculates everything in 512 byte
202 * sectors but integrity metadata is done in terms of the hardware
203 * sector size of the storage device. Convert the block layer sectors
204 * to physical sectors.
205 */
206static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi,
207 unsigned int sectors)
208{
209 /* At this point there are only 512b or 4096b DIF/EPP devices */
210 if (bi->sector_size == 4096)
211 return sectors >>= 3;
212
213 return sectors;
214}
215
216static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
217 unsigned int sectors)
218{
219 return bio_integrity_hw_sectors(bi, sectors) * bi->tuple_size;
220}
221
222/**
223 * bio_integrity_tag_size - Retrieve integrity tag space
224 * @bio: bio to inspect
225 *
226 * Description: Returns the maximum number of tag bytes that can be
227 * attached to this bio. Filesystems can use this to determine how
228 * much metadata to attach to an I/O.
229 */
230unsigned int bio_integrity_tag_size(struct bio *bio)
231{
232 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
233
234 BUG_ON(bio->bi_iter.bi_size == 0);
235
236 return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size);
237}
238EXPORT_SYMBOL(bio_integrity_tag_size);
239
240static int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len,
241 int set)
242{
243 struct bio_integrity_payload *bip = bio->bi_integrity;
244 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
245 unsigned int nr_sectors;
246
247 BUG_ON(bip->bip_buf == NULL);
248
249 if (bi->tag_size == 0)
250 return -1;
251
252 nr_sectors = bio_integrity_hw_sectors(bi,
253 DIV_ROUND_UP(len, bi->tag_size));
254
255 if (nr_sectors * bi->tuple_size > bip->bip_iter.bi_size) {
256 printk(KERN_ERR "%s: tag too big for bio: %u > %u\n", __func__,
257 nr_sectors * bi->tuple_size, bip->bip_iter.bi_size);
258 return -1;
259 }
260
261 if (set)
262 bi->set_tag_fn(bip->bip_buf, tag_buf, nr_sectors);
263 else
264 bi->get_tag_fn(bip->bip_buf, tag_buf, nr_sectors);
265
266 return 0;
267}
268
269/**
270 * bio_integrity_set_tag - Attach a tag buffer to a bio
271 * @bio: bio to attach buffer to
272 * @tag_buf: Pointer to a buffer containing tag data
273 * @len: Length of the included buffer
274 *
275 * Description: Use this function to tag a bio by leveraging the extra
276 * space provided by devices formatted with integrity protection. The
277 * size of the integrity buffer must be <= to the size reported by
278 * bio_integrity_tag_size().
279 */
280int bio_integrity_set_tag(struct bio *bio, void *tag_buf, unsigned int len)
281{
282 BUG_ON(bio_data_dir(bio) != WRITE);
283
284 return bio_integrity_tag(bio, tag_buf, len, 1);
285}
286EXPORT_SYMBOL(bio_integrity_set_tag);
287
288/**
289 * bio_integrity_get_tag - Retrieve a tag buffer from a bio
290 * @bio: bio to retrieve buffer from
291 * @tag_buf: Pointer to a buffer for the tag data
292 * @len: Length of the target buffer
293 *
294 * Description: Use this function to retrieve the tag buffer from a
295 * completed I/O. The size of the integrity buffer must be <= to the
296 * size reported by bio_integrity_tag_size().
297 */
298int bio_integrity_get_tag(struct bio *bio, void *tag_buf, unsigned int len)
299{
300 BUG_ON(bio_data_dir(bio) != READ);
301
302 return bio_integrity_tag(bio, tag_buf, len, 0);
303}
304EXPORT_SYMBOL(bio_integrity_get_tag);
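/*
 * Illustrative sketch (not part of this patch): a filesystem that stores
 * per-sector application tags could size its buffer with
 * bio_integrity_tag_size() and use the set/get helpers around submission
 * and completion. The helper names are hypothetical and buffer management
 * is elided.
 */
static void example_write_tags(struct bio *bio, void *tag_buf)
{
	unsigned int len = bio_integrity_tag_size(bio);

	if (len)
		bio_integrity_set_tag(bio, tag_buf, len);	/* WRITE, before submit_bio() */
}

static void example_read_tags(struct bio *bio, void *tag_buf)
{
	unsigned int len = bio_integrity_tag_size(bio);

	if (len)
		bio_integrity_get_tag(bio, tag_buf, len);	/* READ, from bi_end_io */
}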
305
306/**
307 * bio_integrity_generate_verify - Generate/verify integrity metadata for a bio
308 * @bio: bio to generate/verify integrity metadata for
309 * @operate: 1 to generate integrity metadata, 0 to verify it
310 */
311static int bio_integrity_generate_verify(struct bio *bio, int operate)
312{
313 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
314 struct blk_integrity_exchg bix;
315 struct bio_vec *bv;
316 sector_t sector;
317 unsigned int sectors, ret = 0, i;
318 void *prot_buf = bio->bi_integrity->bip_buf;
319
320 if (operate)
321 sector = bio->bi_iter.bi_sector;
322 else
323 sector = bio->bi_integrity->bip_iter.bi_sector;
324
325 bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
326 bix.sector_size = bi->sector_size;
327
328 bio_for_each_segment_all(bv, bio, i) {
329 void *kaddr = kmap_atomic(bv->bv_page);
330 bix.data_buf = kaddr + bv->bv_offset;
331 bix.data_size = bv->bv_len;
332 bix.prot_buf = prot_buf;
333 bix.sector = sector;
334
335 if (operate)
336 bi->generate_fn(&bix);
337 else {
338 ret = bi->verify_fn(&bix);
339 if (ret) {
340 kunmap_atomic(kaddr);
341 return ret;
342 }
343 }
344
345 sectors = bv->bv_len / bi->sector_size;
346 sector += sectors;
347 prot_buf += sectors * bi->tuple_size;
348
349 kunmap_atomic(kaddr);
350 }
351 return ret;
352}
353
354/**
355 * bio_integrity_generate - Generate integrity metadata for a bio
356 * @bio: bio to generate integrity metadata for
357 *
358 * Description: Generates integrity metadata for a bio by calling the
359 * block device's generation callback function. The bio must have a
360 * bip attached with enough room to accommodate the generated
361 * integrity metadata.
362 */
363static void bio_integrity_generate(struct bio *bio)
364{
365 bio_integrity_generate_verify(bio, 1);
366}
367
368static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
369{
370 if (bi)
371 return bi->tuple_size;
372
373 return 0;
374}
375
376/**
377 * bio_integrity_prep - Prepare bio for integrity I/O
378 * @bio: bio to prepare
379 *
380 * Description: Allocates a buffer for integrity metadata, maps the
381 * pages and attaches them to a bio. The bio must have data
382 * direction, target device and start sector set prior to calling. In
383 * the WRITE case, integrity metadata will be generated using the
384 * block device's integrity function. In the READ case, the buffer
385 * will be prepared for DMA and a suitable end_io handler set up.
386 */
387int bio_integrity_prep(struct bio *bio)
388{
389 struct bio_integrity_payload *bip;
390 struct blk_integrity *bi;
391 struct request_queue *q;
392 void *buf;
393 unsigned long start, end;
394 unsigned int len, nr_pages;
395 unsigned int bytes, offset, i;
396 unsigned int sectors;
397
398 bi = bdev_get_integrity(bio->bi_bdev);
399 q = bdev_get_queue(bio->bi_bdev);
400 BUG_ON(bi == NULL);
401 BUG_ON(bio_integrity(bio));
402
403 sectors = bio_integrity_hw_sectors(bi, bio_sectors(bio));
404
405 /* Allocate kernel buffer for protection data */
406 len = sectors * blk_integrity_tuple_size(bi);
407 buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
408 if (unlikely(buf == NULL)) {
409 printk(KERN_ERR "could not allocate integrity buffer\n");
410 return -ENOMEM;
411 }
412
413 end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
414 start = ((unsigned long) buf) >> PAGE_SHIFT;
415 nr_pages = end - start;
416
417 /* Allocate bio integrity payload and integrity vectors */
418 bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
419 if (unlikely(bip == NULL)) {
420 printk(KERN_ERR "could not allocate data integrity bioset\n");
421 kfree(buf);
422 return -EIO;
423 }
424
425 bip->bip_owns_buf = 1;
426 bip->bip_buf = buf;
427 bip->bip_iter.bi_size = len;
428 bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
429
430 /* Map it */
431 offset = offset_in_page(buf);
432 for (i = 0 ; i < nr_pages ; i++) {
433 int ret;
434 bytes = PAGE_SIZE - offset;
435
436 if (len <= 0)
437 break;
438
439 if (bytes > len)
440 bytes = len;
441
442 ret = bio_integrity_add_page(bio, virt_to_page(buf),
443 bytes, offset);
444
445 if (ret == 0)
446 return 0;
447
448 if (ret < bytes)
449 break;
450
451 buf += bytes;
452 len -= bytes;
453 offset = 0;
454 }
455
456 /* Install custom I/O completion handler if read verify is enabled */
457 if (bio_data_dir(bio) == READ) {
458 bip->bip_end_io = bio->bi_end_io;
459 bio->bi_end_io = bio_integrity_endio;
460 }
461
462 /* Auto-generate integrity metadata if this is a write */
463 if (bio_data_dir(bio) == WRITE)
464 bio_integrity_generate(bio);
465
466 return 0;
467}
468EXPORT_SYMBOL(bio_integrity_prep);
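/*
 * Illustrative sketch (not part of this patch): the typical call site in a
 * request submission path pairs bio_integrity_enabled() with
 * bio_integrity_prep() and fails the bio if the payload cannot be set up.
 * The helper name is hypothetical; the pattern roughly mirrors blk-core.
 */
static inline void example_prep_integrity(struct bio *bio)
{
	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_endio(bio, -EIO);
		return;
	}
	/* ... continue with normal submission ... */
}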
469
470/**
471 * bio_integrity_verify - Verify integrity metadata for a bio
472 * @bio: bio to verify
473 *
474 * Description: This function is called to verify the integrity of a
475 * bio. The data in the bio io_vec is compared to the integrity
476 * metadata returned by the HBA.
477 */
478static int bio_integrity_verify(struct bio *bio)
479{
480 return bio_integrity_generate_verify(bio, 0);
481}
482
483/**
484 * bio_integrity_verify_fn - Integrity I/O completion worker
485 * @work: Work struct stored in bio to be verified
486 *
487 * Description: This workqueue function is called to complete a READ
488 * request. The function verifies the transferred integrity metadata
489 * and then calls the original bio end_io function.
490 */
491static void bio_integrity_verify_fn(struct work_struct *work)
492{
493 struct bio_integrity_payload *bip =
494 container_of(work, struct bio_integrity_payload, bip_work);
495 struct bio *bio = bip->bip_bio;
496 int error;
497
498 error = bio_integrity_verify(bio);
499
500 /* Restore original bio completion handler */
501 bio->bi_end_io = bip->bip_end_io;
502 bio_endio_nodec(bio, error);
503}
504
505/**
506 * bio_integrity_endio - Integrity I/O completion function
507 * @bio: Protected bio
508 * @error: Pointer to errno
509 *
510 * Description: Completion for integrity I/O
511 *
512 * Normally I/O completion is done in interrupt context. However,
513 * verifying I/O integrity is a time-consuming task which must be run
514 * in process context. This function postpones completion
515 * accordingly.
516 */
517void bio_integrity_endio(struct bio *bio, int error)
518{
519 struct bio_integrity_payload *bip = bio->bi_integrity;
520
521 BUG_ON(bip->bip_bio != bio);
522
523 /* In case of an I/O error there is no point in verifying the
524 * integrity metadata. Restore original bio end_io handler
525 * and run it.
526 */
527 if (error) {
528 bio->bi_end_io = bip->bip_end_io;
529 bio_endio(bio, error);
530
531 return;
532 }
533
534 INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
535 queue_work(kintegrityd_wq, &bip->bip_work);
536}
537EXPORT_SYMBOL(bio_integrity_endio);
538
539/**
540 * bio_integrity_advance - Advance integrity vector
541 * @bio: bio whose integrity vector to update
542 * @bytes_done: number of data bytes that have been completed
543 *
544 * Description: This function calculates how many integrity bytes the
545 * number of completed data bytes correspond to and advances the
546 * integrity vector accordingly.
547 */
548void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
549{
550 struct bio_integrity_payload *bip = bio->bi_integrity;
551 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
552 unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
553
554 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
555}
556EXPORT_SYMBOL(bio_integrity_advance);
557
558/**
559 * bio_integrity_trim - Trim integrity vector
560 * @bio: bio whose integrity vector to update
561 * @offset: offset to first data sector
562 * @sectors: number of data sectors
563 *
564 * Description: Used to trim the integrity vector in a cloned bio.
565 * The ivec will be advanced corresponding to 'offset' data sectors
566 * and the length will be truncated corresponding to 'len' data
567 * sectors.
568 */
569void bio_integrity_trim(struct bio *bio, unsigned int offset,
570 unsigned int sectors)
571{
572 struct bio_integrity_payload *bip = bio->bi_integrity;
573 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
574
575 bio_integrity_advance(bio, offset << 9);
576 bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors);
577}
578EXPORT_SYMBOL(bio_integrity_trim);
579
580/**
581 * bio_integrity_clone - Callback for cloning bios with integrity metadata
582 * @bio: New bio
583 * @bio_src: Original bio
584 * @gfp_mask: Memory allocation mask
585 *
586 * Description: Called to allocate a bip when cloning a bio
587 */
588int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
589 gfp_t gfp_mask)
590{
591 struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
592 struct bio_integrity_payload *bip;
593
594 BUG_ON(bip_src == NULL);
595
596 bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
597
598 if (bip == NULL)
599 return -EIO;
600
601 memcpy(bip->bip_vec, bip_src->bip_vec,
602 bip_src->bip_vcnt * sizeof(struct bio_vec));
603
604 bip->bip_vcnt = bip_src->bip_vcnt;
605 bip->bip_iter = bip_src->bip_iter;
606
607 return 0;
608}
609EXPORT_SYMBOL(bio_integrity_clone);
610
611int bioset_integrity_create(struct bio_set *bs, int pool_size)
612{
613 if (bs->bio_integrity_pool)
614 return 0;
615
616 bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab);
617 if (!bs->bio_integrity_pool)
618 return -1;
619
620 bs->bvec_integrity_pool = biovec_create_pool(pool_size);
621 if (!bs->bvec_integrity_pool) {
622 mempool_destroy(bs->bio_integrity_pool);
623 return -1;
624 }
625
626 return 0;
627}
628EXPORT_SYMBOL(bioset_integrity_create);
629
630void bioset_integrity_free(struct bio_set *bs)
631{
632 if (bs->bio_integrity_pool)
633 mempool_destroy(bs->bio_integrity_pool);
634
635 if (bs->bvec_integrity_pool)
636 mempool_destroy(bs->bvec_integrity_pool);
637}
638EXPORT_SYMBOL(bioset_integrity_free);
639
640void __init bio_integrity_init(void)
641{
642 /*
643 * kintegrityd won't block much but may burn a lot of CPU cycles.
644 * Make it highpri CPU intensive wq with max concurrency of 1.
645 */
646 kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
647 WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
648 if (!kintegrityd_wq)
649 panic("Failed to create kintegrityd\n");
650
651 bip_slab = kmem_cache_create("bio_integrity_payload",
652 sizeof(struct bio_integrity_payload) +
653 sizeof(struct bio_vec) * BIP_INLINE_VECS,
654 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
655 if (!bip_slab)
656 panic("Failed to create slab\n");
657}
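/*
 * Illustrative sketch (not part of this patch): a stacking driver that
 * clones protected bios from a private bio_set would also create the
 * matching integrity pools. Pool size and error handling here are
 * assumptions; the helper name is hypothetical.
 */
static struct bio_set *example_create_bioset(void)
{
	struct bio_set *bs = bioset_create(BIO_POOL_SIZE, 0);

	if (!bs)
		return NULL;

	if (bioset_integrity_create(bs, BIO_POOL_SIZE)) {
		bioset_free(bs);
		return NULL;
	}

	return bs;
}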
diff --git a/block/bio.c b/block/bio.c
new file mode 100644
index 000000000000..1ba33657160f
--- /dev/null
+++ b/block/bio.c
@@ -0,0 +1,2038 @@
1/*
2 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
16 *
17 */
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/bio.h>
21#include <linux/blkdev.h>
22#include <linux/uio.h>
23#include <linux/iocontext.h>
24#include <linux/slab.h>
25#include <linux/init.h>
26#include <linux/kernel.h>
27#include <linux/export.h>
28#include <linux/mempool.h>
29#include <linux/workqueue.h>
30#include <linux/cgroup.h>
31#include <scsi/sg.h> /* for struct sg_iovec */
32
33#include <trace/events/block.h>
34
35/*
36 * Test patch to inline a certain number of bi_io_vec's inside the bio
37 * itself, to shrink a bio data allocation from two mempool calls to one
38 */
39#define BIO_INLINE_VECS 4
40
41/*
42 * if you change this list, also change bvec_alloc or things will
43 * break badly! cannot be bigger than what you can fit into an
44 * unsigned short
45 */
46#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
47static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
48 BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
49};
50#undef BV
51
52/*
53 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
54 * IO code that does not need private memory pools.
55 */
56struct bio_set *fs_bio_set;
57EXPORT_SYMBOL(fs_bio_set);
58
59/*
60 * Our slab pool management
61 */
62struct bio_slab {
63 struct kmem_cache *slab;
64 unsigned int slab_ref;
65 unsigned int slab_size;
66 char name[8];
67};
68static DEFINE_MUTEX(bio_slab_lock);
69static struct bio_slab *bio_slabs;
70static unsigned int bio_slab_nr, bio_slab_max;
71
72static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
73{
74 unsigned int sz = sizeof(struct bio) + extra_size;
75 struct kmem_cache *slab = NULL;
76 struct bio_slab *bslab, *new_bio_slabs;
77 unsigned int new_bio_slab_max;
78 unsigned int i, entry = -1;
79
80 mutex_lock(&bio_slab_lock);
81
82 i = 0;
83 while (i < bio_slab_nr) {
84 bslab = &bio_slabs[i];
85
86 if (!bslab->slab && entry == -1)
87 entry = i;
88 else if (bslab->slab_size == sz) {
89 slab = bslab->slab;
90 bslab->slab_ref++;
91 break;
92 }
93 i++;
94 }
95
96 if (slab)
97 goto out_unlock;
98
99 if (bio_slab_nr == bio_slab_max && entry == -1) {
100 new_bio_slab_max = bio_slab_max << 1;
101 new_bio_slabs = krealloc(bio_slabs,
102 new_bio_slab_max * sizeof(struct bio_slab),
103 GFP_KERNEL);
104 if (!new_bio_slabs)
105 goto out_unlock;
106 bio_slab_max = new_bio_slab_max;
107 bio_slabs = new_bio_slabs;
108 }
109 if (entry == -1)
110 entry = bio_slab_nr++;
111
112 bslab = &bio_slabs[entry];
113
114 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
115 slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
116 if (!slab)
117 goto out_unlock;
118
119 bslab->slab = slab;
120 bslab->slab_ref = 1;
121 bslab->slab_size = sz;
122out_unlock:
123 mutex_unlock(&bio_slab_lock);
124 return slab;
125}
126
127static void bio_put_slab(struct bio_set *bs)
128{
129 struct bio_slab *bslab = NULL;
130 unsigned int i;
131
132 mutex_lock(&bio_slab_lock);
133
134 for (i = 0; i < bio_slab_nr; i++) {
135 if (bs->bio_slab == bio_slabs[i].slab) {
136 bslab = &bio_slabs[i];
137 break;
138 }
139 }
140
141 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
142 goto out;
143
144 WARN_ON(!bslab->slab_ref);
145
146 if (--bslab->slab_ref)
147 goto out;
148
149 kmem_cache_destroy(bslab->slab);
150 bslab->slab = NULL;
151
152out:
153 mutex_unlock(&bio_slab_lock);
154}
155
156unsigned int bvec_nr_vecs(unsigned short idx)
157{
158 return bvec_slabs[idx].nr_vecs;
159}
160
161void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
162{
163 BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
164
165 if (idx == BIOVEC_MAX_IDX)
166 mempool_free(bv, pool);
167 else {
168 struct biovec_slab *bvs = bvec_slabs + idx;
169
170 kmem_cache_free(bvs->slab, bv);
171 }
172}
173
174struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
175 mempool_t *pool)
176{
177 struct bio_vec *bvl;
178
179 /*
180 * see comment near bvec_array define!
181 */
182 switch (nr) {
183 case 1:
184 *idx = 0;
185 break;
186 case 2 ... 4:
187 *idx = 1;
188 break;
189 case 5 ... 16:
190 *idx = 2;
191 break;
192 case 17 ... 64:
193 *idx = 3;
194 break;
195 case 65 ... 128:
196 *idx = 4;
197 break;
198 case 129 ... BIO_MAX_PAGES:
199 *idx = 5;
200 break;
201 default:
202 return NULL;
203 }
204
205 /*
206 * idx now points to the pool we want to allocate from. only the
207 * 1-vec entry pool is mempool backed.
208 */
209 if (*idx == BIOVEC_MAX_IDX) {
210fallback:
211 bvl = mempool_alloc(pool, gfp_mask);
212 } else {
213 struct biovec_slab *bvs = bvec_slabs + *idx;
214 gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
215
216 /*
217 * Make this allocation restricted and don't dump info on
218 * allocation failures, since we'll fallback to the mempool
219 * in case of failure.
220 */
221 __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
222
223 /*
224 * Try a slab allocation. If this fails and __GFP_WAIT
225 * is set, retry with the 1-entry mempool
226 */
227 bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
228 if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
229 *idx = BIOVEC_MAX_IDX;
230 goto fallback;
231 }
232 }
233
234 return bvl;
235}
236
237static void __bio_free(struct bio *bio)
238{
239 bio_disassociate_task(bio);
240
241 if (bio_integrity(bio))
242 bio_integrity_free(bio);
243}
244
245static void bio_free(struct bio *bio)
246{
247 struct bio_set *bs = bio->bi_pool;
248 void *p;
249
250 __bio_free(bio);
251
252 if (bs) {
253 if (bio_flagged(bio, BIO_OWNS_VEC))
254 bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));
255
256 /*
257 * If we have front padding, adjust the bio pointer before freeing
258 */
259 p = bio;
260 p -= bs->front_pad;
261
262 mempool_free(p, bs->bio_pool);
263 } else {
264 /* Bio was allocated by bio_kmalloc() */
265 kfree(bio);
266 }
267}
268
269void bio_init(struct bio *bio)
270{
271 memset(bio, 0, sizeof(*bio));
272 bio->bi_flags = 1 << BIO_UPTODATE;
273 atomic_set(&bio->bi_remaining, 1);
274 atomic_set(&bio->bi_cnt, 1);
275}
276EXPORT_SYMBOL(bio_init);
277
278/**
279 * bio_reset - reinitialize a bio
280 * @bio: bio to reset
281 *
282 * Description:
283 * After calling bio_reset(), @bio will be in the same state as a freshly
284 * allocated bio returned by bio_alloc_bioset() - the only fields that are
285 * preserved are the ones that are initialized by bio_alloc_bioset(). See
286 * comment in struct bio.
287 */
288void bio_reset(struct bio *bio)
289{
290 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
291
292 __bio_free(bio);
293
294 memset(bio, 0, BIO_RESET_BYTES);
295 bio->bi_flags = flags|(1 << BIO_UPTODATE);
296 atomic_set(&bio->bi_remaining, 1);
297}
298EXPORT_SYMBOL(bio_reset);
299
300static void bio_chain_endio(struct bio *bio, int error)
301{
302 bio_endio(bio->bi_private, error);
303 bio_put(bio);
304}
305
306/**
307 * bio_chain - chain bio completions
308 * @bio: the target bio
309 * @parent: the @bio's parent bio
310 *
311 * The caller won't have a bi_end_io called when @bio completes - instead,
312 * @parent's bi_end_io won't be called until both @parent and @bio have
313 * completed; the chained bio will also be freed when it completes.
314 *
315 * The caller must not set bi_private or bi_end_io in @bio.
316 */
317void bio_chain(struct bio *bio, struct bio *parent)
318{
319 BUG_ON(bio->bi_private || bio->bi_end_io);
320
321 bio->bi_private = parent;
322 bio->bi_end_io = bio_chain_endio;
323 atomic_inc(&parent->bi_remaining);
324}
325EXPORT_SYMBOL(bio_chain);
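/*
 * Illustrative sketch (not part of this patch): the chaining pattern used
 * when one logical operation is issued as several bios. The helper name is
 * hypothetical; @parent is assumed to have its own bi_end_io, and @child
 * must not have bi_end_io/bi_private set by the caller.
 */
static void example_chain_and_submit(struct bio *parent, struct bio *child, int rw)
{
	bio_chain(child, parent);
	submit_bio(rw, child);

	/* parent's bi_end_io will not run until child has also completed */
	submit_bio(rw, parent);
}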
326
327static void bio_alloc_rescue(struct work_struct *work)
328{
329 struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
330 struct bio *bio;
331
332 while (1) {
333 spin_lock(&bs->rescue_lock);
334 bio = bio_list_pop(&bs->rescue_list);
335 spin_unlock(&bs->rescue_lock);
336
337 if (!bio)
338 break;
339
340 generic_make_request(bio);
341 }
342}
343
344static void punt_bios_to_rescuer(struct bio_set *bs)
345{
346 struct bio_list punt, nopunt;
347 struct bio *bio;
348
349 /*
350 * In order to guarantee forward progress we must punt only bios that
351 * were allocated from this bio_set; otherwise, if there was a bio on
352 * there for a stacking driver higher up in the stack, processing it
353 * could require allocating bios from this bio_set, and doing that from
354 * our own rescuer would be bad.
355 *
356 * Since bio lists are singly linked, pop them all instead of trying to
357 * remove from the middle of the list:
358 */
359
360 bio_list_init(&punt);
361 bio_list_init(&nopunt);
362
363 while ((bio = bio_list_pop(current->bio_list)))
364 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
365
366 *current->bio_list = nopunt;
367
368 spin_lock(&bs->rescue_lock);
369 bio_list_merge(&bs->rescue_list, &punt);
370 spin_unlock(&bs->rescue_lock);
371
372 queue_work(bs->rescue_workqueue, &bs->rescue_work);
373}
374
375/**
376 * bio_alloc_bioset - allocate a bio for I/O
377 * @gfp_mask: the GFP_ mask given to the slab allocator
378 * @nr_iovecs: number of iovecs to pre-allocate
379 * @bs: the bio_set to allocate from.
380 *
381 * Description:
382 * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
383 * backed by the @bs's mempool.
384 *
385 * When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
386 * able to allocate a bio. This is due to the mempool guarantees. To make this
387 * work, callers must never allocate more than 1 bio at a time from this pool.
388 * Callers that need to allocate more than 1 bio must always submit the
389 * previously allocated bio for IO before attempting to allocate a new one.
390 * Failure to do so can cause deadlocks under memory pressure.
391 *
392 * Note that when running under generic_make_request() (i.e. any block
393 * driver), bios are not submitted until after you return - see the code in
394 * generic_make_request() that converts recursion into iteration, to prevent
395 * stack overflows.
396 *
397 * This would normally mean allocating multiple bios under
398 * generic_make_request() would be susceptible to deadlocks, but we have
399 * deadlock avoidance code that resubmits any blocked bios from a rescuer
400 * thread.
401 *
402 * However, we do not guarantee forward progress for allocations from other
403 * mempools. Doing multiple allocations from the same mempool under
404 * generic_make_request() should be avoided - instead, use bio_set's front_pad
405 * for per bio allocations.
406 *
407 * RETURNS:
408 * Pointer to new bio on success, NULL on failure.
409 */
410struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
411{
412 gfp_t saved_gfp = gfp_mask;
413 unsigned front_pad;
414 unsigned inline_vecs;
415 unsigned long idx = BIO_POOL_NONE;
416 struct bio_vec *bvl = NULL;
417 struct bio *bio;
418 void *p;
419
420 if (!bs) {
421 if (nr_iovecs > UIO_MAXIOV)
422 return NULL;
423
424 p = kmalloc(sizeof(struct bio) +
425 nr_iovecs * sizeof(struct bio_vec),
426 gfp_mask);
427 front_pad = 0;
428 inline_vecs = nr_iovecs;
429 } else {
430 /*
431 * generic_make_request() converts recursion to iteration; this
432 * means if we're running beneath it, any bios we allocate and
433 * submit will not be submitted (and thus freed) until after we
434 * return.
435 *
436 * This exposes us to a potential deadlock if we allocate
437 * multiple bios from the same bio_set() while running
438 * underneath generic_make_request(). If we were to allocate
439 * multiple bios (say a stacking block driver that was splitting
440 * bios), we would deadlock if we exhausted the mempool's
441 * reserve.
442 *
443 * We solve this, and guarantee forward progress, with a rescuer
444 * workqueue per bio_set. If we go to allocate and there are
445 * bios on current->bio_list, we first try the allocation
446 * without __GFP_WAIT; if that fails, we punt those bios we
447 * would be blocking to the rescuer workqueue before we retry
448 * with the original gfp_flags.
449 */
450
451 if (current->bio_list && !bio_list_empty(current->bio_list))
452 gfp_mask &= ~__GFP_WAIT;
453
454 p = mempool_alloc(bs->bio_pool, gfp_mask);
455 if (!p && gfp_mask != saved_gfp) {
456 punt_bios_to_rescuer(bs);
457 gfp_mask = saved_gfp;
458 p = mempool_alloc(bs->bio_pool, gfp_mask);
459 }
460
461 front_pad = bs->front_pad;
462 inline_vecs = BIO_INLINE_VECS;
463 }
464
465 if (unlikely(!p))
466 return NULL;
467
468 bio = p + front_pad;
469 bio_init(bio);
470
471 if (nr_iovecs > inline_vecs) {
472 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
473 if (!bvl && gfp_mask != saved_gfp) {
474 punt_bios_to_rescuer(bs);
475 gfp_mask = saved_gfp;
476 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
477 }
478
479 if (unlikely(!bvl))
480 goto err_free;
481
482 bio->bi_flags |= 1 << BIO_OWNS_VEC;
483 } else if (nr_iovecs) {
484 bvl = bio->bi_inline_vecs;
485 }
486
487 bio->bi_pool = bs;
488 bio->bi_flags |= idx << BIO_POOL_OFFSET;
489 bio->bi_max_vecs = nr_iovecs;
490 bio->bi_io_vec = bvl;
491 return bio;
492
493err_free:
494 mempool_free(p, bs->bio_pool);
495 return NULL;
496}
497EXPORT_SYMBOL(bio_alloc_bioset);
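/*
 * Illustrative sketch (not part of this patch): allocating from a private
 * bio_set under the "one bio at a time" rule described above - each bio is
 * submitted before the next allocation so the mempool guarantee holds.
 * The helper name and the one-page-per-bio layout are assumptions.
 */
static void example_issue_from_bioset(struct bio_set *bs,
				      struct block_device *bdev,
				      struct page **pages, unsigned int nr,
				      int rw)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		/* GFP_NOIO includes __GFP_WAIT, so the mempool backs this allocation */
		struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);

		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = i * (PAGE_SIZE >> 9);
		bio_add_page(bio, pages[i], PAGE_SIZE, 0);

		/* submit before allocating the next bio from the same pool */
		submit_bio(rw, bio);
	}
}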
498
499void zero_fill_bio(struct bio *bio)
500{
501 unsigned long flags;
502 struct bio_vec bv;
503 struct bvec_iter iter;
504
505 bio_for_each_segment(bv, bio, iter) {
506 char *data = bvec_kmap_irq(&bv, &flags);
507 memset(data, 0, bv.bv_len);
508 flush_dcache_page(bv.bv_page);
509 bvec_kunmap_irq(data, &flags);
510 }
511}
512EXPORT_SYMBOL(zero_fill_bio);
513
514/**
515 * bio_put - release a reference to a bio
516 * @bio: bio to release reference to
517 *
518 * Description:
519 * Put a reference to a &struct bio, either one you have gotten with
520 * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
521 **/
522void bio_put(struct bio *bio)
523{
524 BIO_BUG_ON(!atomic_read(&bio->bi_cnt));
525
526 /*
527 * last put frees it
528 */
529 if (atomic_dec_and_test(&bio->bi_cnt))
530 bio_free(bio);
531}
532EXPORT_SYMBOL(bio_put);
533
534inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
535{
536 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
537 blk_recount_segments(q, bio);
538
539 return bio->bi_phys_segments;
540}
541EXPORT_SYMBOL(bio_phys_segments);
542
543/**
544 * __bio_clone_fast - clone a bio that shares the original bio's biovec
545 * @bio: destination bio
546 * @bio_src: bio to clone
547 *
548 * Clone a &bio. Caller will own the returned bio, but not
549 * the actual data it points to. Reference count of returned
550 * bio will be one.
551 *
552 * Caller must ensure that @bio_src is not freed before @bio.
553 */
554void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
555{
556 BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);
557
558 /*
559 * most users will be overriding ->bi_bdev with a new target,
560 * so we don't set nor calculate new physical/hw segment counts here
561 */
562 bio->bi_bdev = bio_src->bi_bdev;
563 bio->bi_flags |= 1 << BIO_CLONED;
564 bio->bi_rw = bio_src->bi_rw;
565 bio->bi_iter = bio_src->bi_iter;
566 bio->bi_io_vec = bio_src->bi_io_vec;
567}
568EXPORT_SYMBOL(__bio_clone_fast);
569
570/**
571 * bio_clone_fast - clone a bio that shares the original bio's biovec
572 * @bio: bio to clone
573 * @gfp_mask: allocation priority
574 * @bs: bio_set to allocate from
575 *
576 * Like __bio_clone_fast, only also allocates the returned bio
577 */
578struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
579{
580 struct bio *b;
581
582 b = bio_alloc_bioset(gfp_mask, 0, bs);
583 if (!b)
584 return NULL;
585
586 __bio_clone_fast(b, bio);
587
588 if (bio_integrity(bio)) {
589 int ret;
590
591 ret = bio_integrity_clone(b, bio, gfp_mask);
592
593 if (ret < 0) {
594 bio_put(b);
595 return NULL;
596 }
597 }
598
599 return b;
600}
601EXPORT_SYMBOL(bio_clone_fast);
602
603/**
604 * bio_clone_bioset - clone a bio
605 * @bio_src: bio to clone
606 * @gfp_mask: allocation priority
607 * @bs: bio_set to allocate from
608 *
609 * Clone bio. Caller will own the returned bio, but not the actual data it
610 * points to. Reference count of returned bio will be one.
611 */
612struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
613 struct bio_set *bs)
614{
615 struct bvec_iter iter;
616 struct bio_vec bv;
617 struct bio *bio;
618
619 /*
620 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
621 * bio_src->bi_io_vec to bio->bi_io_vec.
622 *
623 * We can't do that anymore, because:
624 *
625 * - The point of cloning the biovec is to produce a bio with a biovec
626 * the caller can modify: bi_idx and bi_bvec_done should be 0.
627 *
628 * - The original bio could've had more than BIO_MAX_PAGES biovecs; if
629 * we tried to clone the whole thing bio_alloc_bioset() would fail.
630 * But the clone should succeed as long as the number of biovecs we
631 * actually need to allocate is fewer than BIO_MAX_PAGES.
632 *
633 * - Lastly, bi_vcnt should not be looked at or relied upon by code
634 * that does not own the bio - reason being drivers don't use it for
635 * iterating over the biovec anymore, so expecting it to be kept up
636 * to date (i.e. for clones that share the parent biovec) is just
637 * asking for trouble and would force extra work on
638 * __bio_clone_fast() anyways.
639 */
640
641 bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
642 if (!bio)
643 return NULL;
644
645 bio->bi_bdev = bio_src->bi_bdev;
646 bio->bi_rw = bio_src->bi_rw;
647 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
648 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
649
650 if (bio->bi_rw & REQ_DISCARD)
651 goto integrity_clone;
652
653 if (bio->bi_rw & REQ_WRITE_SAME) {
654 bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
655 goto integrity_clone;
656 }
657
658 bio_for_each_segment(bv, bio_src, iter)
659 bio->bi_io_vec[bio->bi_vcnt++] = bv;
660
661integrity_clone:
662 if (bio_integrity(bio_src)) {
663 int ret;
664
665 ret = bio_integrity_clone(bio, bio_src, gfp_mask);
666 if (ret < 0) {
667 bio_put(bio);
668 return NULL;
669 }
670 }
671
672 return bio;
673}
674EXPORT_SYMBOL(bio_clone_bioset);
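/*
 * Illustrative sketch (not part of this patch): a stacking driver remapping
 * a bio to another device clones it, retargets the clone, and completes the
 * original from the clone's endio. Function names and the sector offset
 * handling are hypothetical.
 */
static void example_remap_endio(struct bio *clone, int error)
{
	struct bio *orig = clone->bi_private;

	bio_put(clone);
	bio_endio(orig, error);
}

static int example_remap(struct bio *bio, struct block_device *target,
			 sector_t offset, struct bio_set *bs)
{
	struct bio *clone = bio_clone_bioset(bio, GFP_NOIO, bs);

	if (!clone)
		return -ENOMEM;

	clone->bi_bdev = target;
	clone->bi_iter.bi_sector += offset;
	clone->bi_end_io = example_remap_endio;
	clone->bi_private = bio;
	generic_make_request(clone);
	return 0;
}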
675
676/**
677 * bio_get_nr_vecs - return approx number of vecs
678 * @bdev: I/O target
679 *
680 * Return the approximate number of pages we can send to this target.
681 * There's no guarantee that you will be able to fit this number of pages
682 * into a bio, it does not account for dynamic restrictions that vary
683 * on offset.
684 */
685int bio_get_nr_vecs(struct block_device *bdev)
686{
687 struct request_queue *q = bdev_get_queue(bdev);
688 int nr_pages;
689
690 nr_pages = min_t(unsigned,
691 queue_max_segments(q),
692 queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
693
694 return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
695
696}
697EXPORT_SYMBOL(bio_get_nr_vecs);
698
699static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
700 *page, unsigned int len, unsigned int offset,
701 unsigned int max_sectors)
702{
703 int retried_segments = 0;
704 struct bio_vec *bvec;
705
706 /*
707 * cloned bio must not modify vec list
708 */
709 if (unlikely(bio_flagged(bio, BIO_CLONED)))
710 return 0;
711
712 if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
713 return 0;
714
715 /*
716 * For filesystems with a blocksize smaller than the pagesize
717 * we will often be called with the same page as last time and
718 * a consecutive offset. Optimize this special case.
719 */
720 if (bio->bi_vcnt > 0) {
721 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
722
723 if (page == prev->bv_page &&
724 offset == prev->bv_offset + prev->bv_len) {
725 unsigned int prev_bv_len = prev->bv_len;
726 prev->bv_len += len;
727
728 if (q->merge_bvec_fn) {
729 struct bvec_merge_data bvm = {
730 /* prev_bvec is already charged in
731 bi_size, discharge it in order to
732 simulate merging updated prev_bvec
733 as new bvec. */
734 .bi_bdev = bio->bi_bdev,
735 .bi_sector = bio->bi_iter.bi_sector,
736 .bi_size = bio->bi_iter.bi_size -
737 prev_bv_len,
738 .bi_rw = bio->bi_rw,
739 };
740
741 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
742 prev->bv_len -= len;
743 return 0;
744 }
745 }
746
747 goto done;
748 }
749 }
750
751 if (bio->bi_vcnt >= bio->bi_max_vecs)
752 return 0;
753
754 /*
755 * we might lose a segment or two here, but rather that than
756 * make this too complex.
757 */
758
759 while (bio->bi_phys_segments >= queue_max_segments(q)) {
760
761 if (retried_segments)
762 return 0;
763
764 retried_segments = 1;
765 blk_recount_segments(q, bio);
766 }
767
768 /*
769 * setup the new entry, we might clear it again later if we
770 * cannot add the page
771 */
772 bvec = &bio->bi_io_vec[bio->bi_vcnt];
773 bvec->bv_page = page;
774 bvec->bv_len = len;
775 bvec->bv_offset = offset;
776
777 /*
778 * if queue has other restrictions (eg varying max sector size
779 * depending on offset), it can specify a merge_bvec_fn in the
780 * queue to get further control
781 */
782 if (q->merge_bvec_fn) {
783 struct bvec_merge_data bvm = {
784 .bi_bdev = bio->bi_bdev,
785 .bi_sector = bio->bi_iter.bi_sector,
786 .bi_size = bio->bi_iter.bi_size,
787 .bi_rw = bio->bi_rw,
788 };
789
790 /*
791 * merge_bvec_fn() returns number of bytes it can accept
792 * at this offset
793 */
794 if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
795 bvec->bv_page = NULL;
796 bvec->bv_len = 0;
797 bvec->bv_offset = 0;
798 return 0;
799 }
800 }
801
802 /* If we may be able to merge these biovecs, force a recount */
803 if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
804 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
805
806 bio->bi_vcnt++;
807 bio->bi_phys_segments++;
808 done:
809 bio->bi_iter.bi_size += len;
810 return len;
811}
812
813/**
814 * bio_add_pc_page - attempt to add page to bio
815 * @q: the target queue
816 * @bio: destination bio
817 * @page: page to add
818 * @len: vec entry length
819 * @offset: vec entry offset
820 *
821 * Attempt to add a page to the bio_vec maplist. This can fail for a
822 * number of reasons, such as the bio being full or target block device
823 * limitations. The target block device must allow bio's up to PAGE_SIZE,
824 * so it is always possible to add a single page to an empty bio.
825 *
826 * This should only be used by REQ_PC bios.
827 */
828int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
829 unsigned int len, unsigned int offset)
830{
831 return __bio_add_page(q, bio, page, len, offset,
832 queue_max_hw_sectors(q));
833}
834EXPORT_SYMBOL(bio_add_pc_page);
835
836/**
837 * bio_add_page - attempt to add page to bio
838 * @bio: destination bio
839 * @page: page to add
840 * @len: vec entry length
841 * @offset: vec entry offset
842 *
843 * Attempt to add a page to the bio_vec maplist. This can fail for a
844 * number of reasons, such as the bio being full or target block device
845 * limitations. The target block device must allow bio's up to PAGE_SIZE,
846 * so it is always possible to add a single page to an empty bio.
847 */
848int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
849 unsigned int offset)
850{
851 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
852 return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
853}
854EXPORT_SYMBOL(bio_add_page);
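/*
 * Illustrative sketch (not part of this patch): filling a bio page by page
 * and stopping as soon as bio_add_page() cannot accept the full length
 * (bio full or a queue limit hit). The page array is caller-provided and
 * the helper name is hypothetical.
 */
static unsigned int example_fill_bio(struct bio *bio, struct page **pages,
				     unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++)
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
			break;

	return i;	/* number of pages actually added */
}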
855
856struct submit_bio_ret {
857 struct completion event;
858 int error;
859};
860
861static void submit_bio_wait_endio(struct bio *bio, int error)
862{
863 struct submit_bio_ret *ret = bio->bi_private;
864
865 ret->error = error;
866 complete(&ret->event);
867}
868
869/**
870 * submit_bio_wait - submit a bio, and wait until it completes
871 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
872 * @bio: The &struct bio which describes the I/O
873 *
874 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
875 * bio_endio() on failure.
876 */
877int submit_bio_wait(int rw, struct bio *bio)
878{
879 struct submit_bio_ret ret;
880
881 rw |= REQ_SYNC;
882 init_completion(&ret.event);
883 bio->bi_private = &ret;
884 bio->bi_end_io = submit_bio_wait_endio;
885 submit_bio(rw, bio);
886 wait_for_completion(&ret.event);
887
888 return ret.error;
889}
890EXPORT_SYMBOL(submit_bio_wait);
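/*
 * Illustrative sketch (not part of this patch): a synchronous single-page
 * read built on submit_bio_wait(); the caller supplies the page and sector,
 * and the helper name is hypothetical.
 */
static int example_sync_read(struct block_device *bdev, sector_t sector,
			     struct page *page)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	/* a single page always fits in an empty bio */
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(READ, bio);
	bio_put(bio);
	return ret;
}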
891
892/**
893 * bio_advance - increment/complete a bio by some number of bytes
894 * @bio: bio to advance
895 * @bytes: number of bytes to complete
896 *
897 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
898 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
899 * be updated on the last bvec as well.
900 *
901 * @bio will then represent the remaining, uncompleted portion of the io.
902 */
903void bio_advance(struct bio *bio, unsigned bytes)
904{
905 if (bio_integrity(bio))
906 bio_integrity_advance(bio, bytes);
907
908 bio_advance_iter(bio, &bio->bi_iter, bytes);
909}
910EXPORT_SYMBOL(bio_advance);
911
912/**
913 * bio_alloc_pages - allocates a single page for each bvec in a bio
914 * @bio: bio to allocate pages for
915 * @gfp_mask: flags for allocation
916 *
917 * Allocates pages up to @bio->bi_vcnt.
918 *
919 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
920 * freed.
921 */
922int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
923{
924 int i;
925 struct bio_vec *bv;
926
927 bio_for_each_segment_all(bv, bio, i) {
928 bv->bv_page = alloc_page(gfp_mask);
929 if (!bv->bv_page) {
930 while (--bv >= bio->bi_io_vec)
931 __free_page(bv->bv_page);
932 return -ENOMEM;
933 }
934 }
935
936 return 0;
937}
938EXPORT_SYMBOL(bio_alloc_pages);
939
940/**
941 * bio_copy_data - copy contents of data buffers from one chain of bios to
942 * another
943 * @src: source bio list
944 * @dst: destination bio list
945 *
946 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
947 * @src and @dst as linked lists of bios.
948 *
949 * Stops when it reaches the end of either @src or @dst - that is, copies
950 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
951 */
952void bio_copy_data(struct bio *dst, struct bio *src)
953{
954 struct bvec_iter src_iter, dst_iter;
955 struct bio_vec src_bv, dst_bv;
956 void *src_p, *dst_p;
957 unsigned bytes;
958
959 src_iter = src->bi_iter;
960 dst_iter = dst->bi_iter;
961
962 while (1) {
963 if (!src_iter.bi_size) {
964 src = src->bi_next;
965 if (!src)
966 break;
967
968 src_iter = src->bi_iter;
969 }
970
971 if (!dst_iter.bi_size) {
972 dst = dst->bi_next;
973 if (!dst)
974 break;
975
976 dst_iter = dst->bi_iter;
977 }
978
979 src_bv = bio_iter_iovec(src, src_iter);
980 dst_bv = bio_iter_iovec(dst, dst_iter);
981
982 bytes = min(src_bv.bv_len, dst_bv.bv_len);
983
984 src_p = kmap_atomic(src_bv.bv_page);
985 dst_p = kmap_atomic(dst_bv.bv_page);
986
987 memcpy(dst_p + dst_bv.bv_offset,
988 src_p + src_bv.bv_offset,
989 bytes);
990
991 kunmap_atomic(dst_p);
992 kunmap_atomic(src_p);
993
994 bio_advance_iter(src, &src_iter, bytes);
995 bio_advance_iter(dst, &dst_iter, bytes);
996 }
997}
998EXPORT_SYMBOL(bio_copy_data);
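/*
 * Illustrative sketch (not part of this patch): a bounce-style copy that
 * backs a clone with freshly allocated pages and copies the payload over,
 * as bounce.c and some stacking drivers do. @dst is assumed to be a clone
 * of @src whose biovec layout (bv_len/bv_offset) is already populated.
 */
static int example_bounce_copy(struct bio *dst, struct bio *src)
{
	int ret = bio_alloc_pages(dst, GFP_NOIO);

	if (ret)
		return ret;

	bio_copy_data(dst, src);
	return 0;
}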
999
1000struct bio_map_data {
1001 int nr_sgvecs;
1002 int is_our_pages;
1003 struct sg_iovec sgvecs[];
1004};
1005
1006static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
1007 const struct sg_iovec *iov, int iov_count,
1008 int is_our_pages)
1009{
1010 memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
1011 bmd->nr_sgvecs = iov_count;
1012 bmd->is_our_pages = is_our_pages;
1013 bio->bi_private = bmd;
1014}
1015
1016static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
1017 gfp_t gfp_mask)
1018{
1019 if (iov_count > UIO_MAXIOV)
1020 return NULL;
1021
1022 return kmalloc(sizeof(struct bio_map_data) +
1023 sizeof(struct sg_iovec) * iov_count, gfp_mask);
1024}
1025
1026static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count,
1027 int to_user, int from_user, int do_free_page)
1028{
1029 int ret = 0, i;
1030 struct bio_vec *bvec;
1031 int iov_idx = 0;
1032 unsigned int iov_off = 0;
1033
1034 bio_for_each_segment_all(bvec, bio, i) {
1035 char *bv_addr = page_address(bvec->bv_page);
1036 unsigned int bv_len = bvec->bv_len;
1037
1038 while (bv_len && iov_idx < iov_count) {
1039 unsigned int bytes;
1040 char __user *iov_addr;
1041
1042 bytes = min_t(unsigned int,
1043 iov[iov_idx].iov_len - iov_off, bv_len);
1044 iov_addr = iov[iov_idx].iov_base + iov_off;
1045
1046 if (!ret) {
1047 if (to_user)
1048 ret = copy_to_user(iov_addr, bv_addr,
1049 bytes);
1050
1051 if (from_user)
1052 ret = copy_from_user(bv_addr, iov_addr,
1053 bytes);
1054
1055 if (ret)
1056 ret = -EFAULT;
1057 }
1058
1059 bv_len -= bytes;
1060 bv_addr += bytes;
1061 iov_addr += bytes;
1062 iov_off += bytes;
1063
1064 if (iov[iov_idx].iov_len == iov_off) {
1065 iov_idx++;
1066 iov_off = 0;
1067 }
1068 }
1069
1070 if (do_free_page)
1071 __free_page(bvec->bv_page);
1072 }
1073
1074 return ret;
1075}
1076
1077/**
1078 * bio_uncopy_user - finish previously mapped bio
1079 * @bio: bio being terminated
1080 *
1081 * Free pages allocated from bio_copy_user() and write back data
1082 * to user space in case of a read.
1083 */
1084int bio_uncopy_user(struct bio *bio)
1085{
1086 struct bio_map_data *bmd = bio->bi_private;
1087 struct bio_vec *bvec;
1088 int ret = 0, i;
1089
1090 if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1091 /*
1092 * if we're in a workqueue, the request is orphaned, so
1093 * don't copy into a random user address space, just free.
1094 */
1095 if (current->mm)
1096 ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs,
1097 bio_data_dir(bio) == READ,
1098 0, bmd->is_our_pages);
1099 else if (bmd->is_our_pages)
1100 bio_for_each_segment_all(bvec, bio, i)
1101 __free_page(bvec->bv_page);
1102 }
1103 kfree(bmd);
1104 bio_put(bio);
1105 return ret;
1106}
1107EXPORT_SYMBOL(bio_uncopy_user);
1108
1109/**
1110 * bio_copy_user_iov - copy user data to bio
1111 * @q: destination block queue
1112 * @map_data: pointer to the rq_map_data holding pages (if necessary)
1113 * @iov: the iovec.
1114 * @iov_count: number of elements in the iovec
1115 * @write_to_vm: bool indicating writing to pages or not
1116 * @gfp_mask: memory allocation flags
1117 *
1118 * Prepares and returns a bio for indirect user io, bouncing data
1119 * to/from kernel pages as necessary. Must be paired with
1120 * a call to bio_uncopy_user() on io completion.
1121 */
1122struct bio *bio_copy_user_iov(struct request_queue *q,
1123 struct rq_map_data *map_data,
1124 const struct sg_iovec *iov, int iov_count,
1125 int write_to_vm, gfp_t gfp_mask)
1126{
1127 struct bio_map_data *bmd;
1128 struct bio_vec *bvec;
1129 struct page *page;
1130 struct bio *bio;
1131 int i, ret;
1132 int nr_pages = 0;
1133 unsigned int len = 0;
1134 unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
1135
1136 for (i = 0; i < iov_count; i++) {
1137 unsigned long uaddr;
1138 unsigned long end;
1139 unsigned long start;
1140
1141 uaddr = (unsigned long)iov[i].iov_base;
1142 end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1143 start = uaddr >> PAGE_SHIFT;
1144
1145 /*
1146 * Overflow, abort
1147 */
1148 if (end < start)
1149 return ERR_PTR(-EINVAL);
1150
1151 nr_pages += end - start;
1152 len += iov[i].iov_len;
1153 }
1154
1155 if (offset)
1156 nr_pages++;
1157
1158 bmd = bio_alloc_map_data(iov_count, gfp_mask);
1159 if (!bmd)
1160 return ERR_PTR(-ENOMEM);
1161
1162 ret = -ENOMEM;
1163 bio = bio_kmalloc(gfp_mask, nr_pages);
1164 if (!bio)
1165 goto out_bmd;
1166
1167 if (!write_to_vm)
1168 bio->bi_rw |= REQ_WRITE;
1169
1170 ret = 0;
1171
1172 if (map_data) {
1173 nr_pages = 1 << map_data->page_order;
1174 i = map_data->offset / PAGE_SIZE;
1175 }
1176 while (len) {
1177 unsigned int bytes = PAGE_SIZE;
1178
1179 bytes -= offset;
1180
1181 if (bytes > len)
1182 bytes = len;
1183
1184 if (map_data) {
1185 if (i == map_data->nr_entries * nr_pages) {
1186 ret = -ENOMEM;
1187 break;
1188 }
1189
1190 page = map_data->pages[i / nr_pages];
1191 page += (i % nr_pages);
1192
1193 i++;
1194 } else {
1195 page = alloc_page(q->bounce_gfp | gfp_mask);
1196 if (!page) {
1197 ret = -ENOMEM;
1198 break;
1199 }
1200 }
1201
1202 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1203 break;
1204
1205 len -= bytes;
1206 offset = 0;
1207 }
1208
1209 if (ret)
1210 goto cleanup;
1211
1212 /*
1213 * success
1214 */
1215 if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
1216 (map_data && map_data->from_user)) {
1217 ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0);
1218 if (ret)
1219 goto cleanup;
1220 }
1221
1222 bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
1223 return bio;
1224cleanup:
1225 if (!map_data)
1226 bio_for_each_segment_all(bvec, bio, i)
1227 __free_page(bvec->bv_page);
1228
1229 bio_put(bio);
1230out_bmd:
1231 kfree(bmd);
1232 return ERR_PTR(ret);
1233}
1234
1235/**
1236 * bio_copy_user - copy user data to bio
1237 * @q: destination block queue
1238 * @map_data: pointer to the rq_map_data holding pages (if necessary)
1239 * @uaddr: start of user address
1240 * @len: length in bytes
1241 * @write_to_vm: bool indicating writing to pages or not
1242 * @gfp_mask: memory allocation flags
1243 *
1244 * Prepares and returns a bio for indirect user io, bouncing data
1245 * to/from kernel pages as necessary. Must be paired with
1246 * a call to bio_uncopy_user() on io completion.
1247 */
1248struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
1249 unsigned long uaddr, unsigned int len,
1250 int write_to_vm, gfp_t gfp_mask)
1251{
1252 struct sg_iovec iov;
1253
1254 iov.iov_base = (void __user *)uaddr;
1255 iov.iov_len = len;
1256
1257 return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
1258}
1259EXPORT_SYMBOL(bio_copy_user);
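/*
 * A minimal usage sketch of the pairing documented above: bounce a user
 * buffer for a read and copy the data back on completion. The function and
 * variable names here are hypothetical and the submit/wait step is elided.
 */
static int example_bounce_user_read(struct request_queue *q,
				    void __user *ubuf, unsigned int len)
{
	struct bio *bio;

	/* write_to_vm == 1: the device fills the pages, data flows to user */
	bio = bio_copy_user(q, NULL, (unsigned long)ubuf, len, 1, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* ... submit the bio and wait for completion here ... */

	/* copies back to @ubuf, frees the bounce pages and drops the bio */
	return bio_uncopy_user(bio);
}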
1260
1261static struct bio *__bio_map_user_iov(struct request_queue *q,
1262 struct block_device *bdev,
1263 const struct sg_iovec *iov, int iov_count,
1264 int write_to_vm, gfp_t gfp_mask)
1265{
1266 int i, j;
1267 int nr_pages = 0;
1268 struct page **pages;
1269 struct bio *bio;
1270 int cur_page = 0;
1271 int ret, offset;
1272
1273 for (i = 0; i < iov_count; i++) {
1274 unsigned long uaddr = (unsigned long)iov[i].iov_base;
1275 unsigned long len = iov[i].iov_len;
1276 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1277 unsigned long start = uaddr >> PAGE_SHIFT;
1278
1279 /*
1280 * Overflow, abort
1281 */
1282 if (end < start)
1283 return ERR_PTR(-EINVAL);
1284
1285 nr_pages += end - start;
1286 /*
1287 * buffer must be aligned to at least hardsector size for now
1288 */
1289 if (uaddr & queue_dma_alignment(q))
1290 return ERR_PTR(-EINVAL);
1291 }
1292
1293 if (!nr_pages)
1294 return ERR_PTR(-EINVAL);
1295
1296 bio = bio_kmalloc(gfp_mask, nr_pages);
1297 if (!bio)
1298 return ERR_PTR(-ENOMEM);
1299
1300 ret = -ENOMEM;
1301 pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
1302 if (!pages)
1303 goto out;
1304
1305 for (i = 0; i < iov_count; i++) {
1306 unsigned long uaddr = (unsigned long)iov[i].iov_base;
1307 unsigned long len = iov[i].iov_len;
1308 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1309 unsigned long start = uaddr >> PAGE_SHIFT;
1310 const int local_nr_pages = end - start;
1311 const int page_limit = cur_page + local_nr_pages;
1312
1313 ret = get_user_pages_fast(uaddr, local_nr_pages,
1314 write_to_vm, &pages[cur_page]);
1315 if (ret < local_nr_pages) {
1316 ret = -EFAULT;
1317 goto out_unmap;
1318 }
1319
1320 offset = uaddr & ~PAGE_MASK;
1321 for (j = cur_page; j < page_limit; j++) {
1322 unsigned int bytes = PAGE_SIZE - offset;
1323
1324 if (len <= 0)
1325 break;
1326
1327 if (bytes > len)
1328 bytes = len;
1329
1330 /*
1331 * sorry...
1332 */
1333 if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
1334 bytes)
1335 break;
1336
1337 len -= bytes;
1338 offset = 0;
1339 }
1340
1341 cur_page = j;
1342 /*
1343 * release the pages we didn't map into the bio, if any
1344 */
1345 while (j < page_limit)
1346 page_cache_release(pages[j++]);
1347 }
1348
1349 kfree(pages);
1350
1351 /*
1352 * set data direction, and check if mapped pages need bouncing
1353 */
1354 if (!write_to_vm)
1355 bio->bi_rw |= REQ_WRITE;
1356
1357 bio->bi_bdev = bdev;
1358 bio->bi_flags |= (1 << BIO_USER_MAPPED);
1359 return bio;
1360
1361 out_unmap:
1362 for (i = 0; i < nr_pages; i++) {
1363		if (!pages[i])
1364 break;
1365 page_cache_release(pages[i]);
1366 }
1367 out:
1368 kfree(pages);
1369 bio_put(bio);
1370 return ERR_PTR(ret);
1371}
1372
1373/**
1374 * bio_map_user - map user address into bio
1375 * @q: the struct request_queue for the bio
1376 * @bdev: destination block device
1377 * @uaddr: start of user address
1378 * @len: length in bytes
1379 * @write_to_vm: bool indicating writing to pages or not
1380 * @gfp_mask: memory allocation flags
1381 *
1382 * Map the user space address into a bio suitable for io to a block
1383 * device. Returns an error pointer in case of error.
1384 */
1385struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
1386 unsigned long uaddr, unsigned int len, int write_to_vm,
1387 gfp_t gfp_mask)
1388{
1389 struct sg_iovec iov;
1390
1391 iov.iov_base = (void __user *)uaddr;
1392 iov.iov_len = len;
1393
1394 return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
1395}
1396EXPORT_SYMBOL(bio_map_user);
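/*
 * A minimal usage sketch of bio_map_user()/bio_unmap_user(): map a suitably
 * aligned user buffer directly (zero copy) and unmap it once the I/O has
 * completed. Names are hypothetical; the buffer must respect
 * queue_dma_alignment(q), and bio_unmap_user() must run in process context.
 */
static int example_map_user_read(struct request_queue *q,
				 struct block_device *bdev,
				 void __user *ubuf, unsigned int len)
{
	struct bio *bio;

	bio = bio_map_user(q, bdev, (unsigned long)ubuf, len, 1, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* ... submit the bio and wait for completion here ... */

	/* dirties/releases the pages and drops both bio references */
	bio_unmap_user(bio);
	return 0;
}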
1397
1398/**
1399 * bio_map_user_iov - map user sg_iovec table into bio
1400 * @q: the struct request_queue for the bio
1401 * @bdev: destination block device
1402 * @iov: the iovec.
1403 * @iov_count: number of elements in the iovec
1404 * @write_to_vm: bool indicating writing to pages or not
1405 * @gfp_mask: memory allocation flags
1406 *
1407 * Map the user space address into a bio suitable for io to a block
1408 * device. Returns an error pointer in case of error.
1409 */
1410struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
1411 const struct sg_iovec *iov, int iov_count,
1412 int write_to_vm, gfp_t gfp_mask)
1413{
1414 struct bio *bio;
1415
1416 bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
1417 gfp_mask);
1418 if (IS_ERR(bio))
1419 return bio;
1420
1421 /*
1422 * subtle -- if __bio_map_user() ended up bouncing a bio,
1423 * it would normally disappear when its bi_end_io is run.
1424 * however, we need it for the unmap, so grab an extra
1425 * reference to it
1426 */
1427 bio_get(bio);
1428
1429 return bio;
1430}
1431
1432static void __bio_unmap_user(struct bio *bio)
1433{
1434 struct bio_vec *bvec;
1435 int i;
1436
1437 /*
1438 * make sure we dirty pages we wrote to
1439 */
1440 bio_for_each_segment_all(bvec, bio, i) {
1441 if (bio_data_dir(bio) == READ)
1442 set_page_dirty_lock(bvec->bv_page);
1443
1444 page_cache_release(bvec->bv_page);
1445 }
1446
1447 bio_put(bio);
1448}
1449
1450/**
1451 * bio_unmap_user - unmap a bio
1452 * @bio: the bio being unmapped
1453 *
1454 * Unmap a bio previously mapped by bio_map_user(). Must be called from
1455 * process context.
1456 *
1457 * bio_unmap_user() may sleep.
1458 */
1459void bio_unmap_user(struct bio *bio)
1460{
1461 __bio_unmap_user(bio);
1462 bio_put(bio);
1463}
1464EXPORT_SYMBOL(bio_unmap_user);
1465
1466static void bio_map_kern_endio(struct bio *bio, int err)
1467{
1468 bio_put(bio);
1469}
1470
1471static struct bio *__bio_map_kern(struct request_queue *q, void *data,
1472 unsigned int len, gfp_t gfp_mask)
1473{
1474 unsigned long kaddr = (unsigned long)data;
1475 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1476 unsigned long start = kaddr >> PAGE_SHIFT;
1477 const int nr_pages = end - start;
1478 int offset, i;
1479 struct bio *bio;
1480
1481 bio = bio_kmalloc(gfp_mask, nr_pages);
1482 if (!bio)
1483 return ERR_PTR(-ENOMEM);
1484
1485 offset = offset_in_page(kaddr);
1486 for (i = 0; i < nr_pages; i++) {
1487 unsigned int bytes = PAGE_SIZE - offset;
1488
1489 if (len <= 0)
1490 break;
1491
1492 if (bytes > len)
1493 bytes = len;
1494
1495 if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
1496 offset) < bytes)
1497 break;
1498
1499 data += bytes;
1500 len -= bytes;
1501 offset = 0;
1502 }
1503
1504 bio->bi_end_io = bio_map_kern_endio;
1505 return bio;
1506}
1507
1508/**
1509 * bio_map_kern - map kernel address into bio
1510 * @q: the struct request_queue for the bio
1511 * @data: pointer to buffer to map
1512 * @len: length in bytes
1513 * @gfp_mask: allocation flags for bio allocation
1514 *
1515 * Map the kernel address into a bio suitable for io to a block
1516 * device. Returns an error pointer in case of error.
1517 */
1518struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1519 gfp_t gfp_mask)
1520{
1521 struct bio *bio;
1522
1523 bio = __bio_map_kern(q, data, len, gfp_mask);
1524 if (IS_ERR(bio))
1525 return bio;
1526
1527 if (bio->bi_iter.bi_size == len)
1528 return bio;
1529
1530 /*
1531 * Don't support partial mappings.
1532 */
1533 bio_put(bio);
1534 return ERR_PTR(-EINVAL);
1535}
1536EXPORT_SYMBOL(bio_map_kern);
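/*
 * A minimal usage sketch: map a physically contiguous kernel buffer for
 * I/O. Partial mappings are rejected, so hitting the queue limits shows up
 * as -EINVAL rather than as a silently shortened bio. Names are
 * hypothetical.
 */
static struct bio *example_map_kernel_buf(struct request_queue *q,
					  void *buf, unsigned int len)
{
	struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);

	if (IS_ERR(bio))
		return bio;		/* e.g. -EINVAL or -ENOMEM */

	/* the caller still sets bi_bdev / bi_rw and submits the bio */
	return bio;
}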
1537
1538static void bio_copy_kern_endio(struct bio *bio, int err)
1539{
1540 struct bio_vec *bvec;
1541 const int read = bio_data_dir(bio) == READ;
1542 struct bio_map_data *bmd = bio->bi_private;
1543 int i;
1544 char *p = bmd->sgvecs[0].iov_base;
1545
1546 bio_for_each_segment_all(bvec, bio, i) {
1547 char *addr = page_address(bvec->bv_page);
1548
1549 if (read)
1550 memcpy(p, addr, bvec->bv_len);
1551
1552 __free_page(bvec->bv_page);
1553 p += bvec->bv_len;
1554 }
1555
1556 kfree(bmd);
1557 bio_put(bio);
1558}
1559
1560/**
1561 * bio_copy_kern - copy kernel address into bio
1562 * @q: the struct request_queue for the bio
1563 * @data: pointer to buffer to copy
1564 * @len: length in bytes
1565 * @gfp_mask: allocation flags for bio and page allocation
1566 * @reading: data direction is READ
1567 *
1568 * Copy the kernel address into a bio suitable for io to a block
1569 * device. Returns an error pointer in case of error.
1570 */
1571struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1572 gfp_t gfp_mask, int reading)
1573{
1574 struct bio *bio;
1575 struct bio_vec *bvec;
1576 int i;
1577
1578 bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
1579 if (IS_ERR(bio))
1580 return bio;
1581
1582 if (!reading) {
1583 void *p = data;
1584
1585 bio_for_each_segment_all(bvec, bio, i) {
1586 char *addr = page_address(bvec->bv_page);
1587
1588 memcpy(addr, p, bvec->bv_len);
1589 p += bvec->bv_len;
1590 }
1591 }
1592
1593 bio->bi_end_io = bio_copy_kern_endio;
1594
1595 return bio;
1596}
1597EXPORT_SYMBOL(bio_copy_kern);
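/*
 * A hedged sketch of choosing between the two kernel-buffer helpers,
 * roughly in the spirit of blk_rq_map_kern(): a buffer that does not meet
 * the queue's DMA alignment cannot be mapped in place and is copied instead
 * (blk_rq_map_kern() additionally copies stack buffers). Names here are
 * hypothetical.
 */
static struct bio *example_kern_buf_to_bio(struct request_queue *q,
					   void *buf, unsigned int len,
					   int reading)
{
	unsigned long addr = (unsigned long)buf;

	if (addr & queue_dma_alignment(q))
		return bio_copy_kern(q, buf, len, GFP_KERNEL, reading);

	return bio_map_kern(q, buf, len, GFP_KERNEL);
}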
1598
1599/*
1600 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1601 * for performing direct-IO in BIOs.
1602 *
1603 * The problem is that we cannot run set_page_dirty() from interrupt context
1604 * because the required locks are not interrupt-safe. So what we can do is to
1605 * mark the pages dirty _before_ performing IO. And in interrupt context,
1606 * check that the pages are still dirty. If so, fine. If not, redirty them
1607 * in process context.
1608 *
1609 * We special-case compound pages here: normally this means reads into hugetlb
1610 * pages. The logic in here doesn't really work right for compound pages
1611 * because the VM does not uniformly chase down the head page in all cases.
1612 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1613 * handle them at all. So we skip compound pages here at an early stage.
1614 *
1615 * Note that this code is very hard to test under normal circumstances because
1616 * direct-io pins the pages with get_user_pages(). This makes
1617 * is_page_cache_freeable return false, and the VM will not clean the pages.
1618 * But other code (eg, flusher threads) could clean the pages if they are mapped
1619 * pagecache.
1620 *
1621 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1622 * deferred bio dirtying paths.
1623 */
1624
1625/*
1626 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1627 */
1628void bio_set_pages_dirty(struct bio *bio)
1629{
1630 struct bio_vec *bvec;
1631 int i;
1632
1633 bio_for_each_segment_all(bvec, bio, i) {
1634 struct page *page = bvec->bv_page;
1635
1636 if (page && !PageCompound(page))
1637 set_page_dirty_lock(page);
1638 }
1639}
1640
1641static void bio_release_pages(struct bio *bio)
1642{
1643 struct bio_vec *bvec;
1644 int i;
1645
1646 bio_for_each_segment_all(bvec, bio, i) {
1647 struct page *page = bvec->bv_page;
1648
1649 if (page)
1650 put_page(page);
1651 }
1652}
1653
1654/*
1655 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1656 * If they are, then fine. If, however, some pages are clean then they must
1657 * have been written out during the direct-IO read. So we take another ref on
1658 * the BIO and the offending pages and re-dirty the pages in process context.
1659 *
1660 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1661 * here on. It will run one page_cache_release() against each page and will
1662 * run one bio_put() against the BIO.
1663 */
1664
1665static void bio_dirty_fn(struct work_struct *work);
1666
1667static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1668static DEFINE_SPINLOCK(bio_dirty_lock);
1669static struct bio *bio_dirty_list;
1670
1671/*
1672 * This runs in process context
1673 */
1674static void bio_dirty_fn(struct work_struct *work)
1675{
1676 unsigned long flags;
1677 struct bio *bio;
1678
1679 spin_lock_irqsave(&bio_dirty_lock, flags);
1680 bio = bio_dirty_list;
1681 bio_dirty_list = NULL;
1682 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1683
1684 while (bio) {
1685 struct bio *next = bio->bi_private;
1686
1687 bio_set_pages_dirty(bio);
1688 bio_release_pages(bio);
1689 bio_put(bio);
1690 bio = next;
1691 }
1692}
1693
1694void bio_check_pages_dirty(struct bio *bio)
1695{
1696 struct bio_vec *bvec;
1697 int nr_clean_pages = 0;
1698 int i;
1699
1700 bio_for_each_segment_all(bvec, bio, i) {
1701 struct page *page = bvec->bv_page;
1702
1703 if (PageDirty(page) || PageCompound(page)) {
1704 page_cache_release(page);
1705 bvec->bv_page = NULL;
1706 } else {
1707 nr_clean_pages++;
1708 }
1709 }
1710
1711 if (nr_clean_pages) {
1712 unsigned long flags;
1713
1714 spin_lock_irqsave(&bio_dirty_lock, flags);
1715 bio->bi_private = bio_dirty_list;
1716 bio_dirty_list = bio;
1717 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1718 schedule_work(&bio_dirty_work);
1719 } else {
1720 bio_put(bio);
1721 }
1722}
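/*
 * A minimal sketch of the protocol described above, as a direct-IO read
 * path might use it: dirty the pages before submission, then let the
 * completion side re-check them and defer any redirtying to process
 * context. Function names are hypothetical.
 */
static void example_dio_read_endio(struct bio *bio, int error)
{
	/* may run in interrupt context; takes ownership and puts the bio */
	bio_check_pages_dirty(bio);
}

static void example_submit_dio_read(struct bio *bio)
{
	bio->bi_end_io = example_dio_read_endio;
	bio_set_pages_dirty(bio);	/* mark before the device writes them */
	submit_bio(READ, bio);
}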
1723
1724#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1725void bio_flush_dcache_pages(struct bio *bi)
1726{
1727 struct bio_vec bvec;
1728 struct bvec_iter iter;
1729
1730 bio_for_each_segment(bvec, bi, iter)
1731 flush_dcache_page(bvec.bv_page);
1732}
1733EXPORT_SYMBOL(bio_flush_dcache_pages);
1734#endif
1735
1736/**
1737 * bio_endio - end I/O on a bio
1738 * @bio: bio
1739 * @error: error, if any
1740 *
1741 * Description:
1742 * bio_endio() will end I/O on the whole bio. bio_endio() is the
1743 * preferred way to end I/O on a bio; it takes care of clearing
1744 * BIO_UPTODATE on error. @error is 0 on success, and one of the
1745 * established -Exxxx (-EIO, for instance) error values in case
1746 * something went wrong. No one should call bi_end_io() directly on a
1747 * bio unless they own it and thus know that it has an end_io
1748 * function.
1749 **/
1750void bio_endio(struct bio *bio, int error)
1751{
1752 while (bio) {
1753 BUG_ON(atomic_read(&bio->bi_remaining) <= 0);
1754
1755 if (error)
1756 clear_bit(BIO_UPTODATE, &bio->bi_flags);
1757 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1758 error = -EIO;
1759
1760 if (!atomic_dec_and_test(&bio->bi_remaining))
1761 return;
1762
1763 /*
1764 * Need to have a real endio function for chained bios,
1765 * otherwise various corner cases will break (like stacking
1766 * block devices that save/restore bi_end_io) - however, we want
1767 * to avoid unbounded recursion and blowing the stack. Tail call
1768 * optimization would handle this, but compiling with frame
1769 * pointers also disables gcc's sibling call optimization.
1770 */
1771 if (bio->bi_end_io == bio_chain_endio) {
1772 struct bio *parent = bio->bi_private;
1773 bio_put(bio);
1774 bio = parent;
1775 } else {
1776 if (bio->bi_end_io)
1777 bio->bi_end_io(bio, error);
1778 bio = NULL;
1779 }
1780 }
1781}
1782EXPORT_SYMBOL(bio_endio);
1783
1784/**
1785 * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
1786 * @bio: bio
1787 * @error: error, if any
1788 *
1789 * For code that has saved and restored bi_end_io; think hard before using
1790 * this function, most likely you should have cloned the entire bio instead.
1791 **/
1792void bio_endio_nodec(struct bio *bio, int error)
1793{
1794 atomic_inc(&bio->bi_remaining);
1795 bio_endio(bio, error);
1796}
1797EXPORT_SYMBOL(bio_endio_nodec);
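/*
 * A hedged sketch of the save/restore pattern that bio_endio_nodec() is
 * meant for: a layer hooks ->bi_end_io, and on completion restores the
 * original handler and invokes it without decrementing bi_remaining a
 * second time. The hook structure and names are hypothetical.
 */
struct example_hook {
	bio_end_io_t	*orig_end_io;
	void		*orig_private;
};

static void example_hook_endio(struct bio *bio, int error)
{
	struct example_hook *h = bio->bi_private;

	bio->bi_end_io = h->orig_end_io;
	bio->bi_private = h->orig_private;
	kfree(h);

	/* bi_remaining was already dropped on the way into this handler */
	bio_endio_nodec(bio, error);
}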
1798
1799/**
1800 * bio_split - split a bio
1801 * @bio: bio to split
1802 * @sectors: number of sectors to split from the front of @bio
1803 * @gfp: gfp mask
1804 * @bs: bio set to allocate from
1805 *
1806 * Allocates and returns a new bio which represents @sectors from the start of
1807 * @bio, and updates @bio to represent the remaining sectors.
1808 *
1809 * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
1810 * responsibility to ensure that @bio is not freed before the split.
1811 */
1812struct bio *bio_split(struct bio *bio, int sectors,
1813 gfp_t gfp, struct bio_set *bs)
1814{
1815 struct bio *split = NULL;
1816
1817 BUG_ON(sectors <= 0);
1818 BUG_ON(sectors >= bio_sectors(bio));
1819
1820 split = bio_clone_fast(bio, gfp, bs);
1821 if (!split)
1822 return NULL;
1823
1824 split->bi_iter.bi_size = sectors << 9;
1825
1826 if (bio_integrity(split))
1827 bio_integrity_trim(split, 0, sectors);
1828
1829 bio_advance(bio, split->bi_iter.bi_size);
1830
1831 return split;
1832}
1833EXPORT_SYMBOL(bio_split);
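/*
 * A minimal sketch of splitting at a boundary and chaining the front half
 * to the remainder, so the original bio completes only after both pieces
 * have finished. Assumes fs_bio_set as the pool; names are hypothetical.
 */
static int example_split_and_submit(struct bio *bio, int rw,
				    unsigned int boundary_sectors)
{
	if (bio_sectors(bio) > boundary_sectors) {
		struct bio *split = bio_split(bio, boundary_sectors,
					      GFP_NOIO, fs_bio_set);

		if (!split)
			return -ENOMEM;

		bio_chain(split, bio);	/* parent finishes after the split */
		submit_bio(rw, split);
	}

	submit_bio(rw, bio);
	return 0;
}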
1834
1835/**
1836 * bio_trim - trim a bio
1837 * @bio: bio to trim
1838 * @offset: number of sectors to trim from the front of @bio
1839 * @size: size we want to trim @bio to, in sectors
1840 */
1841void bio_trim(struct bio *bio, int offset, int size)
1842{
1843 /* 'bio' is a cloned bio which we need to trim to match
1844 * the given offset and size.
1845 */
1846
1847 size <<= 9;
1848 if (offset == 0 && size == bio->bi_iter.bi_size)
1849 return;
1850
1851 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1852
1853 bio_advance(bio, offset << 9);
1854
1855 bio->bi_iter.bi_size = size;
1856}
1857EXPORT_SYMBOL_GPL(bio_trim);
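/*
 * A minimal sketch of the typical stacking-driver pattern bio_trim() is
 * written for: clone a bio and trim the clone down to the sub-range being
 * remapped. The wrapper name is hypothetical.
 */
static struct bio *example_clone_slice(struct bio *bio, int offset_sectors,
				       int size_sectors, struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	if (!clone)
		return NULL;

	bio_trim(clone, offset_sectors, size_sectors);
	return clone;
}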
1858
1859/*
1860 * create memory pools for biovec's in a bio_set.
1861 * use the global biovec slabs created for general use.
1862 */
1863mempool_t *biovec_create_pool(int pool_entries)
1864{
1865 struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
1866
1867 return mempool_create_slab_pool(pool_entries, bp->slab);
1868}
1869
1870void bioset_free(struct bio_set *bs)
1871{
1872 if (bs->rescue_workqueue)
1873 destroy_workqueue(bs->rescue_workqueue);
1874
1875 if (bs->bio_pool)
1876 mempool_destroy(bs->bio_pool);
1877
1878 if (bs->bvec_pool)
1879 mempool_destroy(bs->bvec_pool);
1880
1881 bioset_integrity_free(bs);
1882 bio_put_slab(bs);
1883
1884 kfree(bs);
1885}
1886EXPORT_SYMBOL(bioset_free);
1887
1888/**
1889 * bioset_create - Create a bio_set
1890 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1891 * @front_pad: Number of bytes to allocate in front of the returned bio
1892 *
1893 * Description:
1894 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1895 * to ask for a number of bytes to be allocated in front of the bio.
1896 * Front pad allocation is useful for embedding the bio inside
1897 * another structure, to avoid allocating extra data to go with the bio.
1898 * Note that the bio must be embedded at the END of that structure always,
1899 * or things will break badly.
1900 */
1901struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
1902{
1903 unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1904 struct bio_set *bs;
1905
1906 bs = kzalloc(sizeof(*bs), GFP_KERNEL);
1907 if (!bs)
1908 return NULL;
1909
1910 bs->front_pad = front_pad;
1911
1912 spin_lock_init(&bs->rescue_lock);
1913 bio_list_init(&bs->rescue_list);
1914 INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1915
1916 bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1917 if (!bs->bio_slab) {
1918 kfree(bs);
1919 return NULL;
1920 }
1921
1922 bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
1923 if (!bs->bio_pool)
1924 goto bad;
1925
1926 bs->bvec_pool = biovec_create_pool(pool_size);
1927 if (!bs->bvec_pool)
1928 goto bad;
1929
1930 bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1931 if (!bs->rescue_workqueue)
1932 goto bad;
1933
1934 return bs;
1935bad:
1936 bioset_free(bs);
1937 return NULL;
1938}
1939EXPORT_SYMBOL(bioset_create);
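/*
 * A minimal sketch of the front_pad embedding described above: the bio sits
 * at the end of a driver-private structure, the pool is sized with
 * offsetof(), and the container is recovered from the allocated bio. All
 * names here are hypothetical.
 */
struct example_io {
	void		*driver_data;
	struct bio	bio;		/* must be the last member */
};

static struct bio_set *example_bs;

static int __init example_init(void)
{
	example_bs = bioset_create(BIO_POOL_SIZE,
				   offsetof(struct example_io, bio));
	return example_bs ? 0 : -ENOMEM;
}

static struct example_io *example_alloc_io(gfp_t gfp)
{
	struct bio *bio = bio_alloc_bioset(gfp, 1, example_bs);

	return bio ? container_of(bio, struct example_io, bio) : NULL;
}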
1940
1941#ifdef CONFIG_BLK_CGROUP
1942/**
1943 * bio_associate_current - associate a bio with %current
1944 * @bio: target bio
1945 *
1946 * Associate @bio with %current if it hasn't been associated yet. Block
1947 * layer will treat @bio as if it were issued by %current no matter which
1948 * task actually issues it.
1949 *
1950 * This function takes an extra reference on %current's io_context and blkcg
1951 * which will be put when @bio is released. The caller must own @bio,
1952 * ensure %current->io_context exists, and is responsible for synchronizing
1953 * calls to this function.
1954 */
1955int bio_associate_current(struct bio *bio)
1956{
1957 struct io_context *ioc;
1958 struct cgroup_subsys_state *css;
1959
1960 if (bio->bi_ioc)
1961 return -EBUSY;
1962
1963 ioc = current->io_context;
1964 if (!ioc)
1965 return -ENOENT;
1966
1967 /* acquire active ref on @ioc and associate */
1968 get_io_context_active(ioc);
1969 bio->bi_ioc = ioc;
1970
1971 /* associate blkcg if exists */
1972 rcu_read_lock();
1973 css = task_css(current, blkio_cgrp_id);
1974 if (css && css_tryget_online(css))
1975 bio->bi_css = css;
1976 rcu_read_unlock();
1977
1978 return 0;
1979}
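/*
 * A hedged sketch of when this helps: a submitter that will hand the bio to
 * a worker thread tags it with the issuing task first, so throttling and
 * cgroup accounting still see the real owner. Assumes %current already has
 * an io_context; the wrapper name is hypothetical.
 */
static int example_tag_bio_with_current(struct bio *bio)
{
	/*
	 * Must run in the context of the task that should be charged for
	 * the I/O, before the bio is handed off for submission elsewhere.
	 */
	return bio_associate_current(bio);	/* 0, -EBUSY or -ENOENT */
}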
1980
1981/**
1982 * bio_disassociate_task - undo bio_associate_current()
1983 * @bio: target bio
1984 */
1985void bio_disassociate_task(struct bio *bio)
1986{
1987 if (bio->bi_ioc) {
1988 put_io_context(bio->bi_ioc);
1989 bio->bi_ioc = NULL;
1990 }
1991 if (bio->bi_css) {
1992 css_put(bio->bi_css);
1993 bio->bi_css = NULL;
1994 }
1995}
1996
1997#endif /* CONFIG_BLK_CGROUP */
1998
1999static void __init biovec_init_slabs(void)
2000{
2001 int i;
2002
2003 for (i = 0; i < BIOVEC_NR_POOLS; i++) {
2004 int size;
2005 struct biovec_slab *bvs = bvec_slabs + i;
2006
2007 if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2008 bvs->slab = NULL;
2009 continue;
2010 }
2011
2012 size = bvs->nr_vecs * sizeof(struct bio_vec);
2013 bvs->slab = kmem_cache_create(bvs->name, size, 0,
2014 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2015 }
2016}
2017
2018static int __init init_bio(void)
2019{
2020 bio_slab_max = 2;
2021 bio_slab_nr = 0;
2022 bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
2023 if (!bio_slabs)
2024 panic("bio: can't allocate bios\n");
2025
2026 bio_integrity_init();
2027 biovec_init_slabs();
2028
2029 fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
2030 if (!fs_bio_set)
2031 panic("bio: can't allocate bios\n");
2032
2033 if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
2034 panic("bio: can't create integrity pool\n");
2035
2036 return 0;
2037}
2038subsys_initcall(init_bio);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4e491d9b5292..9f5bce33e6fe 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -185,7 +185,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
185 lockdep_assert_held(q->queue_lock); 185 lockdep_assert_held(q->queue_lock);
186 186
187 /* blkg holds a reference to blkcg */ 187 /* blkg holds a reference to blkcg */
188 if (!css_tryget(&blkcg->css)) { 188 if (!css_tryget_online(&blkcg->css)) {
189 ret = -EINVAL; 189 ret = -EINVAL;
190 goto err_free_blkg; 190 goto err_free_blkg;
191 } 191 }
@@ -336,7 +336,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
336 * under queue_lock. If it's not pointing to @blkg now, it never 336 * under queue_lock. If it's not pointing to @blkg now, it never
337 * will. Hint assignment itself can race safely. 337 * will. Hint assignment itself can race safely.
338 */ 338 */
339 if (rcu_dereference_raw(blkcg->blkg_hint) == blkg) 339 if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
340 rcu_assign_pointer(blkcg->blkg_hint, NULL); 340 rcu_assign_pointer(blkcg->blkg_hint, NULL);
341 341
342 /* 342 /*
@@ -451,7 +451,20 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
451 struct blkcg_gq *blkg; 451 struct blkcg_gq *blkg;
452 int i; 452 int i;
453 453
454 mutex_lock(&blkcg_pol_mutex); 454 /*
455 * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
456 * which ends up putting cgroup's internal cgroup_tree_mutex under
457 * it; however, cgroup_tree_mutex is nested above cgroup file
458 * active protection and grabbing blkcg_pol_mutex from a cgroup
459 * file operation creates a possible circular dependency. cgroup
460 * internal locking is planned to go through further simplification
461 * and this issue should go away soon. For now, let's trylock
462 * blkcg_pol_mutex and restart the write on failure.
463 *
464 * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
465 */
466 if (!mutex_trylock(&blkcg_pol_mutex))
467 return restart_syscall();
455 spin_lock_irq(&blkcg->lock); 468 spin_lock_irq(&blkcg->lock);
456 469
457 /* 470 /*
@@ -894,7 +907,7 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css,
894 int ret = 0; 907 int ret = 0;
895 908
896 /* task_lock() is needed to avoid races with exit_io_context() */ 909 /* task_lock() is needed to avoid races with exit_io_context() */
897 cgroup_taskset_for_each(task, css, tset) { 910 cgroup_taskset_for_each(task, tset) {
898 task_lock(task); 911 task_lock(task);
899 ioc = task->io_context; 912 ioc = task->io_context;
900 if (ioc && atomic_read(&ioc->nr_tasks) > 1) 913 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
@@ -906,17 +919,14 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css,
906 return ret; 919 return ret;
907} 920}
908 921
909struct cgroup_subsys blkio_subsys = { 922struct cgroup_subsys blkio_cgrp_subsys = {
910 .name = "blkio",
911 .css_alloc = blkcg_css_alloc, 923 .css_alloc = blkcg_css_alloc,
912 .css_offline = blkcg_css_offline, 924 .css_offline = blkcg_css_offline,
913 .css_free = blkcg_css_free, 925 .css_free = blkcg_css_free,
914 .can_attach = blkcg_can_attach, 926 .can_attach = blkcg_can_attach,
915 .subsys_id = blkio_subsys_id,
916 .base_cftypes = blkcg_files, 927 .base_cftypes = blkcg_files,
917 .module = THIS_MODULE,
918}; 928};
919EXPORT_SYMBOL_GPL(blkio_subsys); 929EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
920 930
921/** 931/**
922 * blkcg_activate_policy - activate a blkcg policy on a request_queue 932 * blkcg_activate_policy - activate a blkcg policy on a request_queue
@@ -1106,7 +1116,7 @@ int blkcg_policy_register(struct blkcg_policy *pol)
1106 1116
1107 /* everything is in place, add intf files for the new policy */ 1117 /* everything is in place, add intf files for the new policy */
1108 if (pol->cftypes) 1118 if (pol->cftypes)
1109 WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes)); 1119 WARN_ON(cgroup_add_cftypes(&blkio_cgrp_subsys, pol->cftypes));
1110 ret = 0; 1120 ret = 0;
1111out_unlock: 1121out_unlock:
1112 mutex_unlock(&blkcg_pol_mutex); 1122 mutex_unlock(&blkcg_pol_mutex);
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 86154eab9523..d692b29c083a 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -186,7 +186,7 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
186 186
187static inline struct blkcg *task_blkcg(struct task_struct *tsk) 187static inline struct blkcg *task_blkcg(struct task_struct *tsk)
188{ 188{
189 return css_to_blkcg(task_css(tsk, blkio_subsys_id)); 189 return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
190} 190}
191 191
192static inline struct blkcg *bio_blkcg(struct bio *bio) 192static inline struct blkcg *bio_blkcg(struct bio *bio)
@@ -204,7 +204,7 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
204 */ 204 */
205static inline struct blkcg *blkcg_parent(struct blkcg *blkcg) 205static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
206{ 206{
207 return css_to_blkcg(css_parent(&blkcg->css)); 207 return css_to_blkcg(blkcg->css.parent);
208} 208}
209 209
210/** 210/**
@@ -241,12 +241,16 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
241 */ 241 */
242static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) 242static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
243{ 243{
244 int ret; 244 char *p;
245 245
246 ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen); 246 p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
247 if (ret) 247 if (!p) {
248 strncpy(buf, "<unavailable>", buflen); 248 strncpy(buf, "<unavailable>", buflen);
249 return ret; 249 return -ENAMETOOLONG;
250 }
251
252 memmove(buf, p, buf + buflen - p);
253 return 0;
250} 254}
251 255
252/** 256/**
@@ -435,9 +439,9 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
435 uint64_t v; 439 uint64_t v;
436 440
437 do { 441 do {
438 start = u64_stats_fetch_begin_bh(&stat->syncp); 442 start = u64_stats_fetch_begin_irq(&stat->syncp);
439 v = stat->cnt; 443 v = stat->cnt;
440 } while (u64_stats_fetch_retry_bh(&stat->syncp, start)); 444 } while (u64_stats_fetch_retry_irq(&stat->syncp, start));
441 445
442 return v; 446 return v;
443} 447}
@@ -508,9 +512,9 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
508 struct blkg_rwstat tmp; 512 struct blkg_rwstat tmp;
509 513
510 do { 514 do {
511 start = u64_stats_fetch_begin_bh(&rwstat->syncp); 515 start = u64_stats_fetch_begin_irq(&rwstat->syncp);
512 tmp = *rwstat; 516 tmp = *rwstat;
513 } while (u64_stats_fetch_retry_bh(&rwstat->syncp, start)); 517 } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
514 518
515 return tmp; 519 return tmp;
516} 520}
diff --git a/block/blk-core.c b/block/blk-core.c
index bfe16d5af9f9..40d654861c33 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -146,8 +146,8 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
146 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", 146 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
147 (unsigned long long)blk_rq_pos(rq), 147 (unsigned long long)blk_rq_pos(rq),
148 blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); 148 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
149 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", 149 printk(KERN_INFO " bio %p, biotail %p, len %u\n",
150 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); 150 rq->bio, rq->biotail, blk_rq_bytes(rq));
151 151
152 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 152 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
153 printk(KERN_INFO " cdb: "); 153 printk(KERN_INFO " cdb: ");
@@ -251,8 +251,10 @@ void blk_sync_queue(struct request_queue *q)
251 struct blk_mq_hw_ctx *hctx; 251 struct blk_mq_hw_ctx *hctx;
252 int i; 252 int i;
253 253
254 queue_for_each_hw_ctx(q, hctx, i) 254 queue_for_each_hw_ctx(q, hctx, i) {
255 cancel_delayed_work_sync(&hctx->delayed_work); 255 cancel_delayed_work_sync(&hctx->run_work);
256 cancel_delayed_work_sync(&hctx->delay_work);
257 }
256 } else { 258 } else {
257 cancel_delayed_work_sync(&q->delay_work); 259 cancel_delayed_work_sync(&q->delay_work);
258 } 260 }
@@ -574,12 +576,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
574 if (!q) 576 if (!q)
575 return NULL; 577 return NULL;
576 578
577 if (percpu_counter_init(&q->mq_usage_counter, 0))
578 goto fail_q;
579
580 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); 579 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
581 if (q->id < 0) 580 if (q->id < 0)
582 goto fail_c; 581 goto fail_q;
583 582
584 q->backing_dev_info.ra_pages = 583 q->backing_dev_info.ra_pages =
585 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 584 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
@@ -637,8 +636,6 @@ fail_bdi:
637 bdi_destroy(&q->backing_dev_info); 636 bdi_destroy(&q->backing_dev_info);
638fail_id: 637fail_id:
639 ida_simple_remove(&blk_queue_ida, q->id); 638 ida_simple_remove(&blk_queue_ida, q->id);
640fail_c:
641 percpu_counter_destroy(&q->mq_usage_counter);
642fail_q: 639fail_q:
643 kmem_cache_free(blk_requestq_cachep, q); 640 kmem_cache_free(blk_requestq_cachep, q);
644 return NULL; 641 return NULL;
@@ -846,6 +843,47 @@ static void freed_request(struct request_list *rl, unsigned int flags)
846 __freed_request(rl, sync ^ 1); 843 __freed_request(rl, sync ^ 1);
847} 844}
848 845
846int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
847{
848 struct request_list *rl;
849
850 spin_lock_irq(q->queue_lock);
851 q->nr_requests = nr;
852 blk_queue_congestion_threshold(q);
853
854 /* congestion isn't cgroup aware and follows root blkcg for now */
855 rl = &q->root_rl;
856
857 if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
858 blk_set_queue_congested(q, BLK_RW_SYNC);
859 else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
860 blk_clear_queue_congested(q, BLK_RW_SYNC);
861
862 if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
863 blk_set_queue_congested(q, BLK_RW_ASYNC);
864 else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
865 blk_clear_queue_congested(q, BLK_RW_ASYNC);
866
867 blk_queue_for_each_rl(rl, q) {
868 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
869 blk_set_rl_full(rl, BLK_RW_SYNC);
870 } else {
871 blk_clear_rl_full(rl, BLK_RW_SYNC);
872 wake_up(&rl->wait[BLK_RW_SYNC]);
873 }
874
875 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
876 blk_set_rl_full(rl, BLK_RW_ASYNC);
877 } else {
878 blk_clear_rl_full(rl, BLK_RW_ASYNC);
879 wake_up(&rl->wait[BLK_RW_ASYNC]);
880 }
881 }
882
883 spin_unlock_irq(q->queue_lock);
884 return 0;
885}
886
849/* 887/*
850 * Determine if elevator data should be initialized when allocating the 888 * Determine if elevator data should be initialized when allocating the
851 * request associated with @bio. 889 * request associated with @bio.
@@ -1135,7 +1173,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
1135struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) 1173struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1136{ 1174{
1137 if (q->mq_ops) 1175 if (q->mq_ops)
1138 return blk_mq_alloc_request(q, rw, gfp_mask); 1176 return blk_mq_alloc_request(q, rw, gfp_mask, false);
1139 else 1177 else
1140 return blk_old_get_request(q, rw, gfp_mask); 1178 return blk_old_get_request(q, rw, gfp_mask);
1141} 1179}
@@ -1231,12 +1269,15 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
1231static void part_round_stats_single(int cpu, struct hd_struct *part, 1269static void part_round_stats_single(int cpu, struct hd_struct *part,
1232 unsigned long now) 1270 unsigned long now)
1233{ 1271{
1272 int inflight;
1273
1234 if (now == part->stamp) 1274 if (now == part->stamp)
1235 return; 1275 return;
1236 1276
1237 if (part_in_flight(part)) { 1277 inflight = part_in_flight(part);
1278 if (inflight) {
1238 __part_stat_add(cpu, part, time_in_queue, 1279 __part_stat_add(cpu, part, time_in_queue,
1239 part_in_flight(part) * (now - part->stamp)); 1280 inflight * (now - part->stamp));
1240 __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); 1281 __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1241 } 1282 }
1242 part->stamp = now; 1283 part->stamp = now;
@@ -1307,7 +1348,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
1307 struct request_list *rl = blk_rq_rl(req); 1348 struct request_list *rl = blk_rq_rl(req);
1308 1349
1309 BUG_ON(!list_empty(&req->queuelist)); 1350 BUG_ON(!list_empty(&req->queuelist));
1310 BUG_ON(!hlist_unhashed(&req->hash)); 1351 BUG_ON(ELV_ON_HASH(req));
1311 1352
1312 blk_free_request(rl, req); 1353 blk_free_request(rl, req);
1313 freed_request(rl, flags); 1354 freed_request(rl, flags);
@@ -1360,7 +1401,6 @@ void blk_add_request_payload(struct request *rq, struct page *page,
1360 1401
1361 rq->__data_len = rq->resid_len = len; 1402 rq->__data_len = rq->resid_len = len;
1362 rq->nr_phys_segments = 1; 1403 rq->nr_phys_segments = 1;
1363 rq->buffer = bio_data(bio);
1364} 1404}
1365EXPORT_SYMBOL_GPL(blk_add_request_payload); 1405EXPORT_SYMBOL_GPL(blk_add_request_payload);
1366 1406
@@ -1402,12 +1442,6 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1402 bio->bi_next = req->bio; 1442 bio->bi_next = req->bio;
1403 req->bio = bio; 1443 req->bio = bio;
1404 1444
1405 /*
1406 * may not be valid. if the low level driver said
1407 * it didn't need a bounce buffer then it better
1408 * not touch req->buffer either...
1409 */
1410 req->buffer = bio_data(bio);
1411 req->__sector = bio->bi_iter.bi_sector; 1445 req->__sector = bio->bi_iter.bi_sector;
1412 req->__data_len += bio->bi_iter.bi_size; 1446 req->__data_len += bio->bi_iter.bi_size;
1413 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1447 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
@@ -1432,6 +1466,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1432 * added on the elevator at this point. In addition, we don't have 1466 * added on the elevator at this point. In addition, we don't have
1433 * reliable access to the elevator outside queue lock. Only check basic 1467 * reliable access to the elevator outside queue lock. Only check basic
1434 * merging parameters without querying the elevator. 1468 * merging parameters without querying the elevator.
1469 *
1470 * Caller must ensure !blk_queue_nomerges(q) beforehand.
1435 */ 1471 */
1436bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, 1472bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1437 unsigned int *request_count) 1473 unsigned int *request_count)
@@ -1441,9 +1477,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1441 bool ret = false; 1477 bool ret = false;
1442 struct list_head *plug_list; 1478 struct list_head *plug_list;
1443 1479
1444 if (blk_queue_nomerges(q))
1445 goto out;
1446
1447 plug = current->plug; 1480 plug = current->plug;
1448 if (!plug) 1481 if (!plug)
1449 goto out; 1482 goto out;
@@ -1522,7 +1555,8 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
1522 * Check if we can merge with the plugged list before grabbing 1555 * Check if we can merge with the plugged list before grabbing
1523 * any locks. 1556 * any locks.
1524 */ 1557 */
1525 if (blk_attempt_plug_merge(q, bio, &request_count)) 1558 if (!blk_queue_nomerges(q) &&
1559 blk_attempt_plug_merge(q, bio, &request_count))
1526 return; 1560 return;
1527 1561
1528 spin_lock_irq(q->queue_lock); 1562 spin_lock_irq(q->queue_lock);
@@ -1654,7 +1688,7 @@ static int __init fail_make_request_debugfs(void)
1654 struct dentry *dir = fault_create_debugfs_attr("fail_make_request", 1688 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
1655 NULL, &fail_make_request); 1689 NULL, &fail_make_request);
1656 1690
1657 return IS_ERR(dir) ? PTR_ERR(dir) : 0; 1691 return PTR_ERR_OR_ZERO(dir);
1658} 1692}
1659 1693
1660late_initcall(fail_make_request_debugfs); 1694late_initcall(fail_make_request_debugfs);
@@ -1928,7 +1962,7 @@ EXPORT_SYMBOL(submit_bio);
1928 * in some cases below, so export this function. 1962 * in some cases below, so export this function.
1929 * Request stacking drivers like request-based dm may change the queue 1963 * Request stacking drivers like request-based dm may change the queue
1930 * limits while requests are in the queue (e.g. dm's table swapping). 1964 * limits while requests are in the queue (e.g. dm's table swapping).
1931 * Such request stacking drivers should check those requests agaist 1965 * Such request stacking drivers should check those requests against
1932 * the new queue limits again when they dispatch those requests, 1966 * the new queue limits again when they dispatch those requests,
1933 * although such checkings are also done against the old queue limits 1967 * although such checkings are also done against the old queue limits
1934 * when submitting requests. 1968 * when submitting requests.
@@ -2353,7 +2387,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
2353 if (!req->bio) 2387 if (!req->bio)
2354 return false; 2388 return false;
2355 2389
2356 trace_block_rq_complete(req->q, req); 2390 trace_block_rq_complete(req->q, req, nr_bytes);
2357 2391
2358 /* 2392 /*
2359 * For fs requests, rq is just carrier of independent bio's 2393 * For fs requests, rq is just carrier of independent bio's
@@ -2434,7 +2468,6 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
2434 } 2468 }
2435 2469
2436 req->__data_len -= total_bytes; 2470 req->__data_len -= total_bytes;
2437 req->buffer = bio_data(req->bio);
2438 2471
2439 /* update sector only for requests with clear definition of sector */ 2472 /* update sector only for requests with clear definition of sector */
2440 if (req->cmd_type == REQ_TYPE_FS) 2473 if (req->cmd_type == REQ_TYPE_FS)
@@ -2503,7 +2536,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
2503/* 2536/*
2504 * queue lock must be held 2537 * queue lock must be held
2505 */ 2538 */
2506static void blk_finish_request(struct request *req, int error) 2539void blk_finish_request(struct request *req, int error)
2507{ 2540{
2508 if (blk_rq_tagged(req)) 2541 if (blk_rq_tagged(req))
2509 blk_queue_end_tag(req->q, req); 2542 blk_queue_end_tag(req->q, req);
@@ -2529,6 +2562,7 @@ static void blk_finish_request(struct request *req, int error)
2529 __blk_put_request(req->q, req); 2562 __blk_put_request(req->q, req);
2530 } 2563 }
2531} 2564}
2565EXPORT_SYMBOL(blk_finish_request);
2532 2566
2533/** 2567/**
2534 * blk_end_bidi_request - Complete a bidi request 2568 * blk_end_bidi_request - Complete a bidi request
@@ -2752,10 +2786,9 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2752 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ 2786 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
2753 rq->cmd_flags |= bio->bi_rw & REQ_WRITE; 2787 rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
2754 2788
2755 if (bio_has_data(bio)) { 2789 if (bio_has_data(bio))
2756 rq->nr_phys_segments = bio_phys_segments(q, bio); 2790 rq->nr_phys_segments = bio_phys_segments(q, bio);
2757 rq->buffer = bio_data(bio); 2791
2758 }
2759 rq->__data_len = bio->bi_iter.bi_size; 2792 rq->__data_len = bio->bi_iter.bi_size;
2760 rq->bio = rq->biotail = bio; 2793 rq->bio = rq->biotail = bio;
2761 2794
@@ -2831,7 +2864,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2831 2864
2832/* 2865/*
2833 * Copy attributes of the original request to the clone request. 2866 * Copy attributes of the original request to the clone request.
2834 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. 2867 * The actual data parts (e.g. ->cmd, ->sense) are not copied.
2835 */ 2868 */
2836static void __blk_rq_prep_clone(struct request *dst, struct request *src) 2869static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2837{ 2870{
@@ -2857,7 +2890,7 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2857 * 2890 *
2858 * Description: 2891 * Description:
2859 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 2892 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
2860 * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) 2893 * The actual data parts of @rq_src (e.g. ->cmd, ->sense)
2861 * are not copied, and copying such parts is the caller's responsibility. 2894 * are not copied, and copying such parts is the caller's responsibility.
2862 * Also, pages which the original bios are pointing to are not copied 2895 * Also, pages which the original bios are pointing to are not copied
2863 * and the cloned bios just point same pages. 2896 * and the cloned bios just point same pages.
@@ -2904,20 +2937,25 @@ free_and_out:
2904} 2937}
2905EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 2938EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
2906 2939
2907int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) 2940int kblockd_schedule_work(struct work_struct *work)
2908{ 2941{
2909 return queue_work(kblockd_workqueue, work); 2942 return queue_work(kblockd_workqueue, work);
2910} 2943}
2911EXPORT_SYMBOL(kblockd_schedule_work); 2944EXPORT_SYMBOL(kblockd_schedule_work);
2912 2945
2913int kblockd_schedule_delayed_work(struct request_queue *q, 2946int kblockd_schedule_delayed_work(struct delayed_work *dwork,
2914 struct delayed_work *dwork, unsigned long delay) 2947 unsigned long delay)
2915{ 2948{
2916 return queue_delayed_work(kblockd_workqueue, dwork, delay); 2949 return queue_delayed_work(kblockd_workqueue, dwork, delay);
2917} 2950}
2918EXPORT_SYMBOL(kblockd_schedule_delayed_work); 2951EXPORT_SYMBOL(kblockd_schedule_delayed_work);
2919 2952
2920#define PLUG_MAGIC 0x91827364 2953int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
2954 unsigned long delay)
2955{
2956 return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
2957}
2958EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
2921 2959
2922/** 2960/**
2923 * blk_start_plug - initialize blk_plug and track it inside the task_struct 2961 * blk_start_plug - initialize blk_plug and track it inside the task_struct
@@ -2937,7 +2975,6 @@ void blk_start_plug(struct blk_plug *plug)
2937{ 2975{
2938 struct task_struct *tsk = current; 2976 struct task_struct *tsk = current;
2939 2977
2940 plug->magic = PLUG_MAGIC;
2941 INIT_LIST_HEAD(&plug->list); 2978 INIT_LIST_HEAD(&plug->list);
2942 INIT_LIST_HEAD(&plug->mq_list); 2979 INIT_LIST_HEAD(&plug->mq_list);
2943 INIT_LIST_HEAD(&plug->cb_list); 2980 INIT_LIST_HEAD(&plug->cb_list);
@@ -3034,8 +3071,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
3034 LIST_HEAD(list); 3071 LIST_HEAD(list);
3035 unsigned int depth; 3072 unsigned int depth;
3036 3073
3037 BUG_ON(plug->magic != PLUG_MAGIC);
3038
3039 flush_plug_callbacks(plug, from_schedule); 3074 flush_plug_callbacks(plug, from_schedule);
3040 3075
3041 if (!list_empty(&plug->mq_list)) 3076 if (!list_empty(&plug->mq_list))
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 43e6b4755e9a..8ffee4b5f93d 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,21 +130,13 @@ static void blk_flush_restore_request(struct request *rq)
130 blk_clear_rq_complete(rq); 130 blk_clear_rq_complete(rq);
131} 131}
132 132
133static void mq_flush_run(struct work_struct *work)
134{
135 struct request *rq;
136
137 rq = container_of(work, struct request, mq_flush_work);
138
139 memset(&rq->csd, 0, sizeof(rq->csd));
140 blk_mq_insert_request(rq, false, true, false);
141}
142
143static bool blk_flush_queue_rq(struct request *rq, bool add_front) 133static bool blk_flush_queue_rq(struct request *rq, bool add_front)
144{ 134{
145 if (rq->q->mq_ops) { 135 if (rq->q->mq_ops) {
146 INIT_WORK(&rq->mq_flush_work, mq_flush_run); 136 struct request_queue *q = rq->q;
147 kblockd_schedule_work(rq->q, &rq->mq_flush_work); 137
138 blk_mq_add_to_requeue_list(rq, add_front);
139 blk_mq_kick_requeue_list(q);
148 return false; 140 return false;
149 } else { 141 } else {
150 if (add_front) 142 if (add_front)
@@ -231,8 +223,10 @@ static void flush_end_io(struct request *flush_rq, int error)
231 struct request *rq, *n; 223 struct request *rq, *n;
232 unsigned long flags = 0; 224 unsigned long flags = 0;
233 225
234 if (q->mq_ops) 226 if (q->mq_ops) {
235 spin_lock_irqsave(&q->mq_flush_lock, flags); 227 spin_lock_irqsave(&q->mq_flush_lock, flags);
228 q->flush_rq->tag = -1;
229 }
236 230
237 running = &q->flush_queue[q->flush_running_idx]; 231 running = &q->flush_queue[q->flush_running_idx];
238 BUG_ON(q->flush_pending_idx == q->flush_running_idx); 232 BUG_ON(q->flush_pending_idx == q->flush_running_idx);
@@ -306,23 +300,9 @@ static bool blk_kick_flush(struct request_queue *q)
306 */ 300 */
307 q->flush_pending_idx ^= 1; 301 q->flush_pending_idx ^= 1;
308 302
309 if (q->mq_ops) { 303 blk_rq_init(q, q->flush_rq);
310 struct blk_mq_ctx *ctx = first_rq->mq_ctx; 304 if (q->mq_ops)
311 struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); 305 blk_mq_clone_flush_request(q->flush_rq, first_rq);
312
313 blk_mq_rq_init(hctx, q->flush_rq);
314 q->flush_rq->mq_ctx = ctx;
315
316 /*
317 * Reuse the tag value from the fist waiting request,
318 * with blk-mq the tag is generated during request
319 * allocation and drivers can rely on it being inside
320 * the range they asked for.
321 */
322 q->flush_rq->tag = first_rq->tag;
323 } else {
324 blk_rq_init(q, q->flush_rq);
325 }
326 306
327 q->flush_rq->cmd_type = REQ_TYPE_FS; 307 q->flush_rq->cmd_type = REQ_TYPE_FS;
328 q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; 308 q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 242df01413f6..1a27f45ec776 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -68,7 +68,7 @@ static void ioc_destroy_icq(struct io_cq *icq)
68 * under queue_lock. If it's not pointing to @icq now, it never 68 * under queue_lock. If it's not pointing to @icq now, it never
69 * will. Hint assignment itself can race safely. 69 * will. Hint assignment itself can race safely.
70 */ 70 */
71 if (rcu_dereference_raw(ioc->icq_hint) == icq) 71 if (rcu_access_pointer(ioc->icq_hint) == icq)
72 rcu_assign_pointer(ioc->icq_hint, NULL); 72 rcu_assign_pointer(ioc->icq_hint, NULL);
73 73
74 ioc_exit_icq(icq); 74 ioc_exit_icq(icq);
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 1855bf51edb0..0736729d6494 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -14,9 +14,6 @@
14 14
15#include "blk.h" 15#include "blk.h"
16 16
17int blk_iopoll_enabled = 1;
18EXPORT_SYMBOL(blk_iopoll_enabled);
19
20static unsigned int blk_iopoll_budget __read_mostly = 256; 17static unsigned int blk_iopoll_budget __read_mostly = 256;
21 18
22static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll); 19static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
@@ -52,7 +49,7 @@ EXPORT_SYMBOL(blk_iopoll_sched);
52void __blk_iopoll_complete(struct blk_iopoll *iop) 49void __blk_iopoll_complete(struct blk_iopoll *iop)
53{ 50{
54 list_del(&iop->list); 51 list_del(&iop->list);
55 smp_mb__before_clear_bit(); 52 smp_mb__before_atomic();
56 clear_bit_unlock(IOPOLL_F_SCHED, &iop->state); 53 clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
57} 54}
58EXPORT_SYMBOL(__blk_iopoll_complete); 55EXPORT_SYMBOL(__blk_iopoll_complete);
@@ -67,12 +64,12 @@ EXPORT_SYMBOL(__blk_iopoll_complete);
67 * iopoll handler will not be invoked again before blk_iopoll_sched_prep() 64 * iopoll handler will not be invoked again before blk_iopoll_sched_prep()
68 * is called. 65 * is called.
69 **/ 66 **/
70void blk_iopoll_complete(struct blk_iopoll *iopoll) 67void blk_iopoll_complete(struct blk_iopoll *iop)
71{ 68{
72 unsigned long flags; 69 unsigned long flags;
73 70
74 local_irq_save(flags); 71 local_irq_save(flags);
75 __blk_iopoll_complete(iopoll); 72 __blk_iopoll_complete(iop);
76 local_irq_restore(flags); 73 local_irq_restore(flags);
77} 74}
78EXPORT_SYMBOL(blk_iopoll_complete); 75EXPORT_SYMBOL(blk_iopoll_complete);
@@ -164,7 +161,7 @@ EXPORT_SYMBOL(blk_iopoll_disable);
164void blk_iopoll_enable(struct blk_iopoll *iop) 161void blk_iopoll_enable(struct blk_iopoll *iop)
165{ 162{
166 BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state)); 163 BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
167 smp_mb__before_clear_bit(); 164 smp_mb__before_atomic();
168 clear_bit_unlock(IOPOLL_F_SCHED, &iop->state); 165 clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
169} 166}
170EXPORT_SYMBOL(blk_iopoll_enable); 167EXPORT_SYMBOL(blk_iopoll_enable);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 97a733cf3d5f..8411be3c19d3 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -226,8 +226,8 @@ EXPORT_SYMBOL(blkdev_issue_write_same);
226 * Generate and issue number of bios with zerofiled pages. 226 * Generate and issue number of bios with zerofiled pages.
227 */ 227 */
228 228
229int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, 229static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
230 sector_t nr_sects, gfp_t gfp_mask) 230 sector_t nr_sects, gfp_t gfp_mask)
231{ 231{
232 int ret; 232 int ret;
233 struct bio *bio; 233 struct bio *bio;
diff --git a/block/blk-map.c b/block/blk-map.c
index ae4ae1047fd9..f890d4345b0c 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -155,7 +155,6 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
155 if (!bio_flagged(bio, BIO_USER_MAPPED)) 155 if (!bio_flagged(bio, BIO_USER_MAPPED))
156 rq->cmd_flags |= REQ_COPY_USER; 156 rq->cmd_flags |= REQ_COPY_USER;
157 157
158 rq->buffer = NULL;
159 return 0; 158 return 0;
160unmap_rq: 159unmap_rq:
161 blk_rq_unmap_user(bio); 160 blk_rq_unmap_user(bio);
@@ -188,7 +187,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
188 * unmapping. 187 * unmapping.
189 */ 188 */
190int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, 189int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
191 struct rq_map_data *map_data, struct sg_iovec *iov, 190 struct rq_map_data *map_data, const struct sg_iovec *iov,
192 int iov_count, unsigned int len, gfp_t gfp_mask) 191 int iov_count, unsigned int len, gfp_t gfp_mask)
193{ 192{
194 struct bio *bio; 193 struct bio *bio;
@@ -238,7 +237,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
238 blk_queue_bounce(q, &bio); 237 blk_queue_bounce(q, &bio);
239 bio_get(bio); 238 bio_get(bio);
240 blk_rq_bio_prep(q, rq, bio); 239 blk_rq_bio_prep(q, rq, bio);
241 rq->buffer = NULL;
242 return 0; 240 return 0;
243} 241}
244EXPORT_SYMBOL(blk_rq_map_user_iov); 242EXPORT_SYMBOL(blk_rq_map_user_iov);
@@ -285,7 +283,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
285 * 283 *
286 * Description: 284 * Description:
287 * Data will be mapped directly if possible. Otherwise a bounce 285 * Data will be mapped directly if possible. Otherwise a bounce
288 * buffer is used. Can be called multple times to append multple 286 * buffer is used. Can be called multiple times to append multiple
289 * buffers. 287 * buffers.
290 */ 288 */
291int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, 289int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
@@ -325,7 +323,6 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
325 } 323 }
326 324
327 blk_queue_bounce(q, &rq->bio); 325 blk_queue_bounce(q, &rq->bio);
328 rq->buffer = NULL;
329 return 0; 326 return 0;
330} 327}
331EXPORT_SYMBOL(blk_rq_map_kern); 328EXPORT_SYMBOL(blk_rq_map_kern);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6c583f9c5b65..b3bf0df0f4c2 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -13,7 +13,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
13 struct bio *bio) 13 struct bio *bio)
14{ 14{
15 struct bio_vec bv, bvprv = { NULL }; 15 struct bio_vec bv, bvprv = { NULL };
16 int cluster, high, highprv = 1; 16 int cluster, high, highprv = 1, no_sg_merge;
17 unsigned int seg_size, nr_phys_segs; 17 unsigned int seg_size, nr_phys_segs;
18 struct bio *fbio, *bbio; 18 struct bio *fbio, *bbio;
19 struct bvec_iter iter; 19 struct bvec_iter iter;
@@ -35,12 +35,21 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
35 cluster = blk_queue_cluster(q); 35 cluster = blk_queue_cluster(q);
36 seg_size = 0; 36 seg_size = 0;
37 nr_phys_segs = 0; 37 nr_phys_segs = 0;
38 no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
39 high = 0;
38 for_each_bio(bio) { 40 for_each_bio(bio) {
39 bio_for_each_segment(bv, bio, iter) { 41 bio_for_each_segment(bv, bio, iter) {
40 /* 42 /*
43 * If SG merging is disabled, each bio vector is
44 * a segment
45 */
46 if (no_sg_merge)
47 goto new_segment;
48
49 /*
41 * the trick here is making sure that a high page is 50 * the trick here is making sure that a high page is
42 * never considered part of another segment, since that 51 * never considered part of another segment, since
43 * might change with the bounce page. 52 * that might change with the bounce page.
44 */ 53 */
45 high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q); 54 high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
46 if (!high && !highprv && cluster) { 55 if (!high && !highprv && cluster) {
@@ -84,11 +93,16 @@ void blk_recalc_rq_segments(struct request *rq)
84 93
85void blk_recount_segments(struct request_queue *q, struct bio *bio) 94void blk_recount_segments(struct request_queue *q, struct bio *bio)
86{ 95{
87 struct bio *nxt = bio->bi_next; 96 if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
97 bio->bi_phys_segments = bio->bi_vcnt;
98 else {
99 struct bio *nxt = bio->bi_next;
100
101 bio->bi_next = NULL;
102 bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
103 bio->bi_next = nxt;
104 }
88 105
89 bio->bi_next = NULL;
90 bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
91 bio->bi_next = nxt;
92 bio->bi_flags |= (1 << BIO_SEG_VALID); 106 bio->bi_flags |= (1 << BIO_SEG_VALID);
93} 107}
94EXPORT_SYMBOL(blk_recount_segments); 108EXPORT_SYMBOL(blk_recount_segments);
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 136ef8643bba..bb3ed488f7b5 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -1,3 +1,8 @@
1/*
2 * CPU notifier helper code for blk-mq
3 *
4 * Copyright (C) 2013-2014 Jens Axboe
5 */
1#include <linux/kernel.h> 6#include <linux/kernel.h>
2#include <linux/module.h> 7#include <linux/module.h>
3#include <linux/init.h> 8#include <linux/init.h>
@@ -18,14 +23,18 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
18{ 23{
19 unsigned int cpu = (unsigned long) hcpu; 24 unsigned int cpu = (unsigned long) hcpu;
20 struct blk_mq_cpu_notifier *notify; 25 struct blk_mq_cpu_notifier *notify;
26 int ret = NOTIFY_OK;
21 27
22 raw_spin_lock(&blk_mq_cpu_notify_lock); 28 raw_spin_lock(&blk_mq_cpu_notify_lock);
23 29
24 list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) 30 list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
25 notify->notify(notify->data, action, cpu); 31 ret = notify->notify(notify->data, action, cpu);
32 if (ret != NOTIFY_OK)
33 break;
34 }
26 35
27 raw_spin_unlock(&blk_mq_cpu_notify_lock); 36 raw_spin_unlock(&blk_mq_cpu_notify_lock);
28 return NOTIFY_OK; 37 return ret;
29} 38}
30 39
31void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier) 40void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
@@ -45,7 +54,7 @@ void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
45} 54}
46 55
47void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, 56void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
48 void (*fn)(void *, unsigned long, unsigned int), 57 int (*fn)(void *, unsigned long, unsigned int),
49 void *data) 58 void *data)
50{ 59{
51 notifier->notify = fn; 60 notifier->notify = fn;
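
The blk-mq-cpu.c change above turns the CPU notifier callback from void into one that returns a status, and the dispatcher stops at the first callback that does not return NOTIFY_OK. A minimal user-space model of that pattern; the constants and struct below are stand-ins, not the kernel notifier API:

#include <stdio.h>

#define NOTIFY_OK  0
#define NOTIFY_BAD 1

struct cpu_notifier {
	int (*notify)(void *data, unsigned long action, unsigned int cpu);
	void *data;
};

static int run_notifiers(struct cpu_notifier *n, int count,
			 unsigned long action, unsigned int cpu)
{
	int i, ret = NOTIFY_OK;

	for (i = 0; i < count; i++) {
		ret = n[i].notify(n[i].data, action, cpu);
		if (ret != NOTIFY_OK)	/* stop the chain on failure */
			break;
	}
	return ret;
}

static int ok_cb(void *data, unsigned long action, unsigned int cpu)
{
	printf("cb %s: action %lu on cpu %u\n", (char *)data, action, cpu);
	return NOTIFY_OK;
}

static int bad_cb(void *data, unsigned long action, unsigned int cpu)
{
	(void)data; (void)action; (void)cpu;
	return NOTIFY_BAD;	/* e.g. allocation failure in the handler */
}

int main(void)
{
	struct cpu_notifier chain[] = {
		{ ok_cb, "first" }, { bad_cb, NULL }, { ok_cb, "never runs" },
	};
	int ret = run_notifiers(chain, 3, 1, 0);

	printf("chain result: %s\n", ret == NOTIFY_OK ? "ok" : "stopped");
	return 0;
}
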
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index f8721278601c..1065d7c65fa1 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -1,3 +1,8 @@
1/*
2 * CPU <-> hardware queue mapping helpers
3 *
4 * Copyright (C) 2013-2014 Jens Axboe
5 */
1#include <linux/kernel.h> 6#include <linux/kernel.h>
2#include <linux/threads.h> 7#include <linux/threads.h>
3#include <linux/module.h> 8#include <linux/module.h>
@@ -9,15 +14,6 @@
9#include "blk.h" 14#include "blk.h"
10#include "blk-mq.h" 15#include "blk-mq.h"
11 16
12static void show_map(unsigned int *map, unsigned int nr)
13{
14 int i;
15
16 pr_info("blk-mq: CPU -> queue map\n");
17 for_each_online_cpu(i)
18 pr_info(" CPU%2u -> Queue %u\n", i, map[i]);
19}
20
21static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues, 17static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
22 const int cpu) 18 const int cpu)
23{ 19{
@@ -85,24 +81,39 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
85 map[i] = map[first_sibling]; 81 map[i] = map[first_sibling];
86 } 82 }
87 83
88 show_map(map, nr_cpus);
89 free_cpumask_var(cpus); 84 free_cpumask_var(cpus);
90 return 0; 85 return 0;
91} 86}
92 87
93unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg) 88unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
94{ 89{
95 unsigned int *map; 90 unsigned int *map;
96 91
97 /* If cpus are offline, map them to first hctx */ 92 /* If cpus are offline, map them to first hctx */
98 map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL, 93 map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL,
99 reg->numa_node); 94 set->numa_node);
100 if (!map) 95 if (!map)
101 return NULL; 96 return NULL;
102 97
103 if (!blk_mq_update_queue_map(map, reg->nr_hw_queues)) 98 if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
104 return map; 99 return map;
105 100
106 kfree(map); 101 kfree(map);
107 return NULL; 102 return NULL;
108} 103}
104
105/*
106 * We have no quick way of doing reverse lookups. This is only used at
107 * queue init time, so runtime isn't important.
108 */
109int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
110{
111 int i;
112
113 for_each_possible_cpu(i) {
114 if (index == mq_map[i])
115 return cpu_to_node(i);
116 }
117
118 return NUMA_NO_NODE;
119}
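
blk_mq_hw_queue_to_node() above is a plain reverse scan of the CPU -> queue map. Below is a toy version of the map plus the reverse lookup; the modulo spreading policy is only an illustration, since the kernel additionally keeps thread siblings on the same queue:

#include <stdio.h>

#define NR_CPUS    8
#define NR_QUEUES  3

static unsigned int mq_map[NR_CPUS];

static void build_map(void)
{
	unsigned int cpu;

	/* simplistic spreading; stand-in for blk_mq_update_queue_map() */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		mq_map[cpu] = cpu % NR_QUEUES;
}

/* reverse lookup: first CPU mapped to @index (the kernel then uses its node) */
static int hw_queue_to_cpu(unsigned int index)
{
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (mq_map[cpu] == index)
			return (int)cpu;
	return -1;		/* the kernel returns NUMA_NO_NODE here */
}

int main(void)
{
	unsigned int cpu;

	build_map();
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("CPU%u -> queue %u\n", cpu, mq_map[cpu]);
	printf("queue 1 served first by CPU%d\n", hw_queue_to_cpu(1));
	return 0;
}
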
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index b91ce75bd35d..ed5217867555 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -203,45 +203,36 @@ static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
203 return ret; 203 return ret;
204} 204}
205 205
206static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page) 206static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
207{ 207{
208 ssize_t ret; 208 return blk_mq_tag_sysfs_show(hctx->tags, page);
209 209}
210 spin_lock(&hctx->lock);
211 ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI));
212 spin_unlock(&hctx->lock);
213 210
214 return ret; 211static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
212{
213 return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
215} 214}
216 215
217static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx, 216static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
218 const char *page, size_t len)
219{ 217{
220 struct blk_mq_ctx *ctx; 218 unsigned int i, first = 1;
221 unsigned long ret; 219 ssize_t ret = 0;
222 unsigned int i;
223 220
224 if (kstrtoul(page, 10, &ret)) { 221 blk_mq_disable_hotplug();
225 pr_err("blk-mq-sysfs: invalid input '%s'\n", page);
226 return -EINVAL;
227 }
228 222
229 spin_lock(&hctx->lock); 223 for_each_cpu(i, hctx->cpumask) {
230 if (ret) 224 if (first)
231 hctx->flags |= BLK_MQ_F_SHOULD_IPI; 225 ret += sprintf(ret + page, "%u", i);
232 else 226 else
233 hctx->flags &= ~BLK_MQ_F_SHOULD_IPI; 227 ret += sprintf(ret + page, ", %u", i);
234 spin_unlock(&hctx->lock);
235 228
236 hctx_for_each_ctx(hctx, ctx, i) 229 first = 0;
237 ctx->ipi_redirect = !!ret; 230 }
238 231
239 return len; 232 blk_mq_enable_hotplug();
240}
241 233
242static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) 234 ret += sprintf(ret + page, "\n");
243{ 235 return ret;
244 return blk_mq_tag_sysfs_show(hctx->tags, page);
245} 236}
246 237
247static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = { 238static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
@@ -281,27 +272,31 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
281 .attr = {.name = "dispatched", .mode = S_IRUGO }, 272 .attr = {.name = "dispatched", .mode = S_IRUGO },
282 .show = blk_mq_hw_sysfs_dispatched_show, 273 .show = blk_mq_hw_sysfs_dispatched_show,
283}; 274};
275static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
276 .attr = {.name = "active", .mode = S_IRUGO },
277 .show = blk_mq_hw_sysfs_active_show,
278};
284static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = { 279static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
285 .attr = {.name = "pending", .mode = S_IRUGO }, 280 .attr = {.name = "pending", .mode = S_IRUGO },
286 .show = blk_mq_hw_sysfs_rq_list_show, 281 .show = blk_mq_hw_sysfs_rq_list_show,
287}; 282};
288static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = {
289 .attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR},
290 .show = blk_mq_hw_sysfs_ipi_show,
291 .store = blk_mq_hw_sysfs_ipi_store,
292};
293static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = { 283static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
294 .attr = {.name = "tags", .mode = S_IRUGO }, 284 .attr = {.name = "tags", .mode = S_IRUGO },
295 .show = blk_mq_hw_sysfs_tags_show, 285 .show = blk_mq_hw_sysfs_tags_show,
296}; 286};
287static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
288 .attr = {.name = "cpu_list", .mode = S_IRUGO },
289 .show = blk_mq_hw_sysfs_cpus_show,
290};
297 291
298static struct attribute *default_hw_ctx_attrs[] = { 292static struct attribute *default_hw_ctx_attrs[] = {
299 &blk_mq_hw_sysfs_queued.attr, 293 &blk_mq_hw_sysfs_queued.attr,
300 &blk_mq_hw_sysfs_run.attr, 294 &blk_mq_hw_sysfs_run.attr,
301 &blk_mq_hw_sysfs_dispatched.attr, 295 &blk_mq_hw_sysfs_dispatched.attr,
302 &blk_mq_hw_sysfs_pending.attr, 296 &blk_mq_hw_sysfs_pending.attr,
303 &blk_mq_hw_sysfs_ipi.attr,
304 &blk_mq_hw_sysfs_tags.attr, 297 &blk_mq_hw_sysfs_tags.attr,
298 &blk_mq_hw_sysfs_cpus.attr,
299 &blk_mq_hw_sysfs_active.attr,
305 NULL, 300 NULL,
306}; 301};
307 302
@@ -332,6 +327,42 @@ static struct kobj_type blk_mq_hw_ktype = {
332 .release = blk_mq_sysfs_release, 327 .release = blk_mq_sysfs_release,
333}; 328};
334 329
330static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
331{
332 struct blk_mq_ctx *ctx;
333 int i;
334
335 if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
336 return;
337
338 hctx_for_each_ctx(hctx, ctx, i)
339 kobject_del(&ctx->kobj);
340
341 kobject_del(&hctx->kobj);
342}
343
344static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
345{
346 struct request_queue *q = hctx->queue;
347 struct blk_mq_ctx *ctx;
348 int i, ret;
349
350 if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
351 return 0;
352
353 ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
354 if (ret)
355 return ret;
356
357 hctx_for_each_ctx(hctx, ctx, i) {
358 ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
359 if (ret)
360 break;
361 }
362
363 return ret;
364}
365
335void blk_mq_unregister_disk(struct gendisk *disk) 366void blk_mq_unregister_disk(struct gendisk *disk)
336{ 367{
337 struct request_queue *q = disk->queue; 368 struct request_queue *q = disk->queue;
@@ -340,11 +371,11 @@ void blk_mq_unregister_disk(struct gendisk *disk)
340 int i, j; 371 int i, j;
341 372
342 queue_for_each_hw_ctx(q, hctx, i) { 373 queue_for_each_hw_ctx(q, hctx, i) {
343 hctx_for_each_ctx(hctx, ctx, j) { 374 blk_mq_unregister_hctx(hctx);
344 kobject_del(&ctx->kobj); 375
376 hctx_for_each_ctx(hctx, ctx, j)
345 kobject_put(&ctx->kobj); 377 kobject_put(&ctx->kobj);
346 } 378
347 kobject_del(&hctx->kobj);
348 kobject_put(&hctx->kobj); 379 kobject_put(&hctx->kobj);
349 } 380 }
350 381
@@ -355,15 +386,30 @@ void blk_mq_unregister_disk(struct gendisk *disk)
355 kobject_put(&disk_to_dev(disk)->kobj); 386 kobject_put(&disk_to_dev(disk)->kobj);
356} 387}
357 388
389static void blk_mq_sysfs_init(struct request_queue *q)
390{
391 struct blk_mq_hw_ctx *hctx;
392 struct blk_mq_ctx *ctx;
393 int i, j;
394
395 kobject_init(&q->mq_kobj, &blk_mq_ktype);
396
397 queue_for_each_hw_ctx(q, hctx, i) {
398 kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
399
400 hctx_for_each_ctx(hctx, ctx, j)
401 kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
402 }
403}
404
358int blk_mq_register_disk(struct gendisk *disk) 405int blk_mq_register_disk(struct gendisk *disk)
359{ 406{
360 struct device *dev = disk_to_dev(disk); 407 struct device *dev = disk_to_dev(disk);
361 struct request_queue *q = disk->queue; 408 struct request_queue *q = disk->queue;
362 struct blk_mq_hw_ctx *hctx; 409 struct blk_mq_hw_ctx *hctx;
363 struct blk_mq_ctx *ctx; 410 int ret, i;
364 int ret, i, j;
365 411
366 kobject_init(&q->mq_kobj, &blk_mq_ktype); 412 blk_mq_sysfs_init(q);
367 413
368 ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); 414 ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
369 if (ret < 0) 415 if (ret < 0)
@@ -372,20 +418,10 @@ int blk_mq_register_disk(struct gendisk *disk)
372 kobject_uevent(&q->mq_kobj, KOBJ_ADD); 418 kobject_uevent(&q->mq_kobj, KOBJ_ADD);
373 419
374 queue_for_each_hw_ctx(q, hctx, i) { 420 queue_for_each_hw_ctx(q, hctx, i) {
375 kobject_init(&hctx->kobj, &blk_mq_hw_ktype); 421 hctx->flags |= BLK_MQ_F_SYSFS_UP;
376 ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i); 422 ret = blk_mq_register_hctx(hctx);
377 if (ret) 423 if (ret)
378 break; 424 break;
379
380 if (!hctx->nr_ctx)
381 continue;
382
383 hctx_for_each_ctx(hctx, ctx, j) {
384 kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
385 ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
386 if (ret)
387 break;
388 }
389 } 425 }
390 426
391 if (ret) { 427 if (ret) {
@@ -395,3 +431,26 @@ int blk_mq_register_disk(struct gendisk *disk)
395 431
396 return 0; 432 return 0;
397} 433}
434
435void blk_mq_sysfs_unregister(struct request_queue *q)
436{
437 struct blk_mq_hw_ctx *hctx;
438 int i;
439
440 queue_for_each_hw_ctx(q, hctx, i)
441 blk_mq_unregister_hctx(hctx);
442}
443
444int blk_mq_sysfs_register(struct request_queue *q)
445{
446 struct blk_mq_hw_ctx *hctx;
447 int i, ret = 0;
448
449 queue_for_each_hw_ctx(q, hctx, i) {
450 ret = blk_mq_register_hctx(hctx);
451 if (ret)
452 break;
453 }
454
455 return ret;
456}
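
The new cpu_list attribute builds its output by repeatedly appending with sprintf(ret + page, ...). The same pattern in a stand-alone form, with names of my own choosing:

#include <stdio.h>

static int cpus_show(char *page, const int *cpus, int n)
{
	int ret = 0;
	int i;

	for (i = 0; i < n; i++)
		ret += sprintf(page + ret, i ? ", %d" : "%d", cpus[i]);
	ret += sprintf(page + ret, "\n");
	return ret;		/* bytes written, as a sysfs show() returns */
}

int main(void)
{
	char page[128];
	static const int cpus[] = { 0, 1, 4, 5 };

	cpus_show(page, cpus, 4);
	fputs(page, stdout);	/* prints: 0, 1, 4, 5 */
	return 0;
}
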
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 83ae96c51a27..1aab39f71d95 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -1,78 +1,359 @@
1/*
2 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
3 * over multiple cachelines to avoid ping-pong between multiple submitters
 4 * or submitter and completer. Uses rolling wakeups to avoid falling off
5 * the scaling cliff when we run out of tags and have to start putting
6 * submitters to sleep.
7 *
8 * Uses active queue tracking to support fairer distribution of tags
9 * between multiple submitters when a shared tag map is used.
10 *
11 * Copyright (C) 2013-2014 Jens Axboe
12 */
1#include <linux/kernel.h> 13#include <linux/kernel.h>
2#include <linux/module.h> 14#include <linux/module.h>
3#include <linux/percpu_ida.h> 15#include <linux/random.h>
4 16
5#include <linux/blk-mq.h> 17#include <linux/blk-mq.h>
6#include "blk.h" 18#include "blk.h"
7#include "blk-mq.h" 19#include "blk-mq.h"
8#include "blk-mq-tag.h" 20#include "blk-mq-tag.h"
9 21
22static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
23{
24 int i;
25
26 for (i = 0; i < bt->map_nr; i++) {
27 struct blk_align_bitmap *bm = &bt->map[i];
28 int ret;
29
30 ret = find_first_zero_bit(&bm->word, bm->depth);
31 if (ret < bm->depth)
32 return true;
33 }
34
35 return false;
36}
37
38bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
39{
40 if (!tags)
41 return true;
42
43 return bt_has_free_tags(&tags->bitmap_tags);
44}
45
46static inline void bt_index_inc(unsigned int *index)
47{
48 *index = (*index + 1) & (BT_WAIT_QUEUES - 1);
49}
50
10/* 51/*
11 * Per tagged queue (tag address space) map 52 * If a previously inactive queue goes active, bump the active user count.
12 */ 53 */
13struct blk_mq_tags { 54bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
14 unsigned int nr_tags; 55{
15 unsigned int nr_reserved_tags; 56 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
16 unsigned int nr_batch_move; 57 !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
17 unsigned int nr_max_cache; 58 atomic_inc(&hctx->tags->active_queues);
18 59
19 struct percpu_ida free_tags; 60 return true;
20 struct percpu_ida reserved_tags; 61}
21};
22 62
23void blk_mq_wait_for_tags(struct blk_mq_tags *tags) 63/*
 64 * Wake up all waiters potentially sleeping on normal (non-reserved) tags
65 */
66static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
24{ 67{
25 int tag = blk_mq_get_tag(tags, __GFP_WAIT, false); 68 struct blk_mq_bitmap_tags *bt;
26 blk_mq_put_tag(tags, tag); 69 int i, wake_index;
70
71 bt = &tags->bitmap_tags;
72 wake_index = bt->wake_index;
73 for (i = 0; i < BT_WAIT_QUEUES; i++) {
74 struct bt_wait_state *bs = &bt->bs[wake_index];
75
76 if (waitqueue_active(&bs->wait))
77 wake_up(&bs->wait);
78
79 bt_index_inc(&wake_index);
80 }
27} 81}
28 82
29bool blk_mq_has_free_tags(struct blk_mq_tags *tags) 83/*
84 * If a previously busy queue goes inactive, potential waiters could now
85 * be allowed to queue. Wake them up and check.
86 */
87void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
88{
89 struct blk_mq_tags *tags = hctx->tags;
90
91 if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
92 return;
93
94 atomic_dec(&tags->active_queues);
95
96 blk_mq_tag_wakeup_all(tags);
97}
98
99/*
100 * For shared tag users, we track the number of currently active users
101 * and attempt to provide a fair share of the tag depth for each of them.
102 */
103static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
104 struct blk_mq_bitmap_tags *bt)
105{
106 unsigned int depth, users;
107
108 if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
109 return true;
110 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
111 return true;
112
113 /*
114 * Don't try dividing an ant
115 */
116 if (bt->depth == 1)
117 return true;
118
119 users = atomic_read(&hctx->tags->active_queues);
120 if (!users)
121 return true;
122
123 /*
124 * Allow at least some tags
125 */
126 depth = max((bt->depth + users - 1) / users, 4U);
127 return atomic_read(&hctx->nr_active) < depth;
128}
129
130static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
30{ 131{
31 return !tags || 132 int tag, org_last_tag, end;
32 percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0; 133
134 org_last_tag = last_tag;
135 end = bm->depth;
136 do {
137restart:
138 tag = find_next_zero_bit(&bm->word, end, last_tag);
139 if (unlikely(tag >= end)) {
140 /*
141 * We started with an offset, start from 0 to
142 * exhaust the map.
143 */
144 if (org_last_tag && last_tag) {
145 end = last_tag;
146 last_tag = 0;
147 goto restart;
148 }
149 return -1;
150 }
151 last_tag = tag + 1;
152 } while (test_and_set_bit_lock(tag, &bm->word));
153
154 return tag;
33} 155}
34 156
35static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp) 157/*
158 * Straight forward bitmap tag implementation, where each bit is a tag
159 * (cleared == free, and set == busy). The small twist is using per-cpu
160 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
161 * contexts. This enables us to drastically limit the space searched,
162 * without dirtying an extra shared cacheline like we would if we stored
163 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
164 * of that, each word of tags is in a separate cacheline. This means that
165 * multiple users will tend to stick to different cachelines, at least
166 * until the map is exhausted.
167 */
168static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
169 unsigned int *tag_cache)
36{ 170{
171 unsigned int last_tag, org_last_tag;
172 int index, i, tag;
173
174 if (!hctx_may_queue(hctx, bt))
175 return -1;
176
177 last_tag = org_last_tag = *tag_cache;
178 index = TAG_TO_INDEX(bt, last_tag);
179
180 for (i = 0; i < bt->map_nr; i++) {
181 tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
182 if (tag != -1) {
183 tag += (index << bt->bits_per_word);
184 goto done;
185 }
186
187 last_tag = 0;
188 if (++index >= bt->map_nr)
189 index = 0;
190 }
191
192 *tag_cache = 0;
193 return -1;
194
195 /*
196 * Only update the cache from the allocation path, if we ended
197 * up using the specific cached tag.
198 */
199done:
200 if (tag == org_last_tag) {
201 last_tag = tag + 1;
202 if (last_tag >= bt->depth - 1)
203 last_tag = 0;
204
205 *tag_cache = last_tag;
206 }
207
208 return tag;
209}
210
211static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
212 struct blk_mq_hw_ctx *hctx)
213{
214 struct bt_wait_state *bs;
215
216 if (!hctx)
217 return &bt->bs[0];
218
219 bs = &bt->bs[hctx->wait_index];
220 bt_index_inc(&hctx->wait_index);
221 return bs;
222}
223
224static int bt_get(struct blk_mq_alloc_data *data,
225 struct blk_mq_bitmap_tags *bt,
226 struct blk_mq_hw_ctx *hctx,
227 unsigned int *last_tag)
228{
229 struct bt_wait_state *bs;
230 DEFINE_WAIT(wait);
37 int tag; 231 int tag;
38 232
39 tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ? 233 tag = __bt_get(hctx, bt, last_tag);
40 TASK_UNINTERRUPTIBLE : TASK_RUNNING); 234 if (tag != -1)
41 if (tag < 0) 235 return tag;
42 return BLK_MQ_TAG_FAIL; 236
43 return tag + tags->nr_reserved_tags; 237 if (!(data->gfp & __GFP_WAIT))
238 return -1;
239
240 bs = bt_wait_ptr(bt, hctx);
241 do {
242 bool was_empty;
243
244 was_empty = list_empty(&wait.task_list);
245 prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
246
247 tag = __bt_get(hctx, bt, last_tag);
248 if (tag != -1)
249 break;
250
251 if (was_empty)
252 atomic_set(&bs->wait_cnt, bt->wake_cnt);
253
254 blk_mq_put_ctx(data->ctx);
255
256 io_schedule();
257
258 data->ctx = blk_mq_get_ctx(data->q);
259 data->hctx = data->q->mq_ops->map_queue(data->q,
260 data->ctx->cpu);
261 if (data->reserved) {
262 bt = &data->hctx->tags->breserved_tags;
263 } else {
264 last_tag = &data->ctx->last_tag;
265 hctx = data->hctx;
266 bt = &hctx->tags->bitmap_tags;
267 }
268 finish_wait(&bs->wait, &wait);
269 bs = bt_wait_ptr(bt, hctx);
270 } while (1);
271
272 finish_wait(&bs->wait, &wait);
273 return tag;
44} 274}
45 275
46static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags, 276static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
47 gfp_t gfp)
48{ 277{
49 int tag; 278 int tag;
50 279
51 if (unlikely(!tags->nr_reserved_tags)) { 280 tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
281 &data->ctx->last_tag);
282 if (tag >= 0)
283 return tag + data->hctx->tags->nr_reserved_tags;
284
285 return BLK_MQ_TAG_FAIL;
286}
287
288static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
289{
290 int tag, zero = 0;
291
292 if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
52 WARN_ON_ONCE(1); 293 WARN_ON_ONCE(1);
53 return BLK_MQ_TAG_FAIL; 294 return BLK_MQ_TAG_FAIL;
54 } 295 }
55 296
56 tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ? 297 tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
57 TASK_UNINTERRUPTIBLE : TASK_RUNNING);
58 if (tag < 0) 298 if (tag < 0)
59 return BLK_MQ_TAG_FAIL; 299 return BLK_MQ_TAG_FAIL;
300
60 return tag; 301 return tag;
61} 302}
62 303
63unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved) 304unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
64{ 305{
65 if (!reserved) 306 if (!data->reserved)
66 return __blk_mq_get_tag(tags, gfp); 307 return __blk_mq_get_tag(data);
67 308
68 return __blk_mq_get_reserved_tag(tags, gfp); 309 return __blk_mq_get_reserved_tag(data);
310}
311
312static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
313{
314 int i, wake_index;
315
316 wake_index = bt->wake_index;
317 for (i = 0; i < BT_WAIT_QUEUES; i++) {
318 struct bt_wait_state *bs = &bt->bs[wake_index];
319
320 if (waitqueue_active(&bs->wait)) {
321 if (wake_index != bt->wake_index)
322 bt->wake_index = wake_index;
323
324 return bs;
325 }
326
327 bt_index_inc(&wake_index);
328 }
329
330 return NULL;
331}
332
333static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
334{
335 const int index = TAG_TO_INDEX(bt, tag);
336 struct bt_wait_state *bs;
337
338 /*
 339 * The unlock memory barrier needs to order access to req in the free
 340 * path against the clearing of the tag bit
341 */
342 clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);
343
344 bs = bt_wake_ptr(bt);
345 if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
346 atomic_set(&bs->wait_cnt, bt->wake_cnt);
347 bt_index_inc(&bt->wake_index);
348 wake_up(&bs->wait);
349 }
69} 350}
70 351
71static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag) 352static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
72{ 353{
73 BUG_ON(tag >= tags->nr_tags); 354 BUG_ON(tag >= tags->nr_tags);
74 355
75 percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags); 356 bt_clear_tag(&tags->bitmap_tags, tag);
76} 357}
77 358
78static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags, 359static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
@@ -80,22 +361,43 @@ static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
80{ 361{
81 BUG_ON(tag >= tags->nr_reserved_tags); 362 BUG_ON(tag >= tags->nr_reserved_tags);
82 363
83 percpu_ida_free(&tags->reserved_tags, tag); 364 bt_clear_tag(&tags->breserved_tags, tag);
84} 365}
85 366
86void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag) 367void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
368 unsigned int *last_tag)
87{ 369{
88 if (tag >= tags->nr_reserved_tags) 370 struct blk_mq_tags *tags = hctx->tags;
89 __blk_mq_put_tag(tags, tag); 371
90 else 372 if (tag >= tags->nr_reserved_tags) {
373 const int real_tag = tag - tags->nr_reserved_tags;
374
375 __blk_mq_put_tag(tags, real_tag);
376 *last_tag = real_tag;
377 } else
91 __blk_mq_put_reserved_tag(tags, tag); 378 __blk_mq_put_reserved_tag(tags, tag);
92} 379}
93 380
94static int __blk_mq_tag_iter(unsigned id, void *data) 381static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
382 unsigned long *free_map, unsigned int off)
95{ 383{
96 unsigned long *tag_map = data; 384 int i;
97 __set_bit(id, tag_map); 385
98 return 0; 386 for (i = 0; i < bt->map_nr; i++) {
387 struct blk_align_bitmap *bm = &bt->map[i];
388 int bit = 0;
389
390 do {
391 bit = find_next_zero_bit(&bm->word, bm->depth, bit);
392 if (bit >= bm->depth)
393 break;
394
395 __set_bit(bit + off, free_map);
396 bit++;
397 } while (1);
398
399 off += (1 << bt->bits_per_word);
400 }
99} 401}
100 402
101void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, 403void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
@@ -109,21 +411,128 @@ void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
109 if (!tag_map) 411 if (!tag_map)
110 return; 412 return;
111 413
112 percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map); 414 bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
113 if (tags->nr_reserved_tags) 415 if (tags->nr_reserved_tags)
114 percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter, 416 bt_for_each_free(&tags->breserved_tags, tag_map, 0);
115 tag_map);
116 417
117 fn(data, tag_map); 418 fn(data, tag_map);
118 kfree(tag_map); 419 kfree(tag_map);
119} 420}
421EXPORT_SYMBOL(blk_mq_tag_busy_iter);
422
423static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
424{
425 unsigned int i, used;
426
427 for (i = 0, used = 0; i < bt->map_nr; i++) {
428 struct blk_align_bitmap *bm = &bt->map[i];
429
430 used += bitmap_weight(&bm->word, bm->depth);
431 }
432
433 return bt->depth - used;
434}
435
436static void bt_update_count(struct blk_mq_bitmap_tags *bt,
437 unsigned int depth)
438{
439 unsigned int tags_per_word = 1U << bt->bits_per_word;
440 unsigned int map_depth = depth;
441
442 if (depth) {
443 int i;
444
445 for (i = 0; i < bt->map_nr; i++) {
446 bt->map[i].depth = min(map_depth, tags_per_word);
447 map_depth -= bt->map[i].depth;
448 }
449 }
450
451 bt->wake_cnt = BT_WAIT_BATCH;
452 if (bt->wake_cnt > depth / 4)
453 bt->wake_cnt = max(1U, depth / 4);
454
455 bt->depth = depth;
456}
457
458static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
459 int node, bool reserved)
460{
461 int i;
462
463 bt->bits_per_word = ilog2(BITS_PER_LONG);
464
465 /*
 466 * Depth can be zero for reserved tags; that's not a failure
467 * condition.
468 */
469 if (depth) {
470 unsigned int nr, tags_per_word;
471
472 tags_per_word = (1 << bt->bits_per_word);
473
474 /*
475 * If the tag space is small, shrink the number of tags
476 * per word so we spread over a few cachelines, at least.
477 * If less than 4 tags, just forget about it, it's not
478 * going to work optimally anyway.
479 */
480 if (depth >= 4) {
481 while (tags_per_word * 4 > depth) {
482 bt->bits_per_word--;
483 tags_per_word = (1 << bt->bits_per_word);
484 }
485 }
486
487 nr = ALIGN(depth, tags_per_word) / tags_per_word;
488 bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
489 GFP_KERNEL, node);
490 if (!bt->map)
491 return -ENOMEM;
492
493 bt->map_nr = nr;
494 }
495
496 bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
497 if (!bt->bs) {
498 kfree(bt->map);
499 return -ENOMEM;
500 }
501
502 for (i = 0; i < BT_WAIT_QUEUES; i++)
503 init_waitqueue_head(&bt->bs[i].wait);
504
505 bt_update_count(bt, depth);
506 return 0;
507}
508
509static void bt_free(struct blk_mq_bitmap_tags *bt)
510{
511 kfree(bt->map);
512 kfree(bt->bs);
513}
514
515static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
516 int node)
517{
518 unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
519
520 if (bt_alloc(&tags->bitmap_tags, depth, node, false))
521 goto enomem;
522 if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
523 goto enomem;
524
525 return tags;
526enomem:
527 bt_free(&tags->bitmap_tags);
528 kfree(tags);
529 return NULL;
530}
120 531
121struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, 532struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
122 unsigned int reserved_tags, int node) 533 unsigned int reserved_tags, int node)
123{ 534{
124 unsigned int nr_tags, nr_cache;
125 struct blk_mq_tags *tags; 535 struct blk_mq_tags *tags;
126 int ret;
127 536
128 if (total_tags > BLK_MQ_TAG_MAX) { 537 if (total_tags > BLK_MQ_TAG_MAX) {
129 pr_err("blk-mq: tag depth too large\n"); 538 pr_err("blk-mq: tag depth too large\n");
@@ -134,73 +543,59 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
134 if (!tags) 543 if (!tags)
135 return NULL; 544 return NULL;
136 545
137 nr_tags = total_tags - reserved_tags;
138 nr_cache = nr_tags / num_possible_cpus();
139
140 if (nr_cache < BLK_MQ_TAG_CACHE_MIN)
141 nr_cache = BLK_MQ_TAG_CACHE_MIN;
142 else if (nr_cache > BLK_MQ_TAG_CACHE_MAX)
143 nr_cache = BLK_MQ_TAG_CACHE_MAX;
144
145 tags->nr_tags = total_tags; 546 tags->nr_tags = total_tags;
146 tags->nr_reserved_tags = reserved_tags; 547 tags->nr_reserved_tags = reserved_tags;
147 tags->nr_max_cache = nr_cache;
148 tags->nr_batch_move = max(1u, nr_cache / 2);
149 548
150 ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags - 549 return blk_mq_init_bitmap_tags(tags, node);
151 tags->nr_reserved_tags, 550}
152 tags->nr_max_cache,
153 tags->nr_batch_move);
154 if (ret)
155 goto err_free_tags;
156 551
157 if (reserved_tags) { 552void blk_mq_free_tags(struct blk_mq_tags *tags)
158 /* 553{
159 * With max_cahe and batch set to 1, the allocator fallbacks to 554 bt_free(&tags->bitmap_tags);
160 * no cached. It's fine reserved tags allocation is slow. 555 bt_free(&tags->breserved_tags);
161 */ 556 kfree(tags);
162 ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags, 557}
163 1, 1);
164 if (ret)
165 goto err_reserved_tags;
166 }
167 558
168 return tags; 559void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
560{
561 unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
169 562
170err_reserved_tags: 563 *tag = prandom_u32() % depth;
171 percpu_ida_destroy(&tags->free_tags);
172err_free_tags:
173 kfree(tags);
174 return NULL;
175} 564}
176 565
177void blk_mq_free_tags(struct blk_mq_tags *tags) 566int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
178{ 567{
179 percpu_ida_destroy(&tags->free_tags); 568 tdepth -= tags->nr_reserved_tags;
180 percpu_ida_destroy(&tags->reserved_tags); 569 if (tdepth > tags->nr_tags)
181 kfree(tags); 570 return -EINVAL;
571
572 /*
573 * Don't need (or can't) update reserved tags here, they remain
574 * static and should never need resizing.
575 */
576 bt_update_count(&tags->bitmap_tags, tdepth);
577 blk_mq_tag_wakeup_all(tags);
578 return 0;
182} 579}
183 580
184ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) 581ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
185{ 582{
186 char *orig_page = page; 583 char *orig_page = page;
187 unsigned int cpu; 584 unsigned int free, res;
188 585
189 if (!tags) 586 if (!tags)
190 return 0; 587 return 0;
191 588
192 page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u," 589 page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
193 " max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags, 590 "bits_per_word=%u\n",
194 tags->nr_batch_move, tags->nr_max_cache); 591 tags->nr_tags, tags->nr_reserved_tags,
592 tags->bitmap_tags.bits_per_word);
195 593
196 page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", 594 free = bt_unused_tags(&tags->bitmap_tags);
197 percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids), 595 res = bt_unused_tags(&tags->breserved_tags);
198 percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids));
199 596
200 for_each_possible_cpu(cpu) { 597 page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
201 page += sprintf(page, " cpu%02u: nr_free=%u\n", cpu, 598 page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
202 percpu_ida_free_tags(&tags->free_tags, cpu));
203 }
204 599
205 return page - orig_page; 600 return page - orig_page;
206} 601}
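
Two sizing rules in the new tagging code are easy to check by hand: hctx_may_queue() grants each active user of a shared map roughly ceil(depth / users) tags with a floor of four, and bt_update_count() caps the wakeup batch at depth / 4 with a floor of one. A small worked example, with values chosen purely for illustration:

#include <stdio.h>

#define BT_WAIT_BATCH 8U

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/* hctx_may_queue(): each of @users active queues gets ceil(depth/users), min 4 */
static unsigned int fair_share(unsigned int depth, unsigned int users)
{
	return max_u((depth + users - 1) / users, 4U);
}

/* bt_update_count(): wake a batch of waiters, capped at depth / 4, min 1 */
static unsigned int wake_cnt(unsigned int depth)
{
	unsigned int cnt = BT_WAIT_BATCH;

	if (cnt > depth / 4)
		cnt = max_u(1U, depth / 4);
	return cnt;
}

int main(void)
{
	printf("depth 64, 3 users -> %u tags each\n", fair_share(64, 3)); /* 22 */
	printf("depth 8,  4 users -> %u tags each\n", fair_share(8, 4));  /* 4, the floor */
	printf("depth 64 -> wake_cnt %u\n", wake_cnt(64));                /* 8 */
	printf("depth 16 -> wake_cnt %u\n", wake_cnt(16));                /* 4 */
	return 0;
}
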
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 947ba2c6148e..98696a65d4d4 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -1,17 +1,59 @@
1#ifndef INT_BLK_MQ_TAG_H 1#ifndef INT_BLK_MQ_TAG_H
2#define INT_BLK_MQ_TAG_H 2#define INT_BLK_MQ_TAG_H
3 3
4struct blk_mq_tags; 4#include "blk-mq.h"
5
6enum {
7 BT_WAIT_QUEUES = 8,
8 BT_WAIT_BATCH = 8,
9};
10
11struct bt_wait_state {
12 atomic_t wait_cnt;
13 wait_queue_head_t wait;
14} ____cacheline_aligned_in_smp;
15
16#define TAG_TO_INDEX(bt, tag) ((tag) >> (bt)->bits_per_word)
17#define TAG_TO_BIT(bt, tag) ((tag) & ((1 << (bt)->bits_per_word) - 1))
18
19struct blk_mq_bitmap_tags {
20 unsigned int depth;
21 unsigned int wake_cnt;
22 unsigned int bits_per_word;
23
24 unsigned int map_nr;
25 struct blk_align_bitmap *map;
26
27 unsigned int wake_index;
28 struct bt_wait_state *bs;
29};
30
31/*
32 * Tag address space map.
33 */
34struct blk_mq_tags {
35 unsigned int nr_tags;
36 unsigned int nr_reserved_tags;
37
38 atomic_t active_queues;
39
40 struct blk_mq_bitmap_tags bitmap_tags;
41 struct blk_mq_bitmap_tags breserved_tags;
42
43 struct request **rqs;
44 struct list_head page_list;
45};
46
5 47
6extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node); 48extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node);
7extern void blk_mq_free_tags(struct blk_mq_tags *tags); 49extern void blk_mq_free_tags(struct blk_mq_tags *tags);
8 50
9extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved); 51extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
10extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags); 52extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
11extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag);
12extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
13extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); 53extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
14extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); 54extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
55extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
56extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
15 57
16enum { 58enum {
17 BLK_MQ_TAG_CACHE_MIN = 1, 59 BLK_MQ_TAG_CACHE_MIN = 1,
@@ -24,4 +66,23 @@ enum {
24 BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1, 66 BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1,
25}; 67};
26 68
69extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
70extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
71
72static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
73{
74 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
75 return false;
76
77 return __blk_mq_tag_busy(hctx);
78}
79
80static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
81{
82 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
83 return;
84
85 __blk_mq_tag_idle(hctx);
86}
87
27#endif 88#endif
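
TAG_TO_INDEX()/TAG_TO_BIT() split a tag into a word index and a bit offset using bits_per_word, and bt_alloc() shrinks bits_per_word until a small tag space still covers a handful of words. A user-space sketch of both, with the macros reshaped to take bits_per_word directly and illustrative limits:

#include <stdio.h>

#define TAG_TO_INDEX(bits_per_word, tag) ((tag) >> (bits_per_word))
#define TAG_TO_BIT(bits_per_word, tag)   ((tag) & ((1U << (bits_per_word)) - 1))

/* mirror of the sizing loop: aim for at least four words when depth >= 4 */
static unsigned int pick_bits_per_word(unsigned int depth)
{
	unsigned int bits = 6;		/* ilog2(64), i.e. 64-bit BITS_PER_LONG */
	unsigned int per_word = 1U << bits;

	if (depth >= 4) {
		while (per_word * 4 > depth) {
			bits--;
			per_word = 1U << bits;
		}
	}
	return bits;
}

int main(void)
{
	unsigned int depth = 32;
	unsigned int bits = pick_bits_per_word(depth);	/* 3 -> 8 tags per word */
	unsigned int tag = 21;

	printf("depth %u: %u bits per word\n", depth, bits);
	printf("tag %u -> word %u, bit %u\n", tag,
	       TAG_TO_INDEX(bits, tag), TAG_TO_BIT(bits, tag));
	return 0;
}
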
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 883f72089015..4e4cd6208052 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1,3 +1,9 @@
1/*
2 * Block multiqueue core code
3 *
4 * Copyright (C) 2013-2014 Jens Axboe
5 * Copyright (C) 2013-2014 Christoph Hellwig
6 */
1#include <linux/kernel.h> 7#include <linux/kernel.h>
2#include <linux/module.h> 8#include <linux/module.h>
3#include <linux/backing-dev.h> 9#include <linux/backing-dev.h>
@@ -27,28 +33,6 @@ static LIST_HEAD(all_q_list);
27 33
28static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx); 34static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
29 35
30static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
31 unsigned int cpu)
32{
33 return per_cpu_ptr(q->queue_ctx, cpu);
34}
35
36/*
37 * This assumes per-cpu software queueing queues. They could be per-node
38 * as well, for instance. For now this is hardcoded as-is. Note that we don't
39 * care about preemption, since we know the ctx's are persistent. This does
40 * mean that we can't rely on ctx always matching the currently running CPU.
41 */
42static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
43{
44 return __blk_mq_get_ctx(q, get_cpu());
45}
46
47static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
48{
49 put_cpu();
50}
51
52/* 36/*
53 * Check if any of the ctx's have pending work in this hardware queue 37 * Check if any of the ctx's have pending work in this hardware queue
54 */ 38 */
@@ -56,38 +40,40 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
56{ 40{
57 unsigned int i; 41 unsigned int i;
58 42
59 for (i = 0; i < hctx->nr_ctx_map; i++) 43 for (i = 0; i < hctx->ctx_map.map_size; i++)
60 if (hctx->ctx_map[i]) 44 if (hctx->ctx_map.map[i].word)
61 return true; 45 return true;
62 46
63 return false; 47 return false;
64} 48}
65 49
50static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
51 struct blk_mq_ctx *ctx)
52{
53 return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
54}
55
56#define CTX_TO_BIT(hctx, ctx) \
57 ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
58
66/* 59/*
67 * Mark this ctx as having pending work in this hardware queue 60 * Mark this ctx as having pending work in this hardware queue
68 */ 61 */
69static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, 62static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
70 struct blk_mq_ctx *ctx) 63 struct blk_mq_ctx *ctx)
71{ 64{
72 if (!test_bit(ctx->index_hw, hctx->ctx_map)) 65 struct blk_align_bitmap *bm = get_bm(hctx, ctx);
73 set_bit(ctx->index_hw, hctx->ctx_map); 66
67 if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
68 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
74} 69}
75 70
76static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, 71static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
77 gfp_t gfp, bool reserved) 72 struct blk_mq_ctx *ctx)
78{ 73{
79 struct request *rq; 74 struct blk_align_bitmap *bm = get_bm(hctx, ctx);
80 unsigned int tag;
81
82 tag = blk_mq_get_tag(hctx->tags, gfp, reserved);
83 if (tag != BLK_MQ_TAG_FAIL) {
84 rq = hctx->rqs[tag];
85 rq->tag = tag;
86 75
87 return rq; 76 clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
88 }
89
90 return NULL;
91} 77}
92 78
93static int blk_mq_queue_enter(struct request_queue *q) 79static int blk_mq_queue_enter(struct request_queue *q)
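
The reworked hctx->ctx_map above tracks pending software queues in per-word bitmaps, so marking a ctx busy touches only the word it hashes to (in the kernel each word sits in its own cache line). A minimal stand-alone model with made-up sizes:

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_WORD 8U		/* illustrative, not the kernel's value */
#define NR_CTX        20U
#define MAP_SIZE      ((NR_CTX + BITS_PER_WORD - 1) / BITS_PER_WORD)

static unsigned long ctx_map[MAP_SIZE];

static void mark_pending(unsigned int index_hw)
{
	ctx_map[index_hw / BITS_PER_WORD] |=
		1UL << (index_hw & (BITS_PER_WORD - 1));
}

static void clear_pending(unsigned int index_hw)
{
	ctx_map[index_hw / BITS_PER_WORD] &=
		~(1UL << (index_hw & (BITS_PER_WORD - 1)));
}

static bool has_pending(void)
{
	unsigned int i;

	for (i = 0; i < MAP_SIZE; i++)
		if (ctx_map[i])
			return true;
	return false;
}

int main(void)
{
	mark_pending(13);
	printf("pending after mark:  %d\n", has_pending());	/* 1 */
	clear_pending(13);
	printf("pending after clear: %d\n", has_pending());	/* 0 */
	return 0;
}
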
@@ -186,78 +172,99 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
186 if (blk_queue_io_stat(q)) 172 if (blk_queue_io_stat(q))
187 rw_flags |= REQ_IO_STAT; 173 rw_flags |= REQ_IO_STAT;
188 174
175 INIT_LIST_HEAD(&rq->queuelist);
176 /* csd/requeue_work/fifo_time is initialized before use */
177 rq->q = q;
189 rq->mq_ctx = ctx; 178 rq->mq_ctx = ctx;
190 rq->cmd_flags = rw_flags; 179 rq->cmd_flags |= rw_flags;
191 rq->start_time = jiffies; 180 /* do not touch atomic flags, it needs atomic ops against the timer */
181 rq->cpu = -1;
182 INIT_HLIST_NODE(&rq->hash);
183 RB_CLEAR_NODE(&rq->rb_node);
184 rq->rq_disk = NULL;
185 rq->part = NULL;
186#ifdef CONFIG_BLK_CGROUP
187 rq->rl = NULL;
192 set_start_time_ns(rq); 188 set_start_time_ns(rq);
189 rq->io_start_time_ns = 0;
190#endif
191 rq->nr_phys_segments = 0;
192#if defined(CONFIG_BLK_DEV_INTEGRITY)
193 rq->nr_integrity_segments = 0;
194#endif
195 rq->special = NULL;
196 /* tag was already set */
197 rq->errors = 0;
198
199 rq->extra_len = 0;
200 rq->sense_len = 0;
201 rq->resid_len = 0;
202 rq->sense = NULL;
203
204 INIT_LIST_HEAD(&rq->timeout_list);
205 rq->end_io = NULL;
206 rq->end_io_data = NULL;
207 rq->next_rq = NULL;
208
193 ctx->rq_dispatched[rw_is_sync(rw_flags)]++; 209 ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
194} 210}
195 211
196static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, 212static struct request *
197 int rw, gfp_t gfp, 213__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
198 bool reserved)
199{ 214{
200 struct request *rq; 215 struct request *rq;
216 unsigned int tag;
201 217
202 do { 218 tag = blk_mq_get_tag(data);
203 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); 219 if (tag != BLK_MQ_TAG_FAIL) {
204 struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); 220 rq = data->hctx->tags->rqs[tag];
205 221
206 rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved); 222 rq->cmd_flags = 0;
207 if (rq) { 223 if (blk_mq_tag_busy(data->hctx)) {
208 blk_mq_rq_ctx_init(q, ctx, rq, rw); 224 rq->cmd_flags = REQ_MQ_INFLIGHT;
209 break; 225 atomic_inc(&data->hctx->nr_active);
210 } 226 }
211 227
212 blk_mq_put_ctx(ctx); 228 rq->tag = tag;
213 if (!(gfp & __GFP_WAIT)) 229 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
214 break; 230 return rq;
215 231 }
216 __blk_mq_run_hw_queue(hctx);
217 blk_mq_wait_for_tags(hctx->tags);
218 } while (1);
219 232
220 return rq; 233 return NULL;
221} 234}
222 235
223struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp) 236struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
237 bool reserved)
224{ 238{
239 struct blk_mq_ctx *ctx;
240 struct blk_mq_hw_ctx *hctx;
225 struct request *rq; 241 struct request *rq;
242 struct blk_mq_alloc_data alloc_data;
226 243
227 if (blk_mq_queue_enter(q)) 244 if (blk_mq_queue_enter(q))
228 return NULL; 245 return NULL;
229 246
230 rq = blk_mq_alloc_request_pinned(q, rw, gfp, false); 247 ctx = blk_mq_get_ctx(q);
231 if (rq) 248 hctx = q->mq_ops->map_queue(q, ctx->cpu);
232 blk_mq_put_ctx(rq->mq_ctx); 249 blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
233 return rq; 250 reserved, ctx, hctx);
234}
235
236struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
237 gfp_t gfp)
238{
239 struct request *rq;
240 251
241 if (blk_mq_queue_enter(q)) 252 rq = __blk_mq_alloc_request(&alloc_data, rw);
242 return NULL; 253 if (!rq && (gfp & __GFP_WAIT)) {
254 __blk_mq_run_hw_queue(hctx);
255 blk_mq_put_ctx(ctx);
243 256
244 rq = blk_mq_alloc_request_pinned(q, rw, gfp, true); 257 ctx = blk_mq_get_ctx(q);
245 if (rq) 258 hctx = q->mq_ops->map_queue(q, ctx->cpu);
246 blk_mq_put_ctx(rq->mq_ctx); 259 blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
260 hctx);
261 rq = __blk_mq_alloc_request(&alloc_data, rw);
262 ctx = alloc_data.ctx;
263 }
264 blk_mq_put_ctx(ctx);
247 return rq; 265 return rq;
248} 266}
249EXPORT_SYMBOL(blk_mq_alloc_reserved_request); 267EXPORT_SYMBOL(blk_mq_alloc_request);
250
251/*
252 * Re-init and set pdu, if we have it
253 */
254void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
255{
256 blk_rq_init(hctx->queue, rq);
257
258 if (hctx->cmd_size)
259 rq->special = blk_mq_rq_to_pdu(rq);
260}
261 268
262static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, 269static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
263 struct blk_mq_ctx *ctx, struct request *rq) 270 struct blk_mq_ctx *ctx, struct request *rq)
@@ -265,9 +272,11 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
265 const int tag = rq->tag; 272 const int tag = rq->tag;
266 struct request_queue *q = rq->q; 273 struct request_queue *q = rq->q;
267 274
268 blk_mq_rq_init(hctx, rq); 275 if (rq->cmd_flags & REQ_MQ_INFLIGHT)
269 blk_mq_put_tag(hctx->tags, tag); 276 atomic_dec(&hctx->nr_active);
270 277
278 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
279 blk_mq_put_tag(hctx, tag, &ctx->last_tag);
271 blk_mq_queue_exit(q); 280 blk_mq_queue_exit(q);
272} 281}
273 282
@@ -283,20 +292,47 @@ void blk_mq_free_request(struct request *rq)
283 __blk_mq_free_request(hctx, ctx, rq); 292 __blk_mq_free_request(hctx, ctx, rq);
284} 293}
285 294
286bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes) 295/*
296 * Clone all relevant state from a request that has been put on hold in
297 * the flush state machine into the preallocated flush request that hangs
298 * off the request queue.
299 *
300 * For a driver the flush request should be invisible, that's why we are
301 * impersonating the original request here.
302 */
303void blk_mq_clone_flush_request(struct request *flush_rq,
304 struct request *orig_rq)
287{ 305{
288 if (blk_update_request(rq, error, blk_rq_bytes(rq))) 306 struct blk_mq_hw_ctx *hctx =
289 return true; 307 orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
290 308
309 flush_rq->mq_ctx = orig_rq->mq_ctx;
310 flush_rq->tag = orig_rq->tag;
311 memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
312 hctx->cmd_size);
313}
314
315inline void __blk_mq_end_io(struct request *rq, int error)
316{
291 blk_account_io_done(rq); 317 blk_account_io_done(rq);
292 318
293 if (rq->end_io) 319 if (rq->end_io) {
294 rq->end_io(rq, error); 320 rq->end_io(rq, error);
295 else 321 } else {
322 if (unlikely(blk_bidi_rq(rq)))
323 blk_mq_free_request(rq->next_rq);
296 blk_mq_free_request(rq); 324 blk_mq_free_request(rq);
297 return false; 325 }
298} 326}
299EXPORT_SYMBOL(blk_mq_end_io_partial); 327EXPORT_SYMBOL(__blk_mq_end_io);
328
329void blk_mq_end_io(struct request *rq, int error)
330{
331 if (blk_update_request(rq, error, blk_rq_bytes(rq)))
332 BUG();
333 __blk_mq_end_io(rq, error);
334}
335EXPORT_SYMBOL(blk_mq_end_io);
300 336
301static void __blk_mq_complete_request_remote(void *data) 337static void __blk_mq_complete_request_remote(void *data)
302{ 338{
@@ -305,28 +341,42 @@ static void __blk_mq_complete_request_remote(void *data)
305 rq->q->softirq_done_fn(rq); 341 rq->q->softirq_done_fn(rq);
306} 342}
307 343
308void __blk_mq_complete_request(struct request *rq) 344static void blk_mq_ipi_complete_request(struct request *rq)
309{ 345{
310 struct blk_mq_ctx *ctx = rq->mq_ctx; 346 struct blk_mq_ctx *ctx = rq->mq_ctx;
347 bool shared = false;
311 int cpu; 348 int cpu;
312 349
313 if (!ctx->ipi_redirect) { 350 if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
314 rq->q->softirq_done_fn(rq); 351 rq->q->softirq_done_fn(rq);
315 return; 352 return;
316 } 353 }
317 354
318 cpu = get_cpu(); 355 cpu = get_cpu();
319 if (cpu != ctx->cpu && cpu_online(ctx->cpu)) { 356 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
357 shared = cpus_share_cache(cpu, ctx->cpu);
358
359 if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
320 rq->csd.func = __blk_mq_complete_request_remote; 360 rq->csd.func = __blk_mq_complete_request_remote;
321 rq->csd.info = rq; 361 rq->csd.info = rq;
322 rq->csd.flags = 0; 362 rq->csd.flags = 0;
323 __smp_call_function_single(ctx->cpu, &rq->csd, 0); 363 smp_call_function_single_async(ctx->cpu, &rq->csd);
324 } else { 364 } else {
325 rq->q->softirq_done_fn(rq); 365 rq->q->softirq_done_fn(rq);
326 } 366 }
327 put_cpu(); 367 put_cpu();
328} 368}
329 369
370void __blk_mq_complete_request(struct request *rq)
371{
372 struct request_queue *q = rq->q;
373
374 if (!q->softirq_done_fn)
375 blk_mq_end_io(rq, rq->errors);
376 else
377 blk_mq_ipi_complete_request(rq);
378}
379
330/** 380/**
331 * blk_mq_complete_request - end I/O on a request 381 * blk_mq_complete_request - end I/O on a request
332 * @rq: the request being processed 382 * @rq: the request being processed
@@ -337,7 +387,9 @@ void __blk_mq_complete_request(struct request *rq)
337 **/ 387 **/
338void blk_mq_complete_request(struct request *rq) 388void blk_mq_complete_request(struct request *rq)
339{ 389{
340 if (unlikely(blk_should_fake_timeout(rq->q))) 390 struct request_queue *q = rq->q;
391
392 if (unlikely(blk_should_fake_timeout(q)))
341 return; 393 return;
342 if (!blk_mark_rq_complete(rq)) 394 if (!blk_mark_rq_complete(rq))
343 __blk_mq_complete_request(rq); 395 __blk_mq_complete_request(rq);
@@ -350,13 +402,31 @@ static void blk_mq_start_request(struct request *rq, bool last)
350 402
351 trace_block_rq_issue(q, rq); 403 trace_block_rq_issue(q, rq);
352 404
405 rq->resid_len = blk_rq_bytes(rq);
406 if (unlikely(blk_bidi_rq(rq)))
407 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
408
353 /* 409 /*
354 * Just mark start time and set the started bit. Due to memory 410 * Just mark start time and set the started bit. Due to memory
355 * ordering, we know we'll see the correct deadline as long as 411 * ordering, we know we'll see the correct deadline as long as
356 * REQ_ATOMIC_STARTED is seen. 412 * REQ_ATOMIC_STARTED is seen. Use the default queue timeout,
413 * unless one has been set in the request.
357 */ 414 */
358 rq->deadline = jiffies + q->rq_timeout; 415 if (!rq->timeout)
359 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); 416 rq->deadline = jiffies + q->rq_timeout;
417 else
418 rq->deadline = jiffies + rq->timeout;
419
420 /*
421 * Mark us as started and clear complete. Complete might have been
422 * set if requeue raced with timeout, which then marked it as
423 * complete. So be sure to clear complete again when we start
424 * the request, otherwise we'll ignore the completion event.
425 */
426 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
427 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
428 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
429 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
360 430
361 if (q->dma_drain_size && blk_rq_bytes(rq)) { 431 if (q->dma_drain_size && blk_rq_bytes(rq)) {
362 /* 432 /*
@@ -378,7 +448,7 @@ static void blk_mq_start_request(struct request *rq, bool last)
378 rq->cmd_flags |= REQ_END; 448 rq->cmd_flags |= REQ_END;
379} 449}
380 450
381static void blk_mq_requeue_request(struct request *rq) 451static void __blk_mq_requeue_request(struct request *rq)
382{ 452{
383 struct request_queue *q = rq->q; 453 struct request_queue *q = rq->q;
384 454
@@ -391,6 +461,91 @@ static void blk_mq_requeue_request(struct request *rq)
391 rq->nr_phys_segments--; 461 rq->nr_phys_segments--;
392} 462}
393 463
464void blk_mq_requeue_request(struct request *rq)
465{
466 __blk_mq_requeue_request(rq);
467 blk_clear_rq_complete(rq);
468
469 BUG_ON(blk_queued_rq(rq));
470 blk_mq_add_to_requeue_list(rq, true);
471}
472EXPORT_SYMBOL(blk_mq_requeue_request);
473
474static void blk_mq_requeue_work(struct work_struct *work)
475{
476 struct request_queue *q =
477 container_of(work, struct request_queue, requeue_work);
478 LIST_HEAD(rq_list);
479 struct request *rq, *next;
480 unsigned long flags;
481
482 spin_lock_irqsave(&q->requeue_lock, flags);
483 list_splice_init(&q->requeue_list, &rq_list);
484 spin_unlock_irqrestore(&q->requeue_lock, flags);
485
486 list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
487 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
488 continue;
489
490 rq->cmd_flags &= ~REQ_SOFTBARRIER;
491 list_del_init(&rq->queuelist);
492 blk_mq_insert_request(rq, true, false, false);
493 }
494
495 while (!list_empty(&rq_list)) {
496 rq = list_entry(rq_list.next, struct request, queuelist);
497 list_del_init(&rq->queuelist);
498 blk_mq_insert_request(rq, false, false, false);
499 }
500
501 blk_mq_run_queues(q, false);
502}
503
504void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
505{
506 struct request_queue *q = rq->q;
507 unsigned long flags;
508
509 /*
510 * We abuse this flag that is otherwise used by the I/O scheduler to
511 * request head insertation from the workqueue.
512 */
513 BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
514
515 spin_lock_irqsave(&q->requeue_lock, flags);
516 if (at_head) {
517 rq->cmd_flags |= REQ_SOFTBARRIER;
518 list_add(&rq->queuelist, &q->requeue_list);
519 } else {
520 list_add_tail(&rq->queuelist, &q->requeue_list);
521 }
522 spin_unlock_irqrestore(&q->requeue_lock, flags);
523}
524EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
525
526void blk_mq_kick_requeue_list(struct request_queue *q)
527{
528 kblockd_schedule_work(&q->requeue_work);
529}
530EXPORT_SYMBOL(blk_mq_kick_requeue_list);
531
532static inline bool is_flush_request(struct request *rq, unsigned int tag)
533{
534 return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
535 rq->q->flush_rq->tag == tag);
536}
537
538struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
539{
540 struct request *rq = tags->rqs[tag];
541
542 if (!is_flush_request(rq, tag))
543 return rq;
544
545 return rq->q->flush_rq;
546}
547EXPORT_SYMBOL(blk_mq_tag_to_rq);
548
394struct blk_mq_timeout_data { 549struct blk_mq_timeout_data {
395 struct blk_mq_hw_ctx *hctx; 550 struct blk_mq_hw_ctx *hctx;
396 unsigned long *next; 551 unsigned long *next;
@@ -412,12 +567,13 @@ static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
412 do { 567 do {
413 struct request *rq; 568 struct request *rq;
414 569
415 tag = find_next_zero_bit(free_tags, hctx->queue_depth, tag); 570 tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
416 if (tag >= hctx->queue_depth) 571 if (tag >= hctx->tags->nr_tags)
417 break; 572 break;
418 573
419 rq = hctx->rqs[tag++]; 574 rq = blk_mq_tag_to_rq(hctx->tags, tag++);
420 575 if (rq->q != hctx->queue)
576 continue;
421 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) 577 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
422 continue; 578 continue;
423 579
@@ -442,6 +598,28 @@ static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
442 blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data); 598 blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
443} 599}
444 600
601static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
602{
603 struct request_queue *q = rq->q;
604
605 /*
606 * We know that complete is set at this point. If STARTED isn't set
607 * anymore, then the request isn't active and the "timeout" should
608 * just be ignored. This can happen due to the bitflag ordering.
609 * Timeout first checks if STARTED is set, and if it is, assumes
610 * the request is active. But if we race with completion, then
 611 * both flags will get cleared. So check here again, and ignore
612 * a timeout event with a request that isn't active.
613 */
614 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
615 return BLK_EH_NOT_HANDLED;
616
617 if (!q->mq_ops->timeout)
618 return BLK_EH_RESET_TIMER;
619
620 return q->mq_ops->timeout(rq);
621}
622
445static void blk_mq_rq_timer(unsigned long data) 623static void blk_mq_rq_timer(unsigned long data)
446{ 624{
447 struct request_queue *q = (struct request_queue *) data; 625 struct request_queue *q = (struct request_queue *) data;
@@ -449,11 +627,24 @@ static void blk_mq_rq_timer(unsigned long data)
449 unsigned long next = 0; 627 unsigned long next = 0;
450 int i, next_set = 0; 628 int i, next_set = 0;
451 629
452 queue_for_each_hw_ctx(q, hctx, i) 630 queue_for_each_hw_ctx(q, hctx, i) {
631 /*
632 * If not software queues are currently mapped to this
633 * hardware queue, there's nothing to check
634 */
635 if (!hctx->nr_ctx || !hctx->tags)
636 continue;
637
453 blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set); 638 blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
639 }
454 640
455 if (next_set) 641 if (next_set) {
456 mod_timer(&q->timeout, round_jiffies_up(next)); 642 next = blk_rq_timeout(round_jiffies_up(next));
643 mod_timer(&q->timeout, next);
644 } else {
645 queue_for_each_hw_ctx(q, hctx, i)
646 blk_mq_tag_idle(hctx);
647 }
457} 648}
458 649
459/* 650/*
@@ -495,9 +686,38 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
495 return false; 686 return false;
496} 687}
497 688
498void blk_mq_add_timer(struct request *rq) 689/*
690 * Process software queues that have been marked busy, splicing them
 691 * to the for-dispatch list
692 */
693static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
499{ 694{
500 __blk_add_timer(rq, NULL); 695 struct blk_mq_ctx *ctx;
696 int i;
697
698 for (i = 0; i < hctx->ctx_map.map_size; i++) {
699 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
700 unsigned int off, bit;
701
702 if (!bm->word)
703 continue;
704
705 bit = 0;
706 off = i * hctx->ctx_map.bits_per_word;
707 do {
708 bit = find_next_bit(&bm->word, bm->depth, bit);
709 if (bit >= bm->depth)
710 break;
711
712 ctx = hctx->ctxs[bit + off];
713 clear_bit(bit, &bm->word);
714 spin_lock(&ctx->lock);
715 list_splice_tail_init(&ctx->rq_list, list);
716 spin_unlock(&ctx->lock);
717
718 bit++;
719 } while (1);
720 }
501} 721}
502 722
503/* 723/*
@@ -509,12 +729,13 @@ void blk_mq_add_timer(struct request *rq)
509static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) 729static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
510{ 730{
511 struct request_queue *q = hctx->queue; 731 struct request_queue *q = hctx->queue;
512 struct blk_mq_ctx *ctx;
513 struct request *rq; 732 struct request *rq;
514 LIST_HEAD(rq_list); 733 LIST_HEAD(rq_list);
515 int bit, queued; 734 int queued;
735
736 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
516 737
517 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags))) 738 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
518 return; 739 return;
519 740
520 hctx->run++; 741 hctx->run++;
@@ -522,15 +743,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
522 /* 743 /*
523 * Touch any software queue that has pending entries. 744 * Touch any software queue that has pending entries.
524 */ 745 */
525 for_each_set_bit(bit, hctx->ctx_map, hctx->nr_ctx) { 746 flush_busy_ctxs(hctx, &rq_list);
526 clear_bit(bit, hctx->ctx_map);
527 ctx = hctx->ctxs[bit];
528 BUG_ON(bit != ctx->index_hw);
529
530 spin_lock(&ctx->lock);
531 list_splice_tail_init(&ctx->rq_list, &rq_list);
532 spin_unlock(&ctx->lock);
533 }
534 747
535 /* 748 /*
536 * If we have previous entries on our dispatch list, grab them 749 * If we have previous entries on our dispatch list, grab them
@@ -544,13 +757,9 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
544 } 757 }
545 758
546 /* 759 /*
547 * Delete and return all entries from our dispatch list
548 */
549 queued = 0;
550
551 /*
552 * Now process all the entries, sending them to the driver. 760 * Now process all the entries, sending them to the driver.
553 */ 761 */
762 queued = 0;
554 while (!list_empty(&rq_list)) { 763 while (!list_empty(&rq_list)) {
555 int ret; 764 int ret;
556 765
@@ -565,13 +774,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
565 queued++; 774 queued++;
566 continue; 775 continue;
567 case BLK_MQ_RQ_QUEUE_BUSY: 776 case BLK_MQ_RQ_QUEUE_BUSY:
568 /*
569 * FIXME: we should have a mechanism to stop the queue
570 * like blk_stop_queue, otherwise we will waste cpu
571 * time
572 */
573 list_add(&rq->queuelist, &rq_list); 777 list_add(&rq->queuelist, &rq_list);
574 blk_mq_requeue_request(rq); 778 __blk_mq_requeue_request(rq);
575 break; 779 break;
576 default: 780 default:
577 pr_err("blk-mq: bad return on queue: %d\n", ret); 781 pr_err("blk-mq: bad return on queue: %d\n", ret);
@@ -601,17 +805,44 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
601 } 805 }
602} 806}
603 807
808/*
809 * It'd be great if the workqueue API had a way to pass
810 * in a mask and had some smarts for more clever placement.
811 * For now we just round-robin here, switching CPUs every
812 * BLK_MQ_CPU_WORK_BATCH queued items.
813 */
814static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
815{
816 int cpu = hctx->next_cpu;
817
818 if (--hctx->next_cpu_batch <= 0) {
819 int next_cpu;
820
821 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
822 if (next_cpu >= nr_cpu_ids)
823 next_cpu = cpumask_first(hctx->cpumask);
824
825 hctx->next_cpu = next_cpu;
826 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
827 }
828
829 return cpu;
830}
831
604void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) 832void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
605{ 833{
606 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags))) 834 if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
607 return; 835 return;
608 836
609 if (!async) 837 if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
610 __blk_mq_run_hw_queue(hctx); 838 __blk_mq_run_hw_queue(hctx);
839 else if (hctx->queue->nr_hw_queues == 1)
840 kblockd_schedule_delayed_work(&hctx->run_work, 0);
611 else { 841 else {
612 struct request_queue *q = hctx->queue; 842 unsigned int cpu;
613 843
614 kblockd_schedule_delayed_work(q, &hctx->delayed_work, 0); 844 cpu = blk_mq_hctx_next_cpu(hctx);
845 kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
615 } 846 }
616} 847}
617 848
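blk_mq_hctx_next_cpu() keeps queue-run work on one CPU for BLK_MQ_CPU_WORK_BATCH items and then steps to the next CPU in the hctx mask, wrapping back to the first one at the end. A minimal sketch of that batched round-robin over a plain array standing in for the cpumask; the constant and names here are illustrative.

#include <stdio.h>

#define WORK_BATCH 8	/* stand-in for BLK_MQ_CPU_WORK_BATCH */

struct rr_state {
	const int *cpus;	/* CPUs this queue may run on */
	int nr_cpus;
	int cur;		/* index into cpus[] */
	int batch_left;
};

static int next_cpu(struct rr_state *s)
{
	int cpu = s->cpus[s->cur];

	if (--s->batch_left <= 0) {
		/* advance, wrapping to the first CPU in the mask */
		s->cur = (s->cur + 1) % s->nr_cpus;
		s->batch_left = WORK_BATCH;
	}
	return cpu;
}

int main(void)
{
	static const int mask[] = { 2, 3, 6 };
	struct rr_state s = { mask, 3, 0, WORK_BATCH };

	for (int i = 0; i < 20; i++)
		printf("%d ", next_cpu(&s));	/* 8x CPU 2, 8x CPU 3, ... */
	printf("\n");
	return 0;
}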
@@ -623,17 +854,20 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
623 queue_for_each_hw_ctx(q, hctx, i) { 854 queue_for_each_hw_ctx(q, hctx, i) {
624 if ((!blk_mq_hctx_has_pending(hctx) && 855 if ((!blk_mq_hctx_has_pending(hctx) &&
625 list_empty_careful(&hctx->dispatch)) || 856 list_empty_careful(&hctx->dispatch)) ||
626 test_bit(BLK_MQ_S_STOPPED, &hctx->flags)) 857 test_bit(BLK_MQ_S_STOPPED, &hctx->state))
627 continue; 858 continue;
628 859
860 preempt_disable();
629 blk_mq_run_hw_queue(hctx, async); 861 blk_mq_run_hw_queue(hctx, async);
862 preempt_enable();
630 } 863 }
631} 864}
632EXPORT_SYMBOL(blk_mq_run_queues); 865EXPORT_SYMBOL(blk_mq_run_queues);
633 866
634void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) 867void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
635{ 868{
636 cancel_delayed_work(&hctx->delayed_work); 869 cancel_delayed_work(&hctx->run_work);
870 cancel_delayed_work(&hctx->delay_work);
637 set_bit(BLK_MQ_S_STOPPED, &hctx->state); 871 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
638} 872}
639EXPORT_SYMBOL(blk_mq_stop_hw_queue); 873EXPORT_SYMBOL(blk_mq_stop_hw_queue);
@@ -651,11 +885,25 @@ EXPORT_SYMBOL(blk_mq_stop_hw_queues);
651void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) 885void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
652{ 886{
653 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 887 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
888
889 preempt_disable();
654 __blk_mq_run_hw_queue(hctx); 890 __blk_mq_run_hw_queue(hctx);
891 preempt_enable();
655} 892}
656EXPORT_SYMBOL(blk_mq_start_hw_queue); 893EXPORT_SYMBOL(blk_mq_start_hw_queue);
657 894
658void blk_mq_start_stopped_hw_queues(struct request_queue *q) 895void blk_mq_start_hw_queues(struct request_queue *q)
896{
897 struct blk_mq_hw_ctx *hctx;
898 int i;
899
900 queue_for_each_hw_ctx(q, hctx, i)
901 blk_mq_start_hw_queue(hctx);
902}
903EXPORT_SYMBOL(blk_mq_start_hw_queues);
904
905
906void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
659{ 907{
660 struct blk_mq_hw_ctx *hctx; 908 struct blk_mq_hw_ctx *hctx;
661 int i; 909 int i;
@@ -665,19 +913,47 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q)
665 continue; 913 continue;
666 914
667 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 915 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
668 blk_mq_run_hw_queue(hctx, true); 916 preempt_disable();
917 blk_mq_run_hw_queue(hctx, async);
918 preempt_enable();
669 } 919 }
670} 920}
671EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); 921EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
672 922
673static void blk_mq_work_fn(struct work_struct *work) 923static void blk_mq_run_work_fn(struct work_struct *work)
674{ 924{
675 struct blk_mq_hw_ctx *hctx; 925 struct blk_mq_hw_ctx *hctx;
676 926
677 hctx = container_of(work, struct blk_mq_hw_ctx, delayed_work.work); 927 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
928
678 __blk_mq_run_hw_queue(hctx); 929 __blk_mq_run_hw_queue(hctx);
679} 930}
680 931
932static void blk_mq_delay_work_fn(struct work_struct *work)
933{
934 struct blk_mq_hw_ctx *hctx;
935
936 hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
937
938 if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
939 __blk_mq_run_hw_queue(hctx);
940}
941
942void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
943{
944 unsigned long tmo = msecs_to_jiffies(msecs);
945
946 if (hctx->queue->nr_hw_queues == 1)
947 kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
948 else {
949 unsigned int cpu;
950
951 cpu = blk_mq_hctx_next_cpu(hctx);
952 kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
953 }
954}
955EXPORT_SYMBOL(blk_mq_delay_queue);
956
681static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, 957static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
682 struct request *rq, bool at_head) 958 struct request *rq, bool at_head)
683{ 959{
@@ -689,12 +965,13 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
689 list_add(&rq->queuelist, &ctx->rq_list); 965 list_add(&rq->queuelist, &ctx->rq_list);
690 else 966 else
691 list_add_tail(&rq->queuelist, &ctx->rq_list); 967 list_add_tail(&rq->queuelist, &ctx->rq_list);
968
692 blk_mq_hctx_mark_pending(hctx, ctx); 969 blk_mq_hctx_mark_pending(hctx, ctx);
693 970
694 /* 971 /*
695 * We do this early, to ensure we are on the right CPU. 972 * We do this early, to ensure we are on the right CPU.
696 */ 973 */
697 blk_mq_add_timer(rq); 974 blk_add_timer(rq);
698} 975}
699 976
700void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, 977void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
@@ -719,10 +996,10 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
719 spin_unlock(&ctx->lock); 996 spin_unlock(&ctx->lock);
720 } 997 }
721 998
722 blk_mq_put_ctx(current_ctx);
723
724 if (run_queue) 999 if (run_queue)
725 blk_mq_run_hw_queue(hctx, async); 1000 blk_mq_run_hw_queue(hctx, async);
1001
1002 blk_mq_put_ctx(current_ctx);
726} 1003}
727 1004
728static void blk_mq_insert_requests(struct request_queue *q, 1005static void blk_mq_insert_requests(struct request_queue *q,
@@ -758,9 +1035,8 @@ static void blk_mq_insert_requests(struct request_queue *q,
758 } 1035 }
759 spin_unlock(&ctx->lock); 1036 spin_unlock(&ctx->lock);
760 1037
761 blk_mq_put_ctx(current_ctx);
762
763 blk_mq_run_hw_queue(hctx, from_schedule); 1038 blk_mq_run_hw_queue(hctx, from_schedule);
1039 blk_mq_put_ctx(current_ctx);
764} 1040}
765 1041
766static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b) 1042static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
@@ -823,24 +1099,175 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
823static void blk_mq_bio_to_request(struct request *rq, struct bio *bio) 1099static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
824{ 1100{
825 init_request_from_bio(rq, bio); 1101 init_request_from_bio(rq, bio);
826 blk_account_io_start(rq, 1); 1102
1103 if (blk_do_io_stat(rq)) {
1104 rq->start_time = jiffies;
1105 blk_account_io_start(rq, 1);
1106 }
827} 1107}
828 1108
829static void blk_mq_make_request(struct request_queue *q, struct bio *bio) 1109static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1110 struct blk_mq_ctx *ctx,
1111 struct request *rq, struct bio *bio)
1112{
1113 struct request_queue *q = hctx->queue;
1114
1115 if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
1116 blk_mq_bio_to_request(rq, bio);
1117 spin_lock(&ctx->lock);
1118insert_rq:
1119 __blk_mq_insert_request(hctx, rq, false);
1120 spin_unlock(&ctx->lock);
1121 return false;
1122 } else {
1123 spin_lock(&ctx->lock);
1124 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1125 blk_mq_bio_to_request(rq, bio);
1126 goto insert_rq;
1127 }
1128
1129 spin_unlock(&ctx->lock);
1130 __blk_mq_free_request(hctx, ctx, rq);
1131 return true;
1132 }
1133}
1134
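blk_mq_merge_queue_io() above either inserts the new request directly into the software queue (merging disabled, or no merge candidate found) or, when the bio merges into an already-queued request, frees the request it had pre-allocated and reports that nothing further needs to run. A short sketch of that decision, with the locking and block-layer types replaced by illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct sw_queue { int nr_queued; };

/* Illustrative stand-ins for blk_mq_attempt_merge() and friends. */
static bool try_merge(struct sw_queue *q)   { return q->nr_queued > 0; }
static void insert(struct sw_queue *q)      { q->nr_queued++; }
static void free_prealloc_request(void)     { /* request not needed */ }

/*
 * Returns true when the bio was merged into an existing request and the
 * pre-allocated request was freed; false when the request was inserted
 * and the caller should consider running the queue.
 */
static bool merge_or_insert(struct sw_queue *q, bool merging_enabled)
{
	if (!merging_enabled || !try_merge(q)) {
		insert(q);
		return false;
	}
	free_prealloc_request();
	return true;
}

int main(void)
{
	struct sw_queue q = { .nr_queued = 0 };

	printf("%d\n", merge_or_insert(&q, true));   /* 0: queue empty, inserted */
	printf("%d\n", merge_or_insert(&q, true));   /* 1: merged into queued rq */
	printf("%d\n", merge_or_insert(&q, false));  /* 0: merging disabled */
	return 0;
}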
1135struct blk_map_ctx {
1136 struct blk_mq_hw_ctx *hctx;
1137 struct blk_mq_ctx *ctx;
1138};
1139
1140static struct request *blk_mq_map_request(struct request_queue *q,
1141 struct bio *bio,
1142 struct blk_map_ctx *data)
830{ 1143{
831 struct blk_mq_hw_ctx *hctx; 1144 struct blk_mq_hw_ctx *hctx;
832 struct blk_mq_ctx *ctx; 1145 struct blk_mq_ctx *ctx;
1146 struct request *rq;
1147 int rw = bio_data_dir(bio);
1148 struct blk_mq_alloc_data alloc_data;
1149
1150 if (unlikely(blk_mq_queue_enter(q))) {
1151 bio_endio(bio, -EIO);
1152 return NULL;
1153 }
1154
1155 ctx = blk_mq_get_ctx(q);
1156 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1157
1158 if (rw_is_sync(bio->bi_rw))
1159 rw |= REQ_SYNC;
1160
1161 trace_block_getrq(q, bio, rw);
1162 blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1163 hctx);
1164 rq = __blk_mq_alloc_request(&alloc_data, rw);
1165 if (unlikely(!rq)) {
1166 __blk_mq_run_hw_queue(hctx);
1167 blk_mq_put_ctx(ctx);
1168 trace_block_sleeprq(q, bio, rw);
1169
1170 ctx = blk_mq_get_ctx(q);
1171 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1172 blk_mq_set_alloc_data(&alloc_data, q,
1173 __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1174 rq = __blk_mq_alloc_request(&alloc_data, rw);
1175 ctx = alloc_data.ctx;
1176 hctx = alloc_data.hctx;
1177 }
1178
1179 hctx->queued++;
1180 data->hctx = hctx;
1181 data->ctx = ctx;
1182 return rq;
1183}
1184
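blk_mq_map_request() first tries a non-blocking (GFP_ATOMIC) tag allocation; only if that fails does it run the hardware queue to push out queued work and then retry with a blocking allocation. A hedged sketch of the try-fast-then-wait pattern; the tag pool and queue-run below are illustrative stubs, not the blk-mq allocator.

#include <stdbool.h>
#include <stdio.h>

struct req { int tag; };

/* Illustrative stubs: a finite tag pool and a queue-run that frees tags. */
static int free_tags = 0;

static struct req *alloc_request(bool can_wait)
{
	static struct req r;

	if (free_tags <= 0 && !can_wait)
		return NULL;		/* GFP_ATOMIC-style failure */
	if (free_tags <= 0)
		free_tags = 1;		/* pretend the wait eventually succeeds */
	free_tags--;
	r.tag = 42;
	return &r;
}

static void run_hw_queue(void)
{
	free_tags++;			/* completing work returns tags */
}

static struct req *map_request(void)
{
	struct req *rq = alloc_request(false);	/* fast, non-blocking try */

	if (!rq) {
		run_hw_queue();			/* push out queued work first */
		rq = alloc_request(true);	/* then allow blocking */
	}
	return rq;
}

int main(void)
{
	struct req *rq = map_request();

	printf("got tag %d\n", rq ? rq->tag : -1);
	return 0;
}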
1185/*
1186 * Multiple hardware queue variant. This will not use per-process plugs,
1187 * but will attempt to bypass the hctx queueing if we can go straight to
1188 * hardware for SYNC IO.
1189 */
1190static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1191{
833 const int is_sync = rw_is_sync(bio->bi_rw); 1192 const int is_sync = rw_is_sync(bio->bi_rw);
834 const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); 1193 const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
835 int rw = bio_data_dir(bio); 1194 struct blk_map_ctx data;
836 struct request *rq; 1195 struct request *rq;
1196
1197 blk_queue_bounce(q, &bio);
1198
1199 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1200 bio_endio(bio, -EIO);
1201 return;
1202 }
1203
1204 rq = blk_mq_map_request(q, bio, &data);
1205 if (unlikely(!rq))
1206 return;
1207
1208 if (unlikely(is_flush_fua)) {
1209 blk_mq_bio_to_request(rq, bio);
1210 blk_insert_flush(rq);
1211 goto run_queue;
1212 }
1213
1214 if (is_sync) {
1215 int ret;
1216
1217 blk_mq_bio_to_request(rq, bio);
1218 blk_mq_start_request(rq, true);
1219 blk_add_timer(rq);
1220
1221 /*
1222 * If the driver accepted the request, we are done. On error, end
1223 * the request. Any other result (busy) means we fall back and add
1224 * it to our list, as we previously would have done.
1225 */
1226 ret = q->mq_ops->queue_rq(data.hctx, rq);
1227 if (ret == BLK_MQ_RQ_QUEUE_OK)
1228 goto done;
1229 else {
1230 __blk_mq_requeue_request(rq);
1231
1232 if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1233 rq->errors = -EIO;
1234 blk_mq_end_io(rq, rq->errors);
1235 goto done;
1236 }
1237 }
1238 }
1239
1240 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1241 /*
1242 * For a SYNC request, send it to the hardware immediately. For
1243 * an ASYNC request, just ensure that we run it later on. The
1244 * latter allows for merging opportunities and more efficient
1245 * dispatching.
1246 */
1247run_queue:
1248 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1249 }
1250done:
1251 blk_mq_put_ctx(data.ctx);
1252}
1253
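For synchronous I/O the multi-queue path above bypasses the software queues: it starts the request and hands it straight to ->queue_rq(), ending it on an error return and requeueing (then falling back to normal queueing) when the driver is busy. A small sketch of that three-way result handling; the enum values and driver stub are illustrative.

#include <stdio.h>

enum issue_result { ISSUE_OK, ISSUE_BUSY, ISSUE_ERROR };

struct req { int id; int errors; };

/* Illustrative driver hook standing in for ->queue_rq(). */
static enum issue_result driver_queue_rq(struct req *rq)
{
	return rq->id % 3;	/* cycle through the three outcomes */
}

static void end_request(struct req *rq) { printf("req %d ended (%d)\n", rq->id, rq->errors); }
static void requeue(struct req *rq)     { printf("req %d requeued\n", rq->id); }

static void issue_direct(struct req *rq)
{
	switch (driver_queue_rq(rq)) {
	case ISSUE_OK:
		return;			/* driver owns it now */
	case ISSUE_ERROR:
		rq->errors = -5;	/* -EIO */
		end_request(rq);
		return;
	case ISSUE_BUSY:
		requeue(rq);		/* fall back to the dispatch path */
		return;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct req rq = { .id = i };
		issue_direct(&rq);
	}
	return 0;
}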
1254/*
1255 * Single hardware queue variant. This will attempt to use any per-process
1256 * plug for merging and IO deferral.
1257 */
1258static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1259{
1260 const int is_sync = rw_is_sync(bio->bi_rw);
1261 const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
837 unsigned int use_plug, request_count = 0; 1262 unsigned int use_plug, request_count = 0;
1263 struct blk_map_ctx data;
1264 struct request *rq;
838 1265
839 /* 1266 /*
840 * If we have multiple hardware queues, just go directly to 1267 * If we have multiple hardware queues, just go directly to
841 * one of those for sync IO. 1268 * one of those for sync IO.
842 */ 1269 */
843 use_plug = !is_flush_fua && ((q->nr_hw_queues == 1) || !is_sync); 1270 use_plug = !is_flush_fua && !is_sync;
844 1271
845 blk_queue_bounce(q, &bio); 1272 blk_queue_bounce(q, &bio);
846 1273
@@ -849,37 +1276,16 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
849 return; 1276 return;
850 } 1277 }
851 1278
852 if (use_plug && blk_attempt_plug_merge(q, bio, &request_count)) 1279 if (use_plug && !blk_queue_nomerges(q) &&
1280 blk_attempt_plug_merge(q, bio, &request_count))
853 return; 1281 return;
854 1282
855 if (blk_mq_queue_enter(q)) { 1283 rq = blk_mq_map_request(q, bio, &data);
856 bio_endio(bio, -EIO); 1284 if (unlikely(!rq))
857 return; 1285 return;
858 }
859
860 ctx = blk_mq_get_ctx(q);
861 hctx = q->mq_ops->map_queue(q, ctx->cpu);
862
863 if (is_sync)
864 rw |= REQ_SYNC;
865 trace_block_getrq(q, bio, rw);
866 rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
867 if (likely(rq))
868 blk_mq_rq_ctx_init(q, ctx, rq, rw);
869 else {
870 blk_mq_put_ctx(ctx);
871 trace_block_sleeprq(q, bio, rw);
872 rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
873 false);
874 ctx = rq->mq_ctx;
875 hctx = q->mq_ops->map_queue(q, ctx->cpu);
876 }
877
878 hctx->queued++;
879 1286
880 if (unlikely(is_flush_fua)) { 1287 if (unlikely(is_flush_fua)) {
881 blk_mq_bio_to_request(rq, bio); 1288 blk_mq_bio_to_request(rq, bio);
882 blk_mq_put_ctx(ctx);
883 blk_insert_flush(rq); 1289 blk_insert_flush(rq);
884 goto run_queue; 1290 goto run_queue;
885 } 1291 }
@@ -901,31 +1307,23 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
901 trace_block_plug(q); 1307 trace_block_plug(q);
902 } 1308 }
903 list_add_tail(&rq->queuelist, &plug->mq_list); 1309 list_add_tail(&rq->queuelist, &plug->mq_list);
904 blk_mq_put_ctx(ctx); 1310 blk_mq_put_ctx(data.ctx);
905 return; 1311 return;
906 } 1312 }
907 } 1313 }
908 1314
909 spin_lock(&ctx->lock); 1315 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
910 1316 /*
911 if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) && 1317 * For a SYNC request, send it to the hardware immediately. For
912 blk_mq_attempt_merge(q, ctx, bio)) 1318 * an ASYNC request, just ensure that we run it later on. The
913 __blk_mq_free_request(hctx, ctx, rq); 1319 * latter allows for merging opportunities and more efficient
914 else { 1320 * dispatching.
915 blk_mq_bio_to_request(rq, bio); 1321 */
916 __blk_mq_insert_request(hctx, rq, false); 1322run_queue:
1323 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
917 } 1324 }
918 1325
919 spin_unlock(&ctx->lock); 1326 blk_mq_put_ctx(data.ctx);
920 blk_mq_put_ctx(ctx);
921
922 /*
923 * For a SYNC request, send it to the hardware immediately. For an
924 * ASYNC request, just ensure that we run it later on. The latter
925 * allows for merging opportunities and more efficient dispatching.
926 */
927run_queue:
928 blk_mq_run_hw_queue(hctx, !is_sync || is_flush_fua);
929} 1327}
930 1328
931/* 1329/*
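The single hardware queue variant above keeps the traditional per-process plug: async, non-flush bios are first offered to the plug for merging and deferred dispatch, and only fall through to the software queue path when plugging does not apply. A sketch of that ordering of checks, with the plug and merge helpers reduced to illustrative stubs:

#include <stdbool.h>
#include <stdio.h>

struct bio_like { bool sync; bool flush_fua; };

static bool plug_merge_ok;	/* pretend state of the per-process plug */

static bool attempt_plug_merge(void)	{ return plug_merge_ok; }
static void add_to_plug(void)		{ printf("deferred on plug list\n"); }
static void insert_and_run(bool async)	{ printf("queued, run %s\n", async ? "later" : "now"); }

static void submit(struct bio_like *bio)
{
	bool use_plug = !bio->flush_fua && !bio->sync;

	if (use_plug && attempt_plug_merge())
		return;			/* merged into a plugged request */
	if (use_plug)
		add_to_plug();		/* defer; flushed on unplug */
	else
		insert_and_run(!bio->sync || bio->flush_fua);
}

int main(void)
{
	struct bio_like async_bio = { .sync = false }, sync_bio = { .sync = true };

	plug_merge_ok = false;
	submit(&async_bio);	/* plugged for later */
	submit(&sync_bio);	/* sent straight to the queue */
	plug_merge_ok = true;
	submit(&async_bio);	/* merged, nothing else to do */
	return 0;
}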
@@ -937,138 +1335,68 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
937} 1335}
938EXPORT_SYMBOL(blk_mq_map_queue); 1336EXPORT_SYMBOL(blk_mq_map_queue);
939 1337
940struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *reg, 1338static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
941 unsigned int hctx_index) 1339 struct blk_mq_tags *tags, unsigned int hctx_idx)
942{
943 return kmalloc_node(sizeof(struct blk_mq_hw_ctx),
944 GFP_KERNEL | __GFP_ZERO, reg->numa_node);
945}
946EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
947
948void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
949 unsigned int hctx_index)
950{
951 kfree(hctx);
952}
953EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
954
955static void blk_mq_hctx_notify(void *data, unsigned long action,
956 unsigned int cpu)
957{ 1340{
958 struct blk_mq_hw_ctx *hctx = data; 1341 struct page *page;
959 struct blk_mq_ctx *ctx;
960 LIST_HEAD(tmp);
961
962 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
963 return;
964
965 /*
966 * Move ctx entries to new CPU, if this one is going away.
967 */
968 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
969
970 spin_lock(&ctx->lock);
971 if (!list_empty(&ctx->rq_list)) {
972 list_splice_init(&ctx->rq_list, &tmp);
973 clear_bit(ctx->index_hw, hctx->ctx_map);
974 }
975 spin_unlock(&ctx->lock);
976
977 if (list_empty(&tmp))
978 return;
979
980 ctx = blk_mq_get_ctx(hctx->queue);
981 spin_lock(&ctx->lock);
982
983 while (!list_empty(&tmp)) {
984 struct request *rq;
985
986 rq = list_first_entry(&tmp, struct request, queuelist);
987 rq->mq_ctx = ctx;
988 list_move_tail(&rq->queuelist, &ctx->rq_list);
989 }
990
991 blk_mq_hctx_mark_pending(hctx, ctx);
992
993 spin_unlock(&ctx->lock);
994 blk_mq_put_ctx(ctx);
995}
996
997static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
998 void (*init)(void *, struct blk_mq_hw_ctx *,
999 struct request *, unsigned int),
1000 void *data)
1001{
1002 unsigned int i;
1003 1342
1004 for (i = 0; i < hctx->queue_depth; i++) { 1343 if (tags->rqs && set->ops->exit_request) {
1005 struct request *rq = hctx->rqs[i]; 1344 int i;
1006 1345
1007 init(data, hctx, rq, i); 1346 for (i = 0; i < tags->nr_tags; i++) {
1347 if (!tags->rqs[i])
1348 continue;
1349 set->ops->exit_request(set->driver_data, tags->rqs[i],
1350 hctx_idx, i);
1351 }
1008 } 1352 }
1009}
1010 1353
1011void blk_mq_init_commands(struct request_queue *q, 1354 while (!list_empty(&tags->page_list)) {
1012 void (*init)(void *, struct blk_mq_hw_ctx *, 1355 page = list_first_entry(&tags->page_list, struct page, lru);
1013 struct request *, unsigned int),
1014 void *data)
1015{
1016 struct blk_mq_hw_ctx *hctx;
1017 unsigned int i;
1018
1019 queue_for_each_hw_ctx(q, hctx, i)
1020 blk_mq_init_hw_commands(hctx, init, data);
1021}
1022EXPORT_SYMBOL(blk_mq_init_commands);
1023
1024static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
1025{
1026 struct page *page;
1027
1028 while (!list_empty(&hctx->page_list)) {
1029 page = list_first_entry(&hctx->page_list, struct page, lru);
1030 list_del_init(&page->lru); 1356 list_del_init(&page->lru);
1031 __free_pages(page, page->private); 1357 __free_pages(page, page->private);
1032 } 1358 }
1033 1359
1034 kfree(hctx->rqs); 1360 kfree(tags->rqs);
1035 1361
1036 if (hctx->tags) 1362 blk_mq_free_tags(tags);
1037 blk_mq_free_tags(hctx->tags);
1038} 1363}
1039 1364
1040static size_t order_to_size(unsigned int order) 1365static size_t order_to_size(unsigned int order)
1041{ 1366{
1042 size_t ret = PAGE_SIZE; 1367 return (size_t)PAGE_SIZE << order;
1043
1044 while (order--)
1045 ret *= 2;
1046
1047 return ret;
1048} 1368}
1049 1369
1050static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, 1370static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1051 unsigned int reserved_tags, int node) 1371 unsigned int hctx_idx)
1052{ 1372{
1373 struct blk_mq_tags *tags;
1053 unsigned int i, j, entries_per_page, max_order = 4; 1374 unsigned int i, j, entries_per_page, max_order = 4;
1054 size_t rq_size, left; 1375 size_t rq_size, left;
1055 1376
1056 INIT_LIST_HEAD(&hctx->page_list); 1377 tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1378 set->numa_node);
1379 if (!tags)
1380 return NULL;
1057 1381
1058 hctx->rqs = kmalloc_node(hctx->queue_depth * sizeof(struct request *), 1382 INIT_LIST_HEAD(&tags->page_list);
1059 GFP_KERNEL, node); 1383
1060 if (!hctx->rqs) 1384 tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
1061 return -ENOMEM; 1385 GFP_KERNEL, set->numa_node);
1386 if (!tags->rqs) {
1387 blk_mq_free_tags(tags);
1388 return NULL;
1389 }
1062 1390
1063 /* 1391 /*
1064 * rq_size is the size of the request plus driver payload, rounded 1392 * rq_size is the size of the request plus driver payload, rounded
1065 * to the cacheline size 1393 * to the cacheline size
1066 */ 1394 */
1067 rq_size = round_up(sizeof(struct request) + hctx->cmd_size, 1395 rq_size = round_up(sizeof(struct request) + set->cmd_size,
1068 cache_line_size()); 1396 cache_line_size());
1069 left = rq_size * hctx->queue_depth; 1397 left = rq_size * set->queue_depth;
1070 1398
1071 for (i = 0; i < hctx->queue_depth;) { 1399 for (i = 0; i < set->queue_depth; ) {
1072 int this_order = max_order; 1400 int this_order = max_order;
1073 struct page *page; 1401 struct page *page;
1074 int to_do; 1402 int to_do;
@@ -1078,7 +1406,8 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx,
1078 this_order--; 1406 this_order--;
1079 1407
1080 do { 1408 do {
1081 page = alloc_pages_node(node, GFP_KERNEL, this_order); 1409 page = alloc_pages_node(set->numa_node, GFP_KERNEL,
1410 this_order);
1082 if (page) 1411 if (page)
1083 break; 1412 break;
1084 if (!this_order--) 1413 if (!this_order--)
@@ -1088,73 +1417,200 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx,
1088 } while (1); 1417 } while (1);
1089 1418
1090 if (!page) 1419 if (!page)
1091 break; 1420 goto fail;
1092 1421
1093 page->private = this_order; 1422 page->private = this_order;
1094 list_add_tail(&page->lru, &hctx->page_list); 1423 list_add_tail(&page->lru, &tags->page_list);
1095 1424
1096 p = page_address(page); 1425 p = page_address(page);
1097 entries_per_page = order_to_size(this_order) / rq_size; 1426 entries_per_page = order_to_size(this_order) / rq_size;
1098 to_do = min(entries_per_page, hctx->queue_depth - i); 1427 to_do = min(entries_per_page, set->queue_depth - i);
1099 left -= to_do * rq_size; 1428 left -= to_do * rq_size;
1100 for (j = 0; j < to_do; j++) { 1429 for (j = 0; j < to_do; j++) {
1101 hctx->rqs[i] = p; 1430 tags->rqs[i] = p;
1102 blk_mq_rq_init(hctx, hctx->rqs[i]); 1431 if (set->ops->init_request) {
1432 if (set->ops->init_request(set->driver_data,
1433 tags->rqs[i], hctx_idx, i,
1434 set->numa_node))
1435 goto fail;
1436 }
1437
1103 p += rq_size; 1438 p += rq_size;
1104 i++; 1439 i++;
1105 } 1440 }
1106 } 1441 }
1107 1442
1108 if (i < (reserved_tags + BLK_MQ_TAG_MIN)) 1443 return tags;
1109 goto err_rq_map; 1444
1110 else if (i != hctx->queue_depth) { 1445fail:
1111 hctx->queue_depth = i; 1446 pr_warn("%s: failed to allocate requests\n", __func__);
1112 pr_warn("%s: queue depth set to %u because of low memory\n", 1447 blk_mq_free_rq_map(set, tags, hctx_idx);
1113 __func__, i); 1448 return NULL;
1114 } 1449}
1450
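blk_mq_init_rq_map() builds the request pool from page chunks: the per-request footprint (struct request plus driver payload) is rounded up to a cache line, high-order allocations are tried first with fallback to smaller orders, and each chunk is carved into as many request slots as fit. A userspace sketch of just the carving arithmetic; the sizes and the malloc()-backed chunk are illustrative.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ		4096UL
#define CACHE_LINE	64UL

static size_t round_up_to(size_t v, size_t to)
{
	return (v + to - 1) / to * to;
}

static size_t order_to_size(unsigned int order)
{
	return PAGE_SZ << order;	/* 2^order contiguous pages */
}

int main(void)
{
	size_t req_size = 312, driver_payload = 128;
	unsigned int depth = 128, order = 2, allocated = 0;

	/* request + driver payload, rounded to a cache line */
	size_t rq_size = round_up_to(req_size + driver_payload, CACHE_LINE);

	while (allocated < depth) {
		char *chunk = malloc(order_to_size(order));	/* "page" chunk */
		size_t per_chunk = order_to_size(order) / rq_size;
		size_t to_do = per_chunk < depth - allocated ?
			       per_chunk : depth - allocated;

		printf("order-%u chunk: %zu slots of %zu bytes, using %zu\n",
		       order, per_chunk, rq_size, to_do);
		allocated += to_do;
		free(chunk);	/* a real pool would keep these on a list */
	}
	return 0;
}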
1451static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1452{
1453 kfree(bitmap->map);
1454}
1455
1456static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1457{
1458 unsigned int bpw = 8, total, num_maps, i;
1459
1460 bitmap->bits_per_word = bpw;
1115 1461
1116 hctx->tags = blk_mq_init_tags(hctx->queue_depth, reserved_tags, node); 1462 num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1117 if (!hctx->tags) { 1463 bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1118err_rq_map: 1464 GFP_KERNEL, node);
1119 blk_mq_free_rq_map(hctx); 1465 if (!bitmap->map)
1120 return -ENOMEM; 1466 return -ENOMEM;
1467
1468 bitmap->map_size = num_maps;
1469
1470 total = nr_cpu_ids;
1471 for (i = 0; i < num_maps; i++) {
1472 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1473 total -= bitmap->map[i].depth;
1121 } 1474 }
1122 1475
1123 return 0; 1476 return 0;
1124} 1477}
1125 1478
1479static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1480{
1481 struct request_queue *q = hctx->queue;
1482 struct blk_mq_ctx *ctx;
1483 LIST_HEAD(tmp);
1484
1485 /*
1486 * Move ctx entries to new CPU, if this one is going away.
1487 */
1488 ctx = __blk_mq_get_ctx(q, cpu);
1489
1490 spin_lock(&ctx->lock);
1491 if (!list_empty(&ctx->rq_list)) {
1492 list_splice_init(&ctx->rq_list, &tmp);
1493 blk_mq_hctx_clear_pending(hctx, ctx);
1494 }
1495 spin_unlock(&ctx->lock);
1496
1497 if (list_empty(&tmp))
1498 return NOTIFY_OK;
1499
1500 ctx = blk_mq_get_ctx(q);
1501 spin_lock(&ctx->lock);
1502
1503 while (!list_empty(&tmp)) {
1504 struct request *rq;
1505
1506 rq = list_first_entry(&tmp, struct request, queuelist);
1507 rq->mq_ctx = ctx;
1508 list_move_tail(&rq->queuelist, &ctx->rq_list);
1509 }
1510
1511 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1512 blk_mq_hctx_mark_pending(hctx, ctx);
1513
1514 spin_unlock(&ctx->lock);
1515
1516 blk_mq_run_hw_queue(hctx, true);
1517 blk_mq_put_ctx(ctx);
1518 return NOTIFY_OK;
1519}
1520
1521static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1522{
1523 struct request_queue *q = hctx->queue;
1524 struct blk_mq_tag_set *set = q->tag_set;
1525
1526 if (set->tags[hctx->queue_num])
1527 return NOTIFY_OK;
1528
1529 set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1530 if (!set->tags[hctx->queue_num])
1531 return NOTIFY_STOP;
1532
1533 hctx->tags = set->tags[hctx->queue_num];
1534 return NOTIFY_OK;
1535}
1536
1537static int blk_mq_hctx_notify(void *data, unsigned long action,
1538 unsigned int cpu)
1539{
1540 struct blk_mq_hw_ctx *hctx = data;
1541
1542 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1543 return blk_mq_hctx_cpu_offline(hctx, cpu);
1544 else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1545 return blk_mq_hctx_cpu_online(hctx, cpu);
1546
1547 return NOTIFY_OK;
1548}
1549
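On CPU_DEAD the notifier above splices the dead CPU's software-queue entries onto the current CPU's context and marks the mapped hardware queue pending so no request is lost; CPU_ONLINE instead makes sure the hardware queue has a tag map to work with. A sketch of the drain-into-a-live-context step, with plain arrays standing in for the per-CPU request lists:

#include <stdio.h>

#define MAX_PENDING 16

struct sw_ctx {
	int pending[MAX_PENDING];
	int nr;
};

/* Move everything queued on the dead CPU to the surviving CPU's ctx. */
static void drain_offline_cpu(struct sw_ctx *dead, struct sw_ctx *live)
{
	for (int i = 0; i < dead->nr && live->nr < MAX_PENDING; i++)
		live->pending[live->nr++] = dead->pending[i];
	dead->nr = 0;	/* also clears its bit in the hctx pending map */
}

int main(void)
{
	struct sw_ctx cpu2 = { .pending = { 7, 8, 9 }, .nr = 3 };
	struct sw_ctx cpu0 = { .pending = { 1 }, .nr = 1 };

	drain_offline_cpu(&cpu2, &cpu0);
	printf("cpu0 now has %d pending, cpu2 has %d\n", cpu0.nr, cpu2.nr);
	return 0;
}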
1550static void blk_mq_exit_hw_queues(struct request_queue *q,
1551 struct blk_mq_tag_set *set, int nr_queue)
1552{
1553 struct blk_mq_hw_ctx *hctx;
1554 unsigned int i;
1555
1556 queue_for_each_hw_ctx(q, hctx, i) {
1557 if (i == nr_queue)
1558 break;
1559
1560 blk_mq_tag_idle(hctx);
1561
1562 if (set->ops->exit_hctx)
1563 set->ops->exit_hctx(hctx, i);
1564
1565 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1566 kfree(hctx->ctxs);
1567 blk_mq_free_bitmap(&hctx->ctx_map);
1568 }
1569
1570}
1571
1572static void blk_mq_free_hw_queues(struct request_queue *q,
1573 struct blk_mq_tag_set *set)
1574{
1575 struct blk_mq_hw_ctx *hctx;
1576 unsigned int i;
1577
1578 queue_for_each_hw_ctx(q, hctx, i) {
1579 free_cpumask_var(hctx->cpumask);
1580 kfree(hctx);
1581 }
1582}
1583
1126static int blk_mq_init_hw_queues(struct request_queue *q, 1584static int blk_mq_init_hw_queues(struct request_queue *q,
1127 struct blk_mq_reg *reg, void *driver_data) 1585 struct blk_mq_tag_set *set)
1128{ 1586{
1129 struct blk_mq_hw_ctx *hctx; 1587 struct blk_mq_hw_ctx *hctx;
1130 unsigned int i, j; 1588 unsigned int i;
1131 1589
1132 /* 1590 /*
1133 * Initialize hardware queues 1591 * Initialize hardware queues
1134 */ 1592 */
1135 queue_for_each_hw_ctx(q, hctx, i) { 1593 queue_for_each_hw_ctx(q, hctx, i) {
1136 unsigned int num_maps;
1137 int node; 1594 int node;
1138 1595
1139 node = hctx->numa_node; 1596 node = hctx->numa_node;
1140 if (node == NUMA_NO_NODE) 1597 if (node == NUMA_NO_NODE)
1141 node = hctx->numa_node = reg->numa_node; 1598 node = hctx->numa_node = set->numa_node;
1142 1599
1143 INIT_DELAYED_WORK(&hctx->delayed_work, blk_mq_work_fn); 1600 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1601 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1144 spin_lock_init(&hctx->lock); 1602 spin_lock_init(&hctx->lock);
1145 INIT_LIST_HEAD(&hctx->dispatch); 1603 INIT_LIST_HEAD(&hctx->dispatch);
1146 hctx->queue = q; 1604 hctx->queue = q;
1147 hctx->queue_num = i; 1605 hctx->queue_num = i;
1148 hctx->flags = reg->flags; 1606 hctx->flags = set->flags;
1149 hctx->queue_depth = reg->queue_depth; 1607 hctx->cmd_size = set->cmd_size;
1150 hctx->cmd_size = reg->cmd_size;
1151 1608
1152 blk_mq_init_cpu_notifier(&hctx->cpu_notifier, 1609 blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1153 blk_mq_hctx_notify, hctx); 1610 blk_mq_hctx_notify, hctx);
1154 blk_mq_register_cpu_notifier(&hctx->cpu_notifier); 1611 blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1155 1612
1156 if (blk_mq_init_rq_map(hctx, reg->reserved_tags, node)) 1613 hctx->tags = set->tags[i];
1157 break;
1158 1614
1159 /* 1615 /*
1160 * Allocate space for all possible cpus to avoid allocation in 1616 * Allocate space for all possible cpus to avoid allocation in
@@ -1165,17 +1621,13 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
1165 if (!hctx->ctxs) 1621 if (!hctx->ctxs)
1166 break; 1622 break;
1167 1623
1168 num_maps = ALIGN(nr_cpu_ids, BITS_PER_LONG) / BITS_PER_LONG; 1624 if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1169 hctx->ctx_map = kzalloc_node(num_maps * sizeof(unsigned long),
1170 GFP_KERNEL, node);
1171 if (!hctx->ctx_map)
1172 break; 1625 break;
1173 1626
1174 hctx->nr_ctx_map = num_maps;
1175 hctx->nr_ctx = 0; 1627 hctx->nr_ctx = 0;
1176 1628
1177 if (reg->ops->init_hctx && 1629 if (set->ops->init_hctx &&
1178 reg->ops->init_hctx(hctx, driver_data, i)) 1630 set->ops->init_hctx(hctx, set->driver_data, i))
1179 break; 1631 break;
1180 } 1632 }
1181 1633
@@ -1185,17 +1637,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
1185 /* 1637 /*
1186 * Init failed 1638 * Init failed
1187 */ 1639 */
1188 queue_for_each_hw_ctx(q, hctx, j) { 1640 blk_mq_exit_hw_queues(q, set, i);
1189 if (i == j)
1190 break;
1191
1192 if (reg->ops->exit_hctx)
1193 reg->ops->exit_hctx(hctx, j);
1194
1195 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1196 blk_mq_free_rq_map(hctx);
1197 kfree(hctx->ctxs);
1198 }
1199 1641
1200 return 1; 1642 return 1;
1201} 1643}
@@ -1216,12 +1658,13 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
1216 __ctx->queue = q; 1658 __ctx->queue = q;
1217 1659
1218 /* If the cpu isn't online, the cpu is mapped to first hctx */ 1660 /* If the cpu isn't online, the cpu is mapped to first hctx */
1219 hctx = q->mq_ops->map_queue(q, i);
1220 hctx->nr_ctx++;
1221
1222 if (!cpu_online(i)) 1661 if (!cpu_online(i))
1223 continue; 1662 continue;
1224 1663
1664 hctx = q->mq_ops->map_queue(q, i);
1665 cpumask_set_cpu(i, hctx->cpumask);
1666 hctx->nr_ctx++;
1667
1225 /* 1668 /*
1226 * Set local node, IFF we have more than one hw queue. If 1669 * Set local node, IFF we have more than one hw queue. If
1227 * not, we remain on the home node of the device 1670 * not, we remain on the home node of the device
@@ -1238,6 +1681,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
1238 struct blk_mq_ctx *ctx; 1681 struct blk_mq_ctx *ctx;
1239 1682
1240 queue_for_each_hw_ctx(q, hctx, i) { 1683 queue_for_each_hw_ctx(q, hctx, i) {
1684 cpumask_clear(hctx->cpumask);
1241 hctx->nr_ctx = 0; 1685 hctx->nr_ctx = 0;
1242 } 1686 }
1243 1687
@@ -1246,115 +1690,208 @@ static void blk_mq_map_swqueue(struct request_queue *q)
1246 */ 1690 */
1247 queue_for_each_ctx(q, ctx, i) { 1691 queue_for_each_ctx(q, ctx, i) {
1248 /* If the cpu isn't online, the cpu is mapped to first hctx */ 1692 /* If the cpu isn't online, the cpu is mapped to first hctx */
1693 if (!cpu_online(i))
1694 continue;
1695
1249 hctx = q->mq_ops->map_queue(q, i); 1696 hctx = q->mq_ops->map_queue(q, i);
1697 cpumask_set_cpu(i, hctx->cpumask);
1250 ctx->index_hw = hctx->nr_ctx; 1698 ctx->index_hw = hctx->nr_ctx;
1251 hctx->ctxs[hctx->nr_ctx++] = ctx; 1699 hctx->ctxs[hctx->nr_ctx++] = ctx;
1252 } 1700 }
1701
1702 queue_for_each_hw_ctx(q, hctx, i) {
1703 /*
1704 * If no software queues are mapped to this hardware queue,
1705 * disable it and free the request entries
1706 */
1707 if (!hctx->nr_ctx) {
1708 struct blk_mq_tag_set *set = q->tag_set;
1709
1710 if (set->tags[i]) {
1711 blk_mq_free_rq_map(set, set->tags[i], i);
1712 set->tags[i] = NULL;
1713 hctx->tags = NULL;
1714 }
1715 continue;
1716 }
1717
1718 /*
1719 * Initialize batch roundrobin counts
1720 */
1721 hctx->next_cpu = cpumask_first(hctx->cpumask);
1722 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1723 }
1253} 1724}
1254 1725
1255struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, 1726static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1256 void *driver_data)
1257{ 1727{
1258 struct blk_mq_hw_ctx **hctxs; 1728 struct blk_mq_hw_ctx *hctx;
1259 struct blk_mq_ctx *ctx;
1260 struct request_queue *q; 1729 struct request_queue *q;
1730 bool shared;
1261 int i; 1731 int i;
1262 1732
1263 if (!reg->nr_hw_queues || 1733 if (set->tag_list.next == set->tag_list.prev)
1264 !reg->ops->queue_rq || !reg->ops->map_queue || 1734 shared = false;
1265 !reg->ops->alloc_hctx || !reg->ops->free_hctx) 1735 else
1266 return ERR_PTR(-EINVAL); 1736 shared = true;
1737
1738 list_for_each_entry(q, &set->tag_list, tag_set_list) {
1739 blk_mq_freeze_queue(q);
1267 1740
1268 if (!reg->queue_depth) 1741 queue_for_each_hw_ctx(q, hctx, i) {
1269 reg->queue_depth = BLK_MQ_MAX_DEPTH; 1742 if (shared)
1270 else if (reg->queue_depth > BLK_MQ_MAX_DEPTH) { 1743 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1271 pr_err("blk-mq: queuedepth too large (%u)\n", reg->queue_depth); 1744 else
1272 reg->queue_depth = BLK_MQ_MAX_DEPTH; 1745 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1746 }
1747 blk_mq_unfreeze_queue(q);
1273 } 1748 }
1749}
1750
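A tag set becomes shared as soon as a second request queue is added to it; blk_mq_update_tag_set_depth() detects this by checking whether tag_list still has a single element (next == prev) and then flips BLK_MQ_F_TAG_SHARED on every hardware queue with the queues frozen around the change. A tiny sketch of that single-element test on an intrusive circular list (the list type here is illustrative, not <linux/list.h>):

#include <stdbool.h>
#include <stdio.h>

struct node { struct node *next, *prev; };

static void list_init(struct node *h)	{ h->next = h->prev = h; }

static void list_add_tail(struct node *n, struct node *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Mirrors the kernel check: next == prev means zero or one queue. */
static bool tags_shared(const struct node *h)
{
	return h->next != h->prev;
}

int main(void)
{
	struct node tag_list, q1, q2;

	list_init(&tag_list);
	list_add_tail(&q1, &tag_list);
	printf("shared with 1 queue: %d\n", tags_shared(&tag_list));	/* 0 */
	list_add_tail(&q2, &tag_list);
	printf("shared with 2 queues: %d\n", tags_shared(&tag_list));	/* 1 */
	return 0;
}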
1751static void blk_mq_del_queue_tag_set(struct request_queue *q)
1752{
1753 struct blk_mq_tag_set *set = q->tag_set;
1754
1755 blk_mq_freeze_queue(q);
1756
1757 mutex_lock(&set->tag_list_lock);
1758 list_del_init(&q->tag_set_list);
1759 blk_mq_update_tag_set_depth(set);
1760 mutex_unlock(&set->tag_list_lock);
1761
1762 blk_mq_unfreeze_queue(q);
1763}
1764
1765static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1766 struct request_queue *q)
1767{
1768 q->tag_set = set;
1769
1770 mutex_lock(&set->tag_list_lock);
1771 list_add_tail(&q->tag_set_list, &set->tag_list);
1772 blk_mq_update_tag_set_depth(set);
1773 mutex_unlock(&set->tag_list_lock);
1774}
1274 1775
1275 if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN)) 1776struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1276 return ERR_PTR(-EINVAL); 1777{
1778 struct blk_mq_hw_ctx **hctxs;
1779 struct blk_mq_ctx __percpu *ctx;
1780 struct request_queue *q;
1781 unsigned int *map;
1782 int i;
1277 1783
1278 ctx = alloc_percpu(struct blk_mq_ctx); 1784 ctx = alloc_percpu(struct blk_mq_ctx);
1279 if (!ctx) 1785 if (!ctx)
1280 return ERR_PTR(-ENOMEM); 1786 return ERR_PTR(-ENOMEM);
1281 1787
1282 hctxs = kmalloc_node(reg->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL, 1788 hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1283 reg->numa_node); 1789 set->numa_node);
1284 1790
1285 if (!hctxs) 1791 if (!hctxs)
1286 goto err_percpu; 1792 goto err_percpu;
1287 1793
1288 for (i = 0; i < reg->nr_hw_queues; i++) { 1794 map = blk_mq_make_queue_map(set);
1289 hctxs[i] = reg->ops->alloc_hctx(reg, i); 1795 if (!map)
1796 goto err_map;
1797
1798 for (i = 0; i < set->nr_hw_queues; i++) {
1799 int node = blk_mq_hw_queue_to_node(map, i);
1800
1801 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1802 GFP_KERNEL, node);
1290 if (!hctxs[i]) 1803 if (!hctxs[i])
1291 goto err_hctxs; 1804 goto err_hctxs;
1292 1805
1293 hctxs[i]->numa_node = NUMA_NO_NODE; 1806 if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
1807 goto err_hctxs;
1808
1809 atomic_set(&hctxs[i]->nr_active, 0);
1810 hctxs[i]->numa_node = node;
1294 hctxs[i]->queue_num = i; 1811 hctxs[i]->queue_num = i;
1295 } 1812 }
1296 1813
1297 q = blk_alloc_queue_node(GFP_KERNEL, reg->numa_node); 1814 q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1298 if (!q) 1815 if (!q)
1299 goto err_hctxs; 1816 goto err_hctxs;
1300 1817
1301 q->mq_map = blk_mq_make_queue_map(reg); 1818 if (percpu_counter_init(&q->mq_usage_counter, 0))
1302 if (!q->mq_map)
1303 goto err_map; 1819 goto err_map;
1304 1820
1305 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); 1821 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1306 blk_queue_rq_timeout(q, 30000); 1822 blk_queue_rq_timeout(q, 30000);
1307 1823
1308 q->nr_queues = nr_cpu_ids; 1824 q->nr_queues = nr_cpu_ids;
1309 q->nr_hw_queues = reg->nr_hw_queues; 1825 q->nr_hw_queues = set->nr_hw_queues;
1826 q->mq_map = map;
1310 1827
1311 q->queue_ctx = ctx; 1828 q->queue_ctx = ctx;
1312 q->queue_hw_ctx = hctxs; 1829 q->queue_hw_ctx = hctxs;
1313 1830
1314 q->mq_ops = reg->ops; 1831 q->mq_ops = set->ops;
1315 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; 1832 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1316 1833
1834 if (!(set->flags & BLK_MQ_F_SG_MERGE))
1835 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1836
1317 q->sg_reserved_size = INT_MAX; 1837 q->sg_reserved_size = INT_MAX;
1318 1838
1319 blk_queue_make_request(q, blk_mq_make_request); 1839 INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1320 blk_queue_rq_timed_out(q, reg->ops->timeout); 1840 INIT_LIST_HEAD(&q->requeue_list);
1321 if (reg->timeout) 1841 spin_lock_init(&q->requeue_lock);
1322 blk_queue_rq_timeout(q, reg->timeout); 1842
1843 if (q->nr_hw_queues > 1)
1844 blk_queue_make_request(q, blk_mq_make_request);
1845 else
1846 blk_queue_make_request(q, blk_sq_make_request);
1847
1848 blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
1849 if (set->timeout)
1850 blk_queue_rq_timeout(q, set->timeout);
1851
1852 /*
1853 * Do this after blk_queue_make_request() overrides it...
1854 */
1855 q->nr_requests = set->queue_depth;
1323 1856
1324 if (reg->ops->complete) 1857 if (set->ops->complete)
1325 blk_queue_softirq_done(q, reg->ops->complete); 1858 blk_queue_softirq_done(q, set->ops->complete);
1326 1859
1327 blk_mq_init_flush(q); 1860 blk_mq_init_flush(q);
1328 blk_mq_init_cpu_queues(q, reg->nr_hw_queues); 1861 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1329 1862
1330 q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size, 1863 q->flush_rq = kzalloc(round_up(sizeof(struct request) +
1331 cache_line_size()), GFP_KERNEL); 1864 set->cmd_size, cache_line_size()),
1865 GFP_KERNEL);
1332 if (!q->flush_rq) 1866 if (!q->flush_rq)
1333 goto err_hw; 1867 goto err_hw;
1334 1868
1335 if (blk_mq_init_hw_queues(q, reg, driver_data)) 1869 if (blk_mq_init_hw_queues(q, set))
1336 goto err_flush_rq; 1870 goto err_flush_rq;
1337 1871
1338 blk_mq_map_swqueue(q);
1339
1340 mutex_lock(&all_q_mutex); 1872 mutex_lock(&all_q_mutex);
1341 list_add_tail(&q->all_q_node, &all_q_list); 1873 list_add_tail(&q->all_q_node, &all_q_list);
1342 mutex_unlock(&all_q_mutex); 1874 mutex_unlock(&all_q_mutex);
1343 1875
1876 blk_mq_add_queue_tag_set(set, q);
1877
1878 blk_mq_map_swqueue(q);
1879
1344 return q; 1880 return q;
1345 1881
1346err_flush_rq: 1882err_flush_rq:
1347 kfree(q->flush_rq); 1883 kfree(q->flush_rq);
1348err_hw: 1884err_hw:
1349 kfree(q->mq_map);
1350err_map:
1351 blk_cleanup_queue(q); 1885 blk_cleanup_queue(q);
1352err_hctxs: 1886err_hctxs:
1353 for (i = 0; i < reg->nr_hw_queues; i++) { 1887 kfree(map);
1888 for (i = 0; i < set->nr_hw_queues; i++) {
1354 if (!hctxs[i]) 1889 if (!hctxs[i])
1355 break; 1890 break;
1356 reg->ops->free_hctx(hctxs[i], i); 1891 free_cpumask_var(hctxs[i]->cpumask);
1892 kfree(hctxs[i]);
1357 } 1893 }
1894err_map:
1358 kfree(hctxs); 1895 kfree(hctxs);
1359err_percpu: 1896err_percpu:
1360 free_percpu(ctx); 1897 free_percpu(ctx);
@@ -1364,18 +1901,14 @@ EXPORT_SYMBOL(blk_mq_init_queue);
1364 1901
1365void blk_mq_free_queue(struct request_queue *q) 1902void blk_mq_free_queue(struct request_queue *q)
1366{ 1903{
1367 struct blk_mq_hw_ctx *hctx; 1904 struct blk_mq_tag_set *set = q->tag_set;
1368 int i;
1369 1905
1370 queue_for_each_hw_ctx(q, hctx, i) { 1906 blk_mq_del_queue_tag_set(q);
1371 kfree(hctx->ctx_map); 1907
1372 kfree(hctx->ctxs); 1908 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1373 blk_mq_free_rq_map(hctx); 1909 blk_mq_free_hw_queues(q, set);
1374 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); 1910
1375 if (q->mq_ops->exit_hctx) 1911 percpu_counter_destroy(&q->mq_usage_counter);
1376 q->mq_ops->exit_hctx(hctx, i);
1377 q->mq_ops->free_hctx(hctx, i);
1378 }
1379 1912
1380 free_percpu(q->queue_ctx); 1913 free_percpu(q->queue_ctx);
1381 kfree(q->queue_hw_ctx); 1914 kfree(q->queue_hw_ctx);
@@ -1395,6 +1928,8 @@ static void blk_mq_queue_reinit(struct request_queue *q)
1395{ 1928{
1396 blk_mq_freeze_queue(q); 1929 blk_mq_freeze_queue(q);
1397 1930
1931 blk_mq_sysfs_unregister(q);
1932
1398 blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); 1933 blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1399 1934
1400 /* 1935 /*
@@ -1405,6 +1940,8 @@ static void blk_mq_queue_reinit(struct request_queue *q)
1405 1940
1406 blk_mq_map_swqueue(q); 1941 blk_mq_map_swqueue(q);
1407 1942
1943 blk_mq_sysfs_register(q);
1944
1408 blk_mq_unfreeze_queue(q); 1945 blk_mq_unfreeze_queue(q);
1409} 1946}
1410 1947
@@ -1414,10 +1951,10 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1414 struct request_queue *q; 1951 struct request_queue *q;
1415 1952
1416 /* 1953 /*
1417 * Before new mapping is established, hotadded cpu might already start 1954 * Before new mappings are established, hotadded cpu might already
1418 * handling requests. This doesn't break anything as we map offline 1955 * start handling requests. This doesn't break anything as we map
1419 * CPUs to first hardware queue. We will re-init queue below to get 1956 * offline CPUs to first hardware queue. We will re-init the queue
1420 * optimal settings. 1957 * below to get optimal settings.
1421 */ 1958 */
1422 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN && 1959 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1423 action != CPU_ONLINE && action != CPU_ONLINE_FROZEN) 1960 action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
@@ -1430,6 +1967,91 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1430 return NOTIFY_OK; 1967 return NOTIFY_OK;
1431} 1968}
1432 1969
1970int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
1971{
1972 int i;
1973
1974 if (!set->nr_hw_queues)
1975 return -EINVAL;
1976 if (!set->queue_depth || set->queue_depth > BLK_MQ_MAX_DEPTH)
1977 return -EINVAL;
1978 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
1979 return -EINVAL;
1980
1981 if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
1982 return -EINVAL;
1983
1984
1985 set->tags = kmalloc_node(set->nr_hw_queues *
1986 sizeof(struct blk_mq_tags *),
1987 GFP_KERNEL, set->numa_node);
1988 if (!set->tags)
1989 goto out;
1990
1991 for (i = 0; i < set->nr_hw_queues; i++) {
1992 set->tags[i] = blk_mq_init_rq_map(set, i);
1993 if (!set->tags[i])
1994 goto out_unwind;
1995 }
1996
1997 mutex_init(&set->tag_list_lock);
1998 INIT_LIST_HEAD(&set->tag_list);
1999
2000 return 0;
2001
2002out_unwind:
2003 while (--i >= 0)
2004 blk_mq_free_rq_map(set, set->tags[i], i);
2005out:
2006 return -ENOMEM;
2007}
2008EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2009
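blk_mq_alloc_tag_set() validates the depth bounds, then allocates one request map per hardware queue and, if any allocation fails, unwinds the maps already built with the usual while (--i >= 0) loop before returning -ENOMEM. A compact sketch of that allocate-with-unwind idiom; the resource type is an illustrative stand-in for blk_mq_tags.

#include <stdio.h>
#include <stdlib.h>

struct rq_map { int id; };

static struct rq_map *build_map(int i)
{
	if (i == 3)
		return NULL;		/* simulate an allocation failure */
	struct rq_map *m = malloc(sizeof(*m));
	if (m)
		m->id = i;
	return m;
}

static int alloc_all(struct rq_map **maps, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		maps[i] = build_map(i);
		if (!maps[i])
			goto out_unwind;
	}
	return 0;

out_unwind:
	while (--i >= 0) {		/* free only what was built */
		free(maps[i]);
		maps[i] = NULL;
	}
	return -1;			/* -ENOMEM in the kernel */
}

int main(void)
{
	struct rq_map *maps[4];

	printf("alloc_all: %d\n", alloc_all(maps, 4));	/* fails at i == 3 */
	return 0;
}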
2010void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2011{
2012 int i;
2013
2014 for (i = 0; i < set->nr_hw_queues; i++) {
2015 if (set->tags[i])
2016 blk_mq_free_rq_map(set, set->tags[i], i);
2017 }
2018
2019 kfree(set->tags);
2020}
2021EXPORT_SYMBOL(blk_mq_free_tag_set);
2022
2023int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2024{
2025 struct blk_mq_tag_set *set = q->tag_set;
2026 struct blk_mq_hw_ctx *hctx;
2027 int i, ret;
2028
2029 if (!set || nr > set->queue_depth)
2030 return -EINVAL;
2031
2032 ret = 0;
2033 queue_for_each_hw_ctx(q, hctx, i) {
2034 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2035 if (ret)
2036 break;
2037 }
2038
2039 if (!ret)
2040 q->nr_requests = nr;
2041
2042 return ret;
2043}
2044
2045void blk_mq_disable_hotplug(void)
2046{
2047 mutex_lock(&all_q_mutex);
2048}
2049
2050void blk_mq_enable_hotplug(void)
2051{
2052 mutex_unlock(&all_q_mutex);
2053}
2054
1433static int __init blk_mq_init(void) 2055static int __init blk_mq_init(void)
1434{ 2056{
1435 blk_mq_cpu_init(); 2057 blk_mq_cpu_init();
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 72beba1f9d55..26460884c6cd 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -1,6 +1,8 @@
1#ifndef INT_BLK_MQ_H 1#ifndef INT_BLK_MQ_H
2#define INT_BLK_MQ_H 2#define INT_BLK_MQ_H
3 3
4struct blk_mq_tag_set;
5
4struct blk_mq_ctx { 6struct blk_mq_ctx {
5 struct { 7 struct {
6 spinlock_t lock; 8 spinlock_t lock;
@@ -9,7 +11,8 @@ struct blk_mq_ctx {
9 11
10 unsigned int cpu; 12 unsigned int cpu;
11 unsigned int index_hw; 13 unsigned int index_hw;
12 unsigned int ipi_redirect; 14
15 unsigned int last_tag ____cacheline_aligned_in_smp;
13 16
14 /* incremented at dispatch time */ 17 /* incremented at dispatch time */
15 unsigned long rq_dispatched[2]; 18 unsigned long rq_dispatched[2];
@@ -20,33 +23,95 @@ struct blk_mq_ctx {
20 23
21 struct request_queue *queue; 24 struct request_queue *queue;
22 struct kobject kobj; 25 struct kobject kobj;
23}; 26} ____cacheline_aligned_in_smp;
24 27
25void __blk_mq_complete_request(struct request *rq); 28void __blk_mq_complete_request(struct request *rq);
26void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); 29void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
27void blk_mq_init_flush(struct request_queue *q); 30void blk_mq_init_flush(struct request_queue *q);
28void blk_mq_drain_queue(struct request_queue *q); 31void blk_mq_drain_queue(struct request_queue *q);
29void blk_mq_free_queue(struct request_queue *q); 32void blk_mq_free_queue(struct request_queue *q);
30void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq); 33void blk_mq_clone_flush_request(struct request *flush_rq,
34 struct request *orig_rq);
35int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
31 36
32/* 37/*
33 * CPU hotplug helpers 38 * CPU hotplug helpers
34 */ 39 */
35struct blk_mq_cpu_notifier; 40struct blk_mq_cpu_notifier;
36void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, 41void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
37 void (*fn)(void *, unsigned long, unsigned int), 42 int (*fn)(void *, unsigned long, unsigned int),
38 void *data); 43 void *data);
39void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier); 44void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
40void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier); 45void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
41void blk_mq_cpu_init(void); 46void blk_mq_cpu_init(void);
47void blk_mq_enable_hotplug(void);
48void blk_mq_disable_hotplug(void);
42 49
43/* 50/*
44 * CPU -> queue mappings 51 * CPU -> queue mappings
45 */ 52 */
46struct blk_mq_reg; 53extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
47extern unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg);
48extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); 54extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
55extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
56
57/*
58 * sysfs helpers
59 */
60extern int blk_mq_sysfs_register(struct request_queue *q);
61extern void blk_mq_sysfs_unregister(struct request_queue *q);
62
63/*
64 * Basic implementation of a sparser bitmap, allowing the user to spread
65 * the bits over more cachelines.
66 */
67struct blk_align_bitmap {
68 unsigned long word;
69 unsigned long depth;
70} ____cacheline_aligned_in_smp;
71
72static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
73 unsigned int cpu)
74{
75 return per_cpu_ptr(q->queue_ctx, cpu);
76}
77
78/*
79 * This assumes per-cpu software queues. They could be per-node
80 * as well, for instance. For now this is hardcoded as-is. Note that we don't
81 * care about preemption, since we know the ctx's are persistent. This does
82 * mean that we can't rely on ctx always matching the currently running CPU.
83 */
84static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
85{
86 return __blk_mq_get_ctx(q, get_cpu());
87}
88
89static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
90{
91 put_cpu();
92}
93
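blk_mq_get_ctx()/blk_mq_put_ctx() above pin the caller to a CPU (get_cpu() disables preemption) and return that CPU's software-queue context; as the comment notes, the ctx is persistent and only a hint about the running CPU. Userspace has no preemption-disable equivalent, but the lookup itself is just an index into a per-CPU array, roughly as in this illustrative sketch using sched_getcpu():

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define MAX_CPUS 64

struct sw_ctx { unsigned long rq_queued; };

static struct sw_ctx per_cpu_ctx[MAX_CPUS];

/* Userspace analogue: index by the CPU we happen to be running on. */
static struct sw_ctx *get_ctx(void)
{
	int cpu = sched_getcpu();

	return &per_cpu_ctx[cpu >= 0 && cpu < MAX_CPUS ? cpu : 0];
}

int main(void)
{
	struct sw_ctx *ctx = get_ctx();

	ctx->rq_queued++;
	printf("queued on ctx for cpu %d: %lu\n", sched_getcpu(), ctx->rq_queued);
	return 0;
}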
94struct blk_mq_alloc_data {
95 /* input parameter */
96 struct request_queue *q;
97 gfp_t gfp;
98 bool reserved;
99
100 /* input & output parameter */
101 struct blk_mq_ctx *ctx;
102 struct blk_mq_hw_ctx *hctx;
103};
49 104
50void blk_mq_add_timer(struct request *rq); 105static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
106 struct request_queue *q, gfp_t gfp, bool reserved,
107 struct blk_mq_ctx *ctx,
108 struct blk_mq_hw_ctx *hctx)
109{
110 data->q = q;
111 data->gfp = gfp;
112 data->reserved = reserved;
113 data->ctx = ctx;
114 data->hctx = hctx;
115}
51 116
52#endif 117#endif
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 57790c1a97eb..53b1737e978d 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -30,8 +30,8 @@ static void blk_done_softirq(struct softirq_action *h)
30 while (!list_empty(&local_list)) { 30 while (!list_empty(&local_list)) {
31 struct request *rq; 31 struct request *rq;
32 32
33 rq = list_entry(local_list.next, struct request, csd.list); 33 rq = list_entry(local_list.next, struct request, ipi_list);
34 list_del_init(&rq->csd.list); 34 list_del_init(&rq->ipi_list);
35 rq->q->softirq_done_fn(rq); 35 rq->q->softirq_done_fn(rq);
36 } 36 }
37} 37}
@@ -45,9 +45,9 @@ static void trigger_softirq(void *data)
45 45
46 local_irq_save(flags); 46 local_irq_save(flags);
47 list = this_cpu_ptr(&blk_cpu_done); 47 list = this_cpu_ptr(&blk_cpu_done);
48 list_add_tail(&rq->csd.list, list); 48 list_add_tail(&rq->ipi_list, list);
49 49
50 if (list->next == &rq->csd.list) 50 if (list->next == &rq->ipi_list)
51 raise_softirq_irqoff(BLOCK_SOFTIRQ); 51 raise_softirq_irqoff(BLOCK_SOFTIRQ);
52 52
53 local_irq_restore(flags); 53 local_irq_restore(flags);
@@ -65,7 +65,7 @@ static int raise_blk_irq(int cpu, struct request *rq)
65 data->info = rq; 65 data->info = rq;
66 data->flags = 0; 66 data->flags = 0;
67 67
68 __smp_call_function_single(cpu, data, 0); 68 smp_call_function_single_async(cpu, data);
69 return 0; 69 return 0;
70 } 70 }
71 71
@@ -136,7 +136,7 @@ void __blk_complete_request(struct request *req)
136 struct list_head *list; 136 struct list_head *list;
137do_local: 137do_local:
138 list = this_cpu_ptr(&blk_cpu_done); 138 list = this_cpu_ptr(&blk_cpu_done);
139 list_add_tail(&req->csd.list, list); 139 list_add_tail(&req->ipi_list, list);
140 140
141 /* 141 /*
142 * if the list only contains our just added request, 142 * if the list only contains our just added request,
@@ -144,7 +144,7 @@ do_local:
144 * entries there, someone already raised the irq but it 144 * entries there, someone already raised the irq but it
145 * hasn't run yet. 145 * hasn't run yet.
146 */ 146 */
147 if (list->next == &req->csd.list) 147 if (list->next == &req->ipi_list)
148 raise_softirq_irqoff(BLOCK_SOFTIRQ); 148 raise_softirq_irqoff(BLOCK_SOFTIRQ);
149 } else if (raise_blk_irq(ccpu, req)) 149 } else if (raise_blk_irq(ccpu, req))
150 goto do_local; 150 goto do_local;
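Both completion paths in this file append the finished request to a per-CPU done list and raise BLOCK_SOFTIRQ only when the entry just added is the first one, i.e. the list went from empty to non-empty; otherwise a softirq has already been raised and simply has not run yet. A sketch of that signal-only-on-first-insert test with a minimal circular list (not <linux/list.h>):

#include <stdbool.h>
#include <stdio.h>

struct list_node { struct list_node *next, *prev; };

static void list_init(struct list_node *h)	{ h->next = h->prev = h; }

static void list_add_tail(struct list_node *n, struct list_node *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct done_req { struct list_node node; int id; };

static void complete_request(struct list_node *done_list, struct done_req *rq)
{
	list_add_tail(&rq->node, done_list);

	/* Our entry is the head's first successor: the list was empty. */
	if (done_list->next == &rq->node)
		printf("req %d: raise softirq\n", rq->id);
	else
		printf("req %d: softirq already pending\n", rq->id);
}

int main(void)
{
	struct list_node done;
	struct done_req a = { .id = 1 }, b = { .id = 2 };

	list_init(&done);
	complete_request(&done, &a);	/* raises */
	complete_request(&done, &b);	/* already pending */
	return 0;
}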
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 7500f876dae4..23321fbab293 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -48,11 +48,10 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page)
48static ssize_t 48static ssize_t
49queue_requests_store(struct request_queue *q, const char *page, size_t count) 49queue_requests_store(struct request_queue *q, const char *page, size_t count)
50{ 50{
51 struct request_list *rl;
52 unsigned long nr; 51 unsigned long nr;
53 int ret; 52 int ret, err;
54 53
55 if (!q->request_fn) 54 if (!q->request_fn && !q->mq_ops)
56 return -EINVAL; 55 return -EINVAL;
57 56
58 ret = queue_var_store(&nr, page, count); 57 ret = queue_var_store(&nr, page, count);
@@ -62,40 +61,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
62 if (nr < BLKDEV_MIN_RQ) 61 if (nr < BLKDEV_MIN_RQ)
63 nr = BLKDEV_MIN_RQ; 62 nr = BLKDEV_MIN_RQ;
64 63
65 spin_lock_irq(q->queue_lock); 64 if (q->request_fn)
66 q->nr_requests = nr; 65 err = blk_update_nr_requests(q, nr);
67 blk_queue_congestion_threshold(q); 66 else
68 67 err = blk_mq_update_nr_requests(q, nr);
69 /* congestion isn't cgroup aware and follows root blkcg for now */ 68
70 rl = &q->root_rl; 69 if (err)
71 70 return err;
72 if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
73 blk_set_queue_congested(q, BLK_RW_SYNC);
74 else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
75 blk_clear_queue_congested(q, BLK_RW_SYNC);
76
77 if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
78 blk_set_queue_congested(q, BLK_RW_ASYNC);
79 else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
80 blk_clear_queue_congested(q, BLK_RW_ASYNC);
81
82 blk_queue_for_each_rl(rl, q) {
83 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
84 blk_set_rl_full(rl, BLK_RW_SYNC);
85 } else {
86 blk_clear_rl_full(rl, BLK_RW_SYNC);
87 wake_up(&rl->wait[BLK_RW_SYNC]);
88 }
89
90 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
91 blk_set_rl_full(rl, BLK_RW_ASYNC);
92 } else {
93 blk_clear_rl_full(rl, BLK_RW_ASYNC);
94 wake_up(&rl->wait[BLK_RW_ASYNC]);
95 }
96 }
97 71
98 spin_unlock_irq(q->queue_lock);
99 return ret; 72 return ret;
100} 73}
101 74
@@ -544,8 +517,6 @@ static void blk_release_queue(struct kobject *kobj)
544 if (q->queue_tags) 517 if (q->queue_tags)
545 __blk_queue_free_tags(q); 518 __blk_queue_free_tags(q);
546 519
547 percpu_counter_destroy(&q->mq_usage_counter);
548
549 if (q->mq_ops) 520 if (q->mq_ops)
550 blk_mq_free_queue(q); 521 blk_mq_free_queue(q);
551 522
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 1474c3ab7e72..3fdb21a390c1 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -744,7 +744,7 @@ static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
744static bool throtl_slice_used(struct throtl_grp *tg, bool rw) 744static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
745{ 745{
746 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) 746 if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
747 return 0; 747 return false;
748 748
749 return 1; 749 return 1;
750} 750}
@@ -842,7 +842,7 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
842 if (tg->io_disp[rw] + 1 <= io_allowed) { 842 if (tg->io_disp[rw] + 1 <= io_allowed) {
843 if (wait) 843 if (wait)
844 *wait = 0; 844 *wait = 0;
845 return 1; 845 return true;
846 } 846 }
847 847
848 /* Calc approx time to dispatch */ 848 /* Calc approx time to dispatch */
@@ -880,7 +880,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
880 if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { 880 if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
881 if (wait) 881 if (wait)
882 *wait = 0; 882 *wait = 0;
883 return 1; 883 return true;
884 } 884 }
885 885
886 /* Calc approx time to dispatch */ 886 /* Calc approx time to dispatch */
@@ -923,7 +923,7 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
923 if (tg->bps[rw] == -1 && tg->iops[rw] == -1) { 923 if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
924 if (wait) 924 if (wait)
925 *wait = 0; 925 *wait = 0;
926 return 1; 926 return true;
927 } 927 }
928 928
929 /* 929 /*
@@ -1258,7 +1258,7 @@ out_unlock:
1258 * of throtl_data->service_queue. Those bio's are ready and issued by this 1258 * of throtl_data->service_queue. Those bio's are ready and issued by this
1259 * function. 1259 * function.
1260 */ 1260 */
1261void blk_throtl_dispatch_work_fn(struct work_struct *work) 1261static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1262{ 1262{
1263 struct throtl_data *td = container_of(work, struct throtl_data, 1263 struct throtl_data *td = container_of(work, struct throtl_data,
1264 dispatch_work); 1264 dispatch_work);
@@ -1346,10 +1346,10 @@ static int tg_print_conf_uint(struct seq_file *sf, void *v)
1346 return 0; 1346 return 0;
1347} 1347}
1348 1348
1349static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft, 1349static ssize_t tg_set_conf(struct kernfs_open_file *of,
1350 const char *buf, bool is_u64) 1350 char *buf, size_t nbytes, loff_t off, bool is_u64)
1351{ 1351{
1352 struct blkcg *blkcg = css_to_blkcg(css); 1352 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1353 struct blkg_conf_ctx ctx; 1353 struct blkg_conf_ctx ctx;
1354 struct throtl_grp *tg; 1354 struct throtl_grp *tg;
1355 struct throtl_service_queue *sq; 1355 struct throtl_service_queue *sq;
@@ -1368,9 +1368,9 @@ static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
1368 ctx.v = -1; 1368 ctx.v = -1;
1369 1369
1370 if (is_u64) 1370 if (is_u64)
1371 *(u64 *)((void *)tg + cft->private) = ctx.v; 1371 *(u64 *)((void *)tg + of_cft(of)->private) = ctx.v;
1372 else 1372 else
1373 *(unsigned int *)((void *)tg + cft->private) = ctx.v; 1373 *(unsigned int *)((void *)tg + of_cft(of)->private) = ctx.v;
1374 1374
1375 throtl_log(&tg->service_queue, 1375 throtl_log(&tg->service_queue,
1376 "limit change rbps=%llu wbps=%llu riops=%u wiops=%u", 1376 "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
@@ -1404,19 +1404,19 @@ static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
1404 } 1404 }
1405 1405
1406 blkg_conf_finish(&ctx); 1406 blkg_conf_finish(&ctx);
1407 return 0; 1407 return nbytes;
1408} 1408}
1409 1409
1410static int tg_set_conf_u64(struct cgroup_subsys_state *css, struct cftype *cft, 1410static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1411 const char *buf) 1411 char *buf, size_t nbytes, loff_t off)
1412{ 1412{
1413 return tg_set_conf(css, cft, buf, true); 1413 return tg_set_conf(of, buf, nbytes, off, true);
1414} 1414}
1415 1415
1416static int tg_set_conf_uint(struct cgroup_subsys_state *css, struct cftype *cft, 1416static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1417 const char *buf) 1417 char *buf, size_t nbytes, loff_t off)
1418{ 1418{
1419 return tg_set_conf(css, cft, buf, false); 1419 return tg_set_conf(of, buf, nbytes, off, false);
1420} 1420}
1421 1421
1422static struct cftype throtl_files[] = { 1422static struct cftype throtl_files[] = {
@@ -1424,29 +1424,25 @@ static struct cftype throtl_files[] = {
1424 .name = "throttle.read_bps_device", 1424 .name = "throttle.read_bps_device",
1425 .private = offsetof(struct throtl_grp, bps[READ]), 1425 .private = offsetof(struct throtl_grp, bps[READ]),
1426 .seq_show = tg_print_conf_u64, 1426 .seq_show = tg_print_conf_u64,
1427 .write_string = tg_set_conf_u64, 1427 .write = tg_set_conf_u64,
1428 .max_write_len = 256,
1429 }, 1428 },
1430 { 1429 {
1431 .name = "throttle.write_bps_device", 1430 .name = "throttle.write_bps_device",
1432 .private = offsetof(struct throtl_grp, bps[WRITE]), 1431 .private = offsetof(struct throtl_grp, bps[WRITE]),
1433 .seq_show = tg_print_conf_u64, 1432 .seq_show = tg_print_conf_u64,
1434 .write_string = tg_set_conf_u64, 1433 .write = tg_set_conf_u64,
1435 .max_write_len = 256,
1436 }, 1434 },
1437 { 1435 {
1438 .name = "throttle.read_iops_device", 1436 .name = "throttle.read_iops_device",
1439 .private = offsetof(struct throtl_grp, iops[READ]), 1437 .private = offsetof(struct throtl_grp, iops[READ]),
1440 .seq_show = tg_print_conf_uint, 1438 .seq_show = tg_print_conf_uint,
1441 .write_string = tg_set_conf_uint, 1439 .write = tg_set_conf_uint,
1442 .max_write_len = 256,
1443 }, 1440 },
1444 { 1441 {
1445 .name = "throttle.write_iops_device", 1442 .name = "throttle.write_iops_device",
1446 .private = offsetof(struct throtl_grp, iops[WRITE]), 1443 .private = offsetof(struct throtl_grp, iops[WRITE]),
1447 .seq_show = tg_print_conf_uint, 1444 .seq_show = tg_print_conf_uint,
1448 .write_string = tg_set_conf_uint, 1445 .write = tg_set_conf_uint,
1449 .max_write_len = 256,
1450 }, 1446 },
1451 { 1447 {
1452 .name = "throttle.io_service_bytes", 1448 .name = "throttle.io_service_bytes",
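tg_with_in_bps_limit() and tg_with_in_iops_limit(), partially visible above, decide whether a bio fits into the bytes/ios already allowed for the current time slice, and otherwise compute an approximate wait. A simplified userspace model of the bps side of that check; the slice bookkeeping, units (seconds instead of jiffies) and names here are illustrative only:

/* Simplified model of the "within bps limit?" check sketched above.
 * The real code works in jiffies and trims the slice; this uses seconds. */
#include <stdbool.h>
#include <stdio.h>

struct tg_slice {
    unsigned long long bps;         /* allowed bytes per second */
    unsigned long long bytes_disp;  /* bytes already dispatched in this slice */
    unsigned long elapsed;          /* seconds since the slice started */
};

/* Returns true if "bio_size" may be dispatched now; otherwise *wait is
 * set to an approximate number of seconds until enough budget exists. */
static bool within_bps_limit(struct tg_slice *tg, unsigned long long bio_size,
                             unsigned long *wait)
{
    unsigned long long bytes_allowed = tg->bps * tg->elapsed;

    if (tg->bytes_disp + bio_size <= bytes_allowed) {
        if (wait)
            *wait = 0;
        return true;
    }

    /* Calc approx time to dispatch, as the kernel comment puts it. */
    if (wait) {
        unsigned long long extra = tg->bytes_disp + bio_size - bytes_allowed;
        *wait = (unsigned long)((extra + tg->bps - 1) / tg->bps);
    }
    return false;
}

int main(void)
{
    struct tg_slice tg = { .bps = 1 << 20, .bytes_disp = 3 << 20, .elapsed = 2 };
    unsigned long wait;

    if (!within_bps_limit(&tg, 1 << 20, &wait))
        printf("throttled, wait ~%lu s\n", wait);
    return 0;
}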
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index d96f7061c6fd..95a09590ccfd 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -96,11 +96,7 @@ static void blk_rq_timed_out(struct request *req)
96 __blk_complete_request(req); 96 __blk_complete_request(req);
97 break; 97 break;
98 case BLK_EH_RESET_TIMER: 98 case BLK_EH_RESET_TIMER:
99 if (q->mq_ops) 99 blk_add_timer(req);
100 blk_mq_add_timer(req);
101 else
102 blk_add_timer(req);
103
104 blk_clear_rq_complete(req); 100 blk_clear_rq_complete(req);
105 break; 101 break;
106 case BLK_EH_NOT_HANDLED: 102 case BLK_EH_NOT_HANDLED:
@@ -170,7 +166,26 @@ void blk_abort_request(struct request *req)
170} 166}
171EXPORT_SYMBOL_GPL(blk_abort_request); 167EXPORT_SYMBOL_GPL(blk_abort_request);
172 168
173void __blk_add_timer(struct request *req, struct list_head *timeout_list) 169unsigned long blk_rq_timeout(unsigned long timeout)
170{
171 unsigned long maxt;
172
173 maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
174 if (time_after(timeout, maxt))
175 timeout = maxt;
176
177 return timeout;
178}
179
180/**
181 * blk_add_timer - Start timeout timer for a single request
182 * @req: request that is about to start running.
183 *
184 * Notes:
185 * Each request has its own timer, and as it is added to the queue, we
186 * set up the timer. When the request completes, we cancel the timer.
187 */
188void blk_add_timer(struct request *req)
174{ 189{
175 struct request_queue *q = req->q; 190 struct request_queue *q = req->q;
176 unsigned long expiry; 191 unsigned long expiry;
@@ -188,32 +203,29 @@ void __blk_add_timer(struct request *req, struct list_head *timeout_list)
188 req->timeout = q->rq_timeout; 203 req->timeout = q->rq_timeout;
189 204
190 req->deadline = jiffies + req->timeout; 205 req->deadline = jiffies + req->timeout;
191 if (timeout_list) 206 if (!q->mq_ops)
192 list_add_tail(&req->timeout_list, timeout_list); 207 list_add_tail(&req->timeout_list, &req->q->timeout_list);
193 208
194 /* 209 /*
195 * If the timer isn't already pending or this timeout is earlier 210 * If the timer isn't already pending or this timeout is earlier
196 * than an existing one, modify the timer. Round up to next nearest 211 * than an existing one, modify the timer. Round up to next nearest
197 * second. 212 * second.
198 */ 213 */
199 expiry = round_jiffies_up(req->deadline); 214 expiry = blk_rq_timeout(round_jiffies_up(req->deadline));
200 215
201 if (!timer_pending(&q->timeout) || 216 if (!timer_pending(&q->timeout) ||
202 time_before(expiry, q->timeout.expires)) 217 time_before(expiry, q->timeout.expires)) {
203 mod_timer(&q->timeout, expiry); 218 unsigned long diff = q->timeout.expires - expiry;
204 219
205} 220 /*
221 * Due to added timer slack to group timers, the timer
222 * will often be a little in front of what we asked for.
223 * So apply some tolerance here too, otherwise we keep
224 * modifying the timer because expires for value X
225 * will be X + something.
226 */
227 if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
228 mod_timer(&q->timeout, expiry);
229 }
206 230
207/**
208 * blk_add_timer - Start timeout timer for a single request
209 * @req: request that is about to start running.
210 *
211 * Notes:
212 * Each request has its own timer, and as it is added to the queue, we
213 * set up the timer. When the request completes, we cancel the timer.
214 */
215void blk_add_timer(struct request *req)
216{
217 __blk_add_timer(req, &req->q->timeout_list);
218} 231}
219
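The reworked blk_add_timer() above does two things: it caps any per-request expiry at BLK_MAX_TIMEOUT in the future via blk_rq_timeout(), and it only re-arms the queue timer when the new expiry is at least HZ/2 earlier than the pending one, so timer slack does not cause endless re-programming. A small userspace model of both decisions; HZ, the clamp value and the helper names are stand-ins:

/* Userspace model of the expiry clamp and the re-arm tolerance above.
 * Time is measured in "ticks"; HZ and MAX_TIMEOUT mirror the kernel's
 * HZ and BLK_MAX_TIMEOUT (5 * HZ) but are illustrative here. */
#include <stdbool.h>
#include <stdio.h>

#define HZ           100UL
#define MAX_TIMEOUT  (5 * HZ)

/* Clamp a requested expiry so it is never more than MAX_TIMEOUT ahead. */
static unsigned long clamp_timeout(unsigned long now, unsigned long timeout)
{
    unsigned long maxt = now + MAX_TIMEOUT;

    return (long)(timeout - maxt) > 0 ? maxt : timeout;
}

/* Re-arm only if the timer is idle, or the new expiry is meaningfully
 * (>= HZ/2 ticks) earlier than the currently programmed one. */
static bool should_rearm(bool pending, unsigned long current_expiry,
                         unsigned long new_expiry)
{
    if (!pending)
        return true;
    if ((long)(current_expiry - new_expiry) < 0)
        return false;               /* new expiry is later, keep the timer */
    return current_expiry - new_expiry >= HZ / 2;
}

int main(void)
{
    unsigned long now = 1000;
    unsigned long expiry = clamp_timeout(now, now + 30 * HZ); /* capped to +5s */

    printf("expiry=%lu rearm=%d\n", expiry,
           should_rearm(true, expiry + HZ / 4, expiry));
    return 0;
}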
diff --git a/block/blk.h b/block/blk.h
index d23b415b8a28..45385e9abf6f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -9,6 +9,9 @@
9/* Number of requests a "batching" process may submit */ 9/* Number of requests a "batching" process may submit */
10#define BLK_BATCH_REQ 32 10#define BLK_BATCH_REQ 32
11 11
12/* Max future timer expiry for timeouts */
13#define BLK_MAX_TIMEOUT (5 * HZ)
14
12extern struct kmem_cache *blk_requestq_cachep; 15extern struct kmem_cache *blk_requestq_cachep;
13extern struct kmem_cache *request_cachep; 16extern struct kmem_cache *request_cachep;
14extern struct kobj_type blk_queue_ktype; 17extern struct kobj_type blk_queue_ktype;
@@ -37,9 +40,9 @@ bool __blk_end_bidi_request(struct request *rq, int error,
37void blk_rq_timed_out_timer(unsigned long data); 40void blk_rq_timed_out_timer(unsigned long data);
38void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout, 41void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
39 unsigned int *next_set); 42 unsigned int *next_set);
40void __blk_add_timer(struct request *req, struct list_head *timeout_list); 43unsigned long blk_rq_timeout(unsigned long timeout);
44void blk_add_timer(struct request *req);
41void blk_delete_timer(struct request *); 45void blk_delete_timer(struct request *);
42void blk_add_timer(struct request *);
43 46
44 47
45bool bio_attempt_front_merge(struct request_queue *q, struct request *req, 48bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
@@ -78,7 +81,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
78/* 81/*
79 * Internal elevator interface 82 * Internal elevator interface
80 */ 83 */
81#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash) 84#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
82 85
83void blk_insert_flush(struct request *rq); 86void blk_insert_flush(struct request *rq);
84void blk_abort_flushes(struct request_queue *q); 87void blk_abort_flushes(struct request_queue *q);
@@ -185,6 +188,8 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
185 return q->nr_congestion_off; 188 return q->nr_congestion_off;
186} 189}
187 190
191extern int blk_update_nr_requests(struct request_queue *, unsigned int);
192
188/* 193/*
189 * Contribute to IO statistics IFF: 194 * Contribute to IO statistics IFF:
190 * 195 *
diff --git a/block/bounce.c b/block/bounce.c
new file mode 100644
index 000000000000..ab21ba203d5c
--- /dev/null
+++ b/block/bounce.c
@@ -0,0 +1,290 @@
1/* bounce buffer handling for block devices
2 *
3 * - Split from highmem.c
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/mm.h>
9#include <linux/export.h>
10#include <linux/swap.h>
11#include <linux/gfp.h>
12#include <linux/bio.h>
13#include <linux/pagemap.h>
14#include <linux/mempool.h>
15#include <linux/blkdev.h>
16#include <linux/init.h>
17#include <linux/hash.h>
18#include <linux/highmem.h>
19#include <linux/bootmem.h>
20#include <linux/printk.h>
21#include <asm/tlbflush.h>
22
23#include <trace/events/block.h>
24
25#define POOL_SIZE 64
26#define ISA_POOL_SIZE 16
27
28static mempool_t *page_pool, *isa_page_pool;
29
30#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
31static __init int init_emergency_pool(void)
32{
33#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
34 if (max_pfn <= max_low_pfn)
35 return 0;
36#endif
37
38 page_pool = mempool_create_page_pool(POOL_SIZE, 0);
39 BUG_ON(!page_pool);
40 pr_info("pool size: %d pages\n", POOL_SIZE);
41
42 return 0;
43}
44
45__initcall(init_emergency_pool);
46#endif
47
48#ifdef CONFIG_HIGHMEM
49/*
50 * highmem version, map in to vec
51 */
52static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
53{
54 unsigned long flags;
55 unsigned char *vto;
56
57 local_irq_save(flags);
58 vto = kmap_atomic(to->bv_page);
59 memcpy(vto + to->bv_offset, vfrom, to->bv_len);
60 kunmap_atomic(vto);
61 local_irq_restore(flags);
62}
63
64#else /* CONFIG_HIGHMEM */
65
66#define bounce_copy_vec(to, vfrom) \
67 memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
68
69#endif /* CONFIG_HIGHMEM */
70
71/*
72 * allocate pages in the DMA region for the ISA pool
73 */
74static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
75{
76 return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
77}
78
79/*
80 * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
81 * as the max address, so check if the pool has already been created.
82 */
83int init_emergency_isa_pool(void)
84{
85 if (isa_page_pool)
86 return 0;
87
88 isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
89 mempool_free_pages, (void *) 0);
90 BUG_ON(!isa_page_pool);
91
92 pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
93 return 0;
94}
95
96/*
97 * Simple bounce buffer support for highmem pages. Depending on the
98 * queue gfp mask set, *to may or may not be a highmem page. kmap it
99 * always, it will do the Right Thing
100 */
101static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
102{
103 unsigned char *vfrom;
104 struct bio_vec tovec, *fromvec = from->bi_io_vec;
105 struct bvec_iter iter;
106
107 bio_for_each_segment(tovec, to, iter) {
108 if (tovec.bv_page != fromvec->bv_page) {
109 /*
110 * fromvec->bv_offset and fromvec->bv_len might have
111 * been modified by the block layer, so use the original
112 * copy, bounce_copy_vec already uses tovec->bv_len
113 */
114 vfrom = page_address(fromvec->bv_page) +
115 tovec.bv_offset;
116
117 bounce_copy_vec(&tovec, vfrom);
118 flush_dcache_page(tovec.bv_page);
119 }
120
121 fromvec++;
122 }
123}
124
125static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
126{
127 struct bio *bio_orig = bio->bi_private;
128 struct bio_vec *bvec, *org_vec;
129 int i;
130
131 if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
132 set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);
133
134 /*
135 * free up bounce indirect pages used
136 */
137 bio_for_each_segment_all(bvec, bio, i) {
138 org_vec = bio_orig->bi_io_vec + i;
139 if (bvec->bv_page == org_vec->bv_page)
140 continue;
141
142 dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
143 mempool_free(bvec->bv_page, pool);
144 }
145
146 bio_endio(bio_orig, err);
147 bio_put(bio);
148}
149
150static void bounce_end_io_write(struct bio *bio, int err)
151{
152 bounce_end_io(bio, page_pool, err);
153}
154
155static void bounce_end_io_write_isa(struct bio *bio, int err)
156{
157
158 bounce_end_io(bio, isa_page_pool, err);
159}
160
161static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
162{
163 struct bio *bio_orig = bio->bi_private;
164
165 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
166 copy_to_high_bio_irq(bio_orig, bio);
167
168 bounce_end_io(bio, pool, err);
169}
170
171static void bounce_end_io_read(struct bio *bio, int err)
172{
173 __bounce_end_io_read(bio, page_pool, err);
174}
175
176static void bounce_end_io_read_isa(struct bio *bio, int err)
177{
178 __bounce_end_io_read(bio, isa_page_pool, err);
179}
180
181#ifdef CONFIG_NEED_BOUNCE_POOL
182static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
183{
184 if (bio_data_dir(bio) != WRITE)
185 return 0;
186
187 if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
188 return 0;
189
190 return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
191}
192#else
193static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
194{
195 return 0;
196}
197#endif /* CONFIG_NEED_BOUNCE_POOL */
198
199static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
200 mempool_t *pool, int force)
201{
202 struct bio *bio;
203 int rw = bio_data_dir(*bio_orig);
204 struct bio_vec *to, from;
205 struct bvec_iter iter;
206 unsigned i;
207
208 if (force)
209 goto bounce;
210 bio_for_each_segment(from, *bio_orig, iter)
211 if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
212 goto bounce;
213
214 return;
215bounce:
216 bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
217
218 bio_for_each_segment_all(to, bio, i) {
219 struct page *page = to->bv_page;
220
221 if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
222 continue;
223
224 inc_zone_page_state(to->bv_page, NR_BOUNCE);
225 to->bv_page = mempool_alloc(pool, q->bounce_gfp);
226
227 if (rw == WRITE) {
228 char *vto, *vfrom;
229
230 flush_dcache_page(page);
231
232 vto = page_address(to->bv_page) + to->bv_offset;
233 vfrom = kmap_atomic(page) + to->bv_offset;
234 memcpy(vto, vfrom, to->bv_len);
235 kunmap_atomic(vfrom);
236 }
237 }
238
239 trace_block_bio_bounce(q, *bio_orig);
240
241 bio->bi_flags |= (1 << BIO_BOUNCED);
242
243 if (pool == page_pool) {
244 bio->bi_end_io = bounce_end_io_write;
245 if (rw == READ)
246 bio->bi_end_io = bounce_end_io_read;
247 } else {
248 bio->bi_end_io = bounce_end_io_write_isa;
249 if (rw == READ)
250 bio->bi_end_io = bounce_end_io_read_isa;
251 }
252
253 bio->bi_private = *bio_orig;
254 *bio_orig = bio;
255}
256
257void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
258{
259 int must_bounce;
260 mempool_t *pool;
261
262 /*
263 * Data-less bio, nothing to bounce
264 */
265 if (!bio_has_data(*bio_orig))
266 return;
267
268 must_bounce = must_snapshot_stable_pages(q, *bio_orig);
269
270 /*
271 * for non-isa bounce case, just check if the bounce pfn is equal
272 * to or bigger than the highest pfn in the system -- in that case,
273 * don't waste time iterating over bio segments
274 */
275 if (!(q->bounce_gfp & GFP_DMA)) {
276 if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
277 return;
278 pool = page_pool;
279 } else {
280 BUG_ON(!isa_page_pool);
281 pool = isa_page_pool;
282 }
283
284 /*
285 * slow path
286 */
287 __blk_queue_bounce(q, bio_orig, pool, must_bounce);
288}
289
290EXPORT_SYMBOL(blk_queue_bounce);
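The core of the new bounce.c is the decision in __blk_queue_bounce(): walk the bio's segments and, unless forced, only clone and copy when a segment's page sits above the queue's bounce pfn limit (writes are copied into the bounce page up front, reads are copied back in the completion path). A stripped-down userspace model of just that decision; the segment and queue types here are invented for illustration:

/* Userspace model of the "does this bio need bouncing?" walk above.
 * struct seg / struct queue are invented; the kernel compares
 * page_to_pfn(bv_page) against queue_bounce_pfn(q). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct seg {
    unsigned long pfn;          /* page frame number backing this segment */
    size_t len;
};

struct queue {
    unsigned long bounce_pfn;   /* highest pfn the device can address */
};

static bool bio_needs_bounce(const struct queue *q,
                             const struct seg *segs, int nr, bool force)
{
    int i;

    if (force)
        return true;
    for (i = 0; i < nr; i++)
        if (segs[i].pfn > q->bounce_pfn)
            return true;        /* at least one page is out of reach */
    return false;
}

int main(void)
{
    struct queue q = { .bounce_pfn = 0xFFFFF };   /* e.g. a 4GB DMA limit */
    struct seg segs[] = { { 0x1234, 4096 }, { 0x123456, 4096 } };

    printf("bounce needed: %d\n", bio_needs_bounce(&q, segs, 2, false));
    return 0;
}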
diff --git a/block/bsg.c b/block/bsg.c
index 420a5a9f1b23..e5214c148096 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -1008,7 +1008,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
1008 /* 1008 /*
1009 * we need a proper transport to send commands, not a stacked device 1009 * we need a proper transport to send commands, not a stacked device
1010 */ 1010 */
1011 if (!q->request_fn) 1011 if (!queue_is_rq_based(q))
1012 return 0; 1012 return 0;
1013 1013
1014 bcd = &q->bsg_dev; 1014 bcd = &q->bsg_dev;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 744833b630c6..cadc37841744 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -908,7 +908,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
908{ 908{
909 if (cfqd->busy_queues) { 909 if (cfqd->busy_queues) {
910 cfq_log(cfqd, "schedule dispatch"); 910 cfq_log(cfqd, "schedule dispatch");
911 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); 911 kblockd_schedule_work(&cfqd->unplug_work);
912 } 912 }
913} 913}
914 914
@@ -1670,11 +1670,11 @@ static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
1670 return 0; 1670 return 0;
1671} 1671}
1672 1672
1673static int __cfqg_set_weight_device(struct cgroup_subsys_state *css, 1673static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
1674 struct cftype *cft, const char *buf, 1674 char *buf, size_t nbytes, loff_t off,
1675 bool is_leaf_weight) 1675 bool is_leaf_weight)
1676{ 1676{
1677 struct blkcg *blkcg = css_to_blkcg(css); 1677 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1678 struct blkg_conf_ctx ctx; 1678 struct blkg_conf_ctx ctx;
1679 struct cfq_group *cfqg; 1679 struct cfq_group *cfqg;
1680 int ret; 1680 int ret;
@@ -1697,19 +1697,19 @@ static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
1697 } 1697 }
1698 1698
1699 blkg_conf_finish(&ctx); 1699 blkg_conf_finish(&ctx);
1700 return ret; 1700 return ret ?: nbytes;
1701} 1701}
1702 1702
1703static int cfqg_set_weight_device(struct cgroup_subsys_state *css, 1703static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
1704 struct cftype *cft, const char *buf) 1704 char *buf, size_t nbytes, loff_t off)
1705{ 1705{
1706 return __cfqg_set_weight_device(css, cft, buf, false); 1706 return __cfqg_set_weight_device(of, buf, nbytes, off, false);
1707} 1707}
1708 1708
1709static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css, 1709static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
1710 struct cftype *cft, const char *buf) 1710 char *buf, size_t nbytes, loff_t off)
1711{ 1711{
1712 return __cfqg_set_weight_device(css, cft, buf, true); 1712 return __cfqg_set_weight_device(of, buf, nbytes, off, true);
1713} 1713}
1714 1714
1715static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft, 1715static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
@@ -1837,8 +1837,7 @@ static struct cftype cfq_blkcg_files[] = {
1837 .name = "weight_device", 1837 .name = "weight_device",
1838 .flags = CFTYPE_ONLY_ON_ROOT, 1838 .flags = CFTYPE_ONLY_ON_ROOT,
1839 .seq_show = cfqg_print_leaf_weight_device, 1839 .seq_show = cfqg_print_leaf_weight_device,
1840 .write_string = cfqg_set_leaf_weight_device, 1840 .write = cfqg_set_leaf_weight_device,
1841 .max_write_len = 256,
1842 }, 1841 },
1843 { 1842 {
1844 .name = "weight", 1843 .name = "weight",
@@ -1852,8 +1851,7 @@ static struct cftype cfq_blkcg_files[] = {
1852 .name = "weight_device", 1851 .name = "weight_device",
1853 .flags = CFTYPE_NOT_ON_ROOT, 1852 .flags = CFTYPE_NOT_ON_ROOT,
1854 .seq_show = cfqg_print_weight_device, 1853 .seq_show = cfqg_print_weight_device,
1855 .write_string = cfqg_set_weight_device, 1854 .write = cfqg_set_weight_device,
1856 .max_write_len = 256,
1857 }, 1855 },
1858 { 1856 {
1859 .name = "weight", 1857 .name = "weight",
@@ -1865,8 +1863,7 @@ static struct cftype cfq_blkcg_files[] = {
1865 { 1863 {
1866 .name = "leaf_weight_device", 1864 .name = "leaf_weight_device",
1867 .seq_show = cfqg_print_leaf_weight_device, 1865 .seq_show = cfqg_print_leaf_weight_device,
1868 .write_string = cfqg_set_leaf_weight_device, 1866 .write = cfqg_set_leaf_weight_device,
1869 .max_write_len = 256,
1870 }, 1867 },
1871 { 1868 {
1872 .name = "leaf_weight", 1869 .name = "leaf_weight",
@@ -2367,10 +2364,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
2367 * reposition in fifo if next is older than rq 2364 * reposition in fifo if next is older than rq
2368 */ 2365 */
2369 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && 2366 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2370 time_before(rq_fifo_time(next), rq_fifo_time(rq)) && 2367 time_before(next->fifo_time, rq->fifo_time) &&
2371 cfqq == RQ_CFQQ(next)) { 2368 cfqq == RQ_CFQQ(next)) {
2372 list_move(&rq->queuelist, &next->queuelist); 2369 list_move(&rq->queuelist, &next->queuelist);
2373 rq_set_fifo_time(rq, rq_fifo_time(next)); 2370 rq->fifo_time = next->fifo_time;
2374 } 2371 }
2375 2372
2376 if (cfqq->next_rq == next) 2373 if (cfqq->next_rq == next)
@@ -2814,7 +2811,7 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2814 return NULL; 2811 return NULL;
2815 2812
2816 rq = rq_entry_fifo(cfqq->fifo.next); 2813 rq = rq_entry_fifo(cfqq->fifo.next);
2817 if (time_before(jiffies, rq_fifo_time(rq))) 2814 if (time_before(jiffies, rq->fifo_time))
2818 rq = NULL; 2815 rq = NULL;
2819 2816
2820 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); 2817 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
@@ -3927,7 +3924,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
3927 cfq_log_cfqq(cfqd, cfqq, "insert_request"); 3924 cfq_log_cfqq(cfqd, cfqq, "insert_request");
3928 cfq_init_prio_data(cfqq, RQ_CIC(rq)); 3925 cfq_init_prio_data(cfqq, RQ_CIC(rq));
3929 3926
3930 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); 3927 rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
3931 list_add_tail(&rq->queuelist, &cfqq->fifo); 3928 list_add_tail(&rq->queuelist, &cfqq->fifo);
3932 cfq_add_rq_rb(rq); 3929 cfq_add_rq_rb(rq);
3933 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, 3930 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
@@ -4463,7 +4460,7 @@ out_free:
4463static ssize_t 4460static ssize_t
4464cfq_var_show(unsigned int var, char *page) 4461cfq_var_show(unsigned int var, char *page)
4465{ 4462{
4466 return sprintf(page, "%d\n", var); 4463 return sprintf(page, "%u\n", var);
4467} 4464}
4468 4465
4469static ssize_t 4466static ssize_t
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 9ef66406c625..a753df2b3fc2 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
106 /* 106 /*
107 * set expire time and add to fifo list 107 * set expire time and add to fifo list
108 */ 108 */
109 rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]); 109 rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
110 list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]); 110 list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
111} 111}
112 112
@@ -174,9 +174,9 @@ deadline_merged_requests(struct request_queue *q, struct request *req,
174 * and move into next position (next will be deleted) in fifo 174 * and move into next position (next will be deleted) in fifo
175 */ 175 */
176 if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { 176 if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
177 if (time_before(rq_fifo_time(next), rq_fifo_time(req))) { 177 if (time_before(next->fifo_time, req->fifo_time)) {
178 list_move(&req->queuelist, &next->queuelist); 178 list_move(&req->queuelist, &next->queuelist);
179 rq_set_fifo_time(req, rq_fifo_time(next)); 179 req->fifo_time = next->fifo_time;
180 } 180 }
181 } 181 }
182 182
@@ -230,7 +230,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
230 /* 230 /*
231 * rq is expired! 231 * rq is expired!
232 */ 232 */
233 if (time_after_eq(jiffies, rq_fifo_time(rq))) 233 if (time_after_eq(jiffies, rq->fifo_time))
234 return 1; 234 return 1;
235 235
236 return 0; 236 return 0;
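Both the deadline and CFQ hunks drop the rq_fifo_time()/rq_set_fifo_time() accessors and use the request's fifo_time field directly, still comparing it with the jiffies-safe time_before()/time_after_eq() macros so that counter wrap-around does not break expiry checks. A userspace sketch of that wrap-safe comparison and the fifo-expiry test it enables; the macro restates the pattern from include/linux/jiffies.h without the type checking:

/* Wrap-safe time comparison in the style of the kernel's time_after_eq():
 * subtract and look at the sign of the result. time_before()/time_after()
 * follow the same pattern. */
#include <stdbool.h>
#include <stdio.h>

#define time_after_eq(a, b)  ((long)((a) - (b)) >= 0)

struct request_model {
    unsigned long fifo_time;    /* tick value at which the request expires */
};

/* Mirrors deadline_check_fifo(): the head request has expired once the
 * current time has reached its fifo_time, even across a counter wrap. */
static bool fifo_expired(unsigned long now, const struct request_model *rq)
{
    return time_after_eq(now, rq->fifo_time);
}

int main(void)
{
    /* "now" has wrapped past 0 while fifo_time was set just before the wrap. */
    unsigned long now = 5;
    struct request_model rq = { .fifo_time = (unsigned long)-10 };

    printf("expired: %d\n", fifo_expired(now, &rq));   /* prints 1 */
    return 0;
}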
diff --git a/block/elevator.c b/block/elevator.c
index 42c45a7d6714..1e01b66a0b92 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -247,6 +247,7 @@ EXPORT_SYMBOL(elevator_exit);
247static inline void __elv_rqhash_del(struct request *rq) 247static inline void __elv_rqhash_del(struct request *rq)
248{ 248{
249 hash_del(&rq->hash); 249 hash_del(&rq->hash);
250 rq->cmd_flags &= ~REQ_HASHED;
250} 251}
251 252
252static void elv_rqhash_del(struct request_queue *q, struct request *rq) 253static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -261,6 +262,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
261 262
262 BUG_ON(ELV_ON_HASH(rq)); 263 BUG_ON(ELV_ON_HASH(rq));
263 hash_add(e->hash, &rq->hash, rq_hash_key(rq)); 264 hash_add(e->hash, &rq->hash, rq_hash_key(rq));
265 rq->cmd_flags |= REQ_HASHED;
264} 266}
265 267
266static void elv_rqhash_reposition(struct request_queue *q, struct request *rq) 268static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
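The elevator.c change pairs with the new ELV_ON_HASH() definition in blk.h: instead of asking the hlist node whether it is hashed, a REQ_HASHED bit in cmd_flags is set when the request is added to the merge hash and cleared when it is removed. A minimal userspace sketch of that flag-based bookkeeping; the flag value and struct are illustrative:

/* Sketch of tracking hash membership with a cmd_flags bit, as the
 * ELV_ON_HASH()/REQ_HASHED change above does. The bit value is made up. */
#include <stdio.h>

#define REQ_HASHED   (1U << 0)          /* illustrative flag bit */

struct request_model {
    unsigned int cmd_flags;
};

#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)

static void rqhash_add(struct request_model *rq)
{
    /* the real code also does hash_add(e->hash, &rq->hash, rq_hash_key(rq)) */
    rq->cmd_flags |= REQ_HASHED;
}

static void rqhash_del(struct request_model *rq)
{
    /* the real code also does hash_del(&rq->hash) */
    rq->cmd_flags &= ~REQ_HASHED;
}

int main(void)
{
    struct request_model rq = { 0 };

    rqhash_add(&rq);
    printf("on hash: %u\n", !!ELV_ON_HASH(&rq));   /* 1 */
    rqhash_del(&rq);
    printf("on hash: %u\n", !!ELV_ON_HASH(&rq));   /* 0 */
    return 0;
}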
diff --git a/block/ioprio.c b/block/ioprio.c
new file mode 100644
index 000000000000..e50170ca7c33
--- /dev/null
+++ b/block/ioprio.c
@@ -0,0 +1,241 @@
1/*
2 * fs/ioprio.c
3 *
4 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
5 *
6 * Helper functions for setting/querying io priorities of processes. The
 7 * system calls closely mimic getpriority/setpriority, see the man page for
8 * those. The prio argument is a composite of prio class and prio data, where
9 * the data argument has meaning within that class. The standard scheduling
10 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
11 * being the lowest.
12 *
13 * IOW, setting BE scheduling class with prio 2 is done ala:
14 *
15 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
16 *
17 * ioprio_set(PRIO_PROCESS, pid, prio);
18 *
19 * See also Documentation/block/ioprio.txt
20 *
21 */
22#include <linux/gfp.h>
23#include <linux/kernel.h>
24#include <linux/export.h>
25#include <linux/ioprio.h>
26#include <linux/blkdev.h>
27#include <linux/capability.h>
28#include <linux/syscalls.h>
29#include <linux/security.h>
30#include <linux/pid_namespace.h>
31
32int set_task_ioprio(struct task_struct *task, int ioprio)
33{
34 int err;
35 struct io_context *ioc;
36 const struct cred *cred = current_cred(), *tcred;
37
38 rcu_read_lock();
39 tcred = __task_cred(task);
40 if (!uid_eq(tcred->uid, cred->euid) &&
41 !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
42 rcu_read_unlock();
43 return -EPERM;
44 }
45 rcu_read_unlock();
46
47 err = security_task_setioprio(task, ioprio);
48 if (err)
49 return err;
50
51 ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
52 if (ioc) {
53 ioc->ioprio = ioprio;
54 put_io_context(ioc);
55 }
56
57 return err;
58}
59EXPORT_SYMBOL_GPL(set_task_ioprio);
60
61SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
62{
63 int class = IOPRIO_PRIO_CLASS(ioprio);
64 int data = IOPRIO_PRIO_DATA(ioprio);
65 struct task_struct *p, *g;
66 struct user_struct *user;
67 struct pid *pgrp;
68 kuid_t uid;
69 int ret;
70
71 switch (class) {
72 case IOPRIO_CLASS_RT:
73 if (!capable(CAP_SYS_ADMIN))
74 return -EPERM;
75 /* fall through, rt has prio field too */
76 case IOPRIO_CLASS_BE:
77 if (data >= IOPRIO_BE_NR || data < 0)
78 return -EINVAL;
79
80 break;
81 case IOPRIO_CLASS_IDLE:
82 break;
83 case IOPRIO_CLASS_NONE:
84 if (data)
85 return -EINVAL;
86 break;
87 default:
88 return -EINVAL;
89 }
90
91 ret = -ESRCH;
92 rcu_read_lock();
93 switch (which) {
94 case IOPRIO_WHO_PROCESS:
95 if (!who)
96 p = current;
97 else
98 p = find_task_by_vpid(who);
99 if (p)
100 ret = set_task_ioprio(p, ioprio);
101 break;
102 case IOPRIO_WHO_PGRP:
103 if (!who)
104 pgrp = task_pgrp(current);
105 else
106 pgrp = find_vpid(who);
107 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
108 ret = set_task_ioprio(p, ioprio);
109 if (ret)
110 break;
111 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
112 break;
113 case IOPRIO_WHO_USER:
114 uid = make_kuid(current_user_ns(), who);
115 if (!uid_valid(uid))
116 break;
117 if (!who)
118 user = current_user();
119 else
120 user = find_user(uid);
121
122 if (!user)
123 break;
124
125 do_each_thread(g, p) {
126 if (!uid_eq(task_uid(p), uid))
127 continue;
128 ret = set_task_ioprio(p, ioprio);
129 if (ret)
130 goto free_uid;
131 } while_each_thread(g, p);
132free_uid:
133 if (who)
134 free_uid(user);
135 break;
136 default:
137 ret = -EINVAL;
138 }
139
140 rcu_read_unlock();
141 return ret;
142}
143
144static int get_task_ioprio(struct task_struct *p)
145{
146 int ret;
147
148 ret = security_task_getioprio(p);
149 if (ret)
150 goto out;
151 ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
152 if (p->io_context)
153 ret = p->io_context->ioprio;
154out:
155 return ret;
156}
157
158int ioprio_best(unsigned short aprio, unsigned short bprio)
159{
160 unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
161 unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
162
163 if (aclass == IOPRIO_CLASS_NONE)
164 aclass = IOPRIO_CLASS_BE;
165 if (bclass == IOPRIO_CLASS_NONE)
166 bclass = IOPRIO_CLASS_BE;
167
168 if (aclass == bclass)
169 return min(aprio, bprio);
170 if (aclass > bclass)
171 return bprio;
172 else
173 return aprio;
174}
175
176SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
177{
178 struct task_struct *g, *p;
179 struct user_struct *user;
180 struct pid *pgrp;
181 kuid_t uid;
182 int ret = -ESRCH;
183 int tmpio;
184
185 rcu_read_lock();
186 switch (which) {
187 case IOPRIO_WHO_PROCESS:
188 if (!who)
189 p = current;
190 else
191 p = find_task_by_vpid(who);
192 if (p)
193 ret = get_task_ioprio(p);
194 break;
195 case IOPRIO_WHO_PGRP:
196 if (!who)
197 pgrp = task_pgrp(current);
198 else
199 pgrp = find_vpid(who);
200 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
201 tmpio = get_task_ioprio(p);
202 if (tmpio < 0)
203 continue;
204 if (ret == -ESRCH)
205 ret = tmpio;
206 else
207 ret = ioprio_best(ret, tmpio);
208 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
209 break;
210 case IOPRIO_WHO_USER:
211 uid = make_kuid(current_user_ns(), who);
212 if (!who)
213 user = current_user();
214 else
215 user = find_user(uid);
216
217 if (!user)
218 break;
219
220 do_each_thread(g, p) {
221 if (!uid_eq(task_uid(p), user->uid))
222 continue;
223 tmpio = get_task_ioprio(p);
224 if (tmpio < 0)
225 continue;
226 if (ret == -ESRCH)
227 ret = tmpio;
228 else
229 ret = ioprio_best(ret, tmpio);
230 } while_each_thread(g, p);
231
232 if (who)
233 free_uid(user);
234 break;
235 default:
236 ret = -EINVAL;
237 }
238
239 rcu_read_unlock();
240 return ret;
241}
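The header comment of ioprio.c describes the composite priority value: the class in the top bits, the per-class level in the low bits. A small userspace illustration of that encoding and of the ioprio_best() aggregation rule shown above; the shift and mask mirror include/linux/ioprio.h but are restated here for a standalone build, so treat them as illustrative:

/* Illustration of the composite ioprio value and of ioprio_best().
 * Constants mirror include/linux/ioprio.h (IOPRIO_CLASS_SHIFT == 13)
 * but are restated here so the example compiles on its own. */
#include <stdio.h>

#define IOPRIO_CLASS_SHIFT      13
#define IOPRIO_PRIO_MASK        ((1UL << IOPRIO_CLASS_SHIFT) - 1)

#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_DATA(mask)  ((mask) & IOPRIO_PRIO_MASK)
#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | (data))

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };

/* Same rule as ioprio_best() above: NONE counts as BE, the stronger
 * (lower-numbered) class wins, equal classes pick the higher priority. */
static unsigned short ioprio_best(unsigned short aprio, unsigned short bprio)
{
    unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
    unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);

    if (aclass == IOPRIO_CLASS_NONE)
        aclass = IOPRIO_CLASS_BE;
    if (bclass == IOPRIO_CLASS_NONE)
        bclass = IOPRIO_CLASS_BE;

    if (aclass == bclass)
        return aprio < bprio ? aprio : bprio;
    return aclass > bclass ? bprio : aprio;
}

int main(void)
{
    unsigned short be2 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 2);  /* "BE, prio 2" */
    unsigned short rt0 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0);

    printf("class=%lu data=%lu\n",
           (unsigned long)IOPRIO_PRIO_CLASS(be2),
           (unsigned long)IOPRIO_PRIO_DATA(be2));
    printf("best of BE2/RT0 is class %lu\n",
           (unsigned long)IOPRIO_PRIO_CLASS(ioprio_best(be2, rt0)));
    return 0;
}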
diff --git a/block/partitions/atari.h b/block/partitions/atari.h
index fe2d32a89f36..f2ec43bfeec1 100644
--- a/block/partitions/atari.h
+++ b/block/partitions/atari.h
@@ -11,6 +11,8 @@
11 * by Guenther Kelleter (guenther@pool.informatik.rwth-aachen.de) 11 * by Guenther Kelleter (guenther@pool.informatik.rwth-aachen.de)
12 */ 12 */
13 13
14#include <linux/compiler.h>
15
14struct partition_info 16struct partition_info
15{ 17{
16 u8 flg; /* bit 0: active; bit 7: bootable */ 18 u8 flg; /* bit 0: active; bit 7: bootable */
@@ -29,6 +31,6 @@ struct rootsector
29 u32 bsl_st; /* start of bad sector list */ 31 u32 bsl_st; /* start of bad sector list */
30 u32 bsl_cnt; /* length of bad sector list */ 32 u32 bsl_cnt; /* length of bad sector list */
31 u16 checksum; /* checksum for bootable disks */ 33 u16 checksum; /* checksum for bootable disks */
32} __attribute__((__packed__)); 34} __packed;
33 35
34int atari_partition(struct parsed_partitions *state); 36int atari_partition(struct parsed_partitions *state);
diff --git a/block/partitions/efi.h b/block/partitions/efi.h
index 4efcafba7e64..abd0b19288a6 100644
--- a/block/partitions/efi.h
+++ b/block/partitions/efi.h
@@ -32,6 +32,7 @@
32#include <linux/major.h> 32#include <linux/major.h>
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/efi.h> 34#include <linux/efi.h>
35#include <linux/compiler.h>
35 36
36#define MSDOS_MBR_SIGNATURE 0xaa55 37#define MSDOS_MBR_SIGNATURE 0xaa55
37#define EFI_PMBR_OSTYPE_EFI 0xEF 38#define EFI_PMBR_OSTYPE_EFI 0xEF
@@ -87,13 +88,13 @@ typedef struct _gpt_header {
87 * 88 *
88 * uint8_t reserved2[ BlockSize - 92 ]; 89 * uint8_t reserved2[ BlockSize - 92 ];
89 */ 90 */
90} __attribute__ ((packed)) gpt_header; 91} __packed gpt_header;
91 92
92typedef struct _gpt_entry_attributes { 93typedef struct _gpt_entry_attributes {
93 u64 required_to_function:1; 94 u64 required_to_function:1;
94 u64 reserved:47; 95 u64 reserved:47;
95 u64 type_guid_specific:16; 96 u64 type_guid_specific:16;
96} __attribute__ ((packed)) gpt_entry_attributes; 97} __packed gpt_entry_attributes;
97 98
98typedef struct _gpt_entry { 99typedef struct _gpt_entry {
99 efi_guid_t partition_type_guid; 100 efi_guid_t partition_type_guid;
@@ -102,7 +103,7 @@ typedef struct _gpt_entry {
102 __le64 ending_lba; 103 __le64 ending_lba;
103 gpt_entry_attributes attributes; 104 gpt_entry_attributes attributes;
104 efi_char16_t partition_name[72 / sizeof (efi_char16_t)]; 105 efi_char16_t partition_name[72 / sizeof (efi_char16_t)];
105} __attribute__ ((packed)) gpt_entry; 106} __packed gpt_entry;
106 107
107typedef struct _gpt_mbr_record { 108typedef struct _gpt_mbr_record {
108 u8 boot_indicator; /* unused by EFI, set to 0x80 for bootable */ 109 u8 boot_indicator; /* unused by EFI, set to 0x80 for bootable */
@@ -124,7 +125,7 @@ typedef struct _legacy_mbr {
124 __le16 unknown; 125 __le16 unknown;
125 gpt_mbr_record partition_record[4]; 126 gpt_mbr_record partition_record[4];
126 __le16 signature; 127 __le16 signature;
127} __attribute__ ((packed)) legacy_mbr; 128} __packed legacy_mbr;
128 129
129/* Functions */ 130/* Functions */
130extern int efi_partition(struct parsed_partitions *state); 131extern int efi_partition(struct parsed_partitions *state);
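The partition-table headers switch from the open-coded __attribute__((__packed__)) to the kernel's __packed shorthand from <linux/compiler.h>; the semantics are unchanged. A tiny userspace demonstration of why packing matters for these on-disk layouts; the struct here is invented, not one of the GPT structures:

/* Why on-disk structures like gpt_header are declared packed: without
 * packing the compiler may insert padding, and sizeof() no longer matches
 * the byte-for-byte disk layout. __packed expands to this attribute. */
#include <stdio.h>
#include <stdint.h>

#define __packed __attribute__((packed))

struct label_padded {           /* natural alignment: likely padded */
    uint16_t magic;
    uint32_t start_lba;
    uint8_t  flags;
};

struct label_packed {           /* matches the on-disk layout exactly */
    uint16_t magic;
    uint32_t start_lba;
    uint8_t  flags;
} __packed;

int main(void)
{
    printf("padded: %zu bytes, packed: %zu bytes\n",
           sizeof(struct label_padded), sizeof(struct label_packed));
    /* typically prints "padded: 12 bytes, packed: 7 bytes" */
    return 0;
}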
diff --git a/block/partitions/karma.c b/block/partitions/karma.c
index 0ea19312706b..9721fa589bb1 100644
--- a/block/partitions/karma.c
+++ b/block/partitions/karma.c
@@ -8,6 +8,7 @@
8 8
9#include "check.h" 9#include "check.h"
10#include "karma.h" 10#include "karma.h"
11#include <linux/compiler.h>
11 12
12int karma_partition(struct parsed_partitions *state) 13int karma_partition(struct parsed_partitions *state)
13{ 14{
@@ -26,7 +27,7 @@ int karma_partition(struct parsed_partitions *state)
26 } d_partitions[2]; 27 } d_partitions[2];
27 u8 d_blank[208]; 28 u8 d_blank[208];
28 __le16 d_magic; 29 __le16 d_magic;
29 } __attribute__((packed)) *label; 30 } __packed *label;
30 struct d_partition *p; 31 struct d_partition *p;
31 32
32 data = read_part_sector(state, 0, &sect); 33 data = read_part_sector(state, 0, &sect);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 26487972ac54..9c28a5b38042 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -205,10 +205,6 @@ int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm)
205 if (capable(CAP_SYS_RAWIO)) 205 if (capable(CAP_SYS_RAWIO))
206 return 0; 206 return 0;
207 207
208 /* if there's no filter set, assume we're filtering everything out */
209 if (!filter)
210 return -EPERM;
211
212 /* Anybody who can open the device can do a read-safe command */ 208 /* Anybody who can open the device can do a read-safe command */
213 if (test_bit(cmd[0], filter->read_ok)) 209 if (test_bit(cmd[0], filter->read_ok))
214 return 0; 210 return 0;