| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-30 14:19:05 -0500 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-30 14:19:05 -0500 |
| commit | f568849edac8611d603e00bd6cbbcfea09395ae6 (patch) | |
| tree | b9472d640fe5d87426d38c9d81d946cf197ad3fb /block | |
| parent | d9894c228b11273e720bb63ba120d1d326fe9d94 (diff) | |
| parent | 675675ada486dde5bf9aa51665e90706bff11a35 (diff) | |
Merge branch 'for-3.14/core' of git://git.kernel.dk/linux-block
Pull core block IO changes from Jens Axboe:
"The major piece in here is the immutable bio_ve series from Kent, the
rest is fairly minor. It was supposed to go in last round, but
various issues pushed it to this release instead. The pull request
contains:
- Various smaller blk-mq fixes from different folks. Nothing major
here, just minor fixes and cleanups.
- Fix for a memory leak in the error path in the block ioctl code
from Christian Engelmayer.
- Header export fix from CaiZhiyong.
- Finally the immutable biovec changes from Kent Overstreet. This
enables some nice future work on making arbitrarily sized bios
possible, and makes bio splitting more efficient. Related fixes to
immutable bio_vecs:
- dm-cache immutable fixup from Mike Snitzer.
- btrfs immutable fixup from Muthu Kumar.
- bio-integrity fix from Nic Bellinger, which is also going to stable"
* 'for-3.14/core' of git://git.kernel.dk/linux-block: (44 commits)
xtensa: fixup simdisk driver to work with immutable bio_vecs
block/blk-mq-cpu.c: use hotcpu_notifier()
blk-mq: for_each_* macro correctness
block: Fix memory leak in rw_copy_check_uvector() handling
bio-integrity: Fix bio_integrity_verify segment start bug
block: remove unrelated header files and export symbol
blk-mq: uses page->list incorrectly
blk-mq: use __smp_call_function_single directly
btrfs: fix missing increment of bi_remaining
Revert "block: Warn and free bio if bi_end_io is not set"
block: Warn and free bio if bi_end_io is not set
blk-mq: fix initializing request's start time
block: blk-mq: don't export blk_mq_free_queue()
block: blk-mq: make blk_sync_queue support mq
block: blk-mq: support draining mq queue
dm cache: increment bi_remaining when bi_end_io is restored
block: fixup for generic bio chaining
block: Really silence spurious compiler warnings
block: Silence spurious compiler warnings
block: Kill bio_pair_split()
...
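
Most of the churn in the per-file hunks below is the mechanical conversion this message describes: `bi_sector` and `bi_size` move under `bio->bi_iter`, and `bio_for_each_segment()` now takes a `struct bvec_iter` and yields `struct bio_vec` by value instead of indexing `bi_io_vec` with an integer. As a quick orientation, here is a minimal, illustrative sketch of the new iteration idiom; the helper name is hypothetical and not part of this series, but the types and accessors match the ones the diff converts to.

```c
#include <linux/bio.h>

/*
 * Illustrative helper (not part of this patch): sum the data bytes in a
 * bio using the 3.14 immutable-biovec iteration idiom.
 *
 * Old idiom (pre-3.14), shown only for contrast:
 *
 *	struct bio_vec *bv;
 *	int i;
 *	bio_for_each_segment(bv, bio, i)
 *		bytes += bv->bv_len;	// alongside bio->bi_size, bio->bi_sector
 */
static unsigned int bio_data_bytes(struct bio *bio)
{
	struct bio_vec bv;	/* bio_vecs are now copied by value       */
	struct bvec_iter iter;	/* the iterator, not the bio, holds state */
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;

	/* equals bio->bi_iter.bi_size (formerly bio->bi_size) */
	return bytes;
}
```

Because per-caller iteration state lives in the `bvec_iter` rather than in the bio's vector itself, bios can later be walked partially or split at arbitrary points without cloning their biovec arrays, which is the future work the commit message alludes to.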
Diffstat (limited to 'block')
| -rw-r--r-- | block/blk-core.c | 61 |
| -rw-r--r-- | block/blk-exec.c | 4 |
| -rw-r--r-- | block/blk-flush.c | 2 |
| -rw-r--r-- | block/blk-integrity.c | 40 |
| -rw-r--r-- | block/blk-lib.c | 12 |
| -rw-r--r-- | block/blk-map.c | 6 |
| -rw-r--r-- | block/blk-merge.c | 66 |
| -rw-r--r-- | block/blk-mq-cpu.c | 37 |
| -rw-r--r-- | block/blk-mq.c | 123 |
| -rw-r--r-- | block/blk-mq.h | 3 |
| -rw-r--r-- | block/blk-sysfs.c | 1 |
| -rw-r--r-- | block/blk-throttle.c | 14 |
| -rw-r--r-- | block/cmdline-parser.c | 18 |
| -rw-r--r-- | block/elevator.c | 2 |
| -rw-r--r-- | block/scsi_ioctl.c | 6 |
15 files changed, 181 insertions, 214 deletions
diff --git a/block/blk-core.c b/block/blk-core.c index 8bdd0121212a..c00e0bdeab4a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -38,6 +38,7 @@ | |||
38 | 38 | ||
39 | #include "blk.h" | 39 | #include "blk.h" |
40 | #include "blk-cgroup.h" | 40 | #include "blk-cgroup.h" |
41 | #include "blk-mq.h" | ||
41 | 42 | ||
42 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); | 43 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); |
43 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); | 44 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); |
@@ -130,7 +131,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio, | |||
130 | bio_advance(bio, nbytes); | 131 | bio_advance(bio, nbytes); |
131 | 132 | ||
132 | /* don't actually finish bio if it's part of flush sequence */ | 133 | /* don't actually finish bio if it's part of flush sequence */ |
133 | if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) | 134 | if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) |
134 | bio_endio(bio, error); | 135 | bio_endio(bio, error); |
135 | } | 136 | } |
136 | 137 | ||
@@ -245,7 +246,16 @@ EXPORT_SYMBOL(blk_stop_queue); | |||
245 | void blk_sync_queue(struct request_queue *q) | 246 | void blk_sync_queue(struct request_queue *q) |
246 | { | 247 | { |
247 | del_timer_sync(&q->timeout); | 248 | del_timer_sync(&q->timeout); |
248 | cancel_delayed_work_sync(&q->delay_work); | 249 | |
250 | if (q->mq_ops) { | ||
251 | struct blk_mq_hw_ctx *hctx; | ||
252 | int i; | ||
253 | |||
254 | queue_for_each_hw_ctx(q, hctx, i) | ||
255 | cancel_delayed_work_sync(&hctx->delayed_work); | ||
256 | } else { | ||
257 | cancel_delayed_work_sync(&q->delay_work); | ||
258 | } | ||
249 | } | 259 | } |
250 | EXPORT_SYMBOL(blk_sync_queue); | 260 | EXPORT_SYMBOL(blk_sync_queue); |
251 | 261 | ||
@@ -497,8 +507,13 @@ void blk_cleanup_queue(struct request_queue *q) | |||
497 | * Drain all requests queued before DYING marking. Set DEAD flag to | 507 | * Drain all requests queued before DYING marking. Set DEAD flag to |
498 | * prevent that q->request_fn() gets invoked after draining finished. | 508 | * prevent that q->request_fn() gets invoked after draining finished. |
499 | */ | 509 | */ |
500 | spin_lock_irq(lock); | 510 | if (q->mq_ops) { |
501 | __blk_drain_queue(q, true); | 511 | blk_mq_drain_queue(q); |
512 | spin_lock_irq(lock); | ||
513 | } else { | ||
514 | spin_lock_irq(lock); | ||
515 | __blk_drain_queue(q, true); | ||
516 | } | ||
502 | queue_flag_set(QUEUE_FLAG_DEAD, q); | 517 | queue_flag_set(QUEUE_FLAG_DEAD, q); |
503 | spin_unlock_irq(lock); | 518 | spin_unlock_irq(lock); |
504 | 519 | ||
@@ -1326,7 +1341,7 @@ void blk_add_request_payload(struct request *rq, struct page *page, | |||
1326 | bio->bi_io_vec->bv_offset = 0; | 1341 | bio->bi_io_vec->bv_offset = 0; |
1327 | bio->bi_io_vec->bv_len = len; | 1342 | bio->bi_io_vec->bv_len = len; |
1328 | 1343 | ||
1329 | bio->bi_size = len; | 1344 | bio->bi_iter.bi_size = len; |
1330 | bio->bi_vcnt = 1; | 1345 | bio->bi_vcnt = 1; |
1331 | bio->bi_phys_segments = 1; | 1346 | bio->bi_phys_segments = 1; |
1332 | 1347 | ||
@@ -1351,7 +1366,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req, | |||
1351 | 1366 | ||
1352 | req->biotail->bi_next = bio; | 1367 | req->biotail->bi_next = bio; |
1353 | req->biotail = bio; | 1368 | req->biotail = bio; |
1354 | req->__data_len += bio->bi_size; | 1369 | req->__data_len += bio->bi_iter.bi_size; |
1355 | req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); | 1370 | req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); |
1356 | 1371 | ||
1357 | blk_account_io_start(req, false); | 1372 | blk_account_io_start(req, false); |
@@ -1380,8 +1395,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req, | |||
1380 | * not touch req->buffer either... | 1395 | * not touch req->buffer either... |
1381 | */ | 1396 | */ |
1382 | req->buffer = bio_data(bio); | 1397 | req->buffer = bio_data(bio); |
1383 | req->__sector = bio->bi_sector; | 1398 | req->__sector = bio->bi_iter.bi_sector; |
1384 | req->__data_len += bio->bi_size; | 1399 | req->__data_len += bio->bi_iter.bi_size; |
1385 | req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); | 1400 | req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); |
1386 | 1401 | ||
1387 | blk_account_io_start(req, false); | 1402 | blk_account_io_start(req, false); |
@@ -1459,7 +1474,7 @@ void init_request_from_bio(struct request *req, struct bio *bio) | |||
1459 | req->cmd_flags |= REQ_FAILFAST_MASK; | 1474 | req->cmd_flags |= REQ_FAILFAST_MASK; |
1460 | 1475 | ||
1461 | req->errors = 0; | 1476 | req->errors = 0; |
1462 | req->__sector = bio->bi_sector; | 1477 | req->__sector = bio->bi_iter.bi_sector; |
1463 | req->ioprio = bio_prio(bio); | 1478 | req->ioprio = bio_prio(bio); |
1464 | blk_rq_bio_prep(req->q, req, bio); | 1479 | blk_rq_bio_prep(req->q, req, bio); |
1465 | } | 1480 | } |
@@ -1583,12 +1598,12 @@ static inline void blk_partition_remap(struct bio *bio) | |||
1583 | if (bio_sectors(bio) && bdev != bdev->bd_contains) { | 1598 | if (bio_sectors(bio) && bdev != bdev->bd_contains) { |
1584 | struct hd_struct *p = bdev->bd_part; | 1599 | struct hd_struct *p = bdev->bd_part; |
1585 | 1600 | ||
1586 | bio->bi_sector += p->start_sect; | 1601 | bio->bi_iter.bi_sector += p->start_sect; |
1587 | bio->bi_bdev = bdev->bd_contains; | 1602 | bio->bi_bdev = bdev->bd_contains; |
1588 | 1603 | ||
1589 | trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, | 1604 | trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, |
1590 | bdev->bd_dev, | 1605 | bdev->bd_dev, |
1591 | bio->bi_sector - p->start_sect); | 1606 | bio->bi_iter.bi_sector - p->start_sect); |
1592 | } | 1607 | } |
1593 | } | 1608 | } |
1594 | 1609 | ||
@@ -1654,7 +1669,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) | |||
1654 | /* Test device or partition size, when known. */ | 1669 | /* Test device or partition size, when known. */ |
1655 | maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; | 1670 | maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; |
1656 | if (maxsector) { | 1671 | if (maxsector) { |
1657 | sector_t sector = bio->bi_sector; | 1672 | sector_t sector = bio->bi_iter.bi_sector; |
1658 | 1673 | ||
1659 | if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { | 1674 | if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { |
1660 | /* | 1675 | /* |
@@ -1690,7 +1705,7 @@ generic_make_request_checks(struct bio *bio) | |||
1690 | "generic_make_request: Trying to access " | 1705 | "generic_make_request: Trying to access " |
1691 | "nonexistent block-device %s (%Lu)\n", | 1706 | "nonexistent block-device %s (%Lu)\n", |
1692 | bdevname(bio->bi_bdev, b), | 1707 | bdevname(bio->bi_bdev, b), |
1693 | (long long) bio->bi_sector); | 1708 | (long long) bio->bi_iter.bi_sector); |
1694 | goto end_io; | 1709 | goto end_io; |
1695 | } | 1710 | } |
1696 | 1711 | ||
@@ -1704,9 +1719,9 @@ generic_make_request_checks(struct bio *bio) | |||
1704 | } | 1719 | } |
1705 | 1720 | ||
1706 | part = bio->bi_bdev->bd_part; | 1721 | part = bio->bi_bdev->bd_part; |
1707 | if (should_fail_request(part, bio->bi_size) || | 1722 | if (should_fail_request(part, bio->bi_iter.bi_size) || |
1708 | should_fail_request(&part_to_disk(part)->part0, | 1723 | should_fail_request(&part_to_disk(part)->part0, |
1709 | bio->bi_size)) | 1724 | bio->bi_iter.bi_size)) |
1710 | goto end_io; | 1725 | goto end_io; |
1711 | 1726 | ||
1712 | /* | 1727 | /* |
@@ -1865,7 +1880,7 @@ void submit_bio(int rw, struct bio *bio) | |||
1865 | if (rw & WRITE) { | 1880 | if (rw & WRITE) { |
1866 | count_vm_events(PGPGOUT, count); | 1881 | count_vm_events(PGPGOUT, count); |
1867 | } else { | 1882 | } else { |
1868 | task_io_account_read(bio->bi_size); | 1883 | task_io_account_read(bio->bi_iter.bi_size); |
1869 | count_vm_events(PGPGIN, count); | 1884 | count_vm_events(PGPGIN, count); |
1870 | } | 1885 | } |
1871 | 1886 | ||
@@ -1874,7 +1889,7 @@ void submit_bio(int rw, struct bio *bio) | |||
1874 | printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", | 1889 | printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", |
1875 | current->comm, task_pid_nr(current), | 1890 | current->comm, task_pid_nr(current), |
1876 | (rw & WRITE) ? "WRITE" : "READ", | 1891 | (rw & WRITE) ? "WRITE" : "READ", |
1877 | (unsigned long long)bio->bi_sector, | 1892 | (unsigned long long)bio->bi_iter.bi_sector, |
1878 | bdevname(bio->bi_bdev, b), | 1893 | bdevname(bio->bi_bdev, b), |
1879 | count); | 1894 | count); |
1880 | } | 1895 | } |
@@ -2007,7 +2022,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq) | |||
2007 | for (bio = rq->bio; bio; bio = bio->bi_next) { | 2022 | for (bio = rq->bio; bio; bio = bio->bi_next) { |
2008 | if ((bio->bi_rw & ff) != ff) | 2023 | if ((bio->bi_rw & ff) != ff) |
2009 | break; | 2024 | break; |
2010 | bytes += bio->bi_size; | 2025 | bytes += bio->bi_iter.bi_size; |
2011 | } | 2026 | } |
2012 | 2027 | ||
2013 | /* this could lead to infinite loop */ | 2028 | /* this could lead to infinite loop */ |
@@ -2378,9 +2393,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) | |||
2378 | total_bytes = 0; | 2393 | total_bytes = 0; |
2379 | while (req->bio) { | 2394 | while (req->bio) { |
2380 | struct bio *bio = req->bio; | 2395 | struct bio *bio = req->bio; |
2381 | unsigned bio_bytes = min(bio->bi_size, nr_bytes); | 2396 | unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); |
2382 | 2397 | ||
2383 | if (bio_bytes == bio->bi_size) | 2398 | if (bio_bytes == bio->bi_iter.bi_size) |
2384 | req->bio = bio->bi_next; | 2399 | req->bio = bio->bi_next; |
2385 | 2400 | ||
2386 | req_bio_endio(req, bio, bio_bytes, error); | 2401 | req_bio_endio(req, bio, bio_bytes, error); |
@@ -2728,7 +2743,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | |||
2728 | rq->nr_phys_segments = bio_phys_segments(q, bio); | 2743 | rq->nr_phys_segments = bio_phys_segments(q, bio); |
2729 | rq->buffer = bio_data(bio); | 2744 | rq->buffer = bio_data(bio); |
2730 | } | 2745 | } |
2731 | rq->__data_len = bio->bi_size; | 2746 | rq->__data_len = bio->bi_iter.bi_size; |
2732 | rq->bio = rq->biotail = bio; | 2747 | rq->bio = rq->biotail = bio; |
2733 | 2748 | ||
2734 | if (bio->bi_bdev) | 2749 | if (bio->bi_bdev) |
@@ -2746,10 +2761,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | |||
2746 | void rq_flush_dcache_pages(struct request *rq) | 2761 | void rq_flush_dcache_pages(struct request *rq) |
2747 | { | 2762 | { |
2748 | struct req_iterator iter; | 2763 | struct req_iterator iter; |
2749 | struct bio_vec *bvec; | 2764 | struct bio_vec bvec; |
2750 | 2765 | ||
2751 | rq_for_each_segment(bvec, rq, iter) | 2766 | rq_for_each_segment(bvec, rq, iter) |
2752 | flush_dcache_page(bvec->bv_page); | 2767 | flush_dcache_page(bvec.bv_page); |
2753 | } | 2768 | } |
2754 | EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); | 2769 | EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); |
2755 | #endif | 2770 | #endif |
diff --git a/block/blk-exec.c b/block/blk-exec.c index c3edf9dff566..bbfc072a79c2 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c | |||
@@ -60,6 +60,10 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, | |||
60 | rq->rq_disk = bd_disk; | 60 | rq->rq_disk = bd_disk; |
61 | rq->end_io = done; | 61 | rq->end_io = done; |
62 | 62 | ||
63 | /* | ||
64 | * don't check dying flag for MQ because the request won't | ||
64 | * be reused after dying flag is set | ||
66 | */ | ||
63 | if (q->mq_ops) { | 67 | if (q->mq_ops) { |
64 | blk_mq_insert_request(q, rq, true); | 68 | blk_mq_insert_request(q, rq, true); |
65 | return; | 69 | return; |
diff --git a/block/blk-flush.c b/block/blk-flush.c index fb6f3c0ffa49..9288aaf35c21 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c | |||
@@ -548,7 +548,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, | |||
548 | * copied from blk_rq_pos(rq). | 548 | * copied from blk_rq_pos(rq). |
549 | */ | 549 | */ |
550 | if (error_sector) | 550 | if (error_sector) |
551 | *error_sector = bio->bi_sector; | 551 | *error_sector = bio->bi_iter.bi_sector; |
552 | 552 | ||
553 | bio_put(bio); | 553 | bio_put(bio); |
554 | return ret; | 554 | return ret; |
diff --git a/block/blk-integrity.c b/block/blk-integrity.c index 03cf7179e8ef..7fbab84399e6 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c | |||
@@ -43,30 +43,32 @@ static const char *bi_unsupported_name = "unsupported"; | |||
43 | */ | 43 | */ |
44 | int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio) | 44 | int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio) |
45 | { | 45 | { |
46 | struct bio_vec *iv, *ivprv = NULL; | 46 | struct bio_vec iv, ivprv = { NULL }; |
47 | unsigned int segments = 0; | 47 | unsigned int segments = 0; |
48 | unsigned int seg_size = 0; | 48 | unsigned int seg_size = 0; |
49 | unsigned int i = 0; | 49 | struct bvec_iter iter; |
50 | int prev = 0; | ||
50 | 51 | ||
51 | bio_for_each_integrity_vec(iv, bio, i) { | 52 | bio_for_each_integrity_vec(iv, bio, iter) { |
52 | 53 | ||
53 | if (ivprv) { | 54 | if (prev) { |
54 | if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv)) | 55 | if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv)) |
55 | goto new_segment; | 56 | goto new_segment; |
56 | 57 | ||
57 | if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv)) | 58 | if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv)) |
58 | goto new_segment; | 59 | goto new_segment; |
59 | 60 | ||
60 | if (seg_size + iv->bv_len > queue_max_segment_size(q)) | 61 | if (seg_size + iv.bv_len > queue_max_segment_size(q)) |
61 | goto new_segment; | 62 | goto new_segment; |
62 | 63 | ||
63 | seg_size += iv->bv_len; | 64 | seg_size += iv.bv_len; |
64 | } else { | 65 | } else { |
65 | new_segment: | 66 | new_segment: |
66 | segments++; | 67 | segments++; |
67 | seg_size = iv->bv_len; | 68 | seg_size = iv.bv_len; |
68 | } | 69 | } |
69 | 70 | ||
71 | prev = 1; | ||
70 | ivprv = iv; | 72 | ivprv = iv; |
71 | } | 73 | } |
72 | 74 | ||
@@ -87,24 +89,25 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg); | |||
87 | int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio, | 89 | int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio, |
88 | struct scatterlist *sglist) | 90 | struct scatterlist *sglist) |
89 | { | 91 | { |
90 | struct bio_vec *iv, *ivprv = NULL; | 92 | struct bio_vec iv, ivprv = { NULL }; |
91 | struct scatterlist *sg = NULL; | 93 | struct scatterlist *sg = NULL; |
92 | unsigned int segments = 0; | 94 | unsigned int segments = 0; |
93 | unsigned int i = 0; | 95 | struct bvec_iter iter; |
96 | int prev = 0; | ||
94 | 97 | ||
95 | bio_for_each_integrity_vec(iv, bio, i) { | 98 | bio_for_each_integrity_vec(iv, bio, iter) { |
96 | 99 | ||
97 | if (ivprv) { | 100 | if (prev) { |
98 | if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv)) | 101 | if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv)) |
99 | goto new_segment; | 102 | goto new_segment; |
100 | 103 | ||
101 | if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv)) | 104 | if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv)) |
102 | goto new_segment; | 105 | goto new_segment; |
103 | 106 | ||
104 | if (sg->length + iv->bv_len > queue_max_segment_size(q)) | 107 | if (sg->length + iv.bv_len > queue_max_segment_size(q)) |
105 | goto new_segment; | 108 | goto new_segment; |
106 | 109 | ||
107 | sg->length += iv->bv_len; | 110 | sg->length += iv.bv_len; |
108 | } else { | 111 | } else { |
109 | new_segment: | 112 | new_segment: |
110 | if (!sg) | 113 | if (!sg) |
@@ -114,10 +117,11 @@ new_segment: | |||
114 | sg = sg_next(sg); | 117 | sg = sg_next(sg); |
115 | } | 118 | } |
116 | 119 | ||
117 | sg_set_page(sg, iv->bv_page, iv->bv_len, iv->bv_offset); | 120 | sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset); |
118 | segments++; | 121 | segments++; |
119 | } | 122 | } |
120 | 123 | ||
124 | prev = 1; | ||
121 | ivprv = iv; | 125 | ivprv = iv; |
122 | } | 126 | } |
123 | 127 | ||
diff --git a/block/blk-lib.c b/block/blk-lib.c index 9b5b561cb928..2da76c999ef3 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c | |||
@@ -108,12 +108,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | |||
108 | req_sects = end_sect - sector; | 108 | req_sects = end_sect - sector; |
109 | } | 109 | } |
110 | 110 | ||
111 | bio->bi_sector = sector; | 111 | bio->bi_iter.bi_sector = sector; |
112 | bio->bi_end_io = bio_batch_end_io; | 112 | bio->bi_end_io = bio_batch_end_io; |
113 | bio->bi_bdev = bdev; | 113 | bio->bi_bdev = bdev; |
114 | bio->bi_private = &bb; | 114 | bio->bi_private = &bb; |
115 | 115 | ||
116 | bio->bi_size = req_sects << 9; | 116 | bio->bi_iter.bi_size = req_sects << 9; |
117 | nr_sects -= req_sects; | 117 | nr_sects -= req_sects; |
118 | sector = end_sect; | 118 | sector = end_sect; |
119 | 119 | ||
@@ -174,7 +174,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, | |||
174 | break; | 174 | break; |
175 | } | 175 | } |
176 | 176 | ||
177 | bio->bi_sector = sector; | 177 | bio->bi_iter.bi_sector = sector; |
178 | bio->bi_end_io = bio_batch_end_io; | 178 | bio->bi_end_io = bio_batch_end_io; |
179 | bio->bi_bdev = bdev; | 179 | bio->bi_bdev = bdev; |
180 | bio->bi_private = &bb; | 180 | bio->bi_private = &bb; |
@@ -184,11 +184,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, | |||
184 | bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev); | 184 | bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev); |
185 | 185 | ||
186 | if (nr_sects > max_write_same_sectors) { | 186 | if (nr_sects > max_write_same_sectors) { |
187 | bio->bi_size = max_write_same_sectors << 9; | 187 | bio->bi_iter.bi_size = max_write_same_sectors << 9; |
188 | nr_sects -= max_write_same_sectors; | 188 | nr_sects -= max_write_same_sectors; |
189 | sector += max_write_same_sectors; | 189 | sector += max_write_same_sectors; |
190 | } else { | 190 | } else { |
191 | bio->bi_size = nr_sects << 9; | 191 | bio->bi_iter.bi_size = nr_sects << 9; |
192 | nr_sects = 0; | 192 | nr_sects = 0; |
193 | } | 193 | } |
194 | 194 | ||
@@ -240,7 +240,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | |||
240 | break; | 240 | break; |
241 | } | 241 | } |
242 | 242 | ||
243 | bio->bi_sector = sector; | 243 | bio->bi_iter.bi_sector = sector; |
244 | bio->bi_bdev = bdev; | 244 | bio->bi_bdev = bdev; |
245 | bio->bi_end_io = bio_batch_end_io; | 245 | bio->bi_end_io = bio_batch_end_io; |
246 | bio->bi_private = &bb; | 246 | bio->bi_private = &bb; |
diff --git a/block/blk-map.c b/block/blk-map.c index 623e1cd4cffe..ae4ae1047fd9 100644 --- a/block/blk-map.c +++ b/block/blk-map.c | |||
@@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq, | |||
20 | rq->biotail->bi_next = bio; | 20 | rq->biotail->bi_next = bio; |
21 | rq->biotail = bio; | 21 | rq->biotail = bio; |
22 | 22 | ||
23 | rq->__data_len += bio->bi_size; | 23 | rq->__data_len += bio->bi_iter.bi_size; |
24 | } | 24 | } |
25 | return 0; | 25 | return 0; |
26 | } | 26 | } |
@@ -76,7 +76,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq, | |||
76 | 76 | ||
77 | ret = blk_rq_append_bio(q, rq, bio); | 77 | ret = blk_rq_append_bio(q, rq, bio); |
78 | if (!ret) | 78 | if (!ret) |
79 | return bio->bi_size; | 79 | return bio->bi_iter.bi_size; |
80 | 80 | ||
81 | /* if it was bounced we must call the end io function */ | 81 | /* if it was bounced we must call the end io function */ |
82 | bio_endio(bio, 0); | 82 | bio_endio(bio, 0); |
@@ -220,7 +220,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, | |||
220 | if (IS_ERR(bio)) | 220 | if (IS_ERR(bio)) |
221 | return PTR_ERR(bio); | 221 | return PTR_ERR(bio); |
222 | 222 | ||
223 | if (bio->bi_size != len) { | 223 | if (bio->bi_iter.bi_size != len) { |
224 | /* | 224 | /* |
225 | * Grab an extra reference to this bio, as bio_unmap_user() | 225 | * Grab an extra reference to this bio, as bio_unmap_user() |
226 | * expects to be able to drop it twice as it happens on the | 226 | * expects to be able to drop it twice as it happens on the |
diff --git a/block/blk-merge.c b/block/blk-merge.c index 1ffc58977835..8f8adaa95466 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -12,10 +12,11 @@ | |||
12 | static unsigned int __blk_recalc_rq_segments(struct request_queue *q, | 12 | static unsigned int __blk_recalc_rq_segments(struct request_queue *q, |
13 | struct bio *bio) | 13 | struct bio *bio) |
14 | { | 14 | { |
15 | struct bio_vec *bv, *bvprv = NULL; | 15 | struct bio_vec bv, bvprv = { NULL }; |
16 | int cluster, i, high, highprv = 1; | 16 | int cluster, high, highprv = 1; |
17 | unsigned int seg_size, nr_phys_segs; | 17 | unsigned int seg_size, nr_phys_segs; |
18 | struct bio *fbio, *bbio; | 18 | struct bio *fbio, *bbio; |
19 | struct bvec_iter iter; | ||
19 | 20 | ||
20 | if (!bio) | 21 | if (!bio) |
21 | return 0; | 22 | return 0; |
@@ -25,25 +26,23 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, | |||
25 | seg_size = 0; | 26 | seg_size = 0; |
26 | nr_phys_segs = 0; | 27 | nr_phys_segs = 0; |
27 | for_each_bio(bio) { | 28 | for_each_bio(bio) { |
28 | bio_for_each_segment(bv, bio, i) { | 29 | bio_for_each_segment(bv, bio, iter) { |
29 | /* | 30 | /* |
30 | * the trick here is making sure that a high page is | 31 | * the trick here is making sure that a high page is |
31 | * never considered part of another segment, since that | 32 | * never considered part of another segment, since that |
32 | * might change with the bounce page. | 33 | * might change with the bounce page. |
33 | */ | 34 | */ |
34 | high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q); | 35 | high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q); |
35 | if (high || highprv) | 36 | if (!high && !highprv && cluster) { |
36 | goto new_segment; | 37 | if (seg_size + bv.bv_len |
37 | if (cluster) { | ||
38 | if (seg_size + bv->bv_len | ||
39 | > queue_max_segment_size(q)) | 38 | > queue_max_segment_size(q)) |
40 | goto new_segment; | 39 | goto new_segment; |
41 | if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv)) | 40 | if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv)) |
42 | goto new_segment; | 41 | goto new_segment; |
43 | if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv)) | 42 | if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv)) |
44 | goto new_segment; | 43 | goto new_segment; |
45 | 44 | ||
46 | seg_size += bv->bv_len; | 45 | seg_size += bv.bv_len; |
47 | bvprv = bv; | 46 | bvprv = bv; |
48 | continue; | 47 | continue; |
49 | } | 48 | } |
@@ -54,7 +53,7 @@ new_segment: | |||
54 | 53 | ||
55 | nr_phys_segs++; | 54 | nr_phys_segs++; |
56 | bvprv = bv; | 55 | bvprv = bv; |
57 | seg_size = bv->bv_len; | 56 | seg_size = bv.bv_len; |
58 | highprv = high; | 57 | highprv = high; |
59 | } | 58 | } |
60 | bbio = bio; | 59 | bbio = bio; |
@@ -87,6 +86,9 @@ EXPORT_SYMBOL(blk_recount_segments); | |||
87 | static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, | 86 | static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, |
88 | struct bio *nxt) | 87 | struct bio *nxt) |
89 | { | 88 | { |
89 | struct bio_vec end_bv = { NULL }, nxt_bv; | ||
90 | struct bvec_iter iter; | ||
91 | |||
90 | if (!blk_queue_cluster(q)) | 92 | if (!blk_queue_cluster(q)) |
91 | return 0; | 93 | return 0; |
92 | 94 | ||
@@ -97,34 +99,40 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, | |||
97 | if (!bio_has_data(bio)) | 99 | if (!bio_has_data(bio)) |
98 | return 1; | 100 | return 1; |
99 | 101 | ||
100 | if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt))) | 102 | bio_for_each_segment(end_bv, bio, iter) |
103 | if (end_bv.bv_len == iter.bi_size) | ||
104 | break; | ||
105 | |||
106 | nxt_bv = bio_iovec(nxt); | ||
107 | |||
108 | if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv)) | ||
101 | return 0; | 109 | return 0; |
102 | 110 | ||
103 | /* | 111 | /* |
104 | * bio and nxt are contiguous in memory; check if the queue allows | 112 | * bio and nxt are contiguous in memory; check if the queue allows |
105 | * these two to be merged into one | 113 | * these two to be merged into one |
106 | */ | 114 | */ |
107 | if (BIO_SEG_BOUNDARY(q, bio, nxt)) | 115 | if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv)) |
108 | return 1; | 116 | return 1; |
109 | 117 | ||
110 | return 0; | 118 | return 0; |
111 | } | 119 | } |
112 | 120 | ||
113 | static void | 121 | static inline void |
114 | __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, | 122 | __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, |
115 | struct scatterlist *sglist, struct bio_vec **bvprv, | 123 | struct scatterlist *sglist, struct bio_vec *bvprv, |
116 | struct scatterlist **sg, int *nsegs, int *cluster) | 124 | struct scatterlist **sg, int *nsegs, int *cluster) |
117 | { | 125 | { |
118 | 126 | ||
119 | int nbytes = bvec->bv_len; | 127 | int nbytes = bvec->bv_len; |
120 | 128 | ||
121 | if (*bvprv && *cluster) { | 129 | if (*sg && *cluster) { |
122 | if ((*sg)->length + nbytes > queue_max_segment_size(q)) | 130 | if ((*sg)->length + nbytes > queue_max_segment_size(q)) |
123 | goto new_segment; | 131 | goto new_segment; |
124 | 132 | ||
125 | if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec)) | 133 | if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) |
126 | goto new_segment; | 134 | goto new_segment; |
127 | if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec)) | 135 | if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) |
128 | goto new_segment; | 136 | goto new_segment; |
129 | 137 | ||
130 | (*sg)->length += nbytes; | 138 | (*sg)->length += nbytes; |
@@ -150,7 +158,7 @@ new_segment: | |||
150 | sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); | 158 | sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); |
151 | (*nsegs)++; | 159 | (*nsegs)++; |
152 | } | 160 | } |
153 | *bvprv = bvec; | 161 | *bvprv = *bvec; |
154 | } | 162 | } |
155 | 163 | ||
156 | /* | 164 | /* |
@@ -160,7 +168,7 @@ new_segment: | |||
160 | int blk_rq_map_sg(struct request_queue *q, struct request *rq, | 168 | int blk_rq_map_sg(struct request_queue *q, struct request *rq, |
161 | struct scatterlist *sglist) | 169 | struct scatterlist *sglist) |
162 | { | 170 | { |
163 | struct bio_vec *bvec, *bvprv; | 171 | struct bio_vec bvec, bvprv = { NULL }; |
164 | struct req_iterator iter; | 172 | struct req_iterator iter; |
165 | struct scatterlist *sg; | 173 | struct scatterlist *sg; |
166 | int nsegs, cluster; | 174 | int nsegs, cluster; |
@@ -171,10 +179,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, | |||
171 | /* | 179 | /* |
172 | * for each bio in rq | 180 | * for each bio in rq |
173 | */ | 181 | */ |
174 | bvprv = NULL; | ||
175 | sg = NULL; | 182 | sg = NULL; |
176 | rq_for_each_segment(bvec, rq, iter) { | 183 | rq_for_each_segment(bvec, rq, iter) { |
177 | __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, | 184 | __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, |
178 | &nsegs, &cluster); | 185 | &nsegs, &cluster); |
179 | } /* segments in rq */ | 186 | } /* segments in rq */ |
180 | 187 | ||
@@ -223,18 +230,17 @@ EXPORT_SYMBOL(blk_rq_map_sg); | |||
223 | int blk_bio_map_sg(struct request_queue *q, struct bio *bio, | 230 | int blk_bio_map_sg(struct request_queue *q, struct bio *bio, |
224 | struct scatterlist *sglist) | 231 | struct scatterlist *sglist) |
225 | { | 232 | { |
226 | struct bio_vec *bvec, *bvprv; | 233 | struct bio_vec bvec, bvprv = { NULL }; |
227 | struct scatterlist *sg; | 234 | struct scatterlist *sg; |
228 | int nsegs, cluster; | 235 | int nsegs, cluster; |
229 | unsigned long i; | 236 | struct bvec_iter iter; |
230 | 237 | ||
231 | nsegs = 0; | 238 | nsegs = 0; |
232 | cluster = blk_queue_cluster(q); | 239 | cluster = blk_queue_cluster(q); |
233 | 240 | ||
234 | bvprv = NULL; | ||
235 | sg = NULL; | 241 | sg = NULL; |
236 | bio_for_each_segment(bvec, bio, i) { | 242 | bio_for_each_segment(bvec, bio, iter) { |
237 | __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, | 243 | __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, |
238 | &nsegs, &cluster); | 244 | &nsegs, &cluster); |
239 | } /* segments in bio */ | 245 | } /* segments in bio */ |
240 | 246 | ||
@@ -543,9 +549,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) | |||
543 | 549 | ||
544 | int blk_try_merge(struct request *rq, struct bio *bio) | 550 | int blk_try_merge(struct request *rq, struct bio *bio) |
545 | { | 551 | { |
546 | if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector) | 552 | if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) |
547 | return ELEVATOR_BACK_MERGE; | 553 | return ELEVATOR_BACK_MERGE; |
548 | else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector) | 554 | else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) |
549 | return ELEVATOR_FRONT_MERGE; | 555 | return ELEVATOR_FRONT_MERGE; |
550 | return ELEVATOR_NO_MERGE; | 556 | return ELEVATOR_NO_MERGE; |
551 | } | 557 | } |
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c index 0045ace9bdf0..3146befb56aa 100644 --- a/block/blk-mq-cpu.c +++ b/block/blk-mq-cpu.c | |||
@@ -28,36 +28,6 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self, | |||
28 | return NOTIFY_OK; | 28 | return NOTIFY_OK; |
29 | } | 29 | } |
30 | 30 | ||
31 | static void blk_mq_cpu_notify(void *data, unsigned long action, | ||
32 | unsigned int cpu) | ||
33 | { | ||
34 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { | ||
35 | /* | ||
36 | * If the CPU goes away, ensure that we run any pending | ||
37 | * completions. | ||
38 | */ | ||
39 | struct llist_node *node; | ||
40 | struct request *rq; | ||
41 | |||
42 | local_irq_disable(); | ||
43 | |||
44 | node = llist_del_all(&per_cpu(ipi_lists, cpu)); | ||
45 | while (node) { | ||
46 | struct llist_node *next = node->next; | ||
47 | |||
48 | rq = llist_entry(node, struct request, ll_list); | ||
49 | __blk_mq_end_io(rq, rq->errors); | ||
50 | node = next; | ||
51 | } | ||
52 | |||
53 | local_irq_enable(); | ||
54 | } | ||
55 | } | ||
56 | |||
57 | static struct notifier_block __cpuinitdata blk_mq_main_cpu_notifier = { | ||
58 | .notifier_call = blk_mq_main_cpu_notify, | ||
59 | }; | ||
60 | |||
61 | void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier) | 31 | void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier) |
62 | { | 32 | { |
63 | BUG_ON(!notifier->notify); | 33 | BUG_ON(!notifier->notify); |
@@ -82,12 +52,7 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, | |||
82 | notifier->data = data; | 52 | notifier->data = data; |
83 | } | 53 | } |
84 | 54 | ||
85 | static struct blk_mq_cpu_notifier __cpuinitdata cpu_notifier = { | ||
86 | .notify = blk_mq_cpu_notify, | ||
87 | }; | ||
88 | |||
89 | void __init blk_mq_cpu_init(void) | 55 | void __init blk_mq_cpu_init(void) |
90 | { | 56 | { |
91 | register_hotcpu_notifier(&blk_mq_main_cpu_notifier); | 57 | hotcpu_notifier(blk_mq_main_cpu_notify, 0); |
92 | blk_mq_register_cpu_notifier(&cpu_notifier); | ||
93 | } | 58 | } |
diff --git a/block/blk-mq.c b/block/blk-mq.c index c79126e11030..57039fcd9c93 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -27,8 +27,6 @@ static LIST_HEAD(all_q_list); | |||
27 | 27 | ||
28 | static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx); | 28 | static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx); |
29 | 29 | ||
30 | DEFINE_PER_CPU(struct llist_head, ipi_lists); | ||
31 | |||
32 | static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, | 30 | static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, |
33 | unsigned int cpu) | 31 | unsigned int cpu) |
34 | { | 32 | { |
@@ -106,10 +104,13 @@ static int blk_mq_queue_enter(struct request_queue *q) | |||
106 | 104 | ||
107 | spin_lock_irq(q->queue_lock); | 105 | spin_lock_irq(q->queue_lock); |
108 | ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq, | 106 | ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq, |
109 | !blk_queue_bypass(q), *q->queue_lock); | 107 | !blk_queue_bypass(q) || blk_queue_dying(q), |
108 | *q->queue_lock); | ||
110 | /* inc usage with lock hold to avoid freeze_queue runs here */ | 109 | /* inc usage with lock hold to avoid freeze_queue runs here */ |
111 | if (!ret) | 110 | if (!ret && !blk_queue_dying(q)) |
112 | __percpu_counter_add(&q->mq_usage_counter, 1, 1000000); | 111 | __percpu_counter_add(&q->mq_usage_counter, 1, 1000000); |
112 | else if (blk_queue_dying(q)) | ||
113 | ret = -ENODEV; | ||
113 | spin_unlock_irq(q->queue_lock); | 114 | spin_unlock_irq(q->queue_lock); |
114 | 115 | ||
115 | return ret; | 116 | return ret; |
@@ -120,6 +121,22 @@ static void blk_mq_queue_exit(struct request_queue *q) | |||
120 | __percpu_counter_add(&q->mq_usage_counter, -1, 1000000); | 121 | __percpu_counter_add(&q->mq_usage_counter, -1, 1000000); |
121 | } | 122 | } |
122 | 123 | ||
124 | static void __blk_mq_drain_queue(struct request_queue *q) | ||
125 | { | ||
126 | while (true) { | ||
127 | s64 count; | ||
128 | |||
129 | spin_lock_irq(q->queue_lock); | ||
130 | count = percpu_counter_sum(&q->mq_usage_counter); | ||
131 | spin_unlock_irq(q->queue_lock); | ||
132 | |||
133 | if (count == 0) | ||
134 | break; | ||
135 | blk_mq_run_queues(q, false); | ||
136 | msleep(10); | ||
137 | } | ||
138 | } | ||
139 | |||
123 | /* | 140 | /* |
124 | * Guarantee no request is in use, so we can change any data structure of | 141 | * Guarantee no request is in use, so we can change any data structure of |
125 | * the queue afterward. | 142 | * the queue afterward. |
@@ -133,21 +150,13 @@ static void blk_mq_freeze_queue(struct request_queue *q) | |||
133 | queue_flag_set(QUEUE_FLAG_BYPASS, q); | 150 | queue_flag_set(QUEUE_FLAG_BYPASS, q); |
134 | spin_unlock_irq(q->queue_lock); | 151 | spin_unlock_irq(q->queue_lock); |
135 | 152 | ||
136 | if (!drain) | 153 | if (drain) |
137 | return; | 154 | __blk_mq_drain_queue(q); |
138 | 155 | } | |
139 | while (true) { | ||
140 | s64 count; | ||
141 | |||
142 | spin_lock_irq(q->queue_lock); | ||
143 | count = percpu_counter_sum(&q->mq_usage_counter); | ||
144 | spin_unlock_irq(q->queue_lock); | ||
145 | 156 | ||
146 | if (count == 0) | 157 | void blk_mq_drain_queue(struct request_queue *q) |
147 | break; | 158 | { |
148 | blk_mq_run_queues(q, false); | 159 | __blk_mq_drain_queue(q); |
149 | msleep(10); | ||
150 | } | ||
151 | } | 160 | } |
152 | 161 | ||
153 | static void blk_mq_unfreeze_queue(struct request_queue *q) | 162 | static void blk_mq_unfreeze_queue(struct request_queue *q) |
@@ -179,6 +188,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, | |||
179 | 188 | ||
180 | rq->mq_ctx = ctx; | 189 | rq->mq_ctx = ctx; |
181 | rq->cmd_flags = rw_flags; | 190 | rq->cmd_flags = rw_flags; |
191 | rq->start_time = jiffies; | ||
192 | set_start_time_ns(rq); | ||
182 | ctx->rq_dispatched[rw_is_sync(rw_flags)]++; | 193 | ctx->rq_dispatched[rw_is_sync(rw_flags)]++; |
183 | } | 194 | } |
184 | 195 | ||
@@ -305,7 +316,7 @@ void blk_mq_complete_request(struct request *rq, int error) | |||
305 | struct bio *next = bio->bi_next; | 316 | struct bio *next = bio->bi_next; |
306 | 317 | ||
307 | bio->bi_next = NULL; | 318 | bio->bi_next = NULL; |
308 | bytes += bio->bi_size; | 319 | bytes += bio->bi_iter.bi_size; |
309 | blk_mq_bio_endio(rq, bio, error); | 320 | blk_mq_bio_endio(rq, bio, error); |
310 | bio = next; | 321 | bio = next; |
311 | } | 322 | } |
@@ -326,55 +337,12 @@ void __blk_mq_end_io(struct request *rq, int error) | |||
326 | blk_mq_complete_request(rq, error); | 337 | blk_mq_complete_request(rq, error); |
327 | } | 338 | } |
328 | 339 | ||
329 | #if defined(CONFIG_SMP) | 340 | static void blk_mq_end_io_remote(void *data) |
330 | |||
331 | /* | ||
332 | * Called with interrupts disabled. | ||
333 | */ | ||
334 | static void ipi_end_io(void *data) | ||
335 | { | ||
336 | struct llist_head *list = &per_cpu(ipi_lists, smp_processor_id()); | ||
337 | struct llist_node *entry, *next; | ||
338 | struct request *rq; | ||
339 | |||
340 | entry = llist_del_all(list); | ||
341 | |||
342 | while (entry) { | ||
343 | next = entry->next; | ||
344 | rq = llist_entry(entry, struct request, ll_list); | ||
345 | __blk_mq_end_io(rq, rq->errors); | ||
346 | entry = next; | ||
347 | } | ||
348 | } | ||
349 | |||
350 | static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu, | ||
351 | struct request *rq, const int error) | ||
352 | { | 341 | { |
353 | struct call_single_data *data = &rq->csd; | 342 | struct request *rq = data; |
354 | |||
355 | rq->errors = error; | ||
356 | rq->ll_list.next = NULL; | ||
357 | 343 | ||
358 | /* | 344 | __blk_mq_end_io(rq, rq->errors); |
359 | * If the list is non-empty, an existing IPI must already | ||
360 | * be "in flight". If that is the case, we need not schedule | ||
361 | * a new one. | ||
362 | */ | ||
363 | if (llist_add(&rq->ll_list, &per_cpu(ipi_lists, ctx->cpu))) { | ||
364 | data->func = ipi_end_io; | ||
365 | data->flags = 0; | ||
366 | __smp_call_function_single(ctx->cpu, data, 0); | ||
367 | } | ||
368 | |||
369 | return true; | ||
370 | } | ||
371 | #else /* CONFIG_SMP */ | ||
372 | static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu, | ||
373 | struct request *rq, const int error) | ||
374 | { | ||
375 | return false; | ||
376 | } | 345 | } |
377 | #endif | ||
378 | 346 | ||
379 | /* | 347 | /* |
380 | * End IO on this request on a multiqueue enabled driver. We'll either do | 348 | * End IO on this request on a multiqueue enabled driver. We'll either do |
@@ -390,11 +358,15 @@ void blk_mq_end_io(struct request *rq, int error) | |||
390 | return __blk_mq_end_io(rq, error); | 358 | return __blk_mq_end_io(rq, error); |
391 | 359 | ||
392 | cpu = get_cpu(); | 360 | cpu = get_cpu(); |
393 | 361 | if (cpu != ctx->cpu && cpu_online(ctx->cpu)) { | |
394 | if (cpu == ctx->cpu || !cpu_online(ctx->cpu) || | 362 | rq->errors = error; |
395 | !ipi_remote_cpu(ctx, cpu, rq, error)) | 363 | rq->csd.func = blk_mq_end_io_remote; |
364 | rq->csd.info = rq; | ||
365 | rq->csd.flags = 0; | ||
366 | __smp_call_function_single(ctx->cpu, &rq->csd, 0); | ||
367 | } else { | ||
396 | __blk_mq_end_io(rq, error); | 368 | __blk_mq_end_io(rq, error); |
397 | 369 | } | |
398 | put_cpu(); | 370 | put_cpu(); |
399 | } | 371 | } |
400 | EXPORT_SYMBOL(blk_mq_end_io); | 372 | EXPORT_SYMBOL(blk_mq_end_io); |
@@ -1091,8 +1063,8 @@ static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx) | |||
1091 | struct page *page; | 1063 | struct page *page; |
1092 | 1064 | ||
1093 | while (!list_empty(&hctx->page_list)) { | 1065 | while (!list_empty(&hctx->page_list)) { |
1094 | page = list_first_entry(&hctx->page_list, struct page, list); | 1066 | page = list_first_entry(&hctx->page_list, struct page, lru); |
1095 | list_del_init(&page->list); | 1067 | list_del_init(&page->lru); |
1096 | __free_pages(page, page->private); | 1068 | __free_pages(page, page->private); |
1097 | } | 1069 | } |
1098 | 1070 | ||
@@ -1156,7 +1128,7 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, | |||
1156 | break; | 1128 | break; |
1157 | 1129 | ||
1158 | page->private = this_order; | 1130 | page->private = this_order; |
1159 | list_add_tail(&page->list, &hctx->page_list); | 1131 | list_add_tail(&page->lru, &hctx->page_list); |
1160 | 1132 | ||
1161 | p = page_address(page); | 1133 | p = page_address(page); |
1162 | entries_per_page = order_to_size(this_order) / rq_size; | 1134 | entries_per_page = order_to_size(this_order) / rq_size; |
@@ -1429,7 +1401,6 @@ void blk_mq_free_queue(struct request_queue *q) | |||
1429 | int i; | 1401 | int i; |
1430 | 1402 | ||
1431 | queue_for_each_hw_ctx(q, hctx, i) { | 1403 | queue_for_each_hw_ctx(q, hctx, i) { |
1432 | cancel_delayed_work_sync(&hctx->delayed_work); | ||
1433 | kfree(hctx->ctx_map); | 1404 | kfree(hctx->ctx_map); |
1434 | kfree(hctx->ctxs); | 1405 | kfree(hctx->ctxs); |
1435 | blk_mq_free_rq_map(hctx); | 1406 | blk_mq_free_rq_map(hctx); |
@@ -1451,7 +1422,6 @@ void blk_mq_free_queue(struct request_queue *q) | |||
1451 | list_del_init(&q->all_q_node); | 1422 | list_del_init(&q->all_q_node); |
1452 | mutex_unlock(&all_q_mutex); | 1423 | mutex_unlock(&all_q_mutex); |
1453 | } | 1424 | } |
1454 | EXPORT_SYMBOL(blk_mq_free_queue); | ||
1455 | 1425 | ||
1456 | /* Basically redo blk_mq_init_queue with queue frozen */ | 1426 | /* Basically redo blk_mq_init_queue with queue frozen */ |
1457 | static void blk_mq_queue_reinit(struct request_queue *q) | 1427 | static void blk_mq_queue_reinit(struct request_queue *q) |
@@ -1495,11 +1465,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, | |||
1495 | 1465 | ||
1496 | static int __init blk_mq_init(void) | 1466 | static int __init blk_mq_init(void) |
1497 | { | 1467 | { |
1498 | unsigned int i; | ||
1499 | |||
1500 | for_each_possible_cpu(i) | ||
1501 | init_llist_head(&per_cpu(ipi_lists, i)); | ||
1502 | |||
1503 | blk_mq_cpu_init(); | 1468 | blk_mq_cpu_init(); |
1504 | 1469 | ||
1505 | /* Must be called after percpu_counter_hotcpu_callback() */ | 1470 | /* Must be called after percpu_counter_hotcpu_callback() */ |
diff --git a/block/blk-mq.h b/block/blk-mq.h index 52bf1f96a2c2..5c3917984b00 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h | |||
@@ -27,6 +27,8 @@ void blk_mq_complete_request(struct request *rq, int error); | |||
27 | void blk_mq_run_request(struct request *rq, bool run_queue, bool async); | 27 | void blk_mq_run_request(struct request *rq, bool run_queue, bool async); |
28 | void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); | 28 | void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); |
29 | void blk_mq_init_flush(struct request_queue *q); | 29 | void blk_mq_init_flush(struct request_queue *q); |
30 | void blk_mq_drain_queue(struct request_queue *q); | ||
31 | void blk_mq_free_queue(struct request_queue *q); | ||
30 | 32 | ||
31 | /* | 33 | /* |
32 | * CPU hotplug helpers | 34 | * CPU hotplug helpers |
@@ -38,7 +40,6 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, | |||
38 | void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier); | 40 | void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier); |
39 | void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier); | 41 | void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier); |
40 | void blk_mq_cpu_init(void); | 42 | void blk_mq_cpu_init(void); |
41 | DECLARE_PER_CPU(struct llist_head, ipi_lists); | ||
42 | 43 | ||
43 | /* | 44 | /* |
44 | * CPU -> queue mappings | 45 | * CPU -> queue mappings |
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 97779522472f..8095c4a21fc0 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include "blk.h" | 12 | #include "blk.h" |
13 | #include "blk-cgroup.h" | 13 | #include "blk-cgroup.h" |
14 | #include "blk-mq.h" | ||
14 | 15 | ||
15 | struct queue_sysfs_entry { | 16 | struct queue_sysfs_entry { |
16 | struct attribute attr; | 17 | struct attribute attr; |
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index a760857e6b62..1474c3ab7e72 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -877,14 +877,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, | |||
877 | do_div(tmp, HZ); | 877 | do_div(tmp, HZ); |
878 | bytes_allowed = tmp; | 878 | bytes_allowed = tmp; |
879 | 879 | ||
880 | if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) { | 880 | if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { |
881 | if (wait) | 881 | if (wait) |
882 | *wait = 0; | 882 | *wait = 0; |
883 | return 1; | 883 | return 1; |
884 | } | 884 | } |
885 | 885 | ||
886 | /* Calc approx time to dispatch */ | 886 | /* Calc approx time to dispatch */ |
887 | extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed; | 887 | extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed; |
888 | jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]); | 888 | jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]); |
889 | 889 | ||
890 | if (!jiffy_wait) | 890 | if (!jiffy_wait) |
@@ -987,7 +987,7 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) | |||
987 | bool rw = bio_data_dir(bio); | 987 | bool rw = bio_data_dir(bio); |
988 | 988 | ||
989 | /* Charge the bio to the group */ | 989 | /* Charge the bio to the group */ |
990 | tg->bytes_disp[rw] += bio->bi_size; | 990 | tg->bytes_disp[rw] += bio->bi_iter.bi_size; |
991 | tg->io_disp[rw]++; | 991 | tg->io_disp[rw]++; |
992 | 992 | ||
993 | /* | 993 | /* |
@@ -1003,8 +1003,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) | |||
1003 | */ | 1003 | */ |
1004 | if (!(bio->bi_rw & REQ_THROTTLED)) { | 1004 | if (!(bio->bi_rw & REQ_THROTTLED)) { |
1005 | bio->bi_rw |= REQ_THROTTLED; | 1005 | bio->bi_rw |= REQ_THROTTLED; |
1006 | throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, | 1006 | throtl_update_dispatch_stats(tg_to_blkg(tg), |
1007 | bio->bi_rw); | 1007 | bio->bi_iter.bi_size, bio->bi_rw); |
1008 | } | 1008 | } |
1009 | } | 1009 | } |
1010 | 1010 | ||
@@ -1503,7 +1503,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio) | |||
1503 | if (tg) { | 1503 | if (tg) { |
1504 | if (!tg->has_rules[rw]) { | 1504 | if (!tg->has_rules[rw]) { |
1505 | throtl_update_dispatch_stats(tg_to_blkg(tg), | 1505 | throtl_update_dispatch_stats(tg_to_blkg(tg), |
1506 | bio->bi_size, bio->bi_rw); | 1506 | bio->bi_iter.bi_size, bio->bi_rw); |
1507 | goto out_unlock_rcu; | 1507 | goto out_unlock_rcu; |
1508 | } | 1508 | } |
1509 | } | 1509 | } |
@@ -1559,7 +1559,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio) | |||
1559 | /* out-of-limit, queue to @tg */ | 1559 | /* out-of-limit, queue to @tg */ |
1560 | throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d", | 1560 | throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d", |
1561 | rw == READ ? 'R' : 'W', | 1561 | rw == READ ? 'R' : 'W', |
1562 | tg->bytes_disp[rw], bio->bi_size, tg->bps[rw], | 1562 | tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw], |
1563 | tg->io_disp[rw], tg->iops[rw], | 1563 | tg->io_disp[rw], tg->iops[rw], |
1564 | sq->nr_queued[READ], sq->nr_queued[WRITE]); | 1564 | sq->nr_queued[READ], sq->nr_queued[WRITE]); |
1565 | 1565 | ||
diff --git a/block/cmdline-parser.c b/block/cmdline-parser.c index cc2637f8674e..9dbc67e42a99 100644 --- a/block/cmdline-parser.c +++ b/block/cmdline-parser.c | |||
@@ -4,8 +4,7 @@ | |||
4 | * Written by Cai Zhiyong <caizhiyong@huawei.com> | 4 | * Written by Cai Zhiyong <caizhiyong@huawei.com> |
5 | * | 5 | * |
6 | */ | 6 | */ |
7 | #include <linux/buffer_head.h> | 7 | #include <linux/export.h> |
8 | #include <linux/module.h> | ||
9 | #include <linux/cmdline-parser.h> | 8 | #include <linux/cmdline-parser.h> |
10 | 9 | ||
11 | static int parse_subpart(struct cmdline_subpart **subpart, char *partdef) | 10 | static int parse_subpart(struct cmdline_subpart **subpart, char *partdef) |
@@ -159,6 +158,7 @@ void cmdline_parts_free(struct cmdline_parts **parts) | |||
159 | *parts = next_parts; | 158 | *parts = next_parts; |
160 | } | 159 | } |
161 | } | 160 | } |
161 | EXPORT_SYMBOL(cmdline_parts_free); | ||
162 | 162 | ||
163 | int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline) | 163 | int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline) |
164 | { | 164 | { |
@@ -206,6 +206,7 @@ fail: | |||
206 | cmdline_parts_free(parts); | 206 | cmdline_parts_free(parts); |
207 | goto done; | 207 | goto done; |
208 | } | 208 | } |
209 | EXPORT_SYMBOL(cmdline_parts_parse); | ||
209 | 210 | ||
210 | struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts, | 211 | struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts, |
211 | const char *bdev) | 212 | const char *bdev) |
@@ -214,17 +215,17 @@ struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts, | |||
214 | parts = parts->next_parts; | 215 | parts = parts->next_parts; |
215 | return parts; | 216 | return parts; |
216 | } | 217 | } |
218 | EXPORT_SYMBOL(cmdline_parts_find); | ||
217 | 219 | ||
218 | /* | 220 | /* |
219 | * add_part() | 221 | * add_part() |
220 | * 0 success. | 222 | * 0 success. |
221 | * 1 can not add so many partitions. | 223 | * 1 can not add so many partitions. |
222 | */ | 224 | */ |
223 | void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size, | 225 | int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size, |
224 | int slot, | 226 | int slot, |
225 | int (*add_part)(int, struct cmdline_subpart *, void *), | 227 | int (*add_part)(int, struct cmdline_subpart *, void *), |
226 | void *param) | 228 | void *param) |
227 | |||
228 | { | 229 | { |
229 | sector_t from = 0; | 230 | sector_t from = 0; |
230 | struct cmdline_subpart *subpart; | 231 | struct cmdline_subpart *subpart; |
@@ -247,4 +248,7 @@ void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size, | |||
247 | if (add_part(slot, subpart, param)) | 248 | if (add_part(slot, subpart, param)) |
248 | break; | 249 | break; |
249 | } | 250 | } |
251 | |||
252 | return slot; | ||
250 | } | 253 | } |
254 | EXPORT_SYMBOL(cmdline_parts_set); | ||
diff --git a/block/elevator.c b/block/elevator.c index b7ff2861b6bd..42c45a7d6714 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -440,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) | |||
440 | /* | 440 | /* |
441 | * See if our hash lookup can find a potential backmerge. | 441 | * See if our hash lookup can find a potential backmerge. |
442 | */ | 442 | */ |
443 | __rq = elv_rqhash_find(q, bio->bi_sector); | 443 | __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); |
444 | if (__rq && elv_rq_merge_ok(__rq, bio)) { | 444 | if (__rq && elv_rq_merge_ok(__rq, bio)) { |
445 | *req = __rq; | 445 | *req = __rq; |
446 | return ELEVATOR_BACK_MERGE; | 446 | return ELEVATOR_BACK_MERGE; |
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 625e3e471d65..26487972ac54 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c | |||
@@ -323,12 +323,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk, | |||
323 | 323 | ||
324 | if (hdr->iovec_count) { | 324 | if (hdr->iovec_count) { |
325 | size_t iov_data_len; | 325 | size_t iov_data_len; |
326 | struct iovec *iov; | 326 | struct iovec *iov = NULL; |
327 | 327 | ||
328 | ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count, | 328 | ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count, |
329 | 0, NULL, &iov); | 329 | 0, NULL, &iov); |
330 | if (ret < 0) | 330 | if (ret < 0) { |
331 | kfree(iov); | ||
331 | goto out; | 332 | goto out; |
333 | } | ||
332 | 334 | ||
333 | iov_data_len = ret; | 335 | iov_data_len = ret; |
334 | ret = 0; | 336 | ret = 0; |