author     Linus Torvalds <torvalds@linux-foundation.org>  2014-06-02 12:29:34 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-06-02 12:29:34 -0400
commit     681a2895486243a82547d8c9f53043eb54b53da0 (patch)
tree       464273280aed6db55a99cc0d8614d4393f94fc48 /drivers
parent     6c52486dedbb30a1313da64945dcd686b4579c51 (diff)
parent     ed851860b4552fc8963ecf71eab9f6f7a5c19d74 (diff)
Merge branch 'for-3.16/core' of git://git.kernel.dk/linux-block into next
Pull block core updates from Jens Axboe:
"It's a big(ish) round this time, lots of development effort has gone
into blk-mq in the last 3 months. Generally we're heading to where
3.16 will be a feature complete and performant blk-mq. scsi-mq is
progressing nicely and will hopefully be in 3.17. A nvme port is in
progress, and the Micron pci-e flash driver, mtip32xx, is converted
and will be sent in with the driver pull request for 3.16.
This pull request contains:
- Lots of prep and support patches for scsi-mq have been integrated.
All from Christoph.
- API and code cleanups for blk-mq from Christoph.
- Lots of good corner case and error handling cleanup fixes for
blk-mq from Ming Lei.
- A slew of blk-mq updates from me:
* Provide strict mappings so that the driver can rely on the
CPU-to-queue mapping. This enables optimizations in the driver.
* Provide bitmap tagging instead of percpu_ida, which never
really worked well for blk-mq. percpu_ida relies on having a
lot more tags available than we really need; it fails miserably
for cases where we exhaust (or are close to exhausting) the
tag space.
* Provide sane support for shared tag maps, as utilized by scsi-mq
* Various fixes for IO timeouts.
* API cleanups, and lots of perf tweaks and optimizations.
- Remove 'buffer' from struct request. This is ancient code, from
when requests were always virtually mapped. Kill it, to reclaim
some space in struct request. From me.
- Remove 'magic' from blk_plug. Since we store these on the stack
and since we've never caught any actual bugs with this, let's just
get rid of it. From me.
- Only call part_in_flight() once for IO completion, as it includes
two atomic reads. Hopefully we'll get a better implementation soon, as
the part IO stats are now one of the more expensive parts of doing
IO on blk-mq. From me.
- File migration of block code from {mm,fs}/ to block/. This
includes bio.c, bio-integrity.c, bounce.c, and ioprio.c. From me,
from a discussion on lkml.
That should describe the meat of the pull request. It also has various
little fixes and cleanups from Dave Jones, Shaohua Li, Duan Jiong,
Fengguang Wu, Fabian Frederick, Randy Dunlap, Robert Elliott, and Sam
Bradshaw"
* 'for-3.16/core' of git://git.kernel.dk/linux-block: (100 commits)
blk-mq: push IPI or local end_io decision to __blk_mq_complete_request()
blk-mq: remember to start timeout handler for direct queue
block: ensure that the timer is always added
blk-mq: blk_mq_unregister_hctx() can be static
blk-mq: make the sysfs mq/ layout reflect current mappings
blk-mq: blk_mq_tag_to_rq should handle flush request
block: remove dead code in scsi_ioctl:blk_verify_command
blk-mq: request initialization optimizations
block: add queue flag for disabling SG merging
block: remove 'magic' from struct blk_plug
blk-mq: remove alloc_hctx and free_hctx methods
blk-mq: add file comments and update copyright notices
blk-mq: remove blk_mq_alloc_request_pinned
blk-mq: do not use blk_mq_alloc_request_pinned in blk_mq_map_request
blk-mq: remove blk_mq_wait_for_tags
blk-mq: initialize request in __blk_mq_alloc_request
blk-mq: merge blk_mq_alloc_reserved_request into blk_mq_alloc_request
blk-mq: add helper to insert requests from irq context
blk-mq: remove stale comment for blk_mq_complete_request()
blk-mq: allow non-softirq completions
...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/amiflop.c | 2
-rw-r--r--  drivers/block/ataflop.c | 2
-rw-r--r--  drivers/block/floppy.c | 18
-rw-r--r--  drivers/block/hd.c | 10
-rw-r--r--  drivers/block/mg_disk.c | 12
-rw-r--r--  drivers/block/null_blk.c | 117
-rw-r--r--  drivers/block/paride/pcd.c | 2
-rw-r--r--  drivers/block/paride/pd.c | 4
-rw-r--r--  drivers/block/paride/pf.c | 4
-rw-r--r--  drivers/block/skd_main.c | 5
-rw-r--r--  drivers/block/swim.c | 2
-rw-r--r--  drivers/block/swim3.c | 6
-rw-r--r--  drivers/block/virtio_blk.c | 75
-rw-r--r--  drivers/block/xen-blkfront.c | 4
-rw-r--r--  drivers/block/xsysace.c | 4
-rw-r--r--  drivers/block/z2ram.c | 6
-rw-r--r--  drivers/cdrom/gdrom.c | 2
-rw-r--r--  drivers/char/random.c | 1
-rw-r--r--  drivers/ide/ide-disk.c | 5
-rw-r--r--  drivers/md/dm.c | 1
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 3
-rw-r--r--  drivers/mtd/ubi/block.c | 2
-rw-r--r--  drivers/sbus/char/jsflash.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 5
-rw-r--r--  drivers/scsi/sd.c | 13
25 files changed, 137 insertions, 170 deletions
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 748dea4f34dc..758da2287d9a 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1406,7 +1406,7 @@ next_segment: | |||
1406 | 1406 | ||
1407 | track = block / (floppy->dtype->sects * floppy->type->sect_mult); | 1407 | track = block / (floppy->dtype->sects * floppy->type->sect_mult); |
1408 | sector = block % (floppy->dtype->sects * floppy->type->sect_mult); | 1408 | sector = block % (floppy->dtype->sects * floppy->type->sect_mult); |
1409 | data = rq->buffer + 512 * cnt; | 1409 | data = bio_data(rq->bio) + 512 * cnt; |
1410 | #ifdef DEBUG | 1410 | #ifdef DEBUG |
1411 | printk("access to track %d, sector %d, with buffer at " | 1411 | printk("access to track %d, sector %d, with buffer at " |
1412 | "0x%08lx\n", track, sector, data); | 1412 | "0x%08lx\n", track, sector, data); |
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index cfa64bdf01c9..2104b1b4ccda 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1484,7 +1484,7 @@ repeat: | |||
1484 | ReqCnt = 0; | 1484 | ReqCnt = 0; |
1485 | ReqCmd = rq_data_dir(fd_request); | 1485 | ReqCmd = rq_data_dir(fd_request); |
1486 | ReqBlock = blk_rq_pos(fd_request); | 1486 | ReqBlock = blk_rq_pos(fd_request); |
1487 | ReqBuffer = fd_request->buffer; | 1487 | ReqBuffer = bio_data(fd_request->bio); |
1488 | setup_req_params( drive ); | 1488 | setup_req_params( drive ); |
1489 | do_fd_action( drive ); | 1489 | do_fd_action( drive ); |
1490 | 1490 | ||
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index fa9bb742df6e..dc3a41c82b38 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2351,7 +2351,7 @@ static void rw_interrupt(void) | |||
2351 | } | 2351 | } |
2352 | 2352 | ||
2353 | if (CT(COMMAND) != FD_READ || | 2353 | if (CT(COMMAND) != FD_READ || |
2354 | raw_cmd->kernel_data == current_req->buffer) { | 2354 | raw_cmd->kernel_data == bio_data(current_req->bio)) { |
2355 | /* transfer directly from buffer */ | 2355 | /* transfer directly from buffer */ |
2356 | cont->done(1); | 2356 | cont->done(1); |
2357 | } else if (CT(COMMAND) == FD_READ) { | 2357 | } else if (CT(COMMAND) == FD_READ) { |
@@ -2640,7 +2640,7 @@ static int make_raw_rw_request(void) | |||
2640 | raw_cmd->flags &= ~FD_RAW_WRITE; | 2640 | raw_cmd->flags &= ~FD_RAW_WRITE; |
2641 | raw_cmd->flags |= FD_RAW_READ; | 2641 | raw_cmd->flags |= FD_RAW_READ; |
2642 | COMMAND = FM_MODE(_floppy, FD_READ); | 2642 | COMMAND = FM_MODE(_floppy, FD_READ); |
2643 | } else if ((unsigned long)current_req->buffer < MAX_DMA_ADDRESS) { | 2643 | } else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) { |
2644 | unsigned long dma_limit; | 2644 | unsigned long dma_limit; |
2645 | int direct, indirect; | 2645 | int direct, indirect; |
2646 | 2646 | ||
@@ -2654,13 +2654,13 @@ static int make_raw_rw_request(void) | |||
2654 | */ | 2654 | */ |
2655 | max_size = buffer_chain_size(); | 2655 | max_size = buffer_chain_size(); |
2656 | dma_limit = (MAX_DMA_ADDRESS - | 2656 | dma_limit = (MAX_DMA_ADDRESS - |
2657 | ((unsigned long)current_req->buffer)) >> 9; | 2657 | ((unsigned long)bio_data(current_req->bio))) >> 9; |
2658 | if ((unsigned long)max_size > dma_limit) | 2658 | if ((unsigned long)max_size > dma_limit) |
2659 | max_size = dma_limit; | 2659 | max_size = dma_limit; |
2660 | /* 64 kb boundaries */ | 2660 | /* 64 kb boundaries */ |
2661 | if (CROSS_64KB(current_req->buffer, max_size << 9)) | 2661 | if (CROSS_64KB(bio_data(current_req->bio), max_size << 9)) |
2662 | max_size = (K_64 - | 2662 | max_size = (K_64 - |
2663 | ((unsigned long)current_req->buffer) % | 2663 | ((unsigned long)bio_data(current_req->bio)) % |
2664 | K_64) >> 9; | 2664 | K_64) >> 9; |
2665 | direct = transfer_size(ssize, max_sector, max_size) - fsector_t; | 2665 | direct = transfer_size(ssize, max_sector, max_size) - fsector_t; |
2666 | /* | 2666 | /* |
@@ -2677,7 +2677,7 @@ static int make_raw_rw_request(void) | |||
2677 | (DP->read_track & (1 << DRS->probed_format)))))) { | 2677 | (DP->read_track & (1 << DRS->probed_format)))))) { |
2678 | max_size = blk_rq_sectors(current_req); | 2678 | max_size = blk_rq_sectors(current_req); |
2679 | } else { | 2679 | } else { |
2680 | raw_cmd->kernel_data = current_req->buffer; | 2680 | raw_cmd->kernel_data = bio_data(current_req->bio); |
2681 | raw_cmd->length = current_count_sectors << 9; | 2681 | raw_cmd->length = current_count_sectors << 9; |
2682 | if (raw_cmd->length == 0) { | 2682 | if (raw_cmd->length == 0) { |
2683 | DPRINT("%s: zero dma transfer attempted\n", __func__); | 2683 | DPRINT("%s: zero dma transfer attempted\n", __func__); |
@@ -2731,7 +2731,7 @@ static int make_raw_rw_request(void) | |||
2731 | raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1; | 2731 | raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1; |
2732 | raw_cmd->length <<= 9; | 2732 | raw_cmd->length <<= 9; |
2733 | if ((raw_cmd->length < current_count_sectors << 9) || | 2733 | if ((raw_cmd->length < current_count_sectors << 9) || |
2734 | (raw_cmd->kernel_data != current_req->buffer && | 2734 | (raw_cmd->kernel_data != bio_data(current_req->bio) && |
2735 | CT(COMMAND) == FD_WRITE && | 2735 | CT(COMMAND) == FD_WRITE && |
2736 | (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max || | 2736 | (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max || |
2737 | aligned_sector_t < buffer_min)) || | 2737 | aligned_sector_t < buffer_min)) || |
@@ -2739,7 +2739,7 @@ static int make_raw_rw_request(void) | |||
2739 | raw_cmd->length <= 0 || current_count_sectors <= 0) { | 2739 | raw_cmd->length <= 0 || current_count_sectors <= 0) { |
2740 | DPRINT("fractionary current count b=%lx s=%lx\n", | 2740 | DPRINT("fractionary current count b=%lx s=%lx\n", |
2741 | raw_cmd->length, current_count_sectors); | 2741 | raw_cmd->length, current_count_sectors); |
2742 | if (raw_cmd->kernel_data != current_req->buffer) | 2742 | if (raw_cmd->kernel_data != bio_data(current_req->bio)) |
2743 | pr_info("addr=%d, length=%ld\n", | 2743 | pr_info("addr=%d, length=%ld\n", |
2744 | (int)((raw_cmd->kernel_data - | 2744 | (int)((raw_cmd->kernel_data - |
2745 | floppy_track_buffer) >> 9), | 2745 | floppy_track_buffer) >> 9), |
@@ -2756,7 +2756,7 @@ static int make_raw_rw_request(void) | |||
2756 | return 0; | 2756 | return 0; |
2757 | } | 2757 | } |
2758 | 2758 | ||
2759 | if (raw_cmd->kernel_data != current_req->buffer) { | 2759 | if (raw_cmd->kernel_data != bio_data(current_req->bio)) { |
2760 | if (raw_cmd->kernel_data < floppy_track_buffer || | 2760 | if (raw_cmd->kernel_data < floppy_track_buffer || |
2761 | current_count_sectors < 0 || | 2761 | current_count_sectors < 0 || |
2762 | raw_cmd->length < 0 || | 2762 | raw_cmd->length < 0 || |
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index bf397bf108b7..8a290c08262f 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -464,11 +464,11 @@ static void read_intr(void) | |||
464 | 464 | ||
465 | ok_to_read: | 465 | ok_to_read: |
466 | req = hd_req; | 466 | req = hd_req; |
467 | insw(HD_DATA, req->buffer, 256); | 467 | insw(HD_DATA, bio_data(req->bio), 256); |
468 | #ifdef DEBUG | 468 | #ifdef DEBUG |
469 | printk("%s: read: sector %ld, remaining = %u, buffer=%p\n", | 469 | printk("%s: read: sector %ld, remaining = %u, buffer=%p\n", |
470 | req->rq_disk->disk_name, blk_rq_pos(req) + 1, | 470 | req->rq_disk->disk_name, blk_rq_pos(req) + 1, |
471 | blk_rq_sectors(req) - 1, req->buffer+512); | 471 | blk_rq_sectors(req) - 1, bio_data(req->bio)+512); |
472 | #endif | 472 | #endif |
473 | if (hd_end_request(0, 512)) { | 473 | if (hd_end_request(0, 512)) { |
474 | SET_HANDLER(&read_intr); | 474 | SET_HANDLER(&read_intr); |
@@ -505,7 +505,7 @@ static void write_intr(void) | |||
505 | ok_to_write: | 505 | ok_to_write: |
506 | if (hd_end_request(0, 512)) { | 506 | if (hd_end_request(0, 512)) { |
507 | SET_HANDLER(&write_intr); | 507 | SET_HANDLER(&write_intr); |
508 | outsw(HD_DATA, req->buffer, 256); | 508 | outsw(HD_DATA, bio_data(req->bio), 256); |
509 | return; | 509 | return; |
510 | } | 510 | } |
511 | 511 | ||
@@ -624,7 +624,7 @@ repeat: | |||
624 | printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p\n", | 624 | printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p\n", |
625 | req->rq_disk->disk_name, | 625 | req->rq_disk->disk_name, |
626 | req_data_dir(req) == READ ? "read" : "writ", | 626 | req_data_dir(req) == READ ? "read" : "writ", |
627 | cyl, head, sec, nsect, req->buffer); | 627 | cyl, head, sec, nsect, bio_data(req->bio)); |
628 | #endif | 628 | #endif |
629 | if (req->cmd_type == REQ_TYPE_FS) { | 629 | if (req->cmd_type == REQ_TYPE_FS) { |
630 | switch (rq_data_dir(req)) { | 630 | switch (rq_data_dir(req)) { |
@@ -643,7 +643,7 @@ repeat: | |||
643 | bad_rw_intr(); | 643 | bad_rw_intr(); |
644 | goto repeat; | 644 | goto repeat; |
645 | } | 645 | } |
646 | outsw(HD_DATA, req->buffer, 256); | 646 | outsw(HD_DATA, bio_data(req->bio), 256); |
647 | break; | 647 | break; |
648 | default: | 648 | default: |
649 | printk("unknown hd-command\n"); | 649 | printk("unknown hd-command\n"); |
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index eb59b1241366..e352cac707e8 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -479,7 +479,7 @@ static unsigned int mg_out(struct mg_host *host, | |||
479 | 479 | ||
480 | static void mg_read_one(struct mg_host *host, struct request *req) | 480 | static void mg_read_one(struct mg_host *host, struct request *req) |
481 | { | 481 | { |
482 | u16 *buff = (u16 *)req->buffer; | 482 | u16 *buff = (u16 *)bio_data(req->bio); |
483 | u32 i; | 483 | u32 i; |
484 | 484 | ||
485 | for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) | 485 | for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) |
@@ -496,7 +496,7 @@ static void mg_read(struct request *req) | |||
496 | mg_bad_rw_intr(host); | 496 | mg_bad_rw_intr(host); |
497 | 497 | ||
498 | MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", | 498 | MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", |
499 | blk_rq_sectors(req), blk_rq_pos(req), req->buffer); | 499 | blk_rq_sectors(req), blk_rq_pos(req), bio_data(req->bio)); |
500 | 500 | ||
501 | do { | 501 | do { |
502 | if (mg_wait(host, ATA_DRQ, | 502 | if (mg_wait(host, ATA_DRQ, |
@@ -514,7 +514,7 @@ static void mg_read(struct request *req) | |||
514 | 514 | ||
515 | static void mg_write_one(struct mg_host *host, struct request *req) | 515 | static void mg_write_one(struct mg_host *host, struct request *req) |
516 | { | 516 | { |
517 | u16 *buff = (u16 *)req->buffer; | 517 | u16 *buff = (u16 *)bio_data(req->bio); |
518 | u32 i; | 518 | u32 i; |
519 | 519 | ||
520 | for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) | 520 | for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) |
@@ -534,7 +534,7 @@ static void mg_write(struct request *req) | |||
534 | } | 534 | } |
535 | 535 | ||
536 | MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", | 536 | MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", |
537 | rem, blk_rq_pos(req), req->buffer); | 537 | rem, blk_rq_pos(req), bio_data(req->bio)); |
538 | 538 | ||
539 | if (mg_wait(host, ATA_DRQ, | 539 | if (mg_wait(host, ATA_DRQ, |
540 | MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { | 540 | MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { |
@@ -585,7 +585,7 @@ ok_to_read: | |||
585 | mg_read_one(host, req); | 585 | mg_read_one(host, req); |
586 | 586 | ||
587 | MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", | 587 | MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", |
588 | blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer); | 588 | blk_rq_pos(req), blk_rq_sectors(req) - 1, bio_data(req->bio)); |
589 | 589 | ||
590 | /* send read confirm */ | 590 | /* send read confirm */ |
591 | outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); | 591 | outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); |
@@ -624,7 +624,7 @@ ok_to_write: | |||
624 | /* write 1 sector and set handler if remains */ | 624 | /* write 1 sector and set handler if remains */ |
625 | mg_write_one(host, req); | 625 | mg_write_one(host, req); |
626 | MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", | 626 | MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", |
627 | blk_rq_pos(req), blk_rq_sectors(req), req->buffer); | 627 | blk_rq_pos(req), blk_rq_sectors(req), bio_data(req->bio)); |
628 | host->mg_do_intr = mg_write_intr; | 628 | host->mg_do_intr = mg_write_intr; |
629 | mod_timer(&host->timer, jiffies + 3 * HZ); | 629 | mod_timer(&host->timer, jiffies + 3 * HZ); |
630 | } | 630 | } |
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 091b9ea14feb..b40af63a5476 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -32,6 +32,7 @@ struct nullb { | |||
32 | unsigned int index; | 32 | unsigned int index; |
33 | struct request_queue *q; | 33 | struct request_queue *q; |
34 | struct gendisk *disk; | 34 | struct gendisk *disk; |
35 | struct blk_mq_tag_set tag_set; | ||
35 | struct hrtimer timer; | 36 | struct hrtimer timer; |
36 | unsigned int queue_depth; | 37 | unsigned int queue_depth; |
37 | spinlock_t lock; | 38 | spinlock_t lock; |
@@ -226,7 +227,7 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd) | |||
226 | 227 | ||
227 | static void null_softirq_done_fn(struct request *rq) | 228 | static void null_softirq_done_fn(struct request *rq) |
228 | { | 229 | { |
229 | end_cmd(rq->special); | 230 | end_cmd(blk_mq_rq_to_pdu(rq)); |
230 | } | 231 | } |
231 | 232 | ||
232 | static inline void null_handle_cmd(struct nullb_cmd *cmd) | 233 | static inline void null_handle_cmd(struct nullb_cmd *cmd) |
@@ -311,7 +312,7 @@ static void null_request_fn(struct request_queue *q) | |||
311 | 312 | ||
312 | static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) | 313 | static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) |
313 | { | 314 | { |
314 | struct nullb_cmd *cmd = rq->special; | 315 | struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); |
315 | 316 | ||
316 | cmd->rq = rq; | 317 | cmd->rq = rq; |
317 | cmd->nq = hctx->driver_data; | 318 | cmd->nq = hctx->driver_data; |
@@ -320,46 +321,6 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) | |||
320 | return BLK_MQ_RQ_QUEUE_OK; | 321 | return BLK_MQ_RQ_QUEUE_OK; |
321 | } | 322 | } |
322 | 323 | ||
323 | static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) | ||
324 | { | ||
325 | int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes); | ||
326 | int tip = (reg->nr_hw_queues % nr_online_nodes); | ||
327 | int node = 0, i, n; | ||
328 | |||
329 | /* | ||
330 | * Split submit queues evenly wrt to the number of nodes. If uneven, | ||
331 | * fill the first buckets with one extra, until the rest is filled with | ||
332 | * no extra. | ||
333 | */ | ||
334 | for (i = 0, n = 1; i < hctx_index; i++, n++) { | ||
335 | if (n % b_size == 0) { | ||
336 | n = 0; | ||
337 | node++; | ||
338 | |||
339 | tip--; | ||
340 | if (!tip) | ||
341 | b_size = reg->nr_hw_queues / nr_online_nodes; | ||
342 | } | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * A node might not be online, therefore map the relative node id to the | ||
347 | * real node id. | ||
348 | */ | ||
349 | for_each_online_node(n) { | ||
350 | if (!node) | ||
351 | break; | ||
352 | node--; | ||
353 | } | ||
354 | |||
355 | return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n); | ||
356 | } | ||
357 | |||
358 | static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) | ||
359 | { | ||
360 | kfree(hctx); | ||
361 | } | ||
362 | |||
363 | static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) | 324 | static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) |
364 | { | 325 | { |
365 | BUG_ON(!nullb); | 326 | BUG_ON(!nullb); |
@@ -389,19 +350,14 @@ static struct blk_mq_ops null_mq_ops = { | |||
389 | .complete = null_softirq_done_fn, | 350 | .complete = null_softirq_done_fn, |
390 | }; | 351 | }; |
391 | 352 | ||
392 | static struct blk_mq_reg null_mq_reg = { | ||
393 | .ops = &null_mq_ops, | ||
394 | .queue_depth = 64, | ||
395 | .cmd_size = sizeof(struct nullb_cmd), | ||
396 | .flags = BLK_MQ_F_SHOULD_MERGE, | ||
397 | }; | ||
398 | |||
399 | static void null_del_dev(struct nullb *nullb) | 353 | static void null_del_dev(struct nullb *nullb) |
400 | { | 354 | { |
401 | list_del_init(&nullb->list); | 355 | list_del_init(&nullb->list); |
402 | 356 | ||
403 | del_gendisk(nullb->disk); | 357 | del_gendisk(nullb->disk); |
404 | blk_cleanup_queue(nullb->q); | 358 | blk_cleanup_queue(nullb->q); |
359 | if (queue_mode == NULL_Q_MQ) | ||
360 | blk_mq_free_tag_set(&nullb->tag_set); | ||
405 | put_disk(nullb->disk); | 361 | put_disk(nullb->disk); |
406 | kfree(nullb); | 362 | kfree(nullb); |
407 | } | 363 | } |
@@ -506,7 +462,7 @@ static int null_add_dev(void) | |||
506 | 462 | ||
507 | nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); | 463 | nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); |
508 | if (!nullb) | 464 | if (!nullb) |
509 | return -ENOMEM; | 465 | goto out; |
510 | 466 | ||
511 | spin_lock_init(&nullb->lock); | 467 | spin_lock_init(&nullb->lock); |
512 | 468 | ||
@@ -514,49 +470,44 @@ static int null_add_dev(void) | |||
514 | submit_queues = nr_online_nodes; | 470 | submit_queues = nr_online_nodes; |
515 | 471 | ||
516 | if (setup_queues(nullb)) | 472 | if (setup_queues(nullb)) |
517 | goto err; | 473 | goto out_free_nullb; |
518 | 474 | ||
519 | if (queue_mode == NULL_Q_MQ) { | 475 | if (queue_mode == NULL_Q_MQ) { |
520 | null_mq_reg.numa_node = home_node; | 476 | nullb->tag_set.ops = &null_mq_ops; |
521 | null_mq_reg.queue_depth = hw_queue_depth; | 477 | nullb->tag_set.nr_hw_queues = submit_queues; |
522 | null_mq_reg.nr_hw_queues = submit_queues; | 478 | nullb->tag_set.queue_depth = hw_queue_depth; |
523 | 479 | nullb->tag_set.numa_node = home_node; | |
524 | if (use_per_node_hctx) { | 480 | nullb->tag_set.cmd_size = sizeof(struct nullb_cmd); |
525 | null_mq_reg.ops->alloc_hctx = null_alloc_hctx; | 481 | nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; |
526 | null_mq_reg.ops->free_hctx = null_free_hctx; | 482 | nullb->tag_set.driver_data = nullb; |
527 | } else { | 483 | |
528 | null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; | 484 | if (blk_mq_alloc_tag_set(&nullb->tag_set)) |
529 | null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; | 485 | goto out_cleanup_queues; |
530 | } | 486 | |
531 | 487 | nullb->q = blk_mq_init_queue(&nullb->tag_set); | |
532 | nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); | 488 | if (!nullb->q) |
489 | goto out_cleanup_tags; | ||
533 | } else if (queue_mode == NULL_Q_BIO) { | 490 | } else if (queue_mode == NULL_Q_BIO) { |
534 | nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); | 491 | nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); |
492 | if (!nullb->q) | ||
493 | goto out_cleanup_queues; | ||
535 | blk_queue_make_request(nullb->q, null_queue_bio); | 494 | blk_queue_make_request(nullb->q, null_queue_bio); |
536 | init_driver_queues(nullb); | 495 | init_driver_queues(nullb); |
537 | } else { | 496 | } else { |
538 | nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); | 497 | nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); |
498 | if (!nullb->q) | ||
499 | goto out_cleanup_queues; | ||
539 | blk_queue_prep_rq(nullb->q, null_rq_prep_fn); | 500 | blk_queue_prep_rq(nullb->q, null_rq_prep_fn); |
540 | if (nullb->q) | 501 | blk_queue_softirq_done(nullb->q, null_softirq_done_fn); |
541 | blk_queue_softirq_done(nullb->q, null_softirq_done_fn); | ||
542 | init_driver_queues(nullb); | 502 | init_driver_queues(nullb); |
543 | } | 503 | } |
544 | 504 | ||
545 | if (!nullb->q) | ||
546 | goto queue_fail; | ||
547 | |||
548 | nullb->q->queuedata = nullb; | 505 | nullb->q->queuedata = nullb; |
549 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); | 506 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); |
550 | 507 | ||
551 | disk = nullb->disk = alloc_disk_node(1, home_node); | 508 | disk = nullb->disk = alloc_disk_node(1, home_node); |
552 | if (!disk) { | 509 | if (!disk) |
553 | queue_fail: | 510 | goto out_cleanup_blk_queue; |
554 | blk_cleanup_queue(nullb->q); | ||
555 | cleanup_queues(nullb); | ||
556 | err: | ||
557 | kfree(nullb); | ||
558 | return -ENOMEM; | ||
559 | } | ||
560 | 511 | ||
561 | mutex_lock(&lock); | 512 | mutex_lock(&lock); |
562 | list_add_tail(&nullb->list, &nullb_list); | 513 | list_add_tail(&nullb->list, &nullb_list); |
@@ -579,6 +530,18 @@ err: | |||
579 | sprintf(disk->disk_name, "nullb%d", nullb->index); | 530 | sprintf(disk->disk_name, "nullb%d", nullb->index); |
580 | add_disk(disk); | 531 | add_disk(disk); |
581 | return 0; | 532 | return 0; |
533 | |||
534 | out_cleanup_blk_queue: | ||
535 | blk_cleanup_queue(nullb->q); | ||
536 | out_cleanup_tags: | ||
537 | if (queue_mode == NULL_Q_MQ) | ||
538 | blk_mq_free_tag_set(&nullb->tag_set); | ||
539 | out_cleanup_queues: | ||
540 | cleanup_queues(nullb); | ||
541 | out_free_nullb: | ||
542 | kfree(nullb); | ||
543 | out: | ||
544 | return -ENOMEM; | ||
582 | } | 545 | } |
583 | 546 | ||
584 | static int __init null_init(void) | 547 | static int __init null_init(void) |
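Besides the tag-set conversion, two details of this null_blk rework recur in the other blk-mq drivers: per-request driver state lives in a PDU sized by tag_set.cmd_size and reached with blk_mq_rq_to_pdu() rather than rq->special, and the error unwind frees the tag set only after everything that needed it has been torn down. A sketch of the PDU side, with my_cmd/my_issue/my_finish purely illustrative:

struct my_cmd {				/* allocated behind each struct request,
					 * sized by tag_set.cmd_size */
	struct request *rq;
	/* ... driver-private state ... */
};

static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* was: rq->special */

	cmd->rq = rq;
	my_issue(cmd);				/* hand off to the hardware */
	return BLK_MQ_RQ_QUEUE_OK;
}

static void my_softirq_done(struct request *rq)
{
	my_finish(blk_mq_rq_to_pdu(rq));	/* completion sees the same PDU */
}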
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index e76bdc074dbe..719cb1bc1640 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -747,7 +747,7 @@ static void do_pcd_request(struct request_queue * q) | |||
747 | pcd_current = cd; | 747 | pcd_current = cd; |
748 | pcd_sector = blk_rq_pos(pcd_req); | 748 | pcd_sector = blk_rq_pos(pcd_req); |
749 | pcd_count = blk_rq_cur_sectors(pcd_req); | 749 | pcd_count = blk_rq_cur_sectors(pcd_req); |
750 | pcd_buf = pcd_req->buffer; | 750 | pcd_buf = bio_data(pcd_req->bio); |
751 | pcd_busy = 1; | 751 | pcd_busy = 1; |
752 | ps_set_intr(do_pcd_read, NULL, 0, nice); | 752 | ps_set_intr(do_pcd_read, NULL, 0, nice); |
753 | return; | 753 | return; |
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 19ad8f0c83ef..fea7e76a00de 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -454,7 +454,7 @@ static enum action do_pd_io_start(void) | |||
454 | if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) | 454 | if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) |
455 | return Fail; | 455 | return Fail; |
456 | pd_run = blk_rq_sectors(pd_req); | 456 | pd_run = blk_rq_sectors(pd_req); |
457 | pd_buf = pd_req->buffer; | 457 | pd_buf = bio_data(pd_req->bio); |
458 | pd_retries = 0; | 458 | pd_retries = 0; |
459 | if (pd_cmd == READ) | 459 | if (pd_cmd == READ) |
460 | return do_pd_read_start(); | 460 | return do_pd_read_start(); |
@@ -485,7 +485,7 @@ static int pd_next_buf(void) | |||
485 | spin_lock_irqsave(&pd_lock, saved_flags); | 485 | spin_lock_irqsave(&pd_lock, saved_flags); |
486 | __blk_end_request_cur(pd_req, 0); | 486 | __blk_end_request_cur(pd_req, 0); |
487 | pd_count = blk_rq_cur_sectors(pd_req); | 487 | pd_count = blk_rq_cur_sectors(pd_req); |
488 | pd_buf = pd_req->buffer; | 488 | pd_buf = bio_data(pd_req->bio); |
489 | spin_unlock_irqrestore(&pd_lock, saved_flags); | 489 | spin_unlock_irqrestore(&pd_lock, saved_flags); |
490 | return 0; | 490 | return 0; |
491 | } | 491 | } |
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index f5c86d523ba0..9a15fd3c9349 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -795,7 +795,7 @@ repeat: | |||
795 | } | 795 | } |
796 | 796 | ||
797 | pf_cmd = rq_data_dir(pf_req); | 797 | pf_cmd = rq_data_dir(pf_req); |
798 | pf_buf = pf_req->buffer; | 798 | pf_buf = bio_data(pf_req->bio); |
799 | pf_retries = 0; | 799 | pf_retries = 0; |
800 | 800 | ||
801 | pf_busy = 1; | 801 | pf_busy = 1; |
@@ -827,7 +827,7 @@ static int pf_next_buf(void) | |||
827 | if (!pf_req) | 827 | if (!pf_req) |
828 | return 1; | 828 | return 1; |
829 | pf_count = blk_rq_cur_sectors(pf_req); | 829 | pf_count = blk_rq_cur_sectors(pf_req); |
830 | pf_buf = pf_req->buffer; | 830 | pf_buf = bio_data(pf_req->bio); |
831 | } | 831 | } |
832 | return 0; | 832 | return 0; |
833 | } | 833 | } |
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index a69dd93d1bd5..c48d9084c965 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -563,7 +563,6 @@ skd_prep_discard_cdb(struct skd_scsi_request *scsi_req, | |||
563 | 563 | ||
564 | req = skreq->req; | 564 | req = skreq->req; |
565 | blk_add_request_payload(req, page, len); | 565 | blk_add_request_payload(req, page, len); |
566 | req->buffer = buf; | ||
567 | } | 566 | } |
568 | 567 | ||
569 | static void skd_request_fn_not_online(struct request_queue *q); | 568 | static void skd_request_fn_not_online(struct request_queue *q); |
@@ -744,6 +743,7 @@ static void skd_request_fn(struct request_queue *q) | |||
744 | break; | 743 | break; |
745 | } | 744 | } |
746 | skreq->discard_page = 1; | 745 | skreq->discard_page = 1; |
746 | req->completion_data = page; | ||
747 | skd_prep_discard_cdb(scsi_req, skreq, page, lba, count); | 747 | skd_prep_discard_cdb(scsi_req, skreq, page, lba, count); |
748 | 748 | ||
749 | } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) { | 749 | } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) { |
@@ -858,8 +858,7 @@ static void skd_end_request(struct skd_device *skdev, | |||
858 | (skreq->discard_page == 1)) { | 858 | (skreq->discard_page == 1)) { |
859 | pr_debug("%s:%s:%d, free the page!", | 859 | pr_debug("%s:%s:%d, free the page!", |
860 | skdev->name, __func__, __LINE__); | 860 | skdev->name, __func__, __LINE__); |
861 | free_page((unsigned long)req->buffer); | 861 | __free_page(req->completion_data); |
862 | req->buffer = NULL; | ||
863 | } | 862 | } |
864 | 863 | ||
865 | if (unlikely(error)) { | 864 | if (unlikely(error)) { |
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index b02d53a399f3..6b44bbe528b7 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -549,7 +549,7 @@ static void redo_fd_request(struct request_queue *q) | |||
549 | case READ: | 549 | case READ: |
550 | err = floppy_read_sectors(fs, blk_rq_pos(req), | 550 | err = floppy_read_sectors(fs, blk_rq_pos(req), |
551 | blk_rq_cur_sectors(req), | 551 | blk_rq_cur_sectors(req), |
552 | req->buffer); | 552 | bio_data(req->bio)); |
553 | break; | 553 | break; |
554 | } | 554 | } |
555 | done: | 555 | done: |
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index c74f7b56e7c4..523ee8fd4c15 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -342,7 +342,7 @@ static void start_request(struct floppy_state *fs) | |||
342 | swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n", | 342 | swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n", |
343 | req->rq_disk->disk_name, req->cmd, | 343 | req->rq_disk->disk_name, req->cmd, |
344 | (long)blk_rq_pos(req), blk_rq_sectors(req), | 344 | (long)blk_rq_pos(req), blk_rq_sectors(req), |
345 | req->buffer); | 345 | bio_data(req->bio)); |
346 | swim3_dbg(" errors=%d current_nr_sectors=%u\n", | 346 | swim3_dbg(" errors=%d current_nr_sectors=%u\n", |
347 | req->errors, blk_rq_cur_sectors(req)); | 347 | req->errors, blk_rq_cur_sectors(req)); |
348 | #endif | 348 | #endif |
@@ -479,11 +479,11 @@ static inline void setup_transfer(struct floppy_state *fs) | |||
479 | /* Set up 3 dma commands: write preamble, data, postamble */ | 479 | /* Set up 3 dma commands: write preamble, data, postamble */ |
480 | init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble)); | 480 | init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble)); |
481 | ++cp; | 481 | ++cp; |
482 | init_dma(cp, OUTPUT_MORE, req->buffer, 512); | 482 | init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512); |
483 | ++cp; | 483 | ++cp; |
484 | init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble)); | 484 | init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble)); |
485 | } else { | 485 | } else { |
486 | init_dma(cp, INPUT_LAST, req->buffer, n * 512); | 486 | init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512); |
487 | } | 487 | } |
488 | ++cp; | 488 | ++cp; |
489 | out_le16(&cp->command, DBDMA_STOP); | 489 | out_le16(&cp->command, DBDMA_STOP); |
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index cb9b1f8326c3..c8f286e8d80f 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -30,6 +30,9 @@ struct virtio_blk | |||
30 | /* The disk structure for the kernel. */ | 30 | /* The disk structure for the kernel. */ |
31 | struct gendisk *disk; | 31 | struct gendisk *disk; |
32 | 32 | ||
33 | /* Block layer tags. */ | ||
34 | struct blk_mq_tag_set tag_set; | ||
35 | |||
33 | /* Process context for config space updates */ | 36 | /* Process context for config space updates */ |
34 | struct work_struct config_work; | 37 | struct work_struct config_work; |
35 | 38 | ||
@@ -112,7 +115,7 @@ static int __virtblk_add_req(struct virtqueue *vq, | |||
112 | 115 | ||
113 | static inline void virtblk_request_done(struct request *req) | 116 | static inline void virtblk_request_done(struct request *req) |
114 | { | 117 | { |
115 | struct virtblk_req *vbr = req->special; | 118 | struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); |
116 | int error = virtblk_result(vbr); | 119 | int error = virtblk_result(vbr); |
117 | 120 | ||
118 | if (req->cmd_type == REQ_TYPE_BLOCK_PC) { | 121 | if (req->cmd_type == REQ_TYPE_BLOCK_PC) { |
@@ -147,14 +150,14 @@ static void virtblk_done(struct virtqueue *vq) | |||
147 | 150 | ||
148 | /* In case queue is stopped waiting for more buffers. */ | 151 | /* In case queue is stopped waiting for more buffers. */ |
149 | if (req_done) | 152 | if (req_done) |
150 | blk_mq_start_stopped_hw_queues(vblk->disk->queue); | 153 | blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); |
151 | spin_unlock_irqrestore(&vblk->vq_lock, flags); | 154 | spin_unlock_irqrestore(&vblk->vq_lock, flags); |
152 | } | 155 | } |
153 | 156 | ||
154 | static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) | 157 | static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) |
155 | { | 158 | { |
156 | struct virtio_blk *vblk = hctx->queue->queuedata; | 159 | struct virtio_blk *vblk = hctx->queue->queuedata; |
157 | struct virtblk_req *vbr = req->special; | 160 | struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); |
158 | unsigned long flags; | 161 | unsigned long flags; |
159 | unsigned int num; | 162 | unsigned int num; |
160 | const bool last = (req->cmd_flags & REQ_END) != 0; | 163 | const bool last = (req->cmd_flags & REQ_END) != 0; |
@@ -480,33 +483,27 @@ static const struct device_attribute dev_attr_cache_type_rw = | |||
480 | __ATTR(cache_type, S_IRUGO|S_IWUSR, | 483 | __ATTR(cache_type, S_IRUGO|S_IWUSR, |
481 | virtblk_cache_type_show, virtblk_cache_type_store); | 484 | virtblk_cache_type_show, virtblk_cache_type_store); |
482 | 485 | ||
483 | static struct blk_mq_ops virtio_mq_ops = { | 486 | static int virtblk_init_request(void *data, struct request *rq, |
484 | .queue_rq = virtio_queue_rq, | 487 | unsigned int hctx_idx, unsigned int request_idx, |
485 | .map_queue = blk_mq_map_queue, | 488 | unsigned int numa_node) |
486 | .alloc_hctx = blk_mq_alloc_single_hw_queue, | ||
487 | .free_hctx = blk_mq_free_single_hw_queue, | ||
488 | .complete = virtblk_request_done, | ||
489 | }; | ||
490 | |||
491 | static struct blk_mq_reg virtio_mq_reg = { | ||
492 | .ops = &virtio_mq_ops, | ||
493 | .nr_hw_queues = 1, | ||
494 | .queue_depth = 0, /* Set in virtblk_probe */ | ||
495 | .numa_node = NUMA_NO_NODE, | ||
496 | .flags = BLK_MQ_F_SHOULD_MERGE, | ||
497 | }; | ||
498 | module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444); | ||
499 | |||
500 | static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx, | ||
501 | struct request *rq, unsigned int nr) | ||
502 | { | 489 | { |
503 | struct virtio_blk *vblk = data; | 490 | struct virtio_blk *vblk = data; |
504 | struct virtblk_req *vbr = rq->special; | 491 | struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); |
505 | 492 | ||
506 | sg_init_table(vbr->sg, vblk->sg_elems); | 493 | sg_init_table(vbr->sg, vblk->sg_elems); |
507 | return 0; | 494 | return 0; |
508 | } | 495 | } |
509 | 496 | ||
497 | static struct blk_mq_ops virtio_mq_ops = { | ||
498 | .queue_rq = virtio_queue_rq, | ||
499 | .map_queue = blk_mq_map_queue, | ||
500 | .complete = virtblk_request_done, | ||
501 | .init_request = virtblk_init_request, | ||
502 | }; | ||
503 | |||
504 | static unsigned int virtblk_queue_depth; | ||
505 | module_param_named(queue_depth, virtblk_queue_depth, uint, 0444); | ||
506 | |||
510 | static int virtblk_probe(struct virtio_device *vdev) | 507 | static int virtblk_probe(struct virtio_device *vdev) |
511 | { | 508 | { |
512 | struct virtio_blk *vblk; | 509 | struct virtio_blk *vblk; |
@@ -561,24 +558,34 @@ static int virtblk_probe(struct virtio_device *vdev) | |||
561 | } | 558 | } |
562 | 559 | ||
563 | /* Default queue sizing is to fill the ring. */ | 560 | /* Default queue sizing is to fill the ring. */ |
564 | if (!virtio_mq_reg.queue_depth) { | 561 | if (!virtblk_queue_depth) { |
565 | virtio_mq_reg.queue_depth = vblk->vq->num_free; | 562 | virtblk_queue_depth = vblk->vq->num_free; |
566 | /* ... but without indirect descs, we use 2 descs per req */ | 563 | /* ... but without indirect descs, we use 2 descs per req */ |
567 | if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) | 564 | if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) |
568 | virtio_mq_reg.queue_depth /= 2; | 565 | virtblk_queue_depth /= 2; |
569 | } | 566 | } |
570 | virtio_mq_reg.cmd_size = | 567 | |
568 | memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); | ||
569 | vblk->tag_set.ops = &virtio_mq_ops; | ||
570 | vblk->tag_set.nr_hw_queues = 1; | ||
571 | vblk->tag_set.queue_depth = virtblk_queue_depth; | ||
572 | vblk->tag_set.numa_node = NUMA_NO_NODE; | ||
573 | vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; | ||
574 | vblk->tag_set.cmd_size = | ||
571 | sizeof(struct virtblk_req) + | 575 | sizeof(struct virtblk_req) + |
572 | sizeof(struct scatterlist) * sg_elems; | 576 | sizeof(struct scatterlist) * sg_elems; |
577 | vblk->tag_set.driver_data = vblk; | ||
573 | 578 | ||
574 | q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk); | 579 | err = blk_mq_alloc_tag_set(&vblk->tag_set); |
580 | if (err) | ||
581 | goto out_put_disk; | ||
582 | |||
583 | q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); | ||
575 | if (!q) { | 584 | if (!q) { |
576 | err = -ENOMEM; | 585 | err = -ENOMEM; |
577 | goto out_put_disk; | 586 | goto out_free_tags; |
578 | } | 587 | } |
579 | 588 | ||
580 | blk_mq_init_commands(q, virtblk_init_vbr, vblk); | ||
581 | |||
582 | q->queuedata = vblk; | 589 | q->queuedata = vblk; |
583 | 590 | ||
584 | virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); | 591 | virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); |
@@ -679,6 +686,8 @@ static int virtblk_probe(struct virtio_device *vdev) | |||
679 | out_del_disk: | 686 | out_del_disk: |
680 | del_gendisk(vblk->disk); | 687 | del_gendisk(vblk->disk); |
681 | blk_cleanup_queue(vblk->disk->queue); | 688 | blk_cleanup_queue(vblk->disk->queue); |
689 | out_free_tags: | ||
690 | blk_mq_free_tag_set(&vblk->tag_set); | ||
682 | out_put_disk: | 691 | out_put_disk: |
683 | put_disk(vblk->disk); | 692 | put_disk(vblk->disk); |
684 | out_free_vq: | 693 | out_free_vq: |
@@ -705,6 +714,8 @@ static void virtblk_remove(struct virtio_device *vdev) | |||
705 | del_gendisk(vblk->disk); | 714 | del_gendisk(vblk->disk); |
706 | blk_cleanup_queue(vblk->disk->queue); | 715 | blk_cleanup_queue(vblk->disk->queue); |
707 | 716 | ||
717 | blk_mq_free_tag_set(&vblk->tag_set); | ||
718 | |||
708 | /* Stop all the virtqueues. */ | 719 | /* Stop all the virtqueues. */ |
709 | vdev->config->reset(vdev); | 720 | vdev->config->reset(vdev); |
710 | 721 | ||
@@ -749,7 +760,7 @@ static int virtblk_restore(struct virtio_device *vdev) | |||
749 | vblk->config_enable = true; | 760 | vblk->config_enable = true; |
750 | ret = init_vq(vdev->priv); | 761 | ret = init_vq(vdev->priv); |
751 | if (!ret) | 762 | if (!ret) |
752 | blk_mq_start_stopped_hw_queues(vblk->disk->queue); | 763 | blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); |
753 | 764 | ||
754 | return ret; | 765 | return ret; |
755 | } | 766 | } |
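The virtio-blk hunks above also show the replacement for blk_mq_init_commands(): one-time per-request setup now goes through the tag set's .init_request callback, which the core invokes for every preallocated request when blk_mq_alloc_tag_set() runs. A sketch following that shape, assuming a hypothetical my_cmd that embeds a scatterlist array; everything except the blk-mq names is illustrative:

static int my_init_request(void *data, struct request *rq,
			   unsigned int hctx_idx, unsigned int request_idx,
			   unsigned int numa_node)
{
	struct my_device *dev = data;			/* tag_set.driver_data */
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	sg_init_table(cmd->sg, dev->sg_elems);		/* runs once per request */
	return 0;
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.complete	= my_complete,
	.init_request	= my_init_request,
};

Note that the alloc_hctx/free_hctx methods have been dropped from blk_mq_ops entirely, and blk_mq_start_stopped_hw_queues() grew an async argument, both visible in the hunks above.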
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index efe1b4761735..283a30e88287 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -612,10 +612,10 @@ static void do_blkif_request(struct request_queue *rq) | |||
612 | } | 612 | } |
613 | 613 | ||
614 | pr_debug("do_blk_req %p: cmd %p, sec %lx, " | 614 | pr_debug("do_blk_req %p: cmd %p, sec %lx, " |
615 | "(%u/%u) buffer:%p [%s]\n", | 615 | "(%u/%u) [%s]\n", |
616 | req, req->cmd, (unsigned long)blk_rq_pos(req), | 616 | req, req->cmd, (unsigned long)blk_rq_pos(req), |
617 | blk_rq_cur_sectors(req), blk_rq_sectors(req), | 617 | blk_rq_cur_sectors(req), blk_rq_sectors(req), |
618 | req->buffer, rq_data_dir(req) ? "write" : "read"); | 618 | rq_data_dir(req) ? "write" : "read"); |
619 | 619 | ||
620 | if (blkif_queue_request(req)) { | 620 | if (blkif_queue_request(req)) { |
621 | blk_requeue_request(rq, req); | 621 | blk_requeue_request(rq, req); |
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 1393b8871a28..ab3ea62e5dfc 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -661,7 +661,7 @@ static void ace_fsm_dostate(struct ace_device *ace) | |||
661 | rq_data_dir(req)); | 661 | rq_data_dir(req)); |
662 | 662 | ||
663 | ace->req = req; | 663 | ace->req = req; |
664 | ace->data_ptr = req->buffer; | 664 | ace->data_ptr = bio_data(req->bio); |
665 | ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR; | 665 | ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR; |
666 | ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF); | 666 | ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF); |
667 | 667 | ||
@@ -733,7 +733,7 @@ static void ace_fsm_dostate(struct ace_device *ace) | |||
733 | * blk_rq_sectors(ace->req), | 733 | * blk_rq_sectors(ace->req), |
734 | * blk_rq_cur_sectors(ace->req)); | 734 | * blk_rq_cur_sectors(ace->req)); |
735 | */ | 735 | */ |
736 | ace->data_ptr = ace->req->buffer; | 736 | ace->data_ptr = bio_data(ace->req->bio); |
737 | ace->data_count = blk_rq_cur_sectors(ace->req) * 16; | 737 | ace->data_count = blk_rq_cur_sectors(ace->req) * 16; |
738 | ace_fsm_yieldirq(ace); | 738 | ace_fsm_yieldirq(ace); |
739 | break; | 739 | break; |
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 27de5046708a..968f9e52effa 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -87,13 +87,15 @@ static void do_z2_request(struct request_queue *q) | |||
87 | while (len) { | 87 | while (len) { |
88 | unsigned long addr = start & Z2RAM_CHUNKMASK; | 88 | unsigned long addr = start & Z2RAM_CHUNKMASK; |
89 | unsigned long size = Z2RAM_CHUNKSIZE - addr; | 89 | unsigned long size = Z2RAM_CHUNKSIZE - addr; |
90 | void *buffer = bio_data(req->bio); | ||
91 | |||
90 | if (len < size) | 92 | if (len < size) |
91 | size = len; | 93 | size = len; |
92 | addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ]; | 94 | addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ]; |
93 | if (rq_data_dir(req) == READ) | 95 | if (rq_data_dir(req) == READ) |
94 | memcpy(req->buffer, (char *)addr, size); | 96 | memcpy(buffer, (char *)addr, size); |
95 | else | 97 | else |
96 | memcpy((char *)addr, req->buffer, size); | 98 | memcpy((char *)addr, buffer, size); |
97 | start += size; | 99 | start += size; |
98 | len -= size; | 100 | len -= size; |
99 | } | 101 | } |
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 51e75ad96422..584bc3126403 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -602,7 +602,7 @@ static void gdrom_readdisk_dma(struct work_struct *work) | |||
602 | spin_unlock(&gdrom_lock); | 602 | spin_unlock(&gdrom_lock); |
603 | block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET; | 603 | block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET; |
604 | block_cnt = blk_rq_sectors(req)/GD_TO_BLK; | 604 | block_cnt = blk_rq_sectors(req)/GD_TO_BLK; |
605 | __raw_writel(virt_to_phys(req->buffer), GDROM_DMA_STARTADDR_REG); | 605 | __raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG); |
606 | __raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG); | 606 | __raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG); |
607 | __raw_writel(1, GDROM_DMA_DIRECTION_REG); | 607 | __raw_writel(1, GDROM_DMA_DIRECTION_REG); |
608 | __raw_writel(1, GDROM_DMA_ENABLE_REG); | 608 | __raw_writel(1, GDROM_DMA_ENABLE_REG); |
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 102c50d38902..06cea7ff3a7c 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -902,6 +902,7 @@ void add_disk_randomness(struct gendisk *disk) | |||
902 | add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); | 902 | add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); |
903 | trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool)); | 903 | trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool)); |
904 | } | 904 | } |
905 | EXPORT_SYMBOL_GPL(add_disk_randomness); | ||
905 | #endif | 906 | #endif |
906 | 907 | ||
907 | /********************************************************************* | 908 | /********************************************************************* |
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 16f69be820c7..ee880382e3bc 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -188,10 +188,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq, | |||
188 | 188 | ||
189 | ledtrig_ide_activity(); | 189 | ledtrig_ide_activity(); |
190 | 190 | ||
191 | pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n", | 191 | pr_debug("%s: %sing: block=%llu, sectors=%u\n", |
192 | drive->name, rq_data_dir(rq) == READ ? "read" : "writ", | 192 | drive->name, rq_data_dir(rq) == READ ? "read" : "writ", |
193 | (unsigned long long)block, blk_rq_sectors(rq), | 193 | (unsigned long long)block, blk_rq_sectors(rq)); |
194 | (unsigned long)rq->buffer); | ||
195 | 194 | ||
196 | if (hwif->rw_disk) | 195 | if (hwif->rw_disk) |
197 | hwif->rw_disk(drive, rq); | 196 | hwif->rw_disk(drive, rq); |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 455e64916498..6a71bc7c9133 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1544,7 +1544,6 @@ static int setup_clone(struct request *clone, struct request *rq, | |||
1544 | clone->cmd = rq->cmd; | 1544 | clone->cmd = rq->cmd; |
1545 | clone->cmd_len = rq->cmd_len; | 1545 | clone->cmd_len = rq->cmd_len; |
1546 | clone->sense = rq->sense; | 1546 | clone->sense = rq->sense; |
1547 | clone->buffer = rq->buffer; | ||
1548 | clone->end_io = end_clone_request; | 1547 | clone->end_io = end_clone_request; |
1549 | clone->end_io_data = tio; | 1548 | clone->end_io_data = tio; |
1550 | 1549 | ||
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 0b2ccb68c0d0..4dbfaee9aa95 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -82,8 +82,7 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
82 | 82 | ||
83 | block = blk_rq_pos(req) << 9 >> tr->blkshift; | 83 | block = blk_rq_pos(req) << 9 >> tr->blkshift; |
84 | nsect = blk_rq_cur_bytes(req) >> tr->blkshift; | 84 | nsect = blk_rq_cur_bytes(req) >> tr->blkshift; |
85 | 85 | buf = bio_data(req->bio); | |
86 | buf = req->buffer; | ||
87 | 86 | ||
88 | if (req->cmd_type != REQ_TYPE_FS) | 87 | if (req->cmd_type != REQ_TYPE_FS) |
89 | return -EIO; | 88 | return -EIO; |
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 8d659e6a1b4c..20a667c95da4 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -253,7 +253,7 @@ static int do_ubiblock_request(struct ubiblock *dev, struct request *req) | |||
253 | * flash access anyway. | 253 | * flash access anyway. |
254 | */ | 254 | */ |
255 | mutex_lock(&dev->dev_mutex); | 255 | mutex_lock(&dev->dev_mutex); |
256 | ret = ubiblock_read(dev, req->buffer, sec, len); | 256 | ret = ubiblock_read(dev, bio_data(req->bio), sec, len); |
257 | mutex_unlock(&dev->dev_mutex); | 257 | mutex_unlock(&dev->dev_mutex); |
258 | 258 | ||
259 | return ret; | 259 | return ret; |
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 4ccb5d869389..a40ee1e37486 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -207,7 +207,7 @@ static void jsfd_do_request(struct request_queue *q) | |||
207 | goto end; | 207 | goto end; |
208 | } | 208 | } |
209 | 209 | ||
210 | jsfd_read(req->buffer, jdp->dbase + offset, len); | 210 | jsfd_read(bio_data(req->bio), jdp->dbase + offset, len); |
211 | err = 0; | 211 | err = 0; |
212 | end: | 212 | end: |
213 | if (!__blk_end_request_cur(req, err)) | 213 | if (!__blk_end_request_cur(req, err)) |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9db097a28a74..a0c95cac91f0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -140,7 +140,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) | |||
140 | cmd->result = 0; | 140 | cmd->result = 0; |
141 | spin_lock_irqsave(q->queue_lock, flags); | 141 | spin_lock_irqsave(q->queue_lock, flags); |
142 | blk_requeue_request(q, cmd->request); | 142 | blk_requeue_request(q, cmd->request); |
143 | kblockd_schedule_work(q, &device->requeue_work); | 143 | kblockd_schedule_work(&device->requeue_work); |
144 | spin_unlock_irqrestore(q->queue_lock, flags); | 144 | spin_unlock_irqrestore(q->queue_lock, flags); |
145 | } | 145 | } |
146 | 146 | ||
@@ -1019,8 +1019,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, | |||
1019 | return BLKPREP_DEFER; | 1019 | return BLKPREP_DEFER; |
1020 | } | 1020 | } |
1021 | 1021 | ||
1022 | req->buffer = NULL; | ||
1023 | |||
1024 | /* | 1022 | /* |
1025 | * Next, walk the list, and fill in the addresses and sizes of | 1023 | * Next, walk the list, and fill in the addresses and sizes of |
1026 | * each segment. | 1024 | * each segment. |
@@ -1158,7 +1156,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) | |||
1158 | BUG_ON(blk_rq_bytes(req)); | 1156 | BUG_ON(blk_rq_bytes(req)); |
1159 | 1157 | ||
1160 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); | 1158 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); |
1161 | req->buffer = NULL; | ||
1162 | } | 1159 | } |
1163 | 1160 | ||
1164 | cmd->cmd_len = req->cmd_len; | 1161 | cmd->cmd_len = req->cmd_len; |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index efcbcd182863..96af195224f2 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -737,16 +737,14 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) | |||
737 | goto out; | 737 | goto out; |
738 | } | 738 | } |
739 | 739 | ||
740 | rq->completion_data = page; | ||
740 | blk_add_request_payload(rq, page, len); | 741 | blk_add_request_payload(rq, page, len); |
741 | ret = scsi_setup_blk_pc_cmnd(sdp, rq); | 742 | ret = scsi_setup_blk_pc_cmnd(sdp, rq); |
742 | rq->buffer = page_address(page); | ||
743 | rq->__data_len = nr_bytes; | 743 | rq->__data_len = nr_bytes; |
744 | 744 | ||
745 | out: | 745 | out: |
746 | if (ret != BLKPREP_OK) { | 746 | if (ret != BLKPREP_OK) |
747 | __free_page(page); | 747 | __free_page(page); |
748 | rq->buffer = NULL; | ||
749 | } | ||
750 | return ret; | 748 | return ret; |
751 | } | 749 | } |
752 | 750 | ||
@@ -842,10 +840,9 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq) | |||
842 | { | 840 | { |
843 | struct scsi_cmnd *SCpnt = rq->special; | 841 | struct scsi_cmnd *SCpnt = rq->special; |
844 | 842 | ||
845 | if (rq->cmd_flags & REQ_DISCARD) { | 843 | if (rq->cmd_flags & REQ_DISCARD) |
846 | free_page((unsigned long)rq->buffer); | 844 | __free_page(rq->completion_data); |
847 | rq->buffer = NULL; | 845 | |
848 | } | ||
849 | if (SCpnt->cmnd != rq->cmd) { | 846 | if (SCpnt->cmnd != rq->cmd) { |
850 | mempool_free(SCpnt->cmnd, sd_cdb_pool); | 847 | mempool_free(SCpnt->cmnd, sd_cdb_pool); |
851 | SCpnt->cmnd = NULL; | 848 | SCpnt->cmnd = NULL; |
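Finally, the sd and skd hunks show where the discard payload page goes now that rq->buffer can no longer carry it: the prep path stashes it in rq->completion_data and the unprep/completion path frees it. Roughly, with everything except the block-layer calls illustrative:

static int my_setup_discard(struct request *rq)
{
	struct page *page = alloc_page(GFP_ATOMIC | __GFP_ZERO);

	if (!page)
		return BLKPREP_DEFER;

	rq->completion_data = page;			/* remembered for unprep */
	blk_add_request_payload(rq, page, PAGE_SIZE);	/* payload page, not rq->buffer */
	return BLKPREP_OK;
}

static void my_unprep(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_DISCARD)
		__free_page(rq->completion_data);
}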