author		Jens Axboe <axboe@fb.com>	2017-02-17 16:08:19 -0500
committer	Jens Axboe <axboe@fb.com>	2017-02-17 16:08:19 -0500
commit		818551e2b2c662a1b26de6b4f7d6b8411a838d18 (patch)
tree		f38b4c951df4d33db81ae7b7765a56bce491c2a8 /drivers
parent		6010720da8aab51f33beee63b73cf88016e9b250 (diff)
parent		7520872c0cf4d3df6d74242c6edfb9e70a47df4d (diff)
Merge branch 'for-4.11/next' into for-4.11/linus-merge
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/libata-scsi.c | 4
-rw-r--r--  drivers/block/Kconfig | 13
-rw-r--r--  drivers/block/aoe/aoeblk.c | 4
-rw-r--r--  drivers/block/cciss.c | 77
-rw-r--r--  drivers/block/drbd/drbd_main.c | 6
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 12
-rw-r--r--  drivers/block/drbd/drbd_proc.c | 2
-rw-r--r--  drivers/block/drbd/drbd_req.c | 2
-rw-r--r--  drivers/block/floppy.c | 4
-rw-r--r--  drivers/block/hd.c | 45
-rw-r--r--  drivers/block/mg_disk.c | 31
-rw-r--r--  drivers/block/nbd.c | 258
-rw-r--r--  drivers/block/null_blk.c | 4
-rw-r--r--  drivers/block/osdblk.c | 6
-rw-r--r--  drivers/block/paride/Kconfig | 1
-rw-r--r--  drivers/block/paride/pd.c | 15
-rw-r--r--  drivers/block/pktcdvd.c | 12
-rw-r--r--  drivers/block/ps3disk.c | 15
-rw-r--r--  drivers/block/rbd.c | 24
-rw-r--r--  drivers/block/skd_main.c | 15
-rw-r--r--  drivers/block/sx8.c | 4
-rw-r--r--  drivers/block/virtio_blk.c | 205
-rw-r--r--  drivers/block/xen-blkfront.c | 2
-rw-r--r--  drivers/block/xsysace.c | 2
-rw-r--r--  drivers/block/zram/zram_drv.c | 2
-rw-r--r--  drivers/cdrom/cdrom.c | 34
-rw-r--r--  drivers/cdrom/gdrom.c | 29
-rw-r--r--  drivers/ide/Kconfig | 1
-rw-r--r--  drivers/ide/ide-atapi.c | 78
-rw-r--r--  drivers/ide/ide-cd.c | 190
-rw-r--r--  drivers/ide/ide-cd_ioctl.c | 5
-rw-r--r--  drivers/ide/ide-cd_verbose.c | 6
-rw-r--r--  drivers/ide/ide-devsets.c | 13
-rw-r--r--  drivers/ide/ide-disk.c | 12
-rw-r--r--  drivers/ide/ide-eh.c | 8
-rw-r--r--  drivers/ide/ide-floppy.c | 37
-rw-r--r--  drivers/ide/ide-io.c | 13
-rw-r--r--  drivers/ide/ide-ioctls.c | 14
-rw-r--r--  drivers/ide/ide-park.c | 20
-rw-r--r--  drivers/ide/ide-pm.c | 20
-rw-r--r--  drivers/ide/ide-probe.c | 36
-rw-r--r--  drivers/ide/ide-tape.c | 41
-rw-r--r--  drivers/ide/ide-taskfile.c | 8
-rw-r--r--  drivers/ide/sis5513.c | 2
-rw-r--r--  drivers/md/bcache/request.c | 10
-rw-r--r--  drivers/md/bcache/super.c | 8
-rw-r--r--  drivers/md/dm-cache-target.c | 2
-rw-r--r--  drivers/md/dm-core.h | 1
-rw-r--r--  drivers/md/dm-era-target.c | 2
-rw-r--r--  drivers/md/dm-mpath.c | 132
-rw-r--r--  drivers/md/dm-rq.c | 268
-rw-r--r--  drivers/md/dm-rq.h | 2
-rw-r--r--  drivers/md/dm-table.c | 2
-rw-r--r--  drivers/md/dm-target.c | 7
-rw-r--r--  drivers/md/dm-thin.c | 2
-rw-r--r--  drivers/md/dm.c | 49
-rw-r--r--  drivers/md/dm.h | 3
-rw-r--r--  drivers/md/linear.c | 2
-rw-r--r--  drivers/md/md.c | 6
-rw-r--r--  drivers/md/multipath.c | 2
-rw-r--r--  drivers/md/raid0.c | 6
-rw-r--r--  drivers/md/raid1.c | 11
-rw-r--r--  drivers/md/raid10.c | 10
-rw-r--r--  drivers/md/raid5.c | 12
-rw-r--r--  drivers/memstick/core/ms_block.c | 11
-rw-r--r--  drivers/memstick/core/mspro_block.c | 13
-rw-r--r--  drivers/message/fusion/mptsas.c | 8
-rw-r--r--  drivers/mmc/core/queue.c | 9
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 13
-rw-r--r--  drivers/mtd/ubi/block.c | 15
-rw-r--r--  drivers/nvme/host/core.c | 56
-rw-r--r--  drivers/nvme/host/fc.c | 2
-rw-r--r--  drivers/nvme/host/pci.c | 4
-rw-r--r--  drivers/nvme/host/rdma.c | 6
-rw-r--r--  drivers/nvme/host/scsi.c | 7
-rw-r--r--  drivers/nvme/target/loop.c | 2
-rw-r--r--  drivers/s390/block/scm_blk.c | 7
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 247
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 222
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 174
-rw-r--r--  drivers/scsi/hosts.c | 24
-rw-r--r--  drivers/scsi/hpsa.c | 4
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 8
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 38
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 8
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 22
-rw-r--r--  drivers/scsi/osst.c | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c | 2
-rw-r--r--  drivers/scsi/scsi.c | 354
-rw-r--r--  drivers/scsi/scsi_error.c | 43
-rw-r--r--  drivers/scsi/scsi_lib.c | 264
-rw-r--r--  drivers/scsi/scsi_priv.h | 5
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 34
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 14
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 5
-rw-r--r--  drivers/scsi/sd.c | 48
-rw-r--r--  drivers/scsi/sg.c | 33
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 2
-rw-r--r--  drivers/scsi/sr.c | 9
-rw-r--r--  drivers/scsi/st.c | 28
-rw-r--r--  drivers/scsi/sun3_scsi.c | 2
-rw-r--r--  drivers/target/Kconfig | 1
-rw-r--r--  drivers/target/target_core_pscsi.c | 14
108 files changed, 1495 insertions(+), 2163 deletions(-)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1f863e757ee4..c771d4c341ea 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1265,13 +1265,13 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
  */
 static int atapi_drain_needed(struct request *rq)
 {
-	if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
+	if (likely(!blk_rq_is_passthrough(rq)))
 		return 0;
 
 	if (!blk_rq_bytes(rq) || op_is_write(req_op(rq)))
 		return 0;
 
-	return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
+	return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC;
 }
 
 static int ata_scsi_dev_config(struct scsi_device *sdev,
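
Note: the libata hunk above is the signature change of this whole merge. The SCSI command block, sense buffer and residual count move out of struct request into a separate struct scsi_request reached through the scsi_req() helper, and REQ_TYPE_BLOCK_PC tests become blk_rq_is_passthrough(). A minimal sketch of the new access pattern, assuming only the post-merge <scsi/scsi_request.h> interface (the helper name cdb_present is illustrative, not from the patch):

#include <linux/blkdev.h>
#include <scsi/scsi_request.h>

/* Sketch: how a driver inspects the CDB after this series. */
static bool cdb_present(struct request *rq)
{
	if (!blk_rq_is_passthrough(rq))
		return false;		/* FS requests carry no CDB */
	/* CDB, sense and resid_len now live in the scsi_request PDU */
	return scsi_req(rq)->cmd_len > 0 && scsi_req(rq)->cmd[0] != 0;
}
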
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 223ff2fcae7e..f744de7a0f9b 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -69,6 +69,7 @@ config AMIGA_Z2RAM
 config GDROM
 	tristate "SEGA Dreamcast GD-ROM drive"
 	depends on SH_DREAMCAST
+	select BLK_SCSI_REQUEST # only for the generic cdrom code
 	help
 	  A standard SEGA Dreamcast comes with a modified CD ROM drive called a
 	  "GD-ROM" by SEGA to signify it is capable of reading special disks
@@ -114,6 +115,7 @@ config BLK_CPQ_CISS_DA
 	tristate "Compaq Smart Array 5xxx support"
 	depends on PCI
 	select CHECK_SIGNATURE
+	select BLK_SCSI_REQUEST
 	help
 	  This is the driver for Compaq Smart Array 5xxx controllers.
 	  Everyone using these boards should say Y here.
@@ -386,6 +388,7 @@ config BLK_DEV_RAM_DAX
 config CDROM_PKTCDVD
 	tristate "Packet writing on CD/DVD media (DEPRECATED)"
 	depends on !UML
+	select BLK_SCSI_REQUEST
 	help
 	  Note: This driver is deprecated and will be removed from the
 	  kernel in the near future!
@@ -501,6 +504,16 @@ config VIRTIO_BLK
 	  This is the virtual block driver for virtio.  It can be used with
 	  lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
 
+config VIRTIO_BLK_SCSI
+	bool "SCSI passthrough request for the Virtio block driver"
+	depends on VIRTIO_BLK
+	select BLK_SCSI_REQUEST
+	---help---
+	  Enable support for SCSI passthrough (e.g. the SG_IO ioctl) on
+	  virtio-blk devices.  This is only supported for the legacy
+	  virtio protocol and not enabled by default by any hypervisor.
+	  You probably want to use virtio-scsi instead.
+
 config BLK_DEV_HD
 	bool "Very old hard disk (MFM/RLL/IDE) driver"
 	depends on HAVE_IDE
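
Note: the new VIRTIO_BLK_SCSI option only gates the kernel side of SG_IO; the userspace interface is the stock sg_io_hdr ioctl. A hedged sketch of what it enables, assuming a legacy virtio-blk device that negotiated VIRTIO_BLK_F_SCSI (without the feature, or with CONFIG_VIRTIO_BLK_SCSI=n, the ioctl fails with ENOTTY, as the virtio_blk.c hunk further down enforces; the device path is illustrative):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>
#include <unistd.h>

/* Issue TEST UNIT READY through SG_IO; returns the ioctl result. */
int test_unit_ready(const char *dev)		/* e.g. "/dev/vda" */
{
	unsigned char cdb[6] = { 0x00 };	/* TEST UNIT READY */
	unsigned char sense[32];
	struct sg_io_hdr io;
	int fd, ret;

	fd = open(dev, O_RDONLY);
	if (fd < 0)
		return -1;
	memset(&io, 0, sizeof(io));
	io.interface_id = 'S';
	io.cmdp = cdb;
	io.cmd_len = sizeof(cdb);
	io.sbp = sense;
	io.mx_sb_len = sizeof(sense);
	io.dxfer_direction = SG_DXFER_NONE;	/* no data phase */
	io.timeout = 5000;			/* milliseconds */
	ret = ioctl(fd, SG_IO, &io);
	close(fd);
	return ret;
}
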
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index ec9d8610b25f..027b876370bc 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -396,8 +396,8 @@ aoeblk_gdalloc(void *vp)
 	WARN_ON(d->gd);
 	WARN_ON(d->flags & DEVFL_UP);
 	blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
-	q->backing_dev_info.name = "aoe";
-	q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
+	q->backing_dev_info->name = "aoe";
+	q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
 	d->bufpool = mp;
 	d->blkq = gd->queue = q;
 	q->queuedata = d;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 3a44438a1195..27d613795653 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -52,6 +52,7 @@
 #include <scsi/scsi.h>
 #include <scsi/sg.h>
 #include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_request.h>
 #include <linux/cdrom.h>
 #include <linux/scatterlist.h>
 #include <linux/kthread.h>
@@ -1853,8 +1854,8 @@ static void cciss_softirq_done(struct request *rq)
 	dev_dbg(&h->pdev->dev, "Done with %p\n", rq);
 
 	/* set the residual count for pc requests */
-	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
-		rq->resid_len = c->err_info->ResidualCnt;
+	if (blk_rq_is_passthrough(rq))
+		scsi_req(rq)->resid_len = c->err_info->ResidualCnt;
 
 	blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
 
@@ -1941,9 +1942,16 @@ static void cciss_get_serial_no(ctlr_info_t *h, int logvol,
 static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
 				int drv_index)
 {
-	disk->queue = blk_init_queue(do_cciss_request, &h->lock);
+	disk->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!disk->queue)
 		goto init_queue_failure;
+
+	disk->queue->cmd_size = sizeof(struct scsi_request);
+	disk->queue->request_fn = do_cciss_request;
+	disk->queue->queue_lock = &h->lock;
+	if (blk_init_allocated_queue(disk->queue) < 0)
+		goto cleanup_queue;
+
 	sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
 	disk->major = h->major;
 	disk->first_minor = drv_index << NWD_SHIFT;
@@ -3075,7 +3083,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
 	driver_byte = DRIVER_OK;
 	msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
 
-	if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC)
+	if (blk_rq_is_passthrough(cmd->rq))
 		host_byte = DID_PASSTHROUGH;
 	else
 		host_byte = DID_OK;
@@ -3084,7 +3092,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
 			host_byte, driver_byte);
 
 	if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
-		if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)
+		if (!blk_rq_is_passthrough(cmd->rq))
 			dev_warn(&h->pdev->dev, "cmd %p "
 				"has SCSI Status 0x%x\n",
 				cmd, cmd->err_info->ScsiStatus);
@@ -3095,31 +3103,23 @@ static inline int evaluate_target_status(ctlr_info_t *h,
 	sense_key = 0xf & cmd->err_info->SenseInfo[2];
 	/* no status or recovered error */
 	if (((sense_key == 0x0) || (sense_key == 0x1)) &&
-			(cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC))
+			!blk_rq_is_passthrough(cmd->rq))
 		error_value = 0;
 
 	if (check_for_unit_attention(h, cmd)) {
-		*retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC);
+		*retry_cmd = !blk_rq_is_passthrough(cmd->rq);
 		return 0;
 	}
 
 	/* Not SG_IO or similar? */
-	if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) {
+	if (!blk_rq_is_passthrough(cmd->rq)) {
 		if (error_value != 0)
 			dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION"
 				" sense key = 0x%x\n", cmd, sense_key);
 		return error_value;
 	}
 
-	/* SG_IO or similar, copy sense data back */
-	if (cmd->rq->sense) {
-		if (cmd->rq->sense_len > cmd->err_info->SenseLen)
-			cmd->rq->sense_len = cmd->err_info->SenseLen;
-		memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
-			cmd->rq->sense_len);
-	} else
-		cmd->rq->sense_len = 0;
+	scsi_req(cmd->rq)->sense_len = cmd->err_info->SenseLen;
 
 	return error_value;
 }
 
@@ -3146,15 +3146,14 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 		rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
 		break;
 	case CMD_DATA_UNDERRUN:
-		if (cmd->rq->cmd_type == REQ_TYPE_FS) {
+		if (!blk_rq_is_passthrough(cmd->rq)) {
 			dev_warn(&h->pdev->dev, "cmd %p has"
 				" completed with data underrun "
 				"reported\n", cmd);
-			cmd->rq->resid_len = cmd->err_info->ResidualCnt;
 		}
 		break;
 	case CMD_DATA_OVERRUN:
-		if (cmd->rq->cmd_type == REQ_TYPE_FS)
+		if (!blk_rq_is_passthrough(cmd->rq))
 			dev_warn(&h->pdev->dev, "cciss: cmd %p has"
 				" completed with data overrun "
 				"reported\n", cmd);
@@ -3164,7 +3163,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			"reported invalid\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_PROTOCOL_ERR:
@@ -3172,7 +3171,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			"protocol error\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_HARDWARE_ERR:
@@ -3180,7 +3179,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			" hardware error\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_CONNECTION_LOST:
@@ -3188,7 +3187,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			"connection lost\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_ABORTED:
@@ -3196,7 +3195,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			"aborted\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ABORT);
 		break;
 	case CMD_ABORT_FAILED:
@@ -3204,7 +3203,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			"abort failed\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_UNSOLICITED_ABORT:
@@ -3219,21 +3218,21 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			"%p retried too many times\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ABORT);
 		break;
 	case CMD_TIMEOUT:
 		dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_UNABORTABLE:
 		dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	default:
@@ -3242,7 +3241,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			cmd->err_info->CommandStatus);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 	}
 
@@ -3395,7 +3394,9 @@ static void do_cciss_request(struct request_queue *q)
 	c->Header.SGList = h->max_cmd_sgentries;
 	set_performant_mode(h, c);
 
-	if (likely(creq->cmd_type == REQ_TYPE_FS)) {
+	switch (req_op(creq)) {
+	case REQ_OP_READ:
+	case REQ_OP_WRITE:
 		if(h->cciss_read == CCISS_READ_10) {
 			c->Request.CDB[1] = 0;
 			c->Request.CDB[2] = (start_blk >> 24) & 0xff;	/* MSB */
@@ -3425,12 +3426,16 @@ static void do_cciss_request(struct request_queue *q)
 			c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
 			c->Request.CDB[14] = c->Request.CDB[15] = 0;
 		}
-	} else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) {
-		c->Request.CDBLen = creq->cmd_len;
-		memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
-	} else {
+		break;
+	case REQ_OP_SCSI_IN:
+	case REQ_OP_SCSI_OUT:
+		c->Request.CDBLen = scsi_req(creq)->cmd_len;
+		memcpy(c->Request.CDB, scsi_req(creq)->cmd, BLK_MAX_CDB);
+		scsi_req(creq)->sense = c->err_info->SenseInfo;
+		break;
+	default:
 		dev_warn(&h->pdev->dev, "bad request type %d\n",
-			creq->cmd_type);
+			creq->cmd_flags);
 		BUG();
 	}
 
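
Note: cciss above is the template for every legacy request_fn driver converted in this merge. The queue can no longer come from blk_init_queue() directly, because cmd_size must be set before initialization so the block layer allocates a struct scsi_request PDU in front of each request. A condensed sketch of that ordering under assumed names (struct spinlock argument and mydrv_request_fn are placeholders, not from the patch):

#include <linux/blkdev.h>
#include <scsi/scsi_request.h>

static void mydrv_request_fn(struct request_queue *q);	/* placeholder */

static struct request_queue *mydrv_alloc_queue(spinlock_t *lock)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (!q)
		return NULL;
	q->cmd_size = sizeof(struct scsi_request);	/* per-request PDU */
	q->request_fn = mydrv_request_fn;
	q->queue_lock = lock;
	if (blk_init_allocated_queue(q) < 0) {		/* new one-arg form */
		blk_cleanup_queue(q);
		return NULL;
	}
	return q;
}
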
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 83482721bc01..d305f05be648 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2462,7 +2462,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
 
 	if (get_ldev(device)) {
 		q = bdev_get_queue(device->ldev->backing_bdev);
-		r = bdi_congested(&q->backing_dev_info, bdi_bits);
+		r = bdi_congested(q->backing_dev_info, bdi_bits);
 		put_ldev(device);
 		if (r)
 			reason = 'b';
@@ -2834,8 +2834,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	/* we have no partitions. we contain only ourselves. */
 	device->this_bdev->bd_contains = device->this_bdev;
 
-	q->backing_dev_info.congested_fn = drbd_congested;
-	q->backing_dev_info.congested_data = device;
+	q->backing_dev_info->congested_fn = drbd_congested;
+	q->backing_dev_info->congested_data = device;
 
 	blk_queue_make_request(q, drbd_make_request);
 	blk_queue_write_cache(q, true, true);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index f35db29cac76..908c704e20aa 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1328,11 +1328,13 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
 	if (b) {
 		blk_queue_stack_limits(q, b);
 
-		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
+		if (q->backing_dev_info->ra_pages !=
+		    b->backing_dev_info->ra_pages) {
 			drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
-				 q->backing_dev_info.ra_pages,
-				 b->backing_dev_info.ra_pages);
-			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
+				 q->backing_dev_info->ra_pages,
+				 b->backing_dev_info->ra_pages);
+			q->backing_dev_info->ra_pages =
+						b->backing_dev_info->ra_pages;
 		}
 	}
 	fixup_discard_if_not_supported(q);
@@ -3345,7 +3347,7 @@ static void device_to_statistics(struct device_statistics *s,
 	s->dev_disk_flags = md->flags;
 	q = bdev_get_queue(device->ldev->backing_bdev);
 	s->dev_lower_blocked =
-		bdi_congested(&q->backing_dev_info,
+		bdi_congested(q->backing_dev_info,
 			      (1 << WB_async_congested) |
 			      (1 << WB_sync_congested));
 	put_ldev(device);
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index be2b93fd2c11..8378142f7a55 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -288,7 +288,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 			seq_printf(seq, "%2d: cs:Unconfigured\n", i);
 		} else {
 			/* reset device->congestion_reason */
-			bdi_rw_congested(&device->rq_queue->backing_dev_info);
+			bdi_rw_congested(device->rq_queue->backing_dev_info);
 
 			nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
 			wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index de279fe4e4fd..cb6bdb75d52d 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -938,7 +938,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
 
 	switch (rbm) {
 	case RB_CONGESTED_REMOTE:
-		bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+		bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
 		return bdi_read_congested(bdi);
 	case RB_LEAST_PENDING:
 		return atomic_read(&device->local_cnt) >
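
Note: all four drbd hunks (and the aoe, pktcdvd and rbd ones) are mechanical fallout of the same change: struct request_queue now holds a pointer to its backing_dev_info instead of embedding it, so every &q->backing_dev_info becomes q->backing_dev_info. In isolation, with the congestion bits used above:

#include <linux/backing-dev.h>
#include <linux/blkdev.h>

/* Before the merge: r = bdi_congested(&q->backing_dev_info, bits);
 * After: the bdi is a separately allocated, pointed-to object. */
static int queue_congested(struct request_queue *q)
{
	return bdi_congested(q->backing_dev_info,
			     (1 << WB_async_congested) |
			     (1 << WB_sync_congested));
}
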
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 184887af4b9f..45b4384f650c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2900,8 +2900,8 @@ static void do_fd_request(struct request_queue *q)
 		return;
 
 	if (WARN(atomic_read(&usage_count) == 0,
-		 "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n",
-		 current_req, (long)blk_rq_pos(current_req), current_req->cmd_type,
+		 "warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
+		 current_req, (long)blk_rq_pos(current_req),
 		 (unsigned long long) current_req->cmd_flags))
 		return;
 
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index a9b48ed7a3cd..6043648da1e8 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -626,30 +626,29 @@ repeat:
 		req_data_dir(req) == READ ? "read" : "writ",
 		cyl, head, sec, nsect, bio_data(req->bio));
 #endif
-	if (req->cmd_type == REQ_TYPE_FS) {
-		switch (rq_data_dir(req)) {
-		case READ:
-			hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
-				&read_intr);
-			if (reset)
-				goto repeat;
-			break;
-		case WRITE:
-			hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE,
-				&write_intr);
-			if (reset)
-				goto repeat;
-			if (wait_DRQ()) {
-				bad_rw_intr();
-				goto repeat;
-			}
-			outsw(HD_DATA, bio_data(req->bio), 256);
-			break;
-		default:
-			printk("unknown hd-command\n");
-			hd_end_request_cur(-EIO);
-			break;
-		}
+
+	switch (req_op(req)) {
+	case REQ_OP_READ:
+		hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
+			&read_intr);
+		if (reset)
+			goto repeat;
+		break;
+	case REQ_OP_WRITE:
+		hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE,
+			&write_intr);
+		if (reset)
+			goto repeat;
+		if (wait_DRQ()) {
+			bad_rw_intr();
+			goto repeat;
+		}
+		outsw(HD_DATA, bio_data(req->bio), 256);
+		break;
+	default:
+		printk("unknown hd-command\n");
+		hd_end_request_cur(-EIO);
+		break;
 	}
 }
 
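
Note: hd.c shows the conversion shape shared by mg_disk, ps3disk, rbd and nbd below: the old cmd_type test plus rq_data_dir() pair collapses into a single switch on req_op(), with unsupported ops failed in the default arm. Reduced to a sketch (the mydrv_* handlers are placeholders):

#include <linux/blkdev.h>

static int mydrv_read(struct request *req);	/* placeholders */
static int mydrv_write(struct request *req);

/* Sketch of the post-merge dispatch: one switch, no cmd_type. */
static int mydrv_dispatch(struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_READ:
		return mydrv_read(req);
	case REQ_OP_WRITE:
		return mydrv_write(req);
	default:
		return -EIO;	/* anything else is unsupported */
	}
}
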
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index e937fcf71769..286f276f586e 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -670,15 +670,17 @@ static void mg_request_poll(struct request_queue *q)
 			break;
 		}
 
-		if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) {
-			mg_end_request_cur(host, -EIO);
-			continue;
-		}
-
-		if (rq_data_dir(host->req) == READ)
+		switch (req_op(host->req)) {
+		case REQ_OP_READ:
 			mg_read(host->req);
-		else
+			break;
+		case REQ_OP_WRITE:
 			mg_write(host->req);
+			break;
+		default:
+			mg_end_request_cur(host, -EIO);
+			break;
+		}
 	}
 }
 
@@ -687,13 +689,15 @@ static unsigned int mg_issue_req(struct request *req,
 		unsigned int sect_num,
 		unsigned int sect_cnt)
 {
-	if (rq_data_dir(req) == READ) {
+	switch (req_op(host->req)) {
+	case REQ_OP_READ:
 		if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
 				!= MG_ERR_NONE) {
 			mg_bad_rw_intr(host);
 			return host->error;
 		}
-	} else {
+		break;
+	case REQ_OP_WRITE:
 		/* TODO : handler */
 		outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
 		if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
@@ -712,6 +716,10 @@ static unsigned int mg_issue_req(struct request *req,
 		mod_timer(&host->timer, jiffies + 3 * HZ);
 		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
 				MG_REG_COMMAND);
+		break;
+	default:
+		mg_end_request_cur(host, -EIO);
+		break;
 	}
 	return MG_ERR_NONE;
 }
@@ -753,11 +761,6 @@ static void mg_request(struct request_queue *q)
 			continue;
 		}
 
-		if (unlikely(req->cmd_type != REQ_TYPE_FS)) {
-			mg_end_request_cur(host, -EIO);
-			continue;
-		}
-
 		if (!mg_issue_req(req, host, sect_num, sect_cnt))
 			return;
 	}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9fd06eeb1a17..0be84a3cb6d7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -41,6 +41,9 @@
 
 #include <linux/nbd.h>
 
+static DEFINE_IDR(nbd_index_idr);
+static DEFINE_MUTEX(nbd_index_mutex);
+
 struct nbd_sock {
 	struct socket *sock;
 	struct mutex tx_lock;
@@ -89,8 +92,9 @@ static struct dentry *nbd_dbg_dir;
 #define NBD_MAGIC 0x68797548
 
 static unsigned int nbds_max = 16;
-static struct nbd_device *nbd_dev;
 static int max_part;
+static struct workqueue_struct *recv_workqueue;
+static int part_shift;
 
 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
 {
@@ -193,13 +197,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
 	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
 	req->errors++;
 
-	/*
-	 * If our disconnect packet times out then we're already holding the
-	 * config_lock and could deadlock here, so just set an error and return,
-	 * we'll handle shutting everything down later.
-	 */
-	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
-		return BLK_EH_HANDLED;
-
 	mutex_lock(&nbd->config_lock);
 	sock_shutdown(nbd);
 	mutex_unlock(&nbd->config_lock);
@@ -278,14 +275,29 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	u32 type;
 	u32 tag = blk_mq_unique_tag(req);
 
-	if (req_op(req) == REQ_OP_DISCARD)
+	switch (req_op(req)) {
+	case REQ_OP_DISCARD:
 		type = NBD_CMD_TRIM;
-	else if (req_op(req) == REQ_OP_FLUSH)
+		break;
+	case REQ_OP_FLUSH:
 		type = NBD_CMD_FLUSH;
-	else if (rq_data_dir(req) == WRITE)
+		break;
+	case REQ_OP_WRITE:
 		type = NBD_CMD_WRITE;
-	else
+		break;
+	case REQ_OP_READ:
 		type = NBD_CMD_READ;
+		break;
+	default:
+		return -EIO;
+	}
+
+	if (rq_data_dir(req) == WRITE &&
+	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
+		dev_err_ratelimited(disk_to_dev(nbd->disk),
+				    "Write on read-only\n");
+		return -EIO;
+	}
 
 	memset(&request, 0, sizeof(request));
 	request.magic = htonl(NBD_REQUEST_MAGIC);
@@ -510,18 +522,6 @@ static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 		goto error_out;
 	}
 
-	if (req->cmd_type != REQ_TYPE_FS &&
-	    req->cmd_type != REQ_TYPE_DRV_PRIV)
-		goto error_out;
-
-	if (req->cmd_type == REQ_TYPE_FS &&
-	    rq_data_dir(req) == WRITE &&
-	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
-		dev_err_ratelimited(disk_to_dev(nbd->disk),
-				    "Write on read-only\n");
-		goto error_out;
-	}
-
 	req->errors = 0;
 
 	nsock = nbd->socks[index];
@@ -785,7 +785,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		INIT_WORK(&args[i].work, recv_work);
 		args[i].nbd = nbd;
 		args[i].index = i;
-		queue_work(system_long_wq, &args[i].work);
+		queue_work(recv_workqueue, &args[i].work);
 	}
 	wait_event_interruptible(nbd->recv_wq,
 				 atomic_read(&nbd->recv_threads) == 0);
@@ -996,6 +996,103 @@ static struct blk_mq_ops nbd_mq_ops = {
 	.timeout	= nbd_xmit_timeout,
 };
 
+static void nbd_dev_remove(struct nbd_device *nbd)
+{
+	struct gendisk *disk = nbd->disk;
+	nbd->magic = 0;
+	if (disk) {
+		del_gendisk(disk);
+		blk_cleanup_queue(disk->queue);
+		blk_mq_free_tag_set(&nbd->tag_set);
+		put_disk(disk);
+	}
+	kfree(nbd);
+}
+
+static int nbd_dev_add(int index)
+{
+	struct nbd_device *nbd;
+	struct gendisk *disk;
+	struct request_queue *q;
+	int err = -ENOMEM;
+
+	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
+	if (!nbd)
+		goto out;
+
+	disk = alloc_disk(1 << part_shift);
+	if (!disk)
+		goto out_free_nbd;
+
+	if (index >= 0) {
+		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
+				GFP_KERNEL);
+		if (err == -ENOSPC)
+			err = -EEXIST;
+	} else {
+		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
+		if (err >= 0)
+			index = err;
+	}
+	if (err < 0)
+		goto out_free_disk;
+
+	nbd->disk = disk;
+	nbd->tag_set.ops = &nbd_mq_ops;
+	nbd->tag_set.nr_hw_queues = 1;
+	nbd->tag_set.queue_depth = 128;
+	nbd->tag_set.numa_node = NUMA_NO_NODE;
+	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
+	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
+		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
+	nbd->tag_set.driver_data = nbd;
+
+	err = blk_mq_alloc_tag_set(&nbd->tag_set);
+	if (err)
+		goto out_free_idr;
+
+	q = blk_mq_init_queue(&nbd->tag_set);
+	if (IS_ERR(q)) {
+		err = PTR_ERR(q);
+		goto out_free_tags;
+	}
+	disk->queue = q;
+
+	/*
+	 * Tell the block layer that we are not a rotational device
+	 */
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+	disk->queue->limits.discard_granularity = 512;
+	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
+	disk->queue->limits.discard_zeroes_data = 0;
+	blk_queue_max_hw_sectors(disk->queue, 65536);
+	disk->queue->limits.max_sectors = 256;
+
+	nbd->magic = NBD_MAGIC;
+	mutex_init(&nbd->config_lock);
+	disk->major = NBD_MAJOR;
+	disk->first_minor = index << part_shift;
+	disk->fops = &nbd_fops;
+	disk->private_data = nbd;
+	sprintf(disk->disk_name, "nbd%d", index);
+	init_waitqueue_head(&nbd->recv_wq);
+	nbd_reset(nbd);
+	add_disk(disk);
+	return index;
+
+out_free_tags:
+	blk_mq_free_tag_set(&nbd->tag_set);
+out_free_idr:
+	idr_remove(&nbd_index_idr, index);
+out_free_disk:
+	put_disk(disk);
+out_free_nbd:
+	kfree(nbd);
+out:
+	return err;
+}
+
 /*
  * And here should be modules and kernel interface
  *  (Just smiley confuses emacs :-)
@@ -1003,9 +1100,7 @@ static struct blk_mq_ops nbd_mq_ops = {
 
 static int __init nbd_init(void)
 {
-	int err = -ENOMEM;
 	int i;
-	int part_shift;
 
 	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
 
@@ -1034,111 +1129,38 @@ static int __init nbd_init(void)
 
 	if (nbds_max > 1UL << (MINORBITS - part_shift))
 		return -EINVAL;
-
-	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
-	if (!nbd_dev)
+	recv_workqueue = alloc_workqueue("knbd-recv",
+					 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+	if (!recv_workqueue)
 		return -ENOMEM;
 
-	for (i = 0; i < nbds_max; i++) {
-		struct request_queue *q;
-		struct gendisk *disk = alloc_disk(1 << part_shift);
-		if (!disk)
-			goto out;
-		nbd_dev[i].disk = disk;
-
-		nbd_dev[i].tag_set.ops = &nbd_mq_ops;
-		nbd_dev[i].tag_set.nr_hw_queues = 1;
-		nbd_dev[i].tag_set.queue_depth = 128;
-		nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
-		nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
-		nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
-			BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
-		nbd_dev[i].tag_set.driver_data = &nbd_dev[i];
-
-		err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
-		if (err) {
-			put_disk(disk);
-			goto out;
-		}
-
-		/*
-		 * The new linux 2.5 block layer implementation requires
-		 * every gendisk to have its very own request_queue struct.
-		 * These structs are big so we dynamically allocate them.
-		 */
-		q = blk_mq_init_queue(&nbd_dev[i].tag_set);
-		if (IS_ERR(q)) {
-			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
-			put_disk(disk);
-			goto out;
-		}
-		disk->queue = q;
-
-		/*
-		 * Tell the block layer that we are not a rotational device
-		 */
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
-		disk->queue->limits.discard_granularity = 512;
-		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
-		disk->queue->limits.discard_zeroes_data = 0;
-		blk_queue_max_hw_sectors(disk->queue, 65536);
-		disk->queue->limits.max_sectors = 256;
-	}
-
-	if (register_blkdev(NBD_MAJOR, "nbd")) {
-		err = -EIO;
-		goto out;
-	}
-
-	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
+	if (register_blkdev(NBD_MAJOR, "nbd"))
+		return -EIO;
 
 	nbd_dbg_init();
 
-	for (i = 0; i < nbds_max; i++) {
-		struct gendisk *disk = nbd_dev[i].disk;
-		nbd_dev[i].magic = NBD_MAGIC;
-		mutex_init(&nbd_dev[i].config_lock);
-		disk->major = NBD_MAJOR;
-		disk->first_minor = i << part_shift;
-		disk->fops = &nbd_fops;
-		disk->private_data = &nbd_dev[i];
-		sprintf(disk->disk_name, "nbd%d", i);
-		init_waitqueue_head(&nbd_dev[i].recv_wq);
-		nbd_reset(&nbd_dev[i]);
-		add_disk(disk);
-	}
-
+	mutex_lock(&nbd_index_mutex);
+	for (i = 0; i < nbds_max; i++)
+		nbd_dev_add(i);
+	mutex_unlock(&nbd_index_mutex);
+	return 0;
+}
+
+static int nbd_exit_cb(int id, void *ptr, void *data)
+{
+	struct nbd_device *nbd = ptr;
+	nbd_dev_remove(nbd);
 	return 0;
-out:
-	while (i--) {
-		blk_mq_free_tag_set(&nbd_dev[i].tag_set);
-		blk_cleanup_queue(nbd_dev[i].disk->queue);
-		put_disk(nbd_dev[i].disk);
-	}
-	kfree(nbd_dev);
-	return err;
 }
 
 static void __exit nbd_cleanup(void)
 {
-	int i;
-
 	nbd_dbg_close();
 
-	for (i = 0; i < nbds_max; i++) {
-		struct gendisk *disk = nbd_dev[i].disk;
-		nbd_dev[i].magic = 0;
-		if (disk) {
-			del_gendisk(disk);
-			blk_cleanup_queue(disk->queue);
-			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
-			put_disk(disk);
-		}
-	}
+	idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL);
+	idr_destroy(&nbd_index_idr);
+	destroy_workqueue(recv_workqueue);
 	unregister_blkdev(NBD_MAJOR, "nbd");
-	kfree(nbd_dev);
 	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
 }
 
 module_init(nbd_init);
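
Note: the nbd rewrite above swaps a fixed, kcalloc'd device array for an IDR keyed by minor index, which is what later lets devices be created on demand. The IDR calls it leans on, reduced to a sketch (struct mydev stands in for struct nbd_device):

#include <linux/idr.h>
#include <linux/slab.h>

struct mydev { int id; };			/* placeholder payload */

static DEFINE_IDR(dev_idr);

/* Reserve a specific id when index >= 0, else the lowest free one. */
static int dev_add(struct mydev *dev, int index)
{
	int id;

	if (index >= 0) {
		id = idr_alloc(&dev_idr, dev, index, index + 1, GFP_KERNEL);
		if (id == -ENOSPC)
			id = -EEXIST;		/* slot already taken */
	} else {
		id = idr_alloc(&dev_idr, dev, 0, 0, GFP_KERNEL);
	}
	return id;				/* id or -errno */
}

static int dev_free_cb(int id, void *ptr, void *data)
{
	kfree(ptr);				/* the struct mydev stored above */
	return 0;
}

static void dev_cleanup(void)
{
	idr_for_each(&dev_idr, dev_free_cb, NULL);
	idr_destroy(&dev_idr);
}
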
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a67b7ea1e3bf..6f2e565bccc5 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -432,11 +432,11 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	struct request *rq;
 	struct bio *bio = rqd->bio;
 
-	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
+	rq = blk_mq_alloc_request(q,
+		op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
 	if (IS_ERR(rq))
 		return -ENOMEM;
 
-	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->__sector = bio->bi_iter.bi_sector;
 	rq->ioprio = bio_prio(bio);
 
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 92900f5f0b47..8127b8201a01 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -308,12 +308,6 @@ static void osdblk_rq_fn(struct request_queue *q)
 		if (!rq)
 			break;
 
-		/* filter out block requests we don't understand */
-		if (rq->cmd_type != REQ_TYPE_FS) {
-			blk_end_request_all(rq, 0);
-			continue;
-		}
-
 		/* deduce our operation (read, write, flush) */
 		/* I wish the block layer simplified cmd_type/cmd_flags/cmd[]
 		 * into a clearly defined set of RPC commands:
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
index efefb5ac3004..3a15247942e4 100644
--- a/drivers/block/paride/Kconfig
+++ b/drivers/block/paride/Kconfig
@@ -25,6 +25,7 @@ config PARIDE_PD
 config PARIDE_PCD
 	tristate "Parallel port ATAPI CD-ROMs"
 	depends on PARIDE
+	select BLK_SCSI_REQUEST # only for the generic cdrom code
 	---help---
 	  This option enables the high-level driver for ATAPI CD-ROM devices
 	  connected through a parallel port. If you chose to build PARIDE
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index c3ed2fc72daa..644ba0888bd4 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -439,18 +439,16 @@ static int pd_retries = 0; /* i/o error retry count */
 static int pd_block;		/* address of next requested block */
 static int pd_count;		/* number of blocks still to do */
 static int pd_run;		/* sectors in current cluster */
-static int pd_cmd;		/* current command READ/WRITE */
 static char *pd_buf;		/* buffer for request in progress */
 
 static enum action do_pd_io_start(void)
 {
-	if (pd_req->cmd_type == REQ_TYPE_DRV_PRIV) {
+	switch (req_op(pd_req)) {
+	case REQ_OP_DRV_IN:
 		phase = pd_special;
 		return pd_special();
-	}
-
-	pd_cmd = rq_data_dir(pd_req);
-	if (pd_cmd == READ || pd_cmd == WRITE) {
+	case REQ_OP_READ:
+	case REQ_OP_WRITE:
 		pd_block = blk_rq_pos(pd_req);
 		pd_count = blk_rq_cur_sectors(pd_req);
 		if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
@@ -458,7 +456,7 @@ static enum action do_pd_io_start(void)
 		pd_run = blk_rq_sectors(pd_req);
 		pd_buf = bio_data(pd_req->bio);
 		pd_retries = 0;
-		if (pd_cmd == READ)
+		if (req_op(pd_req) == REQ_OP_READ)
 			return do_pd_read_start();
 		else
 			return do_pd_write_start();
@@ -723,11 +721,10 @@ static int pd_special_command(struct pd_unit *disk,
 	struct request *rq;
 	int err = 0;
 
-	rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM);
+	rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
-	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->special = func;
 
 	err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
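
Note: pd.c above illustrates what replaces the old rq->cmd_type = REQ_TYPE_DRV_PRIV idiom: the private nature of a request is now declared by the op passed to blk_get_request(). A hedged sketch of the same round trip under placeholder names:

#include <linux/blkdev.h>

/* Sketch: issue a driver-private request post-merge. The REQ_OP_DRV_IN
 * op at allocation replaces the old REQ_TYPE_DRV_PRIV assignment. */
static int mydrv_special(struct gendisk *gd, void *payload)
{
	struct request *rq;
	int err;

	rq = blk_get_request(gd->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->special = payload;
	err = blk_execute_rq(gd->queue, gd, rq, 0);
	blk_put_request(rq);
	return err;
}
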
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 1b94c1ca5c5f..66d846ba85a9 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -704,10 +704,10 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 	int ret = 0;
 
 	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
-			     WRITE : READ, __GFP_RECLAIM);
+			     REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
-	blk_rq_set_block_pc(rq);
+	scsi_req_init(rq);
 
 	if (cgc->buflen) {
 		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
@@ -716,8 +716,8 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 			goto out;
 	}
 
-	rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
-	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
+	scsi_req(rq)->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
+	memcpy(scsi_req(rq)->cmd, cgc->cmd, CDROM_PACKET_SIZE);
 
 	rq->timeout = 60*HZ;
 	if (cgc->quiet)
@@ -1243,7 +1243,7 @@ try_next_bio:
 					&& pd->bio_queue_size <= pd->write_congestion_off);
 	spin_unlock(&pd->lock);
 	if (wakeup) {
-		clear_bdi_congested(&pd->disk->queue->backing_dev_info,
+		clear_bdi_congested(pd->disk->queue->backing_dev_info,
 					BLK_RW_ASYNC);
 	}
 
@@ -2370,7 +2370,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
 	spin_lock(&pd->lock);
 	if (pd->write_congestion_on > 0
 	    && pd->bio_queue_size >= pd->write_congestion_on) {
-		set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
+		set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
 		do {
 			spin_unlock(&pd->lock);
 			congestion_wait(BLK_RW_ASYNC, HZ);
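
Note: pkt_generic_packet above is the canonical in-kernel user of the new passthrough allocation: pick REQ_OP_SCSI_IN or REQ_OP_SCSI_OUT up front, call scsi_req_init(), then fill the CDB through scsi_req(). As a sketch, assuming the queue was set up with cmd_size >= sizeof(struct scsi_request) as the BLK_SCSI_REQUEST users above do (send_cdb is an illustrative name):

#include <linux/blkdev.h>
#include <scsi/scsi_request.h>

static int send_cdb(struct request_queue *q, struct gendisk *gd,
		    const unsigned char *cdb, unsigned int cdb_len, bool out)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, out ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
			     __GFP_RECLAIM);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	scsi_req_init(rq);		/* reset cmd/sense bookkeeping */
	scsi_req(rq)->cmd_len = cdb_len;
	memcpy(scsi_req(rq)->cmd, cdb, cdb_len);  /* cdb_len <= BLK_MAX_CDB */
	rq->timeout = 60 * HZ;
	err = blk_execute_rq(q, gd, rq, 0);
	blk_put_request(rq);
	return err;
}
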
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 76f33c84ce3d..a809e3e9feb8 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -196,16 +196,19 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
 	dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
 
 	while ((req = blk_fetch_request(q))) {
-		if (req_op(req) == REQ_OP_FLUSH) {
+		switch (req_op(req)) {
+		case REQ_OP_FLUSH:
 			if (ps3disk_submit_flush_request(dev, req))
-				break;
-		} else if (req->cmd_type == REQ_TYPE_FS) {
+				return;
+			break;
+		case REQ_OP_READ:
+		case REQ_OP_WRITE:
 			if (ps3disk_submit_request_sg(dev, req))
-				break;
-		} else {
+				return;
+			break;
+		default:
 			blk_dump_rq_flags(req, DEVICE_NAME " bad request");
 			__blk_end_request_all(req, -EIO);
-			continue;
 		}
 	}
 }
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 36d2b9f4e836..588721f30a22 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4099,19 +4099,21 @@ static void rbd_queue_workfn(struct work_struct *work)
 	bool must_be_locked;
 	int result;
 
-	if (rq->cmd_type != REQ_TYPE_FS) {
-		dout("%s: non-fs request type %d\n", __func__,
-			(int) rq->cmd_type);
-		result = -EIO;
-		goto err;
-	}
-
-	if (req_op(rq) == REQ_OP_DISCARD)
+	switch (req_op(rq)) {
+	case REQ_OP_DISCARD:
 		op_type = OBJ_OP_DISCARD;
-	else if (req_op(rq) == REQ_OP_WRITE)
+		break;
+	case REQ_OP_WRITE:
 		op_type = OBJ_OP_WRITE;
-	else
+		break;
+	case REQ_OP_READ:
 		op_type = OBJ_OP_READ;
+		break;
+	default:
+		dout("%s: non-fs request type %d\n", __func__, req_op(rq));
+		result = -EIO;
+		goto err;
+	}
 
 	/* Ignore/skip any zero-length requests */
 
@@ -4524,7 +4526,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	q->limits.discard_zeroes_data = 1;
 
 	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
-		q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
 
 	disk->queue = q;
 
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index abf805e332e2..27833e4dae2a 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -1204,10 +1204,11 @@ static void skd_complete_special(struct skd_device *skdev,
 static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
 			  uint cmd_in, ulong arg)
 {
-	int rc = 0;
+	static const int sg_version_num = 30527;
+	int rc = 0, timeout;
 	struct gendisk *disk = bdev->bd_disk;
 	struct skd_device *skdev = disk->private_data;
-	void __user *p = (void *)arg;
+	int __user *p = (int __user *)arg;
 
 	pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
 		 skdev->name, __func__, __LINE__,
@@ -1218,12 +1219,18 @@ static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
 
 	switch (cmd_in) {
 	case SG_SET_TIMEOUT:
+		rc = get_user(timeout, p);
+		if (!rc)
+			disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
+		break;
 	case SG_GET_TIMEOUT:
+		rc = jiffies_to_clock_t(disk->queue->sg_timeout);
+		break;
 	case SG_GET_VERSION_NUM:
-		rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
+		rc = put_user(sg_version_num, p);
 		break;
 	case SG_IO:
-		rc = skd_ioctl_sg_io(skdev, mode, p);
+		rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
 		break;
 
 	default:
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 0e93ad7b8511..c8e072caf56f 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -567,7 +567,7 @@ static struct carm_request *carm_get_special(struct carm_host *host)
 	if (!crq)
 		return NULL;
 
-	rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL);
+	rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, GFP_KERNEL);
 	if (IS_ERR(rq)) {
 		spin_lock_irqsave(&host->lock, flags);
 		carm_put_request(host, crq);
@@ -620,7 +620,6 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
 	spin_unlock_irq(&host->lock);
 
 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
-	crq->rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	crq->rq->special = crq;
 	blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
 
@@ -661,7 +660,6 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
 	crq->msg_bucket = (u32) rc;
 
 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
-	crq->rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	crq->rq->special = crq;
 	blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
 
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 10332c24f961..a363170e45b1 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -52,11 +52,13 @@ struct virtio_blk {
52}; 52};
53 53
54struct virtblk_req { 54struct virtblk_req {
55 struct request *req; 55#ifdef CONFIG_VIRTIO_BLK_SCSI
56 struct virtio_blk_outhdr out_hdr; 56 struct scsi_request sreq; /* for SCSI passthrough, must be first */
57 u8 sense[SCSI_SENSE_BUFFERSIZE];
57 struct virtio_scsi_inhdr in_hdr; 58 struct virtio_scsi_inhdr in_hdr;
59#endif
60 struct virtio_blk_outhdr out_hdr;
58 u8 status; 61 u8 status;
59 u8 sense[SCSI_SENSE_BUFFERSIZE];
60 struct scatterlist sg[]; 62 struct scatterlist sg[];
61}; 63};
62 64
@@ -72,28 +74,88 @@ static inline int virtblk_result(struct virtblk_req *vbr)
72 } 74 }
73} 75}
74 76
75static int __virtblk_add_req(struct virtqueue *vq, 77/*
76 struct virtblk_req *vbr, 78 * If this is a packet command we need a couple of additional headers. Behind
77 struct scatterlist *data_sg, 79 * the normal outhdr we put a segment with the scsi command block, and before
78 bool have_data) 80 * the normal inhdr we put the sense data and the inhdr with additional status
81 * information.
82 */
83#ifdef CONFIG_VIRTIO_BLK_SCSI
84static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
85 struct scatterlist *data_sg, bool have_data)
79{ 86{
80 struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6]; 87 struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
81 unsigned int num_out = 0, num_in = 0; 88 unsigned int num_out = 0, num_in = 0;
82 __virtio32 type = vbr->out_hdr.type & ~cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT);
83 89
84 sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr)); 90 sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
85 sgs[num_out++] = &hdr; 91 sgs[num_out++] = &hdr;
92 sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
93 sgs[num_out++] = &cmd;
94
95 if (have_data) {
96 if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
97 sgs[num_out++] = data_sg;
98 else
99 sgs[num_out + num_in++] = data_sg;
100 }
101
102 sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
103 sgs[num_out + num_in++] = &sense;
104 sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
105 sgs[num_out + num_in++] = &inhdr;
106 sg_init_one(&status, &vbr->status, sizeof(vbr->status));
107 sgs[num_out + num_in++] = &status;
108
109 return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
110}
111
112static inline void virtblk_scsi_request_done(struct request *req)
113{
114 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
115 struct virtio_blk *vblk = req->q->queuedata;
116 struct scsi_request *sreq = &vbr->sreq;
117
118 sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
119 sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
120 req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
121}
122
123static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
124 unsigned int cmd, unsigned long data)
125{
126 struct gendisk *disk = bdev->bd_disk;
127 struct virtio_blk *vblk = disk->private_data;
86 128
87 /* 129 /*
88 * If this is a packet command we need a couple of additional headers. 130 * Only allow the generic SCSI ioctls if the host can support it.
89 * Behind the normal outhdr we put a segment with the scsi command
90 * block, and before the normal inhdr we put the sense data and the
91 * inhdr with additional status information.
92 */ 131 */
93 if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) { 132 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
94 sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len); 133 return -ENOTTY;
95 sgs[num_out++] = &cmd; 134
96 } 135 return scsi_cmd_blk_ioctl(bdev, mode, cmd,
136 (void __user *)data);
137}
138#else
139static inline int virtblk_add_req_scsi(struct virtqueue *vq,
140 struct virtblk_req *vbr, struct scatterlist *data_sg,
141 bool have_data)
142{
143 return -EIO;
144}
145static inline void virtblk_scsi_request_done(struct request *req)
146{
147}
148#define virtblk_ioctl NULL
149#endif /* CONFIG_VIRTIO_BLK_SCSI */
150
151static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
152 struct scatterlist *data_sg, bool have_data)
153{
154 struct scatterlist hdr, status, *sgs[3];
155 unsigned int num_out = 0, num_in = 0;
156
157 sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
158 sgs[num_out++] = &hdr;
97 159
98 if (have_data) { 160 if (have_data) {
99 if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT)) 161 if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
@@ -102,14 +164,6 @@ static int __virtblk_add_req(struct virtqueue *vq,
102 sgs[num_out + num_in++] = data_sg; 164 sgs[num_out + num_in++] = data_sg;
103 } 165 }
104 166
105 if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
106 memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
107 sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
108 sgs[num_out + num_in++] = &sense;
109 sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
110 sgs[num_out + num_in++] = &inhdr;
111 }
112
113 sg_init_one(&status, &vbr->status, sizeof(vbr->status)); 167 sg_init_one(&status, &vbr->status, sizeof(vbr->status));
114 sgs[num_out + num_in++] = &status; 168 sgs[num_out + num_in++] = &status;
115 169
@@ -119,15 +173,16 @@ static int __virtblk_add_req(struct virtqueue *vq,
119static inline void virtblk_request_done(struct request *req) 173static inline void virtblk_request_done(struct request *req)
120{ 174{
121 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); 175 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
122 struct virtio_blk *vblk = req->q->queuedata;
123 int error = virtblk_result(vbr); 176 int error = virtblk_result(vbr);
124 177
125 if (req->cmd_type == REQ_TYPE_BLOCK_PC) { 178 switch (req_op(req)) {
126 req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual); 179 case REQ_OP_SCSI_IN:
127 req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len); 180 case REQ_OP_SCSI_OUT:
128 req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors); 181 virtblk_scsi_request_done(req);
129 } else if (req->cmd_type == REQ_TYPE_DRV_PRIV) { 182 break;
183 case REQ_OP_DRV_IN:
130 req->errors = (error != 0); 184 req->errors = (error != 0);
185 break;
131 } 186 }
132 187
133 blk_mq_end_request(req, error); 188 blk_mq_end_request(req, error);
@@ -146,7 +201,9 @@ static void virtblk_done(struct virtqueue *vq)
146 do { 201 do {
147 virtqueue_disable_cb(vq); 202 virtqueue_disable_cb(vq);
148 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { 203 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
149 blk_mq_complete_request(vbr->req, vbr->req->errors); 204 struct request *req = blk_mq_rq_from_pdu(vbr);
205
206 blk_mq_complete_request(req, req->errors);
150 req_done = true; 207 req_done = true;
151 } 208 }
152 if (unlikely(virtqueue_is_broken(vq))) 209 if (unlikely(virtqueue_is_broken(vq)))
@@ -170,49 +227,50 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
170 int qid = hctx->queue_num; 227 int qid = hctx->queue_num;
171 int err; 228 int err;
172 bool notify = false; 229 bool notify = false;
230 u32 type;
173 231
174 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); 232 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
175 233
176 vbr->req = req; 234 switch (req_op(req)) {
177 if (req_op(req) == REQ_OP_FLUSH) { 235 case REQ_OP_READ:
178 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH); 236 case REQ_OP_WRITE:
179 vbr->out_hdr.sector = 0; 237 type = 0;
180 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); 238 break;
181 } else { 239 case REQ_OP_FLUSH:
182 switch (req->cmd_type) { 240 type = VIRTIO_BLK_T_FLUSH;
183 case REQ_TYPE_FS: 241 break;
184 vbr->out_hdr.type = 0; 242 case REQ_OP_SCSI_IN:
185 vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req)); 243 case REQ_OP_SCSI_OUT:
186 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); 244 type = VIRTIO_BLK_T_SCSI_CMD;
187 break; 245 break;
188 case REQ_TYPE_BLOCK_PC: 246 case REQ_OP_DRV_IN:
189 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD); 247 type = VIRTIO_BLK_T_GET_ID;
190 vbr->out_hdr.sector = 0; 248 break;
191 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); 249 default:
192 break; 250 WARN_ON_ONCE(1);
193 case REQ_TYPE_DRV_PRIV: 251 return BLK_MQ_RQ_QUEUE_ERROR;
194 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
195 vbr->out_hdr.sector = 0;
196 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
197 break;
198 default:
199 /* We don't put anything else in the queue. */
200 BUG();
201 }
202 } 252 }
203 253
254 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
255 vbr->out_hdr.sector = type ?
256 0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
257 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
258
204 blk_mq_start_request(req); 259 blk_mq_start_request(req);
205 260
206 num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg); 261 num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
207 if (num) { 262 if (num) {
208 if (rq_data_dir(vbr->req) == WRITE) 263 if (rq_data_dir(req) == WRITE)
209 vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT); 264 vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
210 else 265 else
211 vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN); 266 vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
212 } 267 }
213 268
214 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); 269 spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
215 err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); 270 if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
271 err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
272 else
273 err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
216 if (err) { 274 if (err) {
217 virtqueue_kick(vblk->vqs[qid].vq); 275 virtqueue_kick(vblk->vqs[qid].vq);
218 blk_mq_stop_hw_queue(hctx); 276 blk_mq_stop_hw_queue(hctx);
@@ -242,10 +300,9 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
242 struct request *req; 300 struct request *req;
243 int err; 301 int err;
244 302
245 req = blk_get_request(q, READ, GFP_KERNEL); 303 req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
246 if (IS_ERR(req)) 304 if (IS_ERR(req))
247 return PTR_ERR(req); 305 return PTR_ERR(req);
248 req->cmd_type = REQ_TYPE_DRV_PRIV;
249 306
250 err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); 307 err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
251 if (err) 308 if (err)
@@ -257,22 +314,6 @@ out:
257 return err; 314 return err;
258} 315}
259 316
260static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
261 unsigned int cmd, unsigned long data)
262{
263 struct gendisk *disk = bdev->bd_disk;
264 struct virtio_blk *vblk = disk->private_data;
265
266 /*
267 * Only allow the generic SCSI ioctls if the host can support it.
268 */
269 if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
270 return -ENOTTY;
271
272 return scsi_cmd_blk_ioctl(bdev, mode, cmd,
273 (void __user *)data);
274}
275
276/* We provide getgeo only to please some old bootloader/partitioning tools */ 317/* We provide getgeo only to please some old bootloader/partitioning tools */
277static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) 318static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
278{ 319{
@@ -538,6 +579,9 @@ static int virtblk_init_request(void *data, struct request *rq,
538 struct virtio_blk *vblk = data; 579 struct virtio_blk *vblk = data;
539 struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); 580 struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
540 581
582#ifdef CONFIG_VIRTIO_BLK_SCSI
583 vbr->sreq.sense = vbr->sense;
584#endif
541 sg_init_table(vbr->sg, vblk->sg_elems); 585 sg_init_table(vbr->sg, vblk->sg_elems);
542 return 0; 586 return 0;
543} 587}
@@ -821,7 +865,10 @@ static const struct virtio_device_id id_table[] = {
821 865
822static unsigned int features_legacy[] = { 866static unsigned int features_legacy[] = {
823 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, 867 VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
824 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI, 868 VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
869#ifdef CONFIG_VIRTIO_BLK_SCSI
870 VIRTIO_BLK_F_SCSI,
871#endif
825 VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE, 872 VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
826 VIRTIO_BLK_F_MQ, 873 VIRTIO_BLK_F_MQ,
827} 874}
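
Two details of the virtio_blk conversion are worth spelling out. First, the "must be first" comment on sreq is load-bearing: scsi_req() resolves the scsi_request from the start of the driver's per-request PDU, so a driver embedding one has to keep it at offset 0. A sketch of the helper this series adds in include/scsi/scsi_request.h (definition approximate):

	static inline struct scsi_request *scsi_req(struct request *rq)
	{
		/* the scsi_request is assumed to lead the request PDU */
		return blk_mq_rq_to_pdu(rq);
	}

Second, virtblk_init_request pointing vbr->sreq.sense at the PDU's own sense buffer is what lets the completion path find the sense data later without any per-request setup.
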
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 265f1a7072e9..5067a0a952cb 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -865,7 +865,7 @@ static inline void flush_requests(struct blkfront_ring_info *rinfo)
865static inline bool blkif_request_flush_invalid(struct request *req, 865static inline bool blkif_request_flush_invalid(struct request *req,
866 struct blkfront_info *info) 866 struct blkfront_info *info)
867{ 867{
868 return ((req->cmd_type != REQ_TYPE_FS) || 868 return (blk_rq_is_passthrough(req) ||
869 ((req_op(req) == REQ_OP_FLUSH) && 869 ((req_op(req) == REQ_OP_FLUSH) &&
870 !info->feature_flush) || 870 !info->feature_flush) ||
871 ((req->cmd_flags & REQ_FUA) && 871 ((req->cmd_flags & REQ_FUA) &&
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index c4328d9d9981..757dce2147e0 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -468,7 +468,7 @@ static struct request *ace_get_next_request(struct request_queue *q)
468 struct request *req; 468 struct request *req;
469 469
470 while ((req = blk_peek_request(q)) != NULL) { 470 while ((req = blk_peek_request(q)) != NULL) {
471 if (req->cmd_type == REQ_TYPE_FS) 471 if (!blk_rq_is_passthrough(req))
472 break; 472 break;
473 blk_start_request(req); 473 blk_start_request(req);
474 __blk_end_request_all(req, -EIO); 474 __blk_end_request_all(req, -EIO);
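
xen-blkfront and xsysace show the mechanical half of the series: every rq->cmd_type == REQ_TYPE_FS test becomes a !blk_rq_is_passthrough() test. The new predicates are simple op checks; roughly as this series defines them in include/linux/blkdev.h (sketch):

	static inline bool blk_rq_is_scsi(struct request *rq)
	{
		return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
	}

	static inline bool blk_rq_is_private(struct request *rq)
	{
		return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
	}

	static inline bool blk_rq_is_passthrough(struct request *rq)
	{
		return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
	}
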
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e5ab7d9e8c45..3cd7856156b4 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -117,7 +117,7 @@ static void zram_revalidate_disk(struct zram *zram)
117{ 117{
118 revalidate_disk(zram->disk); 118 revalidate_disk(zram->disk);
119 /* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */ 119 /* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */
120 zram->disk->queue->backing_dev_info.capabilities |= 120 zram->disk->queue->backing_dev_info->capabilities |=
121 BDI_CAP_STABLE_WRITES; 121 BDI_CAP_STABLE_WRITES;
122} 122}
123 123
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index bbbd3caa927c..87739649eac2 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -281,8 +281,8 @@
281#include <linux/fcntl.h> 281#include <linux/fcntl.h>
282#include <linux/blkdev.h> 282#include <linux/blkdev.h>
283#include <linux/times.h> 283#include <linux/times.h>
284
285#include <linux/uaccess.h> 284#include <linux/uaccess.h>
285#include <scsi/scsi_request.h>
286 286
287/* used to tell the module to turn on full debugging messages */ 287/* used to tell the module to turn on full debugging messages */
288static bool debug; 288static bool debug;
@@ -2170,6 +2170,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2170{ 2170{
2171 struct request_queue *q = cdi->disk->queue; 2171 struct request_queue *q = cdi->disk->queue;
2172 struct request *rq; 2172 struct request *rq;
2173 struct scsi_request *req;
2173 struct bio *bio; 2174 struct bio *bio;
2174 unsigned int len; 2175 unsigned int len;
2175 int nr, ret = 0; 2176 int nr, ret = 0;
@@ -2188,12 +2189,13 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2188 2189
2189 len = nr * CD_FRAMESIZE_RAW; 2190 len = nr * CD_FRAMESIZE_RAW;
2190 2191
2191 rq = blk_get_request(q, READ, GFP_KERNEL); 2192 rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
2192 if (IS_ERR(rq)) { 2193 if (IS_ERR(rq)) {
2193 ret = PTR_ERR(rq); 2194 ret = PTR_ERR(rq);
2194 break; 2195 break;
2195 } 2196 }
2196 blk_rq_set_block_pc(rq); 2197 req = scsi_req(rq);
2198 scsi_req_init(rq);
2197 2199
2198 ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL); 2200 ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
2199 if (ret) { 2201 if (ret) {
@@ -2201,23 +2203,23 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2201 break; 2203 break;
2202 } 2204 }
2203 2205
2204 rq->cmd[0] = GPCMD_READ_CD; 2206 req->cmd[0] = GPCMD_READ_CD;
2205 rq->cmd[1] = 1 << 2; 2207 req->cmd[1] = 1 << 2;
2206 rq->cmd[2] = (lba >> 24) & 0xff; 2208 req->cmd[2] = (lba >> 24) & 0xff;
2207 rq->cmd[3] = (lba >> 16) & 0xff; 2209 req->cmd[3] = (lba >> 16) & 0xff;
2208 rq->cmd[4] = (lba >> 8) & 0xff; 2210 req->cmd[4] = (lba >> 8) & 0xff;
2209 rq->cmd[5] = lba & 0xff; 2211 req->cmd[5] = lba & 0xff;
2210 rq->cmd[6] = (nr >> 16) & 0xff; 2212 req->cmd[6] = (nr >> 16) & 0xff;
2211 rq->cmd[7] = (nr >> 8) & 0xff; 2213 req->cmd[7] = (nr >> 8) & 0xff;
2212 rq->cmd[8] = nr & 0xff; 2214 req->cmd[8] = nr & 0xff;
2213 rq->cmd[9] = 0xf8; 2215 req->cmd[9] = 0xf8;
2214 2216
2215 rq->cmd_len = 12; 2217 req->cmd_len = 12;
2216 rq->timeout = 60 * HZ; 2218 rq->timeout = 60 * HZ;
2217 bio = rq->bio; 2219 bio = rq->bio;
2218 2220
2219 if (blk_execute_rq(q, cdi->disk, rq, 0)) { 2221 if (blk_execute_rq(q, cdi->disk, rq, 0)) {
2220 struct request_sense *s = rq->sense; 2222 struct request_sense *s = req->sense;
2221 ret = -EIO; 2223 ret = -EIO;
2222 cdi->last_sense = s->sense_key; 2224 cdi->last_sense = s->sense_key;
2223 } 2225 }
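
cdrom_read_cdda_bpc above is the canonical caller-side shape of the new API: allocate the request with an explicit op, initialize the embedded scsi_request, then fill the CDB and read sense data through scsi_req(rq) instead of through the request itself. Condensed, with error handling trimmed and a hypothetical sense consumer:

	rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	req = scsi_req(rq);
	scsi_req_init(rq);			/* set up the cmd/sense fields */

	req->cmd[0] = GPCMD_READ_CD;		/* CDB bytes live in the scsi_request */
	req->cmd_len = 12;
	rq->timeout = 60 * HZ;			/* timeout stays on the request */

	if (blk_execute_rq(q, disk, rq, 0))
		handle_sense(req->sense);	/* hypothetical helper */
	blk_put_request(rq);
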
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 1afab6558d0c..1372763a948f 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -659,23 +659,24 @@ static void gdrom_request(struct request_queue *rq)
659 struct request *req; 659 struct request *req;
660 660
661 while ((req = blk_fetch_request(rq)) != NULL) { 661 while ((req = blk_fetch_request(rq)) != NULL) {
662 if (req->cmd_type != REQ_TYPE_FS) { 662 switch (req_op(req)) {
663 printk(KERN_DEBUG "gdrom: Non-fs request ignored\n"); 663 case REQ_OP_READ:
664 __blk_end_request_all(req, -EIO); 664 /*
665 continue; 665 * Add to list of deferred work and then schedule
666 } 666 * workqueue.
667 if (rq_data_dir(req) != READ) { 667 */
668 list_add_tail(&req->queuelist, &gdrom_deferred);
669 schedule_work(&work);
670 break;
671 case REQ_OP_WRITE:
668 pr_notice("Read only device - write request ignored\n"); 672 pr_notice("Read only device - write request ignored\n");
669 __blk_end_request_all(req, -EIO); 673 __blk_end_request_all(req, -EIO);
670 continue; 674 break;
675 default:
676 printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
677 __blk_end_request_all(req, -EIO);
678 break;
671 } 679 }
672
673 /*
674 * Add to list of deferred work and then schedule
675 * workqueue.
676 */
677 list_add_tail(&req->queuelist, &gdrom_deferred);
678 schedule_work(&work);
679 } 680 }
680} 681}
681 682
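
gdrom's rewrite keeps its existing deferral scheme: reads are parked on gdrom_deferred and serviced from a workqueue. A sketch of the consumer side under those assumptions (handler name illustrative, locking elided; the real driver guards the list with a spinlock):

	static void gdrom_service_deferred(struct work_struct *work)
	{
		struct request *req;

		while (!list_empty(&gdrom_deferred)) {
			req = list_first_entry(&gdrom_deferred,
					       struct request, queuelist);
			list_del_init(&req->queuelist);
			/* perform the read here, then complete the request */
			__blk_end_request_all(req, 0);
		}
	}
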
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 39ea67f9b066..c99a25c075bc 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -10,6 +10,7 @@ menuconfig IDE
10 tristate "ATA/ATAPI/MFM/RLL support (DEPRECATED)" 10 tristate "ATA/ATAPI/MFM/RLL support (DEPRECATED)"
11 depends on HAVE_IDE 11 depends on HAVE_IDE
12 depends on BLOCK 12 depends on BLOCK
13 select BLK_SCSI_REQUEST
13 ---help--- 14 ---help---
14 If you say Y here, your kernel will be able to manage ATA/(E)IDE and 15 If you say Y here, your kernel will be able to manage ATA/(E)IDE and
15 ATAPI units. The most common cases are IDE hard drives and ATAPI 16 ATAPI units. The most common cases are IDE hard drives and ATAPI
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index f90ea221f7f2..feb30061123b 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -92,8 +92,9 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
92 struct request *rq; 92 struct request *rq;
93 int error; 93 int error;
94 94
95 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 95 rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
96 rq->cmd_type = REQ_TYPE_DRV_PRIV; 96 scsi_req_init(rq);
97 ide_req(rq)->type = ATA_PRIV_MISC;
97 rq->special = (char *)pc; 98 rq->special = (char *)pc;
98 99
99 if (buf && bufflen) { 100 if (buf && bufflen) {
@@ -103,9 +104,9 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
103 goto put_req; 104 goto put_req;
104 } 105 }
105 106
106 memcpy(rq->cmd, pc->c, 12); 107 memcpy(scsi_req(rq)->cmd, pc->c, 12);
107 if (drive->media == ide_tape) 108 if (drive->media == ide_tape)
108 rq->cmd[13] = REQ_IDETAPE_PC1; 109 scsi_req(rq)->cmd[13] = REQ_IDETAPE_PC1;
109 error = blk_execute_rq(drive->queue, disk, rq, 0); 110 error = blk_execute_rq(drive->queue, disk, rq, 0);
110put_req: 111put_req:
111 blk_put_request(rq); 112 blk_put_request(rq);
@@ -171,7 +172,8 @@ EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
171void ide_prep_sense(ide_drive_t *drive, struct request *rq) 172void ide_prep_sense(ide_drive_t *drive, struct request *rq)
172{ 173{
173 struct request_sense *sense = &drive->sense_data; 174 struct request_sense *sense = &drive->sense_data;
174 struct request *sense_rq = &drive->sense_rq; 175 struct request *sense_rq = drive->sense_rq;
176 struct scsi_request *req = scsi_req(sense_rq);
175 unsigned int cmd_len, sense_len; 177 unsigned int cmd_len, sense_len;
176 int err; 178 int err;
177 179
@@ -191,12 +193,13 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
191 193
192 BUG_ON(sense_len > sizeof(*sense)); 194 BUG_ON(sense_len > sizeof(*sense));
193 195
194 if (rq->cmd_type == REQ_TYPE_ATA_SENSE || drive->sense_rq_armed) 196 if (ata_sense_request(rq) || drive->sense_rq_armed)
195 return; 197 return;
196 198
197 memset(sense, 0, sizeof(*sense)); 199 memset(sense, 0, sizeof(*sense));
198 200
199 blk_rq_init(rq->q, sense_rq); 201 blk_rq_init(rq->q, sense_rq);
202 scsi_req_init(sense_rq);
200 203
201 err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len, 204 err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
202 GFP_NOIO); 205 GFP_NOIO);
@@ -208,13 +211,14 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
208 } 211 }
209 212
210 sense_rq->rq_disk = rq->rq_disk; 213 sense_rq->rq_disk = rq->rq_disk;
211 sense_rq->cmd[0] = GPCMD_REQUEST_SENSE; 214 sense_rq->cmd_flags = REQ_OP_DRV_IN;
212 sense_rq->cmd[4] = cmd_len; 215 ide_req(sense_rq)->type = ATA_PRIV_SENSE;
213 sense_rq->cmd_type = REQ_TYPE_ATA_SENSE;
214 sense_rq->rq_flags |= RQF_PREEMPT; 216 sense_rq->rq_flags |= RQF_PREEMPT;
215 217
218 req->cmd[0] = GPCMD_REQUEST_SENSE;
219 req->cmd[4] = cmd_len;
216 if (drive->media == ide_tape) 220 if (drive->media == ide_tape)
217 sense_rq->cmd[13] = REQ_IDETAPE_PC1; 221 req->cmd[13] = REQ_IDETAPE_PC1;
218 222
219 drive->sense_rq_armed = true; 223 drive->sense_rq_armed = true;
220} 224}
@@ -229,12 +233,12 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
229 return -ENOMEM; 233 return -ENOMEM;
230 } 234 }
231 235
232 drive->sense_rq.special = special; 236 drive->sense_rq->special = special;
233 drive->sense_rq_armed = false; 237 drive->sense_rq_armed = false;
234 238
235 drive->hwif->rq = NULL; 239 drive->hwif->rq = NULL;
236 240
237 elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT); 241 elv_add_request(drive->queue, drive->sense_rq, ELEVATOR_INSERT_FRONT);
238 return 0; 242 return 0;
239} 243}
240EXPORT_SYMBOL_GPL(ide_queue_sense_rq); 244EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
@@ -247,14 +251,14 @@ EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
247void ide_retry_pc(ide_drive_t *drive) 251void ide_retry_pc(ide_drive_t *drive)
248{ 252{
249 struct request *failed_rq = drive->hwif->rq; 253 struct request *failed_rq = drive->hwif->rq;
250 struct request *sense_rq = &drive->sense_rq; 254 struct request *sense_rq = drive->sense_rq;
251 struct ide_atapi_pc *pc = &drive->request_sense_pc; 255 struct ide_atapi_pc *pc = &drive->request_sense_pc;
252 256
253 (void)ide_read_error(drive); 257 (void)ide_read_error(drive);
254 258
255 /* init pc from sense_rq */ 259 /* init pc from sense_rq */
256 ide_init_pc(pc); 260 ide_init_pc(pc);
257 memcpy(pc->c, sense_rq->cmd, 12); 261 memcpy(pc->c, scsi_req(sense_rq)->cmd, 12);
258 262
259 if (drive->media == ide_tape) 263 if (drive->media == ide_tape)
260 drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC; 264 drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
@@ -286,7 +290,7 @@ int ide_cd_expiry(ide_drive_t *drive)
286 * commands/drives support that. Let ide_timer_expiry keep polling us 290 * commands/drives support that. Let ide_timer_expiry keep polling us
287 * for these. 291 * for these.
288 */ 292 */
289 switch (rq->cmd[0]) { 293 switch (scsi_req(rq)->cmd[0]) {
290 case GPCMD_BLANK: 294 case GPCMD_BLANK:
291 case GPCMD_FORMAT_UNIT: 295 case GPCMD_FORMAT_UNIT:
292 case GPCMD_RESERVE_RZONE_TRACK: 296 case GPCMD_RESERVE_RZONE_TRACK:
@@ -297,7 +301,7 @@ int ide_cd_expiry(ide_drive_t *drive)
297 default: 301 default:
298 if (!(rq->rq_flags & RQF_QUIET)) 302 if (!(rq->rq_flags & RQF_QUIET))
299 printk(KERN_INFO PFX "cmd 0x%x timed out\n", 303 printk(KERN_INFO PFX "cmd 0x%x timed out\n",
300 rq->cmd[0]); 304 scsi_req(rq)->cmd[0]);
301 wait = 0; 305 wait = 0;
302 break; 306 break;
303 } 307 }
@@ -307,15 +311,21 @@ EXPORT_SYMBOL_GPL(ide_cd_expiry);
307 311
308int ide_cd_get_xferlen(struct request *rq) 312int ide_cd_get_xferlen(struct request *rq)
309{ 313{
310 switch (rq->cmd_type) { 314 switch (req_op(rq)) {
311 case REQ_TYPE_FS: 315 default:
312 return 32768; 316 return 32768;
313 case REQ_TYPE_ATA_SENSE: 317 case REQ_OP_SCSI_IN:
314 case REQ_TYPE_BLOCK_PC: 318 case REQ_OP_SCSI_OUT:
315 case REQ_TYPE_ATA_PC:
316 return blk_rq_bytes(rq); 319 return blk_rq_bytes(rq);
317 default: 320 case REQ_OP_DRV_IN:
318 return 0; 321 case REQ_OP_DRV_OUT:
322 switch (ide_req(rq)->type) {
323 case ATA_PRIV_PC:
324 case ATA_PRIV_SENSE:
325 return blk_rq_bytes(rq);
326 default:
327 return 0;
328 }
319 } 329 }
320} 330}
321EXPORT_SYMBOL_GPL(ide_cd_get_xferlen); 331EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);
@@ -374,7 +384,7 @@ int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
374 drive->name, __func__, ireason); 384 drive->name, __func__, ireason);
375 } 385 }
376 386
377 if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC) 387 if (dev_is_idecd(drive) && ata_pc_request(rq))
378 rq->rq_flags |= RQF_FAILED; 388 rq->rq_flags |= RQF_FAILED;
379 389
380 return 1; 390 return 1;
@@ -420,7 +430,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
420 ? "write" : "read"); 430 ? "write" : "read");
421 pc->flags |= PC_FLAG_DMA_ERROR; 431 pc->flags |= PC_FLAG_DMA_ERROR;
422 } else 432 } else
423 rq->resid_len = 0; 433 scsi_req(rq)->resid_len = 0;
424 debug_log("%s: DMA finished\n", drive->name); 434 debug_log("%s: DMA finished\n", drive->name);
425 } 435 }
426 436
@@ -436,7 +446,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
436 local_irq_enable_in_hardirq(); 446 local_irq_enable_in_hardirq();
437 447
438 if (drive->media == ide_tape && 448 if (drive->media == ide_tape &&
439 (stat & ATA_ERR) && rq->cmd[0] == REQUEST_SENSE) 449 (stat & ATA_ERR) && scsi_req(rq)->cmd[0] == REQUEST_SENSE)
440 stat &= ~ATA_ERR; 450 stat &= ~ATA_ERR;
441 451
442 if ((stat & ATA_ERR) || (pc->flags & PC_FLAG_DMA_ERROR)) { 452 if ((stat & ATA_ERR) || (pc->flags & PC_FLAG_DMA_ERROR)) {
@@ -446,7 +456,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
446 if (drive->media != ide_tape) 456 if (drive->media != ide_tape)
447 pc->rq->errors++; 457 pc->rq->errors++;
448 458
449 if (rq->cmd[0] == REQUEST_SENSE) { 459 if (scsi_req(rq)->cmd[0] == REQUEST_SENSE) {
450 printk(KERN_ERR PFX "%s: I/O error in request " 460 printk(KERN_ERR PFX "%s: I/O error in request "
451 "sense command\n", drive->name); 461 "sense command\n", drive->name);
452 return ide_do_reset(drive); 462 return ide_do_reset(drive);
@@ -477,12 +487,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
477 if (uptodate == 0) 487 if (uptodate == 0)
478 drive->failed_pc = NULL; 488 drive->failed_pc = NULL;
479 489
480 if (rq->cmd_type == REQ_TYPE_DRV_PRIV) { 490 if (ata_misc_request(rq)) {
481 rq->errors = 0; 491 rq->errors = 0;
482 error = 0; 492 error = 0;
483 } else { 493 } else {
484 494
485 if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) { 495 if (blk_rq_is_passthrough(rq) && uptodate <= 0) {
486 if (rq->errors == 0) 496 if (rq->errors == 0)
487 rq->errors = -EIO; 497 rq->errors = -EIO;
488 } 498 }
@@ -512,7 +522,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
512 ide_pio_bytes(drive, cmd, write, done); 522 ide_pio_bytes(drive, cmd, write, done);
513 523
514 /* Update transferred byte count */ 524 /* Update transferred byte count */
515 rq->resid_len -= done; 525 scsi_req(rq)->resid_len -= done;
516 526
517 bcount -= done; 527 bcount -= done;
518 528
@@ -520,7 +530,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
520 ide_pad_transfer(drive, write, bcount); 530 ide_pad_transfer(drive, write, bcount);
521 531
522 debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n", 532 debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n",
523 rq->cmd[0], done, bcount, rq->resid_len); 533 rq->cmd[0], done, bcount, scsi_req(rq)->resid_len);
524 534
525 /* And set the interrupt handler again */ 535 /* And set the interrupt handler again */
526 ide_set_handler(drive, ide_pc_intr, timeout); 536 ide_set_handler(drive, ide_pc_intr, timeout);
@@ -603,7 +613,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
603 613
604 if (dev_is_idecd(drive)) { 614 if (dev_is_idecd(drive)) {
605 /* ATAPI commands get padded out to 12 bytes minimum */ 615 /* ATAPI commands get padded out to 12 bytes minimum */
606 cmd_len = COMMAND_SIZE(rq->cmd[0]); 616 cmd_len = COMMAND_SIZE(scsi_req(rq)->cmd[0]);
607 if (cmd_len < ATAPI_MIN_CDB_BYTES) 617 if (cmd_len < ATAPI_MIN_CDB_BYTES)
608 cmd_len = ATAPI_MIN_CDB_BYTES; 618 cmd_len = ATAPI_MIN_CDB_BYTES;
609 619
@@ -650,7 +660,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
650 660
651 /* Send the actual packet */ 661 /* Send the actual packet */
652 if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0) 662 if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
653 hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len); 663 hwif->tp_ops->output_data(drive, NULL, scsi_req(rq)->cmd, cmd_len);
654 664
655 /* Begin DMA, if necessary */ 665 /* Begin DMA, if necessary */
656 if (dev_is_idecd(drive)) { 666 if (dev_is_idecd(drive)) {
@@ -695,7 +705,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
695 bytes, 63 * 1024)); 705 bytes, 63 * 1024));
696 706
697 /* We haven't transferred any data yet */ 707 /* We haven't transferred any data yet */
698 rq->resid_len = bcount; 708 scsi_req(rq)->resid_len = bcount;
699 709
700 if (pc->flags & PC_FLAG_DMA_ERROR) { 710 if (pc->flags & PC_FLAG_DMA_ERROR) {
701 pc->flags &= ~PC_FLAG_DMA_ERROR; 711 pc->flags &= ~PC_FLAG_DMA_ERROR;
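
The IDE conversion mirrors the SCSI one: the old REQ_TYPE_ATA_* request types become ATA_PRIV_* subtypes stored in a per-request ide_request PDU, reached through ide_req(). The classification helpers the hunks above rely on reduce to an op check plus that subtype; approximately (sketch of the include/linux/ide.h additions):

	static inline struct ide_request *ide_req(struct request *rq)
	{
		return blk_mq_rq_to_pdu(rq);	/* ide_request leads the PDU */
	}

	static inline bool ata_misc_request(struct request *rq)
	{
		return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
	}

	static inline bool ata_sense_request(struct request *rq)
	{
		return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
	}

	static inline bool ata_pc_request(struct request *rq)
	{
		return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC;
	}
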
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index ab9232e1e16f..aef00511ca86 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -121,7 +121,7 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq)
121 * don't log START_STOP unit with LoEj set, since we cannot 121 * don't log START_STOP unit with LoEj set, since we cannot
122 * reliably check if drive can auto-close 122 * reliably check if drive can auto-close
123 */ 123 */
124 if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24) 124 if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
125 break; 125 break;
126 log = 1; 126 log = 1;
127 break; 127 break;
@@ -163,7 +163,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
163 * toc has not been recorded yet, it will fail with 05/24/00 (which is a 163 * toc has not been recorded yet, it will fail with 05/24/00 (which is a
164 * confusing error) 164 * confusing error)
165 */ 165 */
166 if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP) 166 if (failed_command && scsi_req(failed_command)->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
167 if (sense->sense_key == 0x05 && sense->asc == 0x24) 167 if (sense->sense_key == 0x05 && sense->asc == 0x24)
168 return; 168 return;
169 169
@@ -176,7 +176,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
176 if (!sense->valid) 176 if (!sense->valid)
177 break; 177 break;
178 if (failed_command == NULL || 178 if (failed_command == NULL ||
179 failed_command->cmd_type != REQ_TYPE_FS) 179 blk_rq_is_passthrough(failed_command))
180 break; 180 break;
181 sector = (sense->information[0] << 24) | 181 sector = (sense->information[0] << 24) |
182 (sense->information[1] << 16) | 182 (sense->information[1] << 16) |
@@ -210,7 +210,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
210static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) 210static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
211{ 211{
212 /* 212 /*
213 * For REQ_TYPE_ATA_SENSE, "rq->special" points to the original 213 * For ATA_PRIV_SENSE, "rq->special" points to the original
214 * failed request. Also, the sense data should be read 214 * failed request. Also, the sense data should be read
215 * directly from rq which might be different from the original 215 * directly from rq which might be different from the original
216 * sense buffer if it got copied during mapping. 216 * sense buffer if it got copied during mapping.
@@ -219,15 +219,12 @@ static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
219 void *sense = bio_data(rq->bio); 219 void *sense = bio_data(rq->bio);
220 220
221 if (failed) { 221 if (failed) {
222 if (failed->sense) { 222 /*
223 /* 223 * Sense is always read into drive->sense_data, copy back to the
224 * Sense is always read into drive->sense_data. 224 * original request.
225 * Copy back if the failed request has its 225 */
226 * sense pointer set. 226 memcpy(scsi_req(failed)->sense, sense, 18);
227 */ 227 scsi_req(failed)->sense_len = scsi_req(rq)->sense_len;
228 memcpy(failed->sense, sense, 18);
229 failed->sense_len = rq->sense_len;
230 }
231 cdrom_analyze_sense_data(drive, failed); 228 cdrom_analyze_sense_data(drive, failed);
232 229
233 if (ide_end_rq(drive, failed, -EIO, blk_rq_bytes(failed))) 230 if (ide_end_rq(drive, failed, -EIO, blk_rq_bytes(failed)))
@@ -285,7 +282,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
285 "stat 0x%x", 282 "stat 0x%x",
286 rq->cmd[0], rq->cmd_type, err, stat); 283 rq->cmd[0], rq->cmd_type, err, stat);
287 284
288 if (rq->cmd_type == REQ_TYPE_ATA_SENSE) { 285 if (ata_sense_request(rq)) {
289 /* 286 /*
290 * We got an error trying to get sense info from the drive 287 * We got an error trying to get sense info from the drive
291 * (probably while trying to recover from a former error). 288 * (probably while trying to recover from a former error).
@@ -296,7 +293,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
296 } 293 }
297 294
298 /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */ 295 /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */
299 if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors) 296 if (blk_rq_is_scsi(rq) && !rq->errors)
300 rq->errors = SAM_STAT_CHECK_CONDITION; 297 rq->errors = SAM_STAT_CHECK_CONDITION;
301 298
302 if (blk_noretry_request(rq)) 299 if (blk_noretry_request(rq))
@@ -304,13 +301,13 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
304 301
305 switch (sense_key) { 302 switch (sense_key) {
306 case NOT_READY: 303 case NOT_READY:
307 if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) { 304 if (req_op(rq) == REQ_OP_WRITE) {
308 if (ide_cd_breathe(drive, rq)) 305 if (ide_cd_breathe(drive, rq))
309 return 1; 306 return 1;
310 } else { 307 } else {
311 cdrom_saw_media_change(drive); 308 cdrom_saw_media_change(drive);
312 309
313 if (rq->cmd_type == REQ_TYPE_FS && 310 if (!blk_rq_is_passthrough(rq) &&
314 !(rq->rq_flags & RQF_QUIET)) 311 !(rq->rq_flags & RQF_QUIET))
315 printk(KERN_ERR PFX "%s: tray open\n", 312 printk(KERN_ERR PFX "%s: tray open\n",
316 drive->name); 313 drive->name);
@@ -320,7 +317,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
320 case UNIT_ATTENTION: 317 case UNIT_ATTENTION:
321 cdrom_saw_media_change(drive); 318 cdrom_saw_media_change(drive);
322 319
323 if (rq->cmd_type != REQ_TYPE_FS) 320 if (blk_rq_is_passthrough(rq))
324 return 0; 321 return 0;
325 322
326 /* 323 /*
@@ -338,7 +335,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
338 * 335 *
339 * cdrom_log_sense() knows this! 336 * cdrom_log_sense() knows this!
340 */ 337 */
341 if (rq->cmd[0] == GPCMD_START_STOP_UNIT) 338 if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT)
342 break; 339 break;
343 /* fall-through */ 340 /* fall-through */
344 case DATA_PROTECT: 341 case DATA_PROTECT:
@@ -368,7 +365,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
368 do_end_request = 1; 365 do_end_request = 1;
369 break; 366 break;
370 default: 367 default:
371 if (rq->cmd_type != REQ_TYPE_FS) 368 if (blk_rq_is_passthrough(rq))
372 break; 369 break;
373 if (err & ~ATA_ABORTED) { 370 if (err & ~ATA_ABORTED) {
374 /* go to the default handler for other errors */ 371 /* go to the default handler for other errors */
@@ -379,7 +376,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
379 do_end_request = 1; 376 do_end_request = 1;
380 } 377 }
381 378
382 if (rq->cmd_type != REQ_TYPE_FS) { 379 if (blk_rq_is_passthrough(rq)) {
383 rq->rq_flags |= RQF_FAILED; 380 rq->rq_flags |= RQF_FAILED;
384 do_end_request = 1; 381 do_end_request = 1;
385 } 382 }
@@ -414,7 +411,7 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
414 * Some of the trailing request sense fields are optional, 411 * Some of the trailing request sense fields are optional,
415 * and some drives don't send them. Sigh. 412 * and some drives don't send them. Sigh.
416 */ 413 */
417 if (rq->cmd[0] == GPCMD_REQUEST_SENSE && 414 if (scsi_req(rq)->cmd[0] == GPCMD_REQUEST_SENSE &&
418 cmd->nleft > 0 && cmd->nleft <= 5) 415 cmd->nleft > 0 && cmd->nleft <= 5)
419 cmd->nleft = 0; 416 cmd->nleft = 0;
420} 417}
@@ -425,12 +422,8 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
425 req_flags_t rq_flags) 422 req_flags_t rq_flags)
426{ 423{
427 struct cdrom_info *info = drive->driver_data; 424 struct cdrom_info *info = drive->driver_data;
428 struct request_sense local_sense;
429 int retries = 10; 425 int retries = 10;
430 req_flags_t flags = 0; 426 bool failed;
431
432 if (!sense)
433 sense = &local_sense;
434 427
435 ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, " 428 ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, "
436 "rq_flags: 0x%x", 429 "rq_flags: 0x%x",
@@ -440,12 +433,13 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
440 do { 433 do {
441 struct request *rq; 434 struct request *rq;
442 int error; 435 int error;
436 bool delay = false;
443 437
444 rq = blk_get_request(drive->queue, write, __GFP_RECLAIM); 438 rq = blk_get_request(drive->queue,
445 439 write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
446 memcpy(rq->cmd, cmd, BLK_MAX_CDB); 440 scsi_req_init(rq);
447 rq->cmd_type = REQ_TYPE_ATA_PC; 441 memcpy(scsi_req(rq)->cmd, cmd, BLK_MAX_CDB);
448 rq->sense = sense; 442 ide_req(rq)->type = ATA_PRIV_PC;
449 rq->rq_flags |= rq_flags; 443 rq->rq_flags |= rq_flags;
450 rq->timeout = timeout; 444 rq->timeout = timeout;
451 if (buffer) { 445 if (buffer) {
@@ -460,21 +454,21 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
460 error = blk_execute_rq(drive->queue, info->disk, rq, 0); 454 error = blk_execute_rq(drive->queue, info->disk, rq, 0);
461 455
462 if (buffer) 456 if (buffer)
463 *bufflen = rq->resid_len; 457 *bufflen = scsi_req(rq)->resid_len;
464 458 if (sense)
465 flags = rq->rq_flags; 459 memcpy(sense, scsi_req(rq)->sense, sizeof(*sense));
466 blk_put_request(rq);
467 460
468 /* 461 /*
469 * FIXME: we should probably abort/retry or something in case of 462 * FIXME: we should probably abort/retry or something in case of
470 * failure. 463 * failure.
471 */ 464 */
472 if (flags & RQF_FAILED) { 465 failed = (rq->rq_flags & RQF_FAILED) != 0;
466 if (failed) {
473 /* 467 /*
474 * The request failed. Retry if it was due to a unit 468 * The request failed. Retry if it was due to a unit
475 * attention status (usually means media was changed). 469 * attention status (usually means media was changed).
476 */ 470 */
477 struct request_sense *reqbuf = sense; 471 struct request_sense *reqbuf = scsi_req(rq)->sense;
478 472
479 if (reqbuf->sense_key == UNIT_ATTENTION) 473 if (reqbuf->sense_key == UNIT_ATTENTION)
480 cdrom_saw_media_change(drive); 474 cdrom_saw_media_change(drive);
@@ -485,19 +479,20 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
485 * a disk. Retry, but wait a little to give 479 * a disk. Retry, but wait a little to give
486 * the drive time to complete the load. 480 * the drive time to complete the load.
487 */ 481 */
488 ssleep(2); 482 delay = true;
489 } else { 483 } else {
490 /* otherwise, don't retry */ 484 /* otherwise, don't retry */
491 retries = 0; 485 retries = 0;
492 } 486 }
493 --retries; 487 --retries;
494 } 488 }
495 489 blk_put_request(rq);
496 /* end of retry loop */ 490 if (delay)
497 } while ((flags & RQF_FAILED) && retries >= 0); 491 ssleep(2);
492 } while (failed && retries >= 0);
498 493
499 /* return an error if the command failed */ 494 /* return an error if the command failed */
500 return (flags & RQF_FAILED) ? -EIO : 0; 495 return failed ? -EIO : 0;
501} 496}
502 497
503/* 498/*
@@ -526,7 +521,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
526 ide_expiry_t *expiry = NULL; 521 ide_expiry_t *expiry = NULL;
527 int dma_error = 0, dma, thislen, uptodate = 0; 522 int dma_error = 0, dma, thislen, uptodate = 0;
528 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0; 523 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
529 int sense = (rq->cmd_type == REQ_TYPE_ATA_SENSE); 524 int sense = ata_sense_request(rq);
530 unsigned int timeout; 525 unsigned int timeout;
531 u16 len; 526 u16 len;
532 u8 ireason, stat; 527 u8 ireason, stat;
@@ -569,7 +564,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
569 564
570 ide_read_bcount_and_ireason(drive, &len, &ireason); 565 ide_read_bcount_and_ireason(drive, &len, &ireason);
571 566
572 thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft; 567 thislen = !blk_rq_is_passthrough(rq) ? len : cmd->nleft;
573 if (thislen > len) 568 if (thislen > len)
574 thislen = len; 569 thislen = len;
575 570
@@ -578,7 +573,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
578 573
579 /* If DRQ is clear, the command has completed. */ 574 /* If DRQ is clear, the command has completed. */
580 if ((stat & ATA_DRQ) == 0) { 575 if ((stat & ATA_DRQ) == 0) {
581 if (rq->cmd_type == REQ_TYPE_FS) { 576 switch (req_op(rq)) {
577 default:
582 /* 578 /*
583 * If we're not done reading/writing, complain. 579 * If we're not done reading/writing, complain.
584 * Otherwise, complete the command normally. 580 * Otherwise, complete the command normally.
@@ -592,7 +588,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
592 rq->rq_flags |= RQF_FAILED; 588 rq->rq_flags |= RQF_FAILED;
593 uptodate = 0; 589 uptodate = 0;
594 } 590 }
595 } else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { 591 goto out_end;
592 case REQ_OP_DRV_IN:
593 case REQ_OP_DRV_OUT:
596 ide_cd_request_sense_fixup(drive, cmd); 594 ide_cd_request_sense_fixup(drive, cmd);
597 595
598 uptodate = cmd->nleft ? 0 : 1; 596 uptodate = cmd->nleft ? 0 : 1;
@@ -608,8 +606,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
608 606
609 if (!uptodate) 607 if (!uptodate)
610 rq->rq_flags |= RQF_FAILED; 608 rq->rq_flags |= RQF_FAILED;
609 goto out_end;
610 case REQ_OP_SCSI_IN:
611 case REQ_OP_SCSI_OUT:
612 goto out_end;
611 } 613 }
612 goto out_end;
613 } 614 }
614 615
615 rc = ide_check_ireason(drive, rq, len, ireason, write); 616 rc = ide_check_ireason(drive, rq, len, ireason, write);
@@ -636,12 +637,12 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
636 len -= blen; 637 len -= blen;
637 638
638 if (sense && write == 0) 639 if (sense && write == 0)
639 rq->sense_len += blen; 640 scsi_req(rq)->sense_len += blen;
640 } 641 }
641 642
642 /* pad, if necessary */ 643 /* pad, if necessary */
643 if (len > 0) { 644 if (len > 0) {
644 if (rq->cmd_type != REQ_TYPE_FS || write == 0) 645 if (blk_rq_is_passthrough(rq) || write == 0)
645 ide_pad_transfer(drive, write, len); 646 ide_pad_transfer(drive, write, len);
646 else { 647 else {
647 printk(KERN_ERR PFX "%s: confused, missing data\n", 648 printk(KERN_ERR PFX "%s: confused, missing data\n",
@@ -650,12 +651,18 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
650 } 651 }
651 } 652 }
652 653
653 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 654 switch (req_op(rq)) {
655 case REQ_OP_SCSI_IN:
656 case REQ_OP_SCSI_OUT:
654 timeout = rq->timeout; 657 timeout = rq->timeout;
655 } else { 658 break;
659 case REQ_OP_DRV_IN:
660 case REQ_OP_DRV_OUT:
661 expiry = ide_cd_expiry;
662 /*FALLTHRU*/
663 default:
656 timeout = ATAPI_WAIT_PC; 664 timeout = ATAPI_WAIT_PC;
657 if (rq->cmd_type != REQ_TYPE_FS) 665 break;
658 expiry = ide_cd_expiry;
659 } 666 }
660 667
661 hwif->expiry = expiry; 668 hwif->expiry = expiry;
@@ -663,15 +670,15 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
663 return ide_started; 670 return ide_started;
664 671
665out_end: 672out_end:
666 if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) { 673 if (blk_rq_is_scsi(rq) && rc == 0) {
667 rq->resid_len = 0; 674 scsi_req(rq)->resid_len = 0;
668 blk_end_request_all(rq, 0); 675 blk_end_request_all(rq, 0);
669 hwif->rq = NULL; 676 hwif->rq = NULL;
670 } else { 677 } else {
671 if (sense && uptodate) 678 if (sense && uptodate)
672 ide_cd_complete_failed_rq(drive, rq); 679 ide_cd_complete_failed_rq(drive, rq);
673 680
674 if (rq->cmd_type == REQ_TYPE_FS) { 681 if (!blk_rq_is_passthrough(rq)) {
675 if (cmd->nleft == 0) 682 if (cmd->nleft == 0)
676 uptodate = 1; 683 uptodate = 1;
677 } else { 684 } else {
@@ -684,10 +691,10 @@ out_end:
684 return ide_stopped; 691 return ide_stopped;
685 692
686 /* make sure it's fully ended */ 693 /* make sure it's fully ended */
687 if (rq->cmd_type != REQ_TYPE_FS) { 694 if (blk_rq_is_passthrough(rq)) {
688 rq->resid_len -= cmd->nbytes - cmd->nleft; 695 scsi_req(rq)->resid_len -= cmd->nbytes - cmd->nleft;
689 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) 696 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
690 rq->resid_len += cmd->last_xfer_len; 697 scsi_req(rq)->resid_len += cmd->last_xfer_len;
691 } 698 }
692 699
693 ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq)); 700 ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
@@ -744,7 +751,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
744 ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x", 751 ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x",
745 rq->cmd[0], rq->cmd_type); 752 rq->cmd[0], rq->cmd_type);
746 753
747 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) 754 if (blk_rq_is_scsi(rq))
748 rq->rq_flags |= RQF_QUIET; 755 rq->rq_flags |= RQF_QUIET;
749 else 756 else
750 rq->rq_flags &= ~RQF_FAILED; 757 rq->rq_flags &= ~RQF_FAILED;
@@ -786,25 +793,31 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
786 if (drive->debug_mask & IDE_DBG_RQ) 793 if (drive->debug_mask & IDE_DBG_RQ)
787 blk_dump_rq_flags(rq, "ide_cd_do_request"); 794 blk_dump_rq_flags(rq, "ide_cd_do_request");
788 795
789 switch (rq->cmd_type) { 796 switch (req_op(rq)) {
790 case REQ_TYPE_FS: 797 default:
791 if (cdrom_start_rw(drive, rq) == ide_stopped) 798 if (cdrom_start_rw(drive, rq) == ide_stopped)
792 goto out_end; 799 goto out_end;
793 break; 800 break;
794 case REQ_TYPE_ATA_SENSE: 801 case REQ_OP_SCSI_IN:
795 case REQ_TYPE_BLOCK_PC: 802 case REQ_OP_SCSI_OUT:
796 case REQ_TYPE_ATA_PC: 803 handle_pc:
797 if (!rq->timeout) 804 if (!rq->timeout)
798 rq->timeout = ATAPI_WAIT_PC; 805 rq->timeout = ATAPI_WAIT_PC;
799
800 cdrom_do_block_pc(drive, rq); 806 cdrom_do_block_pc(drive, rq);
801 break; 807 break;
802 case REQ_TYPE_DRV_PRIV: 808 case REQ_OP_DRV_IN:
803 /* right now this can only be a reset... */ 809 case REQ_OP_DRV_OUT:
804 uptodate = 1; 810 switch (ide_req(rq)->type) {
805 goto out_end; 811 case ATA_PRIV_MISC:
806 default: 812 /* right now this can only be a reset... */
807 BUG(); 813 uptodate = 1;
814 goto out_end;
815 case ATA_PRIV_SENSE:
816 case ATA_PRIV_PC:
817 goto handle_pc;
818 default:
819 BUG();
820 }
808 } 821 }
809 822
810 /* prepare sense request for this command */ 823 /* prepare sense request for this command */
@@ -817,7 +830,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
817 830
818 cmd.rq = rq; 831 cmd.rq = rq;
819 832
820 if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { 833 if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
821 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); 834 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
822 ide_map_sg(drive, &cmd); 835 ide_map_sg(drive, &cmd);
823 } 836 }
@@ -1312,28 +1325,29 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
1312 int hard_sect = queue_logical_block_size(q); 1325 int hard_sect = queue_logical_block_size(q);
1313 long block = (long)blk_rq_pos(rq) / (hard_sect >> 9); 1326 long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
1314 unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9); 1327 unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
1328 struct scsi_request *req = scsi_req(rq);
1315 1329
1316 memset(rq->cmd, 0, BLK_MAX_CDB); 1330 memset(req->cmd, 0, BLK_MAX_CDB);
1317 1331
1318 if (rq_data_dir(rq) == READ) 1332 if (rq_data_dir(rq) == READ)
1319 rq->cmd[0] = GPCMD_READ_10; 1333 req->cmd[0] = GPCMD_READ_10;
1320 else 1334 else
1321 rq->cmd[0] = GPCMD_WRITE_10; 1335 req->cmd[0] = GPCMD_WRITE_10;
1322 1336
1323 /* 1337 /*
1324 * fill in lba 1338 * fill in lba
1325 */ 1339 */
1326 rq->cmd[2] = (block >> 24) & 0xff; 1340 req->cmd[2] = (block >> 24) & 0xff;
1327 rq->cmd[3] = (block >> 16) & 0xff; 1341 req->cmd[3] = (block >> 16) & 0xff;
1328 rq->cmd[4] = (block >> 8) & 0xff; 1342 req->cmd[4] = (block >> 8) & 0xff;
1329 rq->cmd[5] = block & 0xff; 1343 req->cmd[5] = block & 0xff;
1330 1344
1331 /* 1345 /*
1332 * and transfer length 1346 * and transfer length
1333 */ 1347 */
1334 rq->cmd[7] = (blocks >> 8) & 0xff; 1348 req->cmd[7] = (blocks >> 8) & 0xff;
1335 rq->cmd[8] = blocks & 0xff; 1349 req->cmd[8] = blocks & 0xff;
1336 rq->cmd_len = 10; 1350 req->cmd_len = 10;
1337 return BLKPREP_OK; 1351 return BLKPREP_OK;
1338} 1352}
1339 1353
@@ -1343,7 +1357,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
1343 */ 1357 */
1344static int ide_cdrom_prep_pc(struct request *rq) 1358static int ide_cdrom_prep_pc(struct request *rq)
1345{ 1359{
1346 u8 *c = rq->cmd; 1360 u8 *c = scsi_req(rq)->cmd;
1347 1361
1348 /* transform 6-byte read/write commands to the 10-byte version */ 1362 /* transform 6-byte read/write commands to the 10-byte version */
1349 if (c[0] == READ_6 || c[0] == WRITE_6) { 1363 if (c[0] == READ_6 || c[0] == WRITE_6) {
@@ -1354,7 +1368,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
1354 c[2] = 0; 1368 c[2] = 0;
1355 c[1] &= 0xe0; 1369 c[1] &= 0xe0;
1356 c[0] += (READ_10 - READ_6); 1370 c[0] += (READ_10 - READ_6);
1357 rq->cmd_len = 10; 1371 scsi_req(rq)->cmd_len = 10;
1358 return BLKPREP_OK; 1372 return BLKPREP_OK;
1359 } 1373 }
1360 1374
@@ -1373,9 +1387,9 @@ static int ide_cdrom_prep_pc(struct request *rq)
1373 1387
1374static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq) 1388static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
1375{ 1389{
1376 if (rq->cmd_type == REQ_TYPE_FS) 1390 if (!blk_rq_is_passthrough(rq))
1377 return ide_cdrom_prep_fs(q, rq); 1391 return ide_cdrom_prep_fs(q, rq);
1378 else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) 1392 else if (blk_rq_is_scsi(rq))
1379 return ide_cdrom_prep_pc(rq); 1393 return ide_cdrom_prep_pc(rq);
1380 1394
1381 return 0; 1395 return 0;
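
Taken together, ide-cd now distinguishes three request classes where it used to switch on cmd_type. The shape of the dispatch, condensed from ide_cd_do_request and ide_cdrom_prep_fn above:

	switch (req_op(rq)) {
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		/* SG_IO-style passthrough: CDB arrives in scsi_req(rq)->cmd */
		break;
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		/* driver-private: subtype in ide_req(rq)->type (ATA_PRIV_*) */
		break;
	default:
		/* REQ_OP_READ/WRITE: fs I/O, prep builds the 10-byte r/w CDB */
		break;
	}
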
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index f085e3a2e1d6..9fcefbc8425e 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -303,8 +303,9 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
303 struct request *rq; 303 struct request *rq;
304 int ret; 304 int ret;
305 305
306 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 306 rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
307 rq->cmd_type = REQ_TYPE_DRV_PRIV; 307 scsi_req_init(rq);
308 ide_req(rq)->type = ATA_PRIV_MISC;
308 rq->rq_flags = RQF_QUIET; 309 rq->rq_flags = RQF_QUIET;
309 ret = blk_execute_rq(drive->queue, cd->disk, rq, 0); 310 ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
310 blk_put_request(rq); 311 blk_put_request(rq);
diff --git a/drivers/ide/ide-cd_verbose.c b/drivers/ide/ide-cd_verbose.c
index f079ca2f260b..58a6feb74c02 100644
--- a/drivers/ide/ide-cd_verbose.c
+++ b/drivers/ide/ide-cd_verbose.c
@@ -315,12 +315,12 @@ void ide_cd_log_error(const char *name, struct request *failed_command,
315 while (hi > lo) { 315 while (hi > lo) {
316 mid = (lo + hi) / 2; 316 mid = (lo + hi) / 2;
317 if (packet_command_texts[mid].packet_command == 317 if (packet_command_texts[mid].packet_command ==
318 failed_command->cmd[0]) { 318 scsi_req(failed_command)->cmd[0]) {
319 s = packet_command_texts[mid].text; 319 s = packet_command_texts[mid].text;
320 break; 320 break;
321 } 321 }
322 if (packet_command_texts[mid].packet_command > 322 if (packet_command_texts[mid].packet_command >
323 failed_command->cmd[0]) 323 scsi_req(failed_command)->cmd[0])
324 hi = mid; 324 hi = mid;
325 else 325 else
326 lo = mid + 1; 326 lo = mid + 1;
@@ -329,7 +329,7 @@ void ide_cd_log_error(const char *name, struct request *failed_command,
329 printk(KERN_ERR " The failed \"%s\" packet command " 329 printk(KERN_ERR " The failed \"%s\" packet command "
330 "was: \n \"", s); 330 "was: \n \"", s);
331 for (i = 0; i < BLK_MAX_CDB; i++) 331 for (i = 0; i < BLK_MAX_CDB; i++)
332 printk(KERN_CONT "%02x ", failed_command->cmd[i]); 332 printk(KERN_CONT "%02x ", scsi_req(failed_command)->cmd[i]);
333 printk(KERN_CONT "\"\n"); 333 printk(KERN_CONT "\"\n");
334 } 334 }
335 335
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index 0dd43b4fcec6..a45dda5386e4 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -165,11 +165,12 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
165 if (!(setting->flags & DS_SYNC)) 165 if (!(setting->flags & DS_SYNC))
166 return setting->set(drive, arg); 166 return setting->set(drive, arg);
167 167
168 rq = blk_get_request(q, READ, __GFP_RECLAIM); 168 rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
169 rq->cmd_type = REQ_TYPE_DRV_PRIV; 169 scsi_req_init(rq);
170 rq->cmd_len = 5; 170 ide_req(rq)->type = ATA_PRIV_MISC;
171 rq->cmd[0] = REQ_DEVSET_EXEC; 171 scsi_req(rq)->cmd_len = 5;
172 *(int *)&rq->cmd[1] = arg; 172 scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
173 *(int *)&scsi_req(rq)->cmd[1] = arg;
173 rq->special = setting->set; 174 rq->special = setting->set;
174 175
175 if (blk_execute_rq(q, NULL, rq, 0)) 176 if (blk_execute_rq(q, NULL, rq, 0))
@@ -183,7 +184,7 @@ ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
183{ 184{
184 int err, (*setfunc)(ide_drive_t *, int) = rq->special; 185 int err, (*setfunc)(ide_drive_t *, int) = rq->special;
185 186
186 err = setfunc(drive, *(int *)&rq->cmd[1]); 187 err = setfunc(drive, *(int *)&scsi_req(rq)->cmd[1]);
187 if (err) 188 if (err)
188 rq->errors = err; 189 rq->errors = err;
189 ide_complete_rq(drive, err, blk_rq_bytes(rq)); 190 ide_complete_rq(drive, err, blk_rq_bytes(rq));
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 5ceace542b77..186159715b71 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -184,7 +184,7 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
184 ide_hwif_t *hwif = drive->hwif; 184 ide_hwif_t *hwif = drive->hwif;
185 185
186 BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED); 186 BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
187 BUG_ON(rq->cmd_type != REQ_TYPE_FS); 187 BUG_ON(blk_rq_is_passthrough(rq));
188 188
189 ledtrig_disk_activity(); 189 ledtrig_disk_activity();
190 190
@@ -452,8 +452,9 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
452 cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; 452 cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
453 cmd->tf_flags = IDE_TFLAG_DYN; 453 cmd->tf_flags = IDE_TFLAG_DYN;
454 cmd->protocol = ATA_PROT_NODATA; 454 cmd->protocol = ATA_PROT_NODATA;
455 455 rq->cmd_flags &= ~REQ_OP_MASK;
456 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 456 rq->cmd_flags |= REQ_OP_DRV_OUT;
457 ide_req(rq)->type = ATA_PRIV_TASKFILE;
457 rq->special = cmd; 458 rq->special = cmd;
458 cmd->rq = rq; 459 cmd->rq = rq;
459 460
@@ -477,8 +478,9 @@ static int set_multcount(ide_drive_t *drive, int arg)
477 if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) 478 if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
478 return -EBUSY; 479 return -EBUSY;
479 480
480 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 481 rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
481 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 482 scsi_req_init(rq);
483 ide_req(rq)->type = ATA_PRIV_TASKFILE;
482 484
483 drive->mult_req = arg; 485 drive->mult_req = arg;
484 drive->special_flags |= IDE_SFLAG_SET_MULTMODE; 486 drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index d6da011299f5..cf3af6840368 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -123,8 +123,8 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
123 return ide_stopped; 123 return ide_stopped;
124 124
125 /* retry only "normal" I/O: */ 125 /* retry only "normal" I/O: */
126 if (rq->cmd_type != REQ_TYPE_FS) { 126 if (blk_rq_is_passthrough(rq)) {
127 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 127 if (ata_taskfile_request(rq)) {
128 struct ide_cmd *cmd = rq->special; 128 struct ide_cmd *cmd = rq->special;
129 129
130 if (cmd) 130 if (cmd)
@@ -147,8 +147,8 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
147{ 147{
148 struct request *rq = drive->hwif->rq; 148 struct request *rq = drive->hwif->rq;
149 149
150 if (rq && rq->cmd_type == REQ_TYPE_DRV_PRIV && 150 if (rq && ata_misc_request(rq) &&
151 rq->cmd[0] == REQ_DRIVE_RESET) { 151 scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
152 if (err <= 0 && rq->errors == 0) 152 if (err <= 0 && rq->errors == 0)
153 rq->errors = -EIO; 153 rq->errors = -EIO;
154 ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq)); 154 ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
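
The ata_misc_request() and ata_taskfile_request() predicates used here replace direct cmd_type comparisons. The series presumably defines them in include/linux/ide.h along these lines, keyed off the driver-private op check plus the sub-type stored in the IDE request payload:

static inline bool ata_misc_request(struct request *rq)
{
	return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
}

static inline bool ata_taskfile_request(struct request *rq)
{
	return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE;
}
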
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index f079d8d1d856..a69e8013f1df 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -72,7 +72,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
72 drive->failed_pc = NULL; 72 drive->failed_pc = NULL;
73 73
74 if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 || 74 if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
75 rq->cmd_type == REQ_TYPE_BLOCK_PC) 75 (req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT))
76 uptodate = 1; /* FIXME */ 76 uptodate = 1; /* FIXME */
77 else if (pc->c[0] == GPCMD_REQUEST_SENSE) { 77 else if (pc->c[0] == GPCMD_REQUEST_SENSE) {
78 78
@@ -97,7 +97,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
97 "Aborting request!\n"); 97 "Aborting request!\n");
98 } 98 }
99 99
100 if (rq->cmd_type == REQ_TYPE_DRV_PRIV) 100 if (ata_misc_request(rq))
101 rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL; 101 rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;
102 102
103 return uptodate; 103 return uptodate;
@@ -203,7 +203,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
203 put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]); 203 put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]);
204 put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]); 204 put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);
205 205
206 memcpy(rq->cmd, pc->c, 12); 206 memcpy(scsi_req(rq)->cmd, pc->c, 12);
207 207
208 pc->rq = rq; 208 pc->rq = rq;
209 if (cmd == WRITE) 209 if (cmd == WRITE)
@@ -216,7 +216,7 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
216 struct ide_atapi_pc *pc, struct request *rq) 216 struct ide_atapi_pc *pc, struct request *rq)
217{ 217{
218 ide_init_pc(pc); 218 ide_init_pc(pc);
219 memcpy(pc->c, rq->cmd, sizeof(pc->c)); 219 memcpy(pc->c, scsi_req(rq)->cmd, sizeof(pc->c));
220 pc->rq = rq; 220 pc->rq = rq;
221 if (blk_rq_bytes(rq)) { 221 if (blk_rq_bytes(rq)) {
222 pc->flags |= PC_FLAG_DMA_OK; 222 pc->flags |= PC_FLAG_DMA_OK;
@@ -246,7 +246,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
246 } else 246 } else
247 printk(KERN_ERR PFX "%s: I/O error\n", drive->name); 247 printk(KERN_ERR PFX "%s: I/O error\n", drive->name);
248 248
249 if (rq->cmd_type == REQ_TYPE_DRV_PRIV) { 249 if (ata_misc_request(rq)) {
250 rq->errors = 0; 250 rq->errors = 0;
251 ide_complete_rq(drive, 0, blk_rq_bytes(rq)); 251 ide_complete_rq(drive, 0, blk_rq_bytes(rq));
252 return ide_stopped; 252 return ide_stopped;
@@ -254,8 +254,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
254 goto out_end; 254 goto out_end;
255 } 255 }
256 256
257 switch (rq->cmd_type) { 257 switch (req_op(rq)) {
258 case REQ_TYPE_FS: 258 default:
259 if (((long)blk_rq_pos(rq) % floppy->bs_factor) || 259 if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
260 (blk_rq_sectors(rq) % floppy->bs_factor)) { 260 (blk_rq_sectors(rq) % floppy->bs_factor)) {
261 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n", 261 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
@@ -265,16 +265,21 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
265 pc = &floppy->queued_pc; 265 pc = &floppy->queued_pc;
266 idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block); 266 idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
267 break; 267 break;
268 case REQ_TYPE_DRV_PRIV: 268 case REQ_OP_SCSI_IN:
269 case REQ_TYPE_ATA_SENSE: 269 case REQ_OP_SCSI_OUT:
270 pc = (struct ide_atapi_pc *)rq->special;
271 break;
272 case REQ_TYPE_BLOCK_PC:
273 pc = &floppy->queued_pc; 270 pc = &floppy->queued_pc;
274 idefloppy_blockpc_cmd(floppy, pc, rq); 271 idefloppy_blockpc_cmd(floppy, pc, rq);
275 break; 272 break;
276 default: 273 case REQ_OP_DRV_IN:
277 BUG(); 274 case REQ_OP_DRV_OUT:
275 switch (ide_req(rq)->type) {
276 case ATA_PRIV_MISC:
277 case ATA_PRIV_SENSE:
278 pc = (struct ide_atapi_pc *)rq->special;
279 break;
280 default:
281 BUG();
282 }
278 } 283 }
279 284
280 ide_prep_sense(drive, rq); 285 ide_prep_sense(drive, rq);
@@ -286,7 +291,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
286 291
287 cmd.rq = rq; 292 cmd.rq = rq;
288 293
289 if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { 294 if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
290 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); 295 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
291 ide_map_sg(drive, &cmd); 296 ide_map_sg(drive, &cmd);
292 } 297 }
@@ -296,7 +301,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
296 return ide_floppy_issue_pc(drive, &cmd, pc); 301 return ide_floppy_issue_pc(drive, &cmd, pc);
297out_end: 302out_end:
298 drive->failed_pc = NULL; 303 drive->failed_pc = NULL;
299 if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) 304 if (blk_rq_is_passthrough(rq) && rq->errors == 0)
300 rq->errors = -EIO; 305 rq->errors = -EIO;
301 ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); 306 ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
302 return ide_stopped; 307 return ide_stopped;
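
The rewritten ide_floppy_do_request() dispatch shows the shape most drivers in this series converge on: one flat switch on cmd_type becomes a switch on req_op(), with a nested switch on ide_req(rq)->type consulted only for the driver-private ops. A condensed sketch of that two-level dispatch; floppy_pc_for_rq() is an illustrative helper, not code from the patch, and the packet-command construction is elided:

static struct ide_atapi_pc *floppy_pc_for_rq(struct ide_disk_obj *floppy,
					     struct request *rq)
{
	switch (req_op(rq)) {
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:		/* SG_IO, was REQ_TYPE_BLOCK_PC */
		return &floppy->queued_pc;
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:		/* was REQ_TYPE_DRV_PRIV/_ATA_SENSE */
		switch (ide_req(rq)->type) {
		case ATA_PRIV_MISC:
		case ATA_PRIV_SENSE:
			return (struct ide_atapi_pc *)rq->special;
		default:
			BUG();
		}
	default:			/* REQ_OP_READ/WRITE: filesystem I/O */
		return &floppy->queued_pc;
	}
}
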
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 201e43fcbc94..043b1fb963cb 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -102,7 +102,7 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
102 drive->dev_flags |= IDE_DFLAG_PARKED; 102 drive->dev_flags |= IDE_DFLAG_PARKED;
103 } 103 }
104 104
105 if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 105 if (rq && ata_taskfile_request(rq)) {
106 struct ide_cmd *orig_cmd = rq->special; 106 struct ide_cmd *orig_cmd = rq->special;
107 107
108 if (cmd->tf_flags & IDE_TFLAG_DYN) 108 if (cmd->tf_flags & IDE_TFLAG_DYN)
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(ide_complete_rq);
135 135
136void ide_kill_rq(ide_drive_t *drive, struct request *rq) 136void ide_kill_rq(ide_drive_t *drive, struct request *rq)
137{ 137{
138 u8 drv_req = (rq->cmd_type == REQ_TYPE_DRV_PRIV) && rq->rq_disk; 138 u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
139 u8 media = drive->media; 139 u8 media = drive->media;
140 140
141 drive->failed_pc = NULL; 141 drive->failed_pc = NULL;
@@ -145,7 +145,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
145 } else { 145 } else {
146 if (media == ide_tape) 146 if (media == ide_tape)
147 rq->errors = IDE_DRV_ERROR_GENERAL; 147 rq->errors = IDE_DRV_ERROR_GENERAL;
148 else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) 148 else if (blk_rq_is_passthrough(rq) && rq->errors == 0)
149 rq->errors = -EIO; 149 rq->errors = -EIO;
150 } 150 }
151 151
@@ -279,7 +279,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
279 279
280static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq) 280static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
281{ 281{
282 u8 cmd = rq->cmd[0]; 282 u8 cmd = scsi_req(rq)->cmd[0];
283 283
284 switch (cmd) { 284 switch (cmd) {
285 case REQ_PARK_HEADS: 285 case REQ_PARK_HEADS:
@@ -340,7 +340,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
340 if (drive->current_speed == 0xff) 340 if (drive->current_speed == 0xff)
341 ide_config_drive_speed(drive, drive->desired_speed); 341 ide_config_drive_speed(drive, drive->desired_speed);
342 342
343 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) 343 if (ata_taskfile_request(rq))
344 return execute_drive_cmd(drive, rq); 344 return execute_drive_cmd(drive, rq);
345 else if (ata_pm_request(rq)) { 345 else if (ata_pm_request(rq)) {
346 struct ide_pm_state *pm = rq->special; 346 struct ide_pm_state *pm = rq->special;
@@ -353,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
353 pm->pm_step == IDE_PM_COMPLETED) 353 pm->pm_step == IDE_PM_COMPLETED)
354 ide_complete_pm_rq(drive, rq); 354 ide_complete_pm_rq(drive, rq);
355 return startstop; 355 return startstop;
356 } else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_DRV_PRIV) 356 } else if (!rq->rq_disk && ata_misc_request(rq))
357 /* 357 /*
358 * TODO: Once all ULDs have been modified to 358 * TODO: Once all ULDs have been modified to
359 * check for specific op codes rather than 359 * check for specific op codes rather than
@@ -545,6 +545,7 @@ repeat:
545 goto plug_device; 545 goto plug_device;
546 } 546 }
547 547
548 scsi_req(rq)->resid_len = blk_rq_bytes(rq);
548 hwif->rq = rq; 549 hwif->rq = rq;
549 550
550 spin_unlock_irq(&hwif->lock); 551 spin_unlock_irq(&hwif->lock);
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index d05db2469209..248a3e0ceb46 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -125,8 +125,9 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
125 if (NULL == (void *) arg) { 125 if (NULL == (void *) arg) {
126 struct request *rq; 126 struct request *rq;
127 127
128 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 128 rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
129 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 129 scsi_req_init(rq);
130 ide_req(rq)->type = ATA_PRIV_TASKFILE;
130 err = blk_execute_rq(drive->queue, NULL, rq, 0); 131 err = blk_execute_rq(drive->queue, NULL, rq, 0);
131 blk_put_request(rq); 132 blk_put_request(rq);
132 133
@@ -221,10 +222,11 @@ static int generic_drive_reset(ide_drive_t *drive)
221 struct request *rq; 222 struct request *rq;
222 int ret = 0; 223 int ret = 0;
223 224
224 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 225 rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
225 rq->cmd_type = REQ_TYPE_DRV_PRIV; 226 scsi_req_init(rq);
226 rq->cmd_len = 1; 227 ide_req(rq)->type = ATA_PRIV_MISC;
227 rq->cmd[0] = REQ_DRIVE_RESET; 228 scsi_req(rq)->cmd_len = 1;
229 scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
228 if (blk_execute_rq(drive->queue, NULL, rq, 1)) 230 if (blk_execute_rq(drive->queue, NULL, rq, 1))
229 ret = rq->errors; 231 ret = rq->errors;
230 blk_put_request(rq); 232 blk_put_request(rq);
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 2d7dca56dd24..101aed9a61ca 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -31,10 +31,11 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
31 } 31 }
32 spin_unlock_irq(&hwif->lock); 32 spin_unlock_irq(&hwif->lock);
33 33
34 rq = blk_get_request(q, READ, __GFP_RECLAIM); 34 rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
35 rq->cmd[0] = REQ_PARK_HEADS; 35 scsi_req_init(rq);
36 rq->cmd_len = 1; 36 scsi_req(rq)->cmd[0] = REQ_PARK_HEADS;
37 rq->cmd_type = REQ_TYPE_DRV_PRIV; 37 scsi_req(rq)->cmd_len = 1;
38 ide_req(rq)->type = ATA_PRIV_MISC;
38 rq->special = &timeout; 39 rq->special = &timeout;
39 rc = blk_execute_rq(q, NULL, rq, 1); 40 rc = blk_execute_rq(q, NULL, rq, 1);
40 blk_put_request(rq); 41 blk_put_request(rq);
@@ -45,13 +46,14 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
45 * Make sure that *some* command is sent to the drive after the 46 * Make sure that *some* command is sent to the drive after the
46 * timeout has expired, so power management will be reenabled. 47 * timeout has expired, so power management will be reenabled.
47 */ 48 */
48 rq = blk_get_request(q, READ, GFP_NOWAIT); 49 rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_NOWAIT);
50 scsi_req_init(rq);
49 if (IS_ERR(rq)) 51 if (IS_ERR(rq))
50 goto out; 52 goto out;
51 53
52 rq->cmd[0] = REQ_UNPARK_HEADS; 54 scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
53 rq->cmd_len = 1; 55 scsi_req(rq)->cmd_len = 1;
54 rq->cmd_type = REQ_TYPE_DRV_PRIV; 56 ide_req(rq)->type = ATA_PRIV_MISC;
55 elv_add_request(q, rq, ELEVATOR_INSERT_FRONT); 57 elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
56 58
57out: 59out:
@@ -64,7 +66,7 @@ ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq)
64 struct ide_taskfile *tf = &cmd.tf; 66 struct ide_taskfile *tf = &cmd.tf;
65 67
66 memset(&cmd, 0, sizeof(cmd)); 68 memset(&cmd, 0, sizeof(cmd));
67 if (rq->cmd[0] == REQ_PARK_HEADS) { 69 if (scsi_req(rq)->cmd[0] == REQ_PARK_HEADS) {
68 drive->sleep = *(unsigned long *)rq->special; 70 drive->sleep = *(unsigned long *)rq->special;
69 drive->dev_flags |= IDE_DFLAG_SLEEPING; 71 drive->dev_flags |= IDE_DFLAG_SLEEPING;
70 tf->command = ATA_CMD_IDLEIMMEDIATE; 72 tf->command = ATA_CMD_IDLEIMMEDIATE;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index a015acdffb39..ec951be4b0c8 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -18,8 +18,9 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
18 } 18 }
19 19
20 memset(&rqpm, 0, sizeof(rqpm)); 20 memset(&rqpm, 0, sizeof(rqpm));
21 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 21 rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
22 rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND; 22 scsi_req_init(rq);
23 ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
23 rq->special = &rqpm; 24 rq->special = &rqpm;
24 rqpm.pm_step = IDE_PM_START_SUSPEND; 25 rqpm.pm_step = IDE_PM_START_SUSPEND;
25 if (mesg.event == PM_EVENT_PRETHAW) 26 if (mesg.event == PM_EVENT_PRETHAW)
@@ -88,8 +89,9 @@ int generic_ide_resume(struct device *dev)
88 } 89 }
89 90
90 memset(&rqpm, 0, sizeof(rqpm)); 91 memset(&rqpm, 0, sizeof(rqpm));
91 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 92 rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
92 rq->cmd_type = REQ_TYPE_ATA_PM_RESUME; 93 scsi_req_init(rq);
94 ide_req(rq)->type = ATA_PRIV_PM_RESUME;
93 rq->rq_flags |= RQF_PREEMPT; 95 rq->rq_flags |= RQF_PREEMPT;
94 rq->special = &rqpm; 96 rq->special = &rqpm;
95 rqpm.pm_step = IDE_PM_START_RESUME; 97 rqpm.pm_step = IDE_PM_START_RESUME;
@@ -221,10 +223,10 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
221 223
222#ifdef DEBUG_PM 224#ifdef DEBUG_PM
223 printk("%s: completing PM request, %s\n", drive->name, 225 printk("%s: completing PM request, %s\n", drive->name,
224 (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND) ? "suspend" : "resume"); 226 (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
225#endif 227#endif
226 spin_lock_irqsave(q->queue_lock, flags); 228 spin_lock_irqsave(q->queue_lock, flags);
227 if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND) 229 if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
228 blk_stop_queue(q); 230 blk_stop_queue(q);
229 else 231 else
230 drive->dev_flags &= ~IDE_DFLAG_BLOCKED; 232 drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
@@ -240,11 +242,13 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
240{ 242{
241 struct ide_pm_state *pm = rq->special; 243 struct ide_pm_state *pm = rq->special;
242 244
243 if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND && 245 if (blk_rq_is_private(rq) &&
246 ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
244 pm->pm_step == IDE_PM_START_SUSPEND) 247 pm->pm_step == IDE_PM_START_SUSPEND)
245 /* Mark drive blocked when starting the suspend sequence. */ 248 /* Mark drive blocked when starting the suspend sequence. */
246 drive->dev_flags |= IDE_DFLAG_BLOCKED; 249 drive->dev_flags |= IDE_DFLAG_BLOCKED;
247 else if (rq->cmd_type == REQ_TYPE_ATA_PM_RESUME && 250 else if (blk_rq_is_private(rq) &&
251 ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
248 pm->pm_step == IDE_PM_START_RESUME) { 252 pm->pm_step == IDE_PM_START_RESUME) {
249 /* 253 /*
250 * The first thing we do on wakeup is to wait for BSY bit to 254 * The first thing we do on wakeup is to wait for BSY bit to
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 330e319419e6..a74ae8df4bb8 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -741,6 +741,14 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
741 } 741 }
742} 742}
743 743
744static int ide_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
745{
746 struct ide_request *req = blk_mq_rq_to_pdu(rq);
747
748 req->sreq.sense = req->sense;
749 return 0;
750}
751
744/* 752/*
745 * init request queue 753 * init request queue
746 */ 754 */
@@ -758,11 +766,18 @@ static int ide_init_queue(ide_drive_t *drive)
758 * limits and LBA48 we could raise it but as yet 766 * limits and LBA48 we could raise it but as yet
759 * do not. 767 * do not.
760 */ 768 */
761 769 q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif));
762 q = blk_init_queue_node(do_ide_request, NULL, hwif_to_node(hwif));
763 if (!q) 770 if (!q)
764 return 1; 771 return 1;
765 772
773 q->request_fn = do_ide_request;
774 q->init_rq_fn = ide_init_rq;
775 q->cmd_size = sizeof(struct ide_request);
776 if (blk_init_allocated_queue(q) < 0) {
777 blk_cleanup_queue(q);
778 return 1;
779 }
780
766 q->queuedata = drive; 781 q->queuedata = drive;
767 blk_queue_segment_boundary(q, 0xffff); 782 blk_queue_segment_boundary(q, 0xffff);
768 783
@@ -1131,10 +1146,12 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
1131 ide_port_for_each_dev(i, drive, hwif) { 1146 ide_port_for_each_dev(i, drive, hwif) {
1132 u8 j = (hwif->index * MAX_DRIVES) + i; 1147 u8 j = (hwif->index * MAX_DRIVES) + i;
1133 u16 *saved_id = drive->id; 1148 u16 *saved_id = drive->id;
1149 struct request *saved_sense_rq = drive->sense_rq;
1134 1150
1135 memset(drive, 0, sizeof(*drive)); 1151 memset(drive, 0, sizeof(*drive));
1136 memset(saved_id, 0, SECTOR_SIZE); 1152 memset(saved_id, 0, SECTOR_SIZE);
1137 drive->id = saved_id; 1153 drive->id = saved_id;
1154 drive->sense_rq = saved_sense_rq;
1138 1155
1139 drive->media = ide_disk; 1156 drive->media = ide_disk;
1140 drive->select = (i << 4) | ATA_DEVICE_OBS; 1157 drive->select = (i << 4) | ATA_DEVICE_OBS;
@@ -1241,6 +1258,7 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
1241 int i; 1258 int i;
1242 1259
1243 ide_port_for_each_dev(i, drive, hwif) { 1260 ide_port_for_each_dev(i, drive, hwif) {
1261 kfree(drive->sense_rq);
1244 kfree(drive->id); 1262 kfree(drive->id);
1245 kfree(drive); 1263 kfree(drive);
1246 } 1264 }
@@ -1248,11 +1266,10 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
1248 1266
1249static int ide_port_alloc_devices(ide_hwif_t *hwif, int node) 1267static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
1250{ 1268{
1269 ide_drive_t *drive;
1251 int i; 1270 int i;
1252 1271
1253 for (i = 0; i < MAX_DRIVES; i++) { 1272 for (i = 0; i < MAX_DRIVES; i++) {
1254 ide_drive_t *drive;
1255
1256 drive = kzalloc_node(sizeof(*drive), GFP_KERNEL, node); 1273 drive = kzalloc_node(sizeof(*drive), GFP_KERNEL, node);
1257 if (drive == NULL) 1274 if (drive == NULL)
1258 goto out_nomem; 1275 goto out_nomem;
@@ -1267,12 +1284,21 @@ static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
1267 */ 1284 */
1268 drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node); 1285 drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node);
1269 if (drive->id == NULL) 1286 if (drive->id == NULL)
1270 goto out_nomem; 1287 goto out_free_drive;
1288
1289 drive->sense_rq = kmalloc(sizeof(struct request) +
1290 sizeof(struct ide_request), GFP_KERNEL);
1291 if (!drive->sense_rq)
1292 goto out_free_id;
1271 1293
1272 hwif->devices[i] = drive; 1294 hwif->devices[i] = drive;
1273 } 1295 }
1274 return 0; 1296 return 0;
1275 1297
1298out_free_id:
1299 kfree(drive->id);
1300out_free_drive:
1301 kfree(drive);
1276out_nomem: 1302out_nomem:
1277 ide_port_free_devices(hwif); 1303 ide_port_free_devices(hwif);
1278 return -ENOMEM; 1304 return -ENOMEM;
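
Two structural changes land in ide-probe.c: the queue is now brought up in two steps (blk_alloc_queue_node() plus blk_init_allocated_queue()) so that cmd_size and init_rq_fn can be set in between, and a sense request is preallocated per drive. The cmd_size/init_rq_fn pair only makes sense given a per-request payload; the series presumably defines it in include/linux/ide.h roughly as:

struct ide_request {
	struct scsi_request sreq;		/* CDB, sense, resid_len */
	u8 sense[SCSI_SENSE_BUFFERSIZE];	/* wired up by ide_init_rq() */
	u8 type;				/* ATA_PRIV_* sub-type */
};

static inline struct ide_request *ide_req(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}
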
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 9ecf4e35adcd..3c1b7974d66d 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -282,7 +282,7 @@ static void idetape_analyze_error(ide_drive_t *drive)
282 282
283 /* correct remaining bytes to transfer */ 283 /* correct remaining bytes to transfer */
284 if (pc->flags & PC_FLAG_DMA_ERROR) 284 if (pc->flags & PC_FLAG_DMA_ERROR)
285 rq->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]); 285 scsi_req(rq)->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]);
286 286
287 /* 287 /*
288 * If error was the result of a zero-length read or write command, 288 * If error was the result of a zero-length read or write command,
@@ -316,7 +316,7 @@ static void idetape_analyze_error(ide_drive_t *drive)
316 pc->flags |= PC_FLAG_ABORT; 316 pc->flags |= PC_FLAG_ABORT;
317 } 317 }
318 if (!(pc->flags & PC_FLAG_ABORT) && 318 if (!(pc->flags & PC_FLAG_ABORT) &&
319 (blk_rq_bytes(rq) - rq->resid_len)) 319 (blk_rq_bytes(rq) - scsi_req(rq)->resid_len))
320 pc->retries = IDETAPE_MAX_PC_RETRIES + 1; 320 pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
321 } 321 }
322} 322}
@@ -348,7 +348,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
348 "itself - Aborting request!\n"); 348 "itself - Aborting request!\n");
349 } else if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) { 349 } else if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
350 unsigned int blocks = 350 unsigned int blocks =
351 (blk_rq_bytes(rq) - rq->resid_len) / tape->blk_size; 351 (blk_rq_bytes(rq) - scsi_req(rq)->resid_len) / tape->blk_size;
352 352
353 tape->avg_size += blocks * tape->blk_size; 353 tape->avg_size += blocks * tape->blk_size;
354 354
@@ -560,7 +560,7 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
560 pc->flags |= PC_FLAG_WRITING; 560 pc->flags |= PC_FLAG_WRITING;
561 } 561 }
562 562
563 memcpy(rq->cmd, pc->c, 12); 563 memcpy(scsi_req(rq)->cmd, pc->c, 12);
564} 564}
565 565
566static ide_startstop_t idetape_do_request(ide_drive_t *drive, 566static ide_startstop_t idetape_do_request(ide_drive_t *drive,
@@ -570,14 +570,16 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
570 idetape_tape_t *tape = drive->driver_data; 570 idetape_tape_t *tape = drive->driver_data;
571 struct ide_atapi_pc *pc = NULL; 571 struct ide_atapi_pc *pc = NULL;
572 struct ide_cmd cmd; 572 struct ide_cmd cmd;
573 struct scsi_request *req = scsi_req(rq);
573 u8 stat; 574 u8 stat;
574 575
575 ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, sector: %llu, nr_sectors: %u", 576 ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, sector: %llu, nr_sectors: %u",
576 rq->cmd[0], (unsigned long long)blk_rq_pos(rq), 577 req->cmd[0], (unsigned long long)blk_rq_pos(rq),
577 blk_rq_sectors(rq)); 578 blk_rq_sectors(rq));
578 579
579 BUG_ON(!(rq->cmd_type == REQ_TYPE_DRV_PRIV || 580 BUG_ON(!blk_rq_is_private(rq));
580 rq->cmd_type == REQ_TYPE_ATA_SENSE)); 581 BUG_ON(ide_req(rq)->type != ATA_PRIV_MISC &&
582 ide_req(rq)->type != ATA_PRIV_SENSE);
581 583
582 /* Retry a failed packet command */ 584 /* Retry a failed packet command */
583 if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) { 585 if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) {
@@ -592,7 +594,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
592 stat = hwif->tp_ops->read_status(hwif); 594 stat = hwif->tp_ops->read_status(hwif);
593 595
594 if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 && 596 if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 &&
595 (rq->cmd[13] & REQ_IDETAPE_PC2) == 0) 597 (req->cmd[13] & REQ_IDETAPE_PC2) == 0)
596 drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC; 598 drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
597 599
598 if (drive->dev_flags & IDE_DFLAG_POST_RESET) { 600 if (drive->dev_flags & IDE_DFLAG_POST_RESET) {
@@ -609,7 +611,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
609 } else if (time_after(jiffies, tape->dsc_timeout)) { 611 } else if (time_after(jiffies, tape->dsc_timeout)) {
610 printk(KERN_ERR "ide-tape: %s: DSC timeout\n", 612 printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
611 tape->name); 613 tape->name);
612 if (rq->cmd[13] & REQ_IDETAPE_PC2) { 614 if (req->cmd[13] & REQ_IDETAPE_PC2) {
613 idetape_media_access_finished(drive); 615 idetape_media_access_finished(drive);
614 return ide_stopped; 616 return ide_stopped;
615 } else { 617 } else {
@@ -626,23 +628,23 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
626 tape->postponed_rq = false; 628 tape->postponed_rq = false;
627 } 629 }
628 630
629 if (rq->cmd[13] & REQ_IDETAPE_READ) { 631 if (req->cmd[13] & REQ_IDETAPE_READ) {
630 pc = &tape->queued_pc; 632 pc = &tape->queued_pc;
631 ide_tape_create_rw_cmd(tape, pc, rq, READ_6); 633 ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
632 goto out; 634 goto out;
633 } 635 }
634 if (rq->cmd[13] & REQ_IDETAPE_WRITE) { 636 if (req->cmd[13] & REQ_IDETAPE_WRITE) {
635 pc = &tape->queued_pc; 637 pc = &tape->queued_pc;
636 ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6); 638 ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6);
637 goto out; 639 goto out;
638 } 640 }
639 if (rq->cmd[13] & REQ_IDETAPE_PC1) { 641 if (req->cmd[13] & REQ_IDETAPE_PC1) {
640 pc = (struct ide_atapi_pc *)rq->special; 642 pc = (struct ide_atapi_pc *)rq->special;
641 rq->cmd[13] &= ~(REQ_IDETAPE_PC1); 643 req->cmd[13] &= ~(REQ_IDETAPE_PC1);
642 rq->cmd[13] |= REQ_IDETAPE_PC2; 644 req->cmd[13] |= REQ_IDETAPE_PC2;
643 goto out; 645 goto out;
644 } 646 }
645 if (rq->cmd[13] & REQ_IDETAPE_PC2) { 647 if (req->cmd[13] & REQ_IDETAPE_PC2) {
646 idetape_media_access_finished(drive); 648 idetape_media_access_finished(drive);
647 return ide_stopped; 649 return ide_stopped;
648 } 650 }
@@ -852,9 +854,10 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
852 BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE); 854 BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
853 BUG_ON(size < 0 || size % tape->blk_size); 855 BUG_ON(size < 0 || size % tape->blk_size);
854 856
855 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 857 rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
856 rq->cmd_type = REQ_TYPE_DRV_PRIV; 858 scsi_req_init(rq);
857 rq->cmd[13] = cmd; 859 ide_req(rq)->type = ATA_PRIV_MISC;
860 scsi_req(rq)->cmd[13] = cmd;
858 rq->rq_disk = tape->disk; 861 rq->rq_disk = tape->disk;
859 rq->__sector = tape->first_frame; 862 rq->__sector = tape->first_frame;
860 863
@@ -868,7 +871,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
868 blk_execute_rq(drive->queue, tape->disk, rq, 0); 871 blk_execute_rq(drive->queue, tape->disk, rq, 0);
869 872
870 /* calculate the number of transferred bytes and update buffer state */ 873 /* calculate the number of transferred bytes and update buffer state */
871 size -= rq->resid_len; 874 size -= scsi_req(rq)->resid_len;
872 tape->cur = tape->buf; 875 tape->cur = tape->buf;
873 if (cmd == REQ_IDETAPE_READ) 876 if (cmd == REQ_IDETAPE_READ)
874 tape->valid = size; 877 tape->valid = size;
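
resid_len moves along with the CDB into struct scsi_request, which is why the ide-io.c hunk above now seeds scsi_req(rq)->resid_len with blk_rq_bytes(rq) at dispatch: the tape driver's transferred-byte accounting only works if the field starts at the full request size. The recurring computation as a sketch; bytes_transferred() is an illustrative name:

static unsigned int bytes_transferred(struct request *rq)
{
	/*
	 * resid_len is seeded to blk_rq_bytes(rq) at dispatch, then
	 * decremented (or corrected from sense data) as data moves.
	 */
	return blk_rq_bytes(rq) - scsi_req(rq)->resid_len;
}
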
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index a716693417a3..247b9faccce1 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -428,10 +428,12 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
428{ 428{
429 struct request *rq; 429 struct request *rq;
430 int error; 430 int error;
431 int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
432 431
433 rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM); 432 rq = blk_get_request(drive->queue,
434 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 433 (cmd->tf_flags & IDE_TFLAG_WRITE) ?
434 REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
435 scsi_req_init(rq);
436 ide_req(rq)->type = ATA_PRIV_TASKFILE;
435 437
436 /* 438 /*
437 * (ks) We transfer currently only whole sectors. 439 * (ks) We transfer currently only whole sectors.
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index 247853ea1368..c3062b53056f 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -54,7 +54,7 @@
54#define DRV_NAME "sis5513" 54#define DRV_NAME "sis5513"
55 55
56/* registers layout and init values are chipset family dependent */ 56/* registers layout and init values are chipset family dependent */
57 57#undef ATA_16
58#define ATA_16 0x01 58#define ATA_16 0x01
59#define ATA_33 0x02 59#define ATA_33 0x02
60#define ATA_66 0x03 60#define ATA_66 0x03
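
The sis5513.c change looks odd in isolation: the driver has always used ATA_16 as a local timing-mode value, and the #undef is presumably needed because the IDE headers now reach the SCSI opcode definitions, where the same name is already taken:

/* include/scsi/scsi_proto.h */
#define ATA_16		0x85	/* SCSI ATA PASS-THROUGH(16) opcode */

/* sis5513.c redefines it as a chipset timing class, hence the #undef */
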
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 01035e718c1c..709c9cc34369 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1009,7 +1009,7 @@ static int cached_dev_congested(void *data, int bits)
1009 struct request_queue *q = bdev_get_queue(dc->bdev); 1009 struct request_queue *q = bdev_get_queue(dc->bdev);
1010 int ret = 0; 1010 int ret = 0;
1011 1011
1012 if (bdi_congested(&q->backing_dev_info, bits)) 1012 if (bdi_congested(q->backing_dev_info, bits))
1013 return 1; 1013 return 1;
1014 1014
1015 if (cached_dev_get(dc)) { 1015 if (cached_dev_get(dc)) {
@@ -1018,7 +1018,7 @@ static int cached_dev_congested(void *data, int bits)
1018 1018
1019 for_each_cache(ca, d->c, i) { 1019 for_each_cache(ca, d->c, i) {
1020 q = bdev_get_queue(ca->bdev); 1020 q = bdev_get_queue(ca->bdev);
1021 ret |= bdi_congested(&q->backing_dev_info, bits); 1021 ret |= bdi_congested(q->backing_dev_info, bits);
1022 } 1022 }
1023 1023
1024 cached_dev_put(dc); 1024 cached_dev_put(dc);
@@ -1032,7 +1032,7 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
1032 struct gendisk *g = dc->disk.disk; 1032 struct gendisk *g = dc->disk.disk;
1033 1033
1034 g->queue->make_request_fn = cached_dev_make_request; 1034 g->queue->make_request_fn = cached_dev_make_request;
1035 g->queue->backing_dev_info.congested_fn = cached_dev_congested; 1035 g->queue->backing_dev_info->congested_fn = cached_dev_congested;
1036 dc->disk.cache_miss = cached_dev_cache_miss; 1036 dc->disk.cache_miss = cached_dev_cache_miss;
1037 dc->disk.ioctl = cached_dev_ioctl; 1037 dc->disk.ioctl = cached_dev_ioctl;
1038} 1038}
@@ -1125,7 +1125,7 @@ static int flash_dev_congested(void *data, int bits)
1125 1125
1126 for_each_cache(ca, d->c, i) { 1126 for_each_cache(ca, d->c, i) {
1127 q = bdev_get_queue(ca->bdev); 1127 q = bdev_get_queue(ca->bdev);
1128 ret |= bdi_congested(&q->backing_dev_info, bits); 1128 ret |= bdi_congested(q->backing_dev_info, bits);
1129 } 1129 }
1130 1130
1131 return ret; 1131 return ret;
@@ -1136,7 +1136,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
1136 struct gendisk *g = d->disk; 1136 struct gendisk *g = d->disk;
1137 1137
1138 g->queue->make_request_fn = flash_dev_make_request; 1138 g->queue->make_request_fn = flash_dev_make_request;
1139 g->queue->backing_dev_info.congested_fn = flash_dev_congested; 1139 g->queue->backing_dev_info->congested_fn = flash_dev_congested;
1140 d->cache_miss = flash_dev_cache_miss; 1140 d->cache_miss = flash_dev_cache_miss;
1141 d->ioctl = flash_dev_ioctl; 1141 d->ioctl = flash_dev_ioctl;
1142} 1142}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 3a19cbc8b230..85e3f21c2514 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -807,7 +807,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
807 blk_queue_make_request(q, NULL); 807 blk_queue_make_request(q, NULL);
808 d->disk->queue = q; 808 d->disk->queue = q;
809 q->queuedata = d; 809 q->queuedata = d;
810 q->backing_dev_info.congested_data = d; 810 q->backing_dev_info->congested_data = d;
811 q->limits.max_hw_sectors = UINT_MAX; 811 q->limits.max_hw_sectors = UINT_MAX;
812 q->limits.max_sectors = UINT_MAX; 812 q->limits.max_sectors = UINT_MAX;
813 q->limits.max_segment_size = UINT_MAX; 813 q->limits.max_segment_size = UINT_MAX;
@@ -1132,9 +1132,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
1132 set_capacity(dc->disk.disk, 1132 set_capacity(dc->disk.disk,
1133 dc->bdev->bd_part->nr_sects - dc->sb.data_offset); 1133 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
1134 1134
1135 dc->disk.disk->queue->backing_dev_info.ra_pages = 1135 dc->disk.disk->queue->backing_dev_info->ra_pages =
1136 max(dc->disk.disk->queue->backing_dev_info.ra_pages, 1136 max(dc->disk.disk->queue->backing_dev_info->ra_pages,
1137 q->backing_dev_info.ra_pages); 1137 q->backing_dev_info->ra_pages);
1138 1138
1139 bch_cached_dev_request_init(dc); 1139 bch_cached_dev_request_init(dc);
1140 bch_cached_dev_writeback_init(dc); 1140 bch_cached_dev_writeback_init(dc);
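
The bcache hunks above are part of a tree-wide mechanical conversion: request_queue's backing_dev_info becomes a pointer rather than an embedded struct, so every &q->backing_dev_info drops the & and member access flips from . to ->. The congested-callback pattern after the change, assuming only that conversion; dev_congested() is an illustrative name:

static int dev_congested(struct block_device *bdev, int bits)
{
	struct request_queue *q = bdev_get_queue(bdev);

	/* was: bdi_congested(&q->backing_dev_info, bits) */
	return bdi_congested(q->backing_dev_info, bits);
}
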
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5b9cf56de8ef..894bc14469c8 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2284,7 +2284,7 @@ static void do_waker(struct work_struct *ws)
2284static int is_congested(struct dm_dev *dev, int bdi_bits) 2284static int is_congested(struct dm_dev *dev, int bdi_bits)
2285{ 2285{
2286 struct request_queue *q = bdev_get_queue(dev->bdev); 2286 struct request_queue *q = bdev_get_queue(dev->bdev);
2287 return bdi_congested(&q->backing_dev_info, bdi_bits); 2287 return bdi_congested(q->backing_dev_info, bdi_bits);
2288} 2288}
2289 2289
2290static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits) 2290static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 40ceba1fe8be..136fda3ff9e5 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -92,7 +92,6 @@ struct mapped_device {
92 * io objects are allocated from here. 92 * io objects are allocated from here.
93 */ 93 */
94 mempool_t *io_pool; 94 mempool_t *io_pool;
95 mempool_t *rq_pool;
96 95
97 struct bio_set *bs; 96 struct bio_set *bs;
98 97
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index bf2b2676cb8a..9fab33b113c4 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1379,7 +1379,7 @@ static void stop_worker(struct era *era)
1379static int dev_is_congested(struct dm_dev *dev, int bdi_bits) 1379static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
1380{ 1380{
1381 struct request_queue *q = bdev_get_queue(dev->bdev); 1381 struct request_queue *q = bdev_get_queue(dev->bdev);
1382 return bdi_congested(&q->backing_dev_info, bdi_bits); 1382 return bdi_congested(q->backing_dev_info, bdi_bits);
1383} 1383}
1384 1384
1385static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits) 1385static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3570bcb7a4a4..7f223dbed49f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -92,12 +92,6 @@ struct multipath {
92 92
93 unsigned queue_mode; 93 unsigned queue_mode;
94 94
95 /*
96 * We must use a mempool of dm_mpath_io structs so that we
97 * can resubmit bios on error.
98 */
99 mempool_t *mpio_pool;
100
101 struct mutex work_mutex; 95 struct mutex work_mutex;
102 struct work_struct trigger_event; 96 struct work_struct trigger_event;
103 97
@@ -115,8 +109,6 @@ struct dm_mpath_io {
115 109
116typedef int (*action_fn) (struct pgpath *pgpath); 110typedef int (*action_fn) (struct pgpath *pgpath);
117 111
118static struct kmem_cache *_mpio_cache;
119
120static struct workqueue_struct *kmultipathd, *kmpath_handlerd; 112static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
121static void trigger_event(struct work_struct *work); 113static void trigger_event(struct work_struct *work);
122static void activate_path(struct work_struct *work); 114static void activate_path(struct work_struct *work);
@@ -209,7 +201,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
209 init_waitqueue_head(&m->pg_init_wait); 201 init_waitqueue_head(&m->pg_init_wait);
210 mutex_init(&m->work_mutex); 202 mutex_init(&m->work_mutex);
211 203
212 m->mpio_pool = NULL;
213 m->queue_mode = DM_TYPE_NONE; 204 m->queue_mode = DM_TYPE_NONE;
214 205
215 m->ti = ti; 206 m->ti = ti;
@@ -229,16 +220,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
229 m->queue_mode = DM_TYPE_MQ_REQUEST_BASED; 220 m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
230 else 221 else
231 m->queue_mode = DM_TYPE_REQUEST_BASED; 222 m->queue_mode = DM_TYPE_REQUEST_BASED;
232 } 223 } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
233
234 if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
235 unsigned min_ios = dm_get_reserved_rq_based_ios();
236
237 m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
238 if (!m->mpio_pool)
239 return -ENOMEM;
240 }
241 else if (m->queue_mode == DM_TYPE_BIO_BASED) {
242 INIT_WORK(&m->process_queued_bios, process_queued_bios); 224 INIT_WORK(&m->process_queued_bios, process_queued_bios);
243 /* 225 /*
244 * bio-based doesn't support any direct scsi_dh management; 226 * bio-based doesn't support any direct scsi_dh management;
@@ -263,7 +245,6 @@ static void free_multipath(struct multipath *m)
263 245
264 kfree(m->hw_handler_name); 246 kfree(m->hw_handler_name);
265 kfree(m->hw_handler_params); 247 kfree(m->hw_handler_params);
266 mempool_destroy(m->mpio_pool);
267 kfree(m); 248 kfree(m);
268} 249}
269 250
@@ -272,38 +253,6 @@ static struct dm_mpath_io *get_mpio(union map_info *info)
272 return info->ptr; 253 return info->ptr;
273} 254}
274 255
275static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
276{
277 struct dm_mpath_io *mpio;
278
279 if (!m->mpio_pool) {
280 /* Use blk-mq pdu memory requested via per_io_data_size */
281 mpio = get_mpio(info);
282 memset(mpio, 0, sizeof(*mpio));
283 return mpio;
284 }
285
286 mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
287 if (!mpio)
288 return NULL;
289
290 memset(mpio, 0, sizeof(*mpio));
291 info->ptr = mpio;
292
293 return mpio;
294}
295
296static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
297{
298 /* Only needed for non blk-mq (.request_fn) multipath */
299 if (m->mpio_pool) {
300 struct dm_mpath_io *mpio = info->ptr;
301
302 info->ptr = NULL;
303 mempool_free(mpio, m->mpio_pool);
304 }
305}
306
307static size_t multipath_per_bio_data_size(void) 256static size_t multipath_per_bio_data_size(void)
308{ 257{
309 return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details); 258 return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
@@ -530,16 +479,17 @@ static bool must_push_back_bio(struct multipath *m)
530/* 479/*
531 * Map cloned requests (request-based multipath) 480 * Map cloned requests (request-based multipath)
532 */ 481 */
533static int __multipath_map(struct dm_target *ti, struct request *clone, 482static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
534 union map_info *map_context, 483 union map_info *map_context,
535 struct request *rq, struct request **__clone) 484 struct request **__clone)
536{ 485{
537 struct multipath *m = ti->private; 486 struct multipath *m = ti->private;
538 int r = DM_MAPIO_REQUEUE; 487 int r = DM_MAPIO_REQUEUE;
539 size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq); 488 size_t nr_bytes = blk_rq_bytes(rq);
540 struct pgpath *pgpath; 489 struct pgpath *pgpath;
541 struct block_device *bdev; 490 struct block_device *bdev;
542 struct dm_mpath_io *mpio; 491 struct dm_mpath_io *mpio = get_mpio(map_context);
492 struct request *clone;
543 493
544 /* Do we need to select a new pgpath? */ 494 /* Do we need to select a new pgpath? */
545 pgpath = lockless_dereference(m->current_pgpath); 495 pgpath = lockless_dereference(m->current_pgpath);
@@ -556,42 +506,23 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
556 return r; 506 return r;
557 } 507 }
558 508
559 mpio = set_mpio(m, map_context); 509 memset(mpio, 0, sizeof(*mpio));
560 if (!mpio)
561 /* ENOMEM, requeue */
562 return r;
563
564 mpio->pgpath = pgpath; 510 mpio->pgpath = pgpath;
565 mpio->nr_bytes = nr_bytes; 511 mpio->nr_bytes = nr_bytes;
566 512
567 bdev = pgpath->path.dev->bdev; 513 bdev = pgpath->path.dev->bdev;
568 514
569 if (clone) { 515 clone = blk_get_request(bdev_get_queue(bdev),
570 /* 516 rq->cmd_flags | REQ_NOMERGE,
571 * Old request-based interface: allocated clone is passed in. 517 GFP_ATOMIC);
572 * Used by: .request_fn stacked on .request_fn path(s). 518 if (IS_ERR(clone)) {
573 */ 519 /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
574 clone->q = bdev_get_queue(bdev); 520 return r;
575 clone->rq_disk = bdev->bd_disk;
576 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
577 } else {
578 /*
579 * blk-mq request-based interface; used by both:
580 * .request_fn stacked on blk-mq path(s) and
581 * blk-mq stacked on blk-mq path(s).
582 */
583 clone = blk_mq_alloc_request(bdev_get_queue(bdev),
584 rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
585 if (IS_ERR(clone)) {
586 /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
587 clear_request_fn_mpio(m, map_context);
588 return r;
589 }
590 clone->bio = clone->biotail = NULL;
591 clone->rq_disk = bdev->bd_disk;
592 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
593 *__clone = clone;
594 } 521 }
522 clone->bio = clone->biotail = NULL;
523 clone->rq_disk = bdev->bd_disk;
524 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
525 *__clone = clone;
595 526
596 if (pgpath->pg->ps.type->start_io) 527 if (pgpath->pg->ps.type->start_io)
597 pgpath->pg->ps.type->start_io(&pgpath->pg->ps, 528 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
@@ -600,22 +531,9 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
600 return DM_MAPIO_REMAPPED; 531 return DM_MAPIO_REMAPPED;
601} 532}
602 533
603static int multipath_map(struct dm_target *ti, struct request *clone,
604 union map_info *map_context)
605{
606 return __multipath_map(ti, clone, map_context, NULL, NULL);
607}
608
609static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
610 union map_info *map_context,
611 struct request **clone)
612{
613 return __multipath_map(ti, NULL, map_context, rq, clone);
614}
615
616static void multipath_release_clone(struct request *clone) 534static void multipath_release_clone(struct request *clone)
617{ 535{
618 blk_mq_free_request(clone); 536 blk_put_request(clone);
619} 537}
620 538
621/* 539/*
@@ -1187,7 +1105,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
1187 ti->num_write_same_bios = 1; 1105 ti->num_write_same_bios = 1;
1188 if (m->queue_mode == DM_TYPE_BIO_BASED) 1106 if (m->queue_mode == DM_TYPE_BIO_BASED)
1189 ti->per_io_data_size = multipath_per_bio_data_size(); 1107 ti->per_io_data_size = multipath_per_bio_data_size();
1190 else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED) 1108 else
1191 ti->per_io_data_size = sizeof(struct dm_mpath_io); 1109 ti->per_io_data_size = sizeof(struct dm_mpath_io);
1192 1110
1193 return 0; 1111 return 0;
@@ -1610,7 +1528,6 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
1610 if (ps->type->end_io) 1528 if (ps->type->end_io)
1611 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); 1529 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1612 } 1530 }
1613 clear_request_fn_mpio(m, map_context);
1614 1531
1615 return r; 1532 return r;
1616} 1533}
@@ -2060,7 +1977,6 @@ static struct target_type multipath_target = {
2060 .module = THIS_MODULE, 1977 .module = THIS_MODULE,
2061 .ctr = multipath_ctr, 1978 .ctr = multipath_ctr,
2062 .dtr = multipath_dtr, 1979 .dtr = multipath_dtr,
2063 .map_rq = multipath_map,
2064 .clone_and_map_rq = multipath_clone_and_map, 1980 .clone_and_map_rq = multipath_clone_and_map,
2065 .release_clone_rq = multipath_release_clone, 1981 .release_clone_rq = multipath_release_clone,
2066 .rq_end_io = multipath_end_io, 1982 .rq_end_io = multipath_end_io,
@@ -2080,11 +1996,6 @@ static int __init dm_multipath_init(void)
2080{ 1996{
2081 int r; 1997 int r;
2082 1998
2083 /* allocate a slab for the dm_mpath_ios */
2084 _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
2085 if (!_mpio_cache)
2086 return -ENOMEM;
2087
2088 r = dm_register_target(&multipath_target); 1999 r = dm_register_target(&multipath_target);
2089 if (r < 0) { 2000 if (r < 0) {
2090 DMERR("request-based register failed %d", r); 2001 DMERR("request-based register failed %d", r);
@@ -2120,8 +2031,6 @@ bad_alloc_kmpath_handlerd:
2120bad_alloc_kmultipathd: 2031bad_alloc_kmultipathd:
2121 dm_unregister_target(&multipath_target); 2032 dm_unregister_target(&multipath_target);
2122bad_register_target: 2033bad_register_target:
2123 kmem_cache_destroy(_mpio_cache);
2124
2125 return r; 2034 return r;
2126} 2035}
2127 2036
@@ -2131,7 +2040,6 @@ static void __exit dm_multipath_exit(void)
2131 destroy_workqueue(kmultipathd); 2040 destroy_workqueue(kmultipathd);
2132 2041
2133 dm_unregister_target(&multipath_target); 2042 dm_unregister_target(&multipath_target);
2134 kmem_cache_destroy(_mpio_cache);
2135} 2043}
2136 2044
2137module_init(dm_multipath_init); 2045module_init(dm_multipath_init);
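
With the old .request_fn clone path deleted, dm-mpath is left with a single mapping routine: the clone is always allocated from the chosen path's queue via blk_get_request() and released with blk_put_request(), and a failed GFP_ATOMIC allocation simply requeues. The allocation contract trimmed to a sketch; mpath_alloc_clone() is an illustrative helper, with path selection and mpio setup elided:

static struct request *mpath_alloc_clone(struct request *rq,
					 struct block_device *bdev)
{
	struct request *clone;

	clone = blk_get_request(bdev_get_queue(bdev),
				rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
	if (IS_ERR(clone))
		return NULL;	/* caller requeues: EBUSY/ENODEV/EWOULDBLOCK */

	clone->bio = clone->biotail = NULL;
	clone->rq_disk = bdev->bd_disk;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	return clone;
}

/* release is now symmetric with the allocation */
static void mpath_release_clone(struct request *clone)
{
	blk_put_request(clone);
}
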
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6e702fc69a83..67d76f21fecd 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -109,28 +109,6 @@ void dm_stop_queue(struct request_queue *q)
109 dm_mq_stop_queue(q); 109 dm_mq_stop_queue(q);
110} 110}
111 111
112static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
113 gfp_t gfp_mask)
114{
115 return mempool_alloc(md->io_pool, gfp_mask);
116}
117
118static void free_old_rq_tio(struct dm_rq_target_io *tio)
119{
120 mempool_free(tio, tio->md->io_pool);
121}
122
123static struct request *alloc_old_clone_request(struct mapped_device *md,
124 gfp_t gfp_mask)
125{
126 return mempool_alloc(md->rq_pool, gfp_mask);
127}
128
129static void free_old_clone_request(struct mapped_device *md, struct request *rq)
130{
131 mempool_free(rq, md->rq_pool);
132}
133
134/* 112/*
135 * Partial completion handling for request-based dm 113 * Partial completion handling for request-based dm
136 */ 114 */
@@ -185,7 +163,7 @@ static void end_clone_bio(struct bio *clone)
185 163
186static struct dm_rq_target_io *tio_from_request(struct request *rq) 164static struct dm_rq_target_io *tio_from_request(struct request *rq)
187{ 165{
188 return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special); 166 return blk_mq_rq_to_pdu(rq);
189} 167}
190 168
191static void rq_end_stats(struct mapped_device *md, struct request *orig) 169static void rq_end_stats(struct mapped_device *md, struct request *orig)
@@ -233,31 +211,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
233 dm_put(md); 211 dm_put(md);
234} 212}
235 213
236static void free_rq_clone(struct request *clone)
237{
238 struct dm_rq_target_io *tio = clone->end_io_data;
239 struct mapped_device *md = tio->md;
240
241 blk_rq_unprep_clone(clone);
242
243 /*
244 * It is possible for a clone_old_rq() allocated clone to
245 * get passed in -- it may not yet have a request_queue.
246 * This is known to occur if the error target replaces
247 * a multipath target that has a request_fn queue stacked
248 * on blk-mq queue(s).
249 */
250 if (clone->q && clone->q->mq_ops)
251 /* stacked on blk-mq queue(s) */
252 tio->ti->type->release_clone_rq(clone);
253 else if (!md->queue->mq_ops)
254 /* request_fn queue stacked on request_fn queue(s) */
255 free_old_clone_request(md, clone);
256
257 if (!md->queue->mq_ops)
258 free_old_rq_tio(tio);
259}
260
261/* 214/*
262 * Complete the clone and the original request. 215 * Complete the clone and the original request.
263 * Must be called without clone's queue lock held, 216 * Must be called without clone's queue lock held,
@@ -270,20 +223,9 @@ static void dm_end_request(struct request *clone, int error)
270 struct mapped_device *md = tio->md; 223 struct mapped_device *md = tio->md;
271 struct request *rq = tio->orig; 224 struct request *rq = tio->orig;
272 225
273 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 226 blk_rq_unprep_clone(clone);
274 rq->errors = clone->errors; 227 tio->ti->type->release_clone_rq(clone);
275 rq->resid_len = clone->resid_len;
276
277 if (rq->sense)
278 /*
279 * We are using the sense buffer of the original
280 * request.
281 * So setting the length of the sense data is enough.
282 */
283 rq->sense_len = clone->sense_len;
284 }
285 228
286 free_rq_clone(clone);
287 rq_end_stats(md, rq); 229 rq_end_stats(md, rq);
288 if (!rq->q->mq_ops) 230 if (!rq->q->mq_ops)
289 blk_end_request_all(rq, error); 231 blk_end_request_all(rq, error);
@@ -292,22 +234,6 @@ static void dm_end_request(struct request *clone, int error)
292 rq_completed(md, rw, true); 234 rq_completed(md, rw, true);
293} 235}
294 236
295static void dm_unprep_request(struct request *rq)
296{
297 struct dm_rq_target_io *tio = tio_from_request(rq);
298 struct request *clone = tio->clone;
299
300 if (!rq->q->mq_ops) {
301 rq->special = NULL;
302 rq->rq_flags &= ~RQF_DONTPREP;
303 }
304
305 if (clone)
306 free_rq_clone(clone);
307 else if (!tio->md->queue->mq_ops)
308 free_old_rq_tio(tio);
309}
310
311/* 237/*
312 * Requeue the original request of a clone. 238 * Requeue the original request of a clone.
313 */ 239 */
@@ -346,7 +272,10 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
346 int rw = rq_data_dir(rq); 272 int rw = rq_data_dir(rq);
347 273
348 rq_end_stats(md, rq); 274 rq_end_stats(md, rq);
349 dm_unprep_request(rq); 275 if (tio->clone) {
276 blk_rq_unprep_clone(tio->clone);
277 tio->ti->type->release_clone_rq(tio->clone);
278 }
350 279
351 if (!rq->q->mq_ops) 280 if (!rq->q->mq_ops)
352 dm_old_requeue_request(rq); 281 dm_old_requeue_request(rq);
@@ -401,14 +330,11 @@ static void dm_softirq_done(struct request *rq)
401 if (!clone) { 330 if (!clone) {
402 rq_end_stats(tio->md, rq); 331 rq_end_stats(tio->md, rq);
403 rw = rq_data_dir(rq); 332 rw = rq_data_dir(rq);
404 if (!rq->q->mq_ops) { 333 if (!rq->q->mq_ops)
405 blk_end_request_all(rq, tio->error); 334 blk_end_request_all(rq, tio->error);
406 rq_completed(tio->md, rw, false); 335 else
407 free_old_rq_tio(tio);
408 } else {
409 blk_mq_end_request(rq, tio->error); 336 blk_mq_end_request(rq, tio->error);
410 rq_completed(tio->md, rw, false); 337 rq_completed(tio->md, rw, false);
411 }
412 return; 338 return;
413 } 339 }
414 340
@@ -452,16 +378,6 @@ static void end_clone_request(struct request *clone, int error)
452{ 378{
453 struct dm_rq_target_io *tio = clone->end_io_data; 379 struct dm_rq_target_io *tio = clone->end_io_data;
454 380
455 if (!clone->q->mq_ops) {
456 /*
457 * For just cleaning up the information of the queue in which
458 * the clone was dispatched.
459 * The clone is *NOT* freed actually here because it is alloced
460 * from dm own mempool (RQF_ALLOCED isn't set).
461 */
462 __blk_put_request(clone->q, clone);
463 }
464
465 /* 381 /*
466 * Actual request completion is done in a softirq context which doesn't 382 * Actual request completion is done in a softirq context which doesn't
467 * hold the clone's queue lock. Otherwise, deadlock could occur because: 383 * hold the clone's queue lock. Otherwise, deadlock could occur because:
@@ -511,9 +427,6 @@ static int setup_clone(struct request *clone, struct request *rq,
511 if (r) 427 if (r)
512 return r; 428 return r;
513 429
514 clone->cmd = rq->cmd;
515 clone->cmd_len = rq->cmd_len;
516 clone->sense = rq->sense;
517 clone->end_io = end_clone_request; 430 clone->end_io = end_clone_request;
518 clone->end_io_data = tio; 431 clone->end_io_data = tio;
519 432
@@ -522,28 +435,6 @@ static int setup_clone(struct request *clone, struct request *rq,
522 return 0; 435 return 0;
523} 436}
524 437
525static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
526 struct dm_rq_target_io *tio, gfp_t gfp_mask)
527{
528 /*
529 * Create clone for use with .request_fn request_queue
530 */
531 struct request *clone;
532
533 clone = alloc_old_clone_request(md, gfp_mask);
534 if (!clone)
535 return NULL;
536
537 blk_rq_init(NULL, clone);
538 if (setup_clone(clone, rq, tio, gfp_mask)) {
539 /* -ENOMEM */
540 free_old_clone_request(md, clone);
541 return NULL;
542 }
543
544 return clone;
545}
546
547static void map_tio_request(struct kthread_work *work); 438static void map_tio_request(struct kthread_work *work);
548 439
549static void init_tio(struct dm_rq_target_io *tio, struct request *rq, 440static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
@@ -565,60 +456,6 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
565 kthread_init_work(&tio->work, map_tio_request); 456 kthread_init_work(&tio->work, map_tio_request);
566} 457}
567 458
568static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
569 struct mapped_device *md,
570 gfp_t gfp_mask)
571{
572 struct dm_rq_target_io *tio;
573 int srcu_idx;
574 struct dm_table *table;
575
576 tio = alloc_old_rq_tio(md, gfp_mask);
577 if (!tio)
578 return NULL;
579
580 init_tio(tio, rq, md);
581
582 table = dm_get_live_table(md, &srcu_idx);
583 /*
584 * Must clone a request if this .request_fn DM device
585 * is stacked on .request_fn device(s).
586 */
587 if (!dm_table_all_blk_mq_devices(table)) {
588 if (!clone_old_rq(rq, md, tio, gfp_mask)) {
589 dm_put_live_table(md, srcu_idx);
590 free_old_rq_tio(tio);
591 return NULL;
592 }
593 }
594 dm_put_live_table(md, srcu_idx);
595
596 return tio;
597}
598
599/*
600 * Called with the queue lock held.
601 */
602static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
603{
604 struct mapped_device *md = q->queuedata;
605 struct dm_rq_target_io *tio;
606
607 if (unlikely(rq->special)) {
608 DMWARN("Already has something in rq->special.");
609 return BLKPREP_KILL;
610 }
611
612 tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
613 if (!tio)
614 return BLKPREP_DEFER;
615
616 rq->special = tio;
617 rq->rq_flags |= RQF_DONTPREP;
618
619 return BLKPREP_OK;
620}
621
622/* 459/*
623 * Returns: 460 * Returns:
624 * DM_MAPIO_* : the request has been processed as indicated 461 * DM_MAPIO_* : the request has been processed as indicated
@@ -633,31 +470,18 @@ static int map_request(struct dm_rq_target_io *tio)
633 struct request *rq = tio->orig; 470 struct request *rq = tio->orig;
634 struct request *clone = NULL; 471 struct request *clone = NULL;
635 472
636 if (tio->clone) { 473 r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
637 clone = tio->clone;
638 r = ti->type->map_rq(ti, clone, &tio->info);
639 if (r == DM_MAPIO_DELAY_REQUEUE)
640 return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
641 } else {
642 r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
643 if (r < 0) {
644 /* The target wants to complete the I/O */
645 dm_kill_unmapped_request(rq, r);
646 return r;
647 }
648 if (r == DM_MAPIO_REMAPPED &&
649 setup_clone(clone, rq, tio, GFP_ATOMIC)) {
650 /* -ENOMEM */
651 ti->type->release_clone_rq(clone);
652 return DM_MAPIO_REQUEUE;
653 }
654 }
655
656 switch (r) { 474 switch (r) {
657 case DM_MAPIO_SUBMITTED: 475 case DM_MAPIO_SUBMITTED:
658 /* The target has taken the I/O to submit by itself later */ 476 /* The target has taken the I/O to submit by itself later */
659 break; 477 break;
660 case DM_MAPIO_REMAPPED: 478 case DM_MAPIO_REMAPPED:
479 if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
480 /* -ENOMEM */
481 ti->type->release_clone_rq(clone);
482 return DM_MAPIO_REQUEUE;
483 }
484
661 /* The target has remapped the I/O so dispatch it */ 485 /* The target has remapped the I/O so dispatch it */
662 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 486 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
663 blk_rq_pos(rq)); 487 blk_rq_pos(rq));
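
The hunk above drops the `.map_rq` branch from map_request(): request-based targets are now always asked to produce their own clone via `.clone_and_map_rq`, and the DM core runs setup_clone() itself once the target answers DM_MAPIO_REMAPPED. A condensed sketch of the resulting contract (error and requeue arms elided, as they are in the visible context; an illustration, not the full function):

    static int map_request_sketch(struct dm_rq_target_io *tio)
    {
        struct dm_target *ti = tio->ti;
        struct request *rq = tio->orig, *clone = NULL;
        int r;

        /* single entry point: the target never sees a pre-built clone */
        r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
        switch (r) {
        case DM_MAPIO_SUBMITTED:
            break;                  /* target submits the I/O itself later */
        case DM_MAPIO_REMAPPED:
            /* the core, not the target, finishes the clone */
            if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                ti->type->release_clone_rq(clone);      /* -ENOMEM */
                return DM_MAPIO_REQUEUE;
            }
            /* trace and dispatch the clone, as in the context above */
            break;
        }
        return r;
    }
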
@@ -716,6 +540,29 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
716 dm_get(md); 540 dm_get(md);
717} 541}
718 542
543static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
544{
545 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
546
547 /*
548 * Must initialize md member of tio, otherwise it won't
549 * be available in dm_mq_queue_rq.
550 */
551 tio->md = md;
552
553 if (md->init_tio_pdu) {
554 /* target-specific per-io data is immediately after the tio */
555 tio->info.ptr = tio + 1;
556 }
557
558 return 0;
559}
560
561static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
562{
563 return __dm_rq_init_rq(q->rq_alloc_data, rq);
564}
565
719static void map_tio_request(struct kthread_work *work) 566static void map_tio_request(struct kthread_work *work)
720{ 567{
721 struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); 568 struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
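
Both queue flavors now share __dm_rq_init_rq() because, in both, the dm_rq_target_io lives in the per-request PDU that the block layer allocates (cmd_size bytes behind each struct request). A small sketch of how that layout is consumed, built only from fields visible in this hunk (the function name is illustrative):

    /*
     * PDU layout after this merge:
     *   struct request | struct dm_rq_target_io | per-io target data
     * blk_mq_rq_to_pdu(rq) yields the tio; tio + 1 is the target data.
     */
    static void pdu_layout_example(struct request *rq, struct mapped_device *md)
    {
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

        tio->md = md;                   /* must be set before dm_mq_queue_rq */
        if (md->init_tio_pdu)
            tio->info.ptr = tio + 1;    /* target-specific per-io data */
    }
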
@@ -814,6 +661,7 @@ static void dm_old_request_fn(struct request_queue *q)
814 dm_start_request(md, rq); 661 dm_start_request(md, rq);
815 662
816 tio = tio_from_request(rq); 663 tio = tio_from_request(rq);
664 init_tio(tio, rq, md);
817 /* Establish tio->ti before queuing work (map_tio_request) */ 665 /* Establish tio->ti before queuing work (map_tio_request) */
818 tio->ti = ti; 666 tio->ti = ti;
819 kthread_queue_work(&md->kworker, &tio->work); 667 kthread_queue_work(&md->kworker, &tio->work);
@@ -824,10 +672,23 @@ static void dm_old_request_fn(struct request_queue *q)
824/* 672/*
825 * Fully initialize a .request_fn request-based queue. 673 * Fully initialize a .request_fn request-based queue.
826 */ 674 */
827int dm_old_init_request_queue(struct mapped_device *md) 675int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
828{ 676{
677 struct dm_target *immutable_tgt;
678
829 /* Fully initialize the queue */ 679 /* Fully initialize the queue */
830 if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL)) 680 md->queue->cmd_size = sizeof(struct dm_rq_target_io);
681 md->queue->rq_alloc_data = md;
682 md->queue->request_fn = dm_old_request_fn;
683 md->queue->init_rq_fn = dm_rq_init_rq;
684
685 immutable_tgt = dm_table_get_immutable_target(t);
686 if (immutable_tgt && immutable_tgt->per_io_data_size) {
687 /* any target-specific per-io data is immediately after the tio */
688 md->queue->cmd_size += immutable_tgt->per_io_data_size;
689 md->init_tio_pdu = true;
690 }
691 if (blk_init_allocated_queue(md->queue) < 0)
831 return -EINVAL; 692 return -EINVAL;
832 693
833 /* disable dm_old_request_fn's merge heuristic by default */ 694 /* disable dm_old_request_fn's merge heuristic by default */
@@ -835,7 +696,6 @@ int dm_old_init_request_queue(struct mapped_device *md)
835 696
836 dm_init_normal_md_queue(md); 697 dm_init_normal_md_queue(md);
837 blk_queue_softirq_done(md->queue, dm_softirq_done); 698 blk_queue_softirq_done(md->queue, dm_softirq_done);
838 blk_queue_prep_rq(md->queue, dm_old_prep_fn);
839 699
840 /* Initialize the request-based DM worker thread */ 700 /* Initialize the request-based DM worker thread */
841 kthread_init_worker(&md->kworker); 701 kthread_init_worker(&md->kworker);
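
Note the ordering in dm_old_init_request_queue() above: cmd_size, rq_alloc_data, request_fn and init_rq_fn must all be populated before blk_init_allocated_queue(), which is what sizes and allocates the per-request PDUs. With hypothetical numbers, assuming sizeof(struct dm_rq_target_io) were 328 bytes on some build and an immutable target declared per_io_data_size = 192:

    md->queue->cmd_size = sizeof(struct dm_rq_target_io);      /* 328 (assumed) */
    md->queue->cmd_size += immutable_tgt->per_io_data_size;    /* 328 + 192 = 520 */
    /*
     * Every request on this queue now carries a 520-byte PDU; see
     * __dm_rq_init_rq() above for how the two halves are addressed.
     */
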
@@ -856,21 +716,7 @@ static int dm_mq_init_request(void *data, struct request *rq,
856 unsigned int hctx_idx, unsigned int request_idx, 716 unsigned int hctx_idx, unsigned int request_idx,
857 unsigned int numa_node) 717 unsigned int numa_node)
858{ 718{
859 struct mapped_device *md = data; 719 return __dm_rq_init_rq(data, rq);
860 struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
861
862 /*
863 * Must initialize md member of tio, otherwise it won't
864 * be available in dm_mq_queue_rq.
865 */
866 tio->md = md;
867
868 if (md->init_tio_pdu) {
869 /* target-specific per-io data is immediately after the tio */
870 tio->info.ptr = tio + 1;
871 }
872
873 return 0;
874} 720}
875 721
876static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, 722static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index 4da06cae7bad..f0020d21b95f 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -48,7 +48,7 @@ struct dm_rq_clone_bio_info {
48bool dm_use_blk_mq_default(void); 48bool dm_use_blk_mq_default(void);
49bool dm_use_blk_mq(struct mapped_device *md); 49bool dm_use_blk_mq(struct mapped_device *md);
50 50
51int dm_old_init_request_queue(struct mapped_device *md); 51int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t);
52int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t); 52int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
53void dm_mq_cleanup_mapped_device(struct mapped_device *md); 53void dm_mq_cleanup_mapped_device(struct mapped_device *md);
54 54
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 0a427de23ed2..3ad16d9c9d5a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1750,7 +1750,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
1750 char b[BDEVNAME_SIZE]; 1750 char b[BDEVNAME_SIZE];
1751 1751
1752 if (likely(q)) 1752 if (likely(q))
1753 r |= bdi_congested(&q->backing_dev_info, bdi_bits); 1753 r |= bdi_congested(q->backing_dev_info, bdi_bits);
1754 else 1754 else
1755 DMWARN_LIMIT("%s: any_congested: nonexistent device %s", 1755 DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
1756 dm_device_name(t->md), 1756 dm_device_name(t->md),
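
This one-line change recurs throughout the merge: request_queue now reaches its backing_dev_info through a pointer rather than an embedded struct, so every `&q->backing_dev_info` becomes `q->backing_dev_info` and member access flips from `.` to `->`. Side by side, with the old layout inferred from the removed lines:

    /* before: bdi embedded in the queue */
    r |= bdi_congested(&q->backing_dev_info, bdi_bits);
    q->backing_dev_info.ra_pages = 2 * stripe;

    /* after: the queue holds only a pointer to the bdi */
    r |= bdi_congested(q->backing_dev_info, bdi_bits);
    q->backing_dev_info->ra_pages = 2 * stripe;

The same mechanical substitution appears below in dm-thin.c, dm.c, linear.c, md.c, multipath.c and the raid drivers.
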
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 710ae28fd618..43d3445b121d 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -131,12 +131,6 @@ static int io_err_map(struct dm_target *tt, struct bio *bio)
131 return -EIO; 131 return -EIO;
132} 132}
133 133
134static int io_err_map_rq(struct dm_target *ti, struct request *clone,
135 union map_info *map_context)
136{
137 return -EIO;
138}
139
140static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq, 134static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
141 union map_info *map_context, 135 union map_info *map_context,
142 struct request **clone) 136 struct request **clone)
@@ -161,7 +155,6 @@ static struct target_type error_target = {
161 .ctr = io_err_ctr, 155 .ctr = io_err_ctr,
162 .dtr = io_err_dtr, 156 .dtr = io_err_dtr,
163 .map = io_err_map, 157 .map = io_err_map,
164 .map_rq = io_err_map_rq,
165 .clone_and_map_rq = io_err_clone_and_map_rq, 158 .clone_and_map_rq = io_err_clone_and_map_rq,
166 .release_clone_rq = io_err_release_clone_rq, 159 .release_clone_rq = io_err_release_clone_rq,
167 .direct_access = io_err_direct_access, 160 .direct_access = io_err_direct_access,
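
With .map_rq gone, the error target is also the minimal request-based target: .clone_and_map_rq plus .release_clone_rq are all that remain (dm.h below now keys dm_target_request_based() off clone_and_map_rq alone). A skeletal target_type under the new rules; the names here are hypothetical:

    static int example_clone_and_map_rq(struct dm_target *ti, struct request *rq,
                                        union map_info *map_context,
                                        struct request **clone)
    {
        return -EIO;    /* same contract as io_err_clone_and_map_rq above */
    }

    static struct target_type example_target = {
        /* .name/.ctr/.dtr/.map as for error_target above */
        .clone_and_map_rq = example_clone_and_map_rq,
        .release_clone_rq = io_err_release_clone_rq,
        /* there is no .map_rq member to fill in any more */
    };
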
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 110982db4b48..2b266a2b5035 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2711,7 +2711,7 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
2711 return 1; 2711 return 1;
2712 2712
2713 q = bdev_get_queue(pt->data_dev->bdev); 2713 q = bdev_get_queue(pt->data_dev->bdev);
2714 return bdi_congested(&q->backing_dev_info, bdi_bits); 2714 return bdi_congested(q->backing_dev_info, bdi_bits);
2715} 2715}
2716 2716
2717static void requeue_bios(struct pool *pool) 2717static void requeue_bios(struct pool *pool)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3086da5664f3..5bd9ab06a562 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -91,7 +91,6 @@ static int dm_numa_node = DM_NUMA_NODE;
91 */ 91 */
92struct dm_md_mempools { 92struct dm_md_mempools {
93 mempool_t *io_pool; 93 mempool_t *io_pool;
94 mempool_t *rq_pool;
95 struct bio_set *bs; 94 struct bio_set *bs;
96}; 95};
97 96
@@ -466,13 +465,16 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
466 465
467 if (r > 0) { 466 if (r > 0) {
468 /* 467 /*
469 * Target determined this ioctl is being issued against 468 * Target determined this ioctl is being issued against a
470 * a logical partition of the parent bdev; so extra 469 * subset of the parent bdev; require extra privileges.
471 * validation is needed.
472 */ 470 */
473 r = scsi_verify_blk_ioctl(NULL, cmd); 471 if (!capable(CAP_SYS_RAWIO)) {
474 if (r) 472 DMWARN_LIMIT(
473 "%s: sending ioctl %x to DM device without required privilege.",
474 current->comm, cmd);
475 r = -ENOIOCTLCMD;
475 goto out; 476 goto out;
477 }
476 } 478 }
477 479
478 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 480 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
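
The replacement for scsi_verify_blk_ioctl(), which whitelisted a handful of commands for partition-restricted callers, is a single capability test: when the target reports that the ioctl is aimed at a subset of the parent device (r > 0), the caller must hold CAP_SYS_RAWIO or the command is refused. Reduced to its shape:

    if (r > 0 && !capable(CAP_SYS_RAWIO)) {
        DMWARN_LIMIT("%s: sending ioctl %x to DM device without required privilege.",
                     current->comm, cmd);
        return -ENOIOCTLCMD;    /* the hunk above routes this through its out label */
    }
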
@@ -1314,7 +1316,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
1314 * With request-based DM we only need to check the 1316 * With request-based DM we only need to check the
1315 * top-level queue for congestion. 1317 * top-level queue for congestion.
1316 */ 1318 */
1317 r = md->queue->backing_dev_info.wb.state & bdi_bits; 1319 r = md->queue->backing_dev_info->wb.state & bdi_bits;
1318 } else { 1320 } else {
1319 map = dm_get_live_table_fast(md); 1321 map = dm_get_live_table_fast(md);
1320 if (map) 1322 if (map)
@@ -1397,7 +1399,7 @@ void dm_init_md_queue(struct mapped_device *md)
1397 * - must do so here (in alloc_dev callchain) before queue is used 1399 * - must do so here (in alloc_dev callchain) before queue is used
1398 */ 1400 */
1399 md->queue->queuedata = md; 1401 md->queue->queuedata = md;
1400 md->queue->backing_dev_info.congested_data = md; 1402 md->queue->backing_dev_info->congested_data = md;
1401} 1403}
1402 1404
1403void dm_init_normal_md_queue(struct mapped_device *md) 1405void dm_init_normal_md_queue(struct mapped_device *md)
@@ -1408,7 +1410,7 @@ void dm_init_normal_md_queue(struct mapped_device *md)
1408 /* 1410 /*
1409 * Initialize aspects of queue that aren't relevant for blk-mq 1411 * Initialize aspects of queue that aren't relevant for blk-mq
1410 */ 1412 */
1411 md->queue->backing_dev_info.congested_fn = dm_any_congested; 1413 md->queue->backing_dev_info->congested_fn = dm_any_congested;
1412 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 1414 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1413} 1415}
1414 1416
@@ -1419,7 +1421,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
1419 if (md->kworker_task) 1421 if (md->kworker_task)
1420 kthread_stop(md->kworker_task); 1422 kthread_stop(md->kworker_task);
1421 mempool_destroy(md->io_pool); 1423 mempool_destroy(md->io_pool);
1422 mempool_destroy(md->rq_pool);
1423 if (md->bs) 1424 if (md->bs)
1424 bioset_free(md->bs); 1425 bioset_free(md->bs);
1425 1426
@@ -1595,12 +1596,10 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1595 goto out; 1596 goto out;
1596 } 1597 }
1597 1598
1598 BUG_ON(!p || md->io_pool || md->rq_pool || md->bs); 1599 BUG_ON(!p || md->io_pool || md->bs);
1599 1600
1600 md->io_pool = p->io_pool; 1601 md->io_pool = p->io_pool;
1601 p->io_pool = NULL; 1602 p->io_pool = NULL;
1602 md->rq_pool = p->rq_pool;
1603 p->rq_pool = NULL;
1604 md->bs = p->bs; 1603 md->bs = p->bs;
1605 p->bs = NULL; 1604 p->bs = NULL;
1606 1605
@@ -1777,7 +1776,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
1777 1776
1778 switch (type) { 1777 switch (type) {
1779 case DM_TYPE_REQUEST_BASED: 1778 case DM_TYPE_REQUEST_BASED:
1780 r = dm_old_init_request_queue(md); 1779 r = dm_old_init_request_queue(md, t);
1781 if (r) { 1780 if (r) {
1782 DMERR("Cannot initialize queue for request-based mapped device"); 1781 DMERR("Cannot initialize queue for request-based mapped device");
1783 return r; 1782 return r;
@@ -2493,7 +2492,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
2493 unsigned integrity, unsigned per_io_data_size) 2492 unsigned integrity, unsigned per_io_data_size)
2494{ 2493{
2495 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); 2494 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
2496 struct kmem_cache *cachep = NULL;
2497 unsigned int pool_size = 0; 2495 unsigned int pool_size = 0;
2498 unsigned int front_pad; 2496 unsigned int front_pad;
2499 2497
@@ -2503,20 +2501,16 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
2503 switch (type) { 2501 switch (type) {
2504 case DM_TYPE_BIO_BASED: 2502 case DM_TYPE_BIO_BASED:
2505 case DM_TYPE_DAX_BIO_BASED: 2503 case DM_TYPE_DAX_BIO_BASED:
2506 cachep = _io_cache;
2507 pool_size = dm_get_reserved_bio_based_ios(); 2504 pool_size = dm_get_reserved_bio_based_ios();
2508 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 2505 front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2506
2507 pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
2508 if (!pools->io_pool)
2509 goto out;
2509 break; 2510 break;
2510 case DM_TYPE_REQUEST_BASED: 2511 case DM_TYPE_REQUEST_BASED:
2511 cachep = _rq_tio_cache;
2512 pool_size = dm_get_reserved_rq_based_ios();
2513 pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
2514 if (!pools->rq_pool)
2515 goto out;
2516 /* fall through to setup remaining rq-based pools */
2517 case DM_TYPE_MQ_REQUEST_BASED: 2512 case DM_TYPE_MQ_REQUEST_BASED:
2518 if (!pool_size) 2513 pool_size = dm_get_reserved_rq_based_ios();
2519 pool_size = dm_get_reserved_rq_based_ios();
2520 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 2514 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2521 /* per_io_data_size is used for blk-mq pdu at queue allocation */ 2515 /* per_io_data_size is used for blk-mq pdu at queue allocation */
2522 break; 2516 break;
@@ -2524,12 +2518,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
2524 BUG(); 2518 BUG();
2525 } 2519 }
2526 2520
2527 if (cachep) {
2528 pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
2529 if (!pools->io_pool)
2530 goto out;
2531 }
2532
2533 pools->bs = bioset_create_nobvec(pool_size, front_pad); 2521 pools->bs = bioset_create_nobvec(pool_size, front_pad);
2534 if (!pools->bs) 2522 if (!pools->bs)
2535 goto out; 2523 goto out;
@@ -2551,7 +2539,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
2551 return; 2539 return;
2552 2540
2553 mempool_destroy(pools->io_pool); 2541 mempool_destroy(pools->io_pool);
2554 mempool_destroy(pools->rq_pool);
2555 2542
2556 if (pools->bs) 2543 if (pools->bs)
2557 bioset_free(pools->bs); 2544 bioset_free(pools->bs);
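
dm.c can retire rq_pool and _rq_cache because request-based per-io state now arrives inside the request PDU (see dm-rq.c above); only bio-based tables still need a guaranteed-progress mempool. For reference, the semantics being relied on: a mempool over a slab cache pre-reserves min_nr objects so allocation cannot fail outright under memory pressure. A minimal sketch of the surviving pattern:

    mempool_t *io_pool = mempool_create_slab_pool(pool_size, _io_cache);
    if (!io_pool)
        return -ENOMEM;
    /* ... serve bio-based I/O from io_pool ... */
    mempool_destroy(io_pool);   /* NULL-safe, as used in dm_free_md_mempools() */
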
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index f0aad08b9654..f298b01f7ab3 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -95,8 +95,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
95/* 95/*
96 * To check whether the target type is request-based or not (bio-based). 96 * To check whether the target type is request-based or not (bio-based).
97 */ 97 */
98#define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \ 98#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)
99 ((t)->type->clone_and_map_rq != NULL))
100 99
101/* 100/*
102 * To check whether the target type is a hybrid (capable of being 101 * To check whether the target type is a hybrid (capable of being
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 5975c9915684..f1c7bbac31a5 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -62,7 +62,7 @@ static int linear_congested(struct mddev *mddev, int bits)
62 62
63 for (i = 0; i < mddev->raid_disks && !ret ; i++) { 63 for (i = 0; i < mddev->raid_disks && !ret ; i++) {
64 struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); 64 struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
65 ret |= bdi_congested(&q->backing_dev_info, bits); 65 ret |= bdi_congested(q->backing_dev_info, bits);
66 } 66 }
67 67
68 return ret; 68 return ret;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 01175dac0db6..ba485dcf1064 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5346,8 +5346,8 @@ int md_run(struct mddev *mddev)
5346 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue); 5346 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5347 else 5347 else
5348 queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue); 5348 queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5349 mddev->queue->backing_dev_info.congested_data = mddev; 5349 mddev->queue->backing_dev_info->congested_data = mddev;
5350 mddev->queue->backing_dev_info.congested_fn = md_congested; 5350 mddev->queue->backing_dev_info->congested_fn = md_congested;
5351 } 5351 }
5352 if (pers->sync_request) { 5352 if (pers->sync_request) {
5353 if (mddev->kobj.sd && 5353 if (mddev->kobj.sd &&
@@ -5704,7 +5704,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
5704 5704
5705 __md_stop_writes(mddev); 5705 __md_stop_writes(mddev);
5706 __md_stop(mddev); 5706 __md_stop(mddev);
5707 mddev->queue->backing_dev_info.congested_fn = NULL; 5707 mddev->queue->backing_dev_info->congested_fn = NULL;
5708 5708
5709 /* tell userspace to handle 'inactive' */ 5709 /* tell userspace to handle 'inactive' */
5710 sysfs_notify_dirent_safe(mddev->sysfs_state); 5710 sysfs_notify_dirent_safe(mddev->sysfs_state);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index aa8c4e5c1ee2..d457afa672d5 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -169,7 +169,7 @@ static int multipath_congested(struct mddev *mddev, int bits)
169 if (rdev && !test_bit(Faulty, &rdev->flags)) { 169 if (rdev && !test_bit(Faulty, &rdev->flags)) {
170 struct request_queue *q = bdev_get_queue(rdev->bdev); 170 struct request_queue *q = bdev_get_queue(rdev->bdev);
171 171
172 ret |= bdi_congested(&q->backing_dev_info, bits); 172 ret |= bdi_congested(q->backing_dev_info, bits);
173 /* Just like multipath_map, we just check the 173 /* Just like multipath_map, we just check the
174 * first available device 174 * first available device
175 */ 175 */
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 848365d474f3..d6585239bff2 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -41,7 +41,7 @@ static int raid0_congested(struct mddev *mddev, int bits)
41 for (i = 0; i < raid_disks && !ret ; i++) { 41 for (i = 0; i < raid_disks && !ret ; i++) {
42 struct request_queue *q = bdev_get_queue(devlist[i]->bdev); 42 struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
43 43
44 ret |= bdi_congested(&q->backing_dev_info, bits); 44 ret |= bdi_congested(q->backing_dev_info, bits);
45 } 45 }
46 return ret; 46 return ret;
47} 47}
@@ -420,8 +420,8 @@ static int raid0_run(struct mddev *mddev)
420 */ 420 */
421 int stripe = mddev->raid_disks * 421 int stripe = mddev->raid_disks *
422 (mddev->chunk_sectors << 9) / PAGE_SIZE; 422 (mddev->chunk_sectors << 9) / PAGE_SIZE;
423 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) 423 if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
424 mddev->queue->backing_dev_info.ra_pages = 2* stripe; 424 mddev->queue->backing_dev_info->ra_pages = 2* stripe;
425 } 425 }
426 426
427 dump_zones(mddev); 427 dump_zones(mddev);
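
The readahead sizing itself is untouched here; only the bdi dereference changes. For concreteness, a worked example with assumed geometry:

    /*
     * raid_disks = 4, chunk_sectors = 1024 (512 KiB), PAGE_SIZE = 4096:
     *   stripe   = 4 * (1024 << 9) / 4096 = 4 * 128 = 512 pages
     *   ra_pages = max(ra_pages, 2 * 512) = 1024 pages, i.e. 4 MiB of
     *              readahead, enough to keep a full stripe streaming.
     */
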
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 7b0f647bcccb..830ff2b20346 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -744,9 +744,9 @@ static int raid1_congested(struct mddev *mddev, int bits)
744 * non-congested targets, it can be removed 744 * non-congested targets, it can be removed
745 */ 745 */
746 if ((bits & (1 << WB_async_congested)) || 1) 746 if ((bits & (1 << WB_async_congested)) || 1)
747 ret |= bdi_congested(&q->backing_dev_info, bits); 747 ret |= bdi_congested(q->backing_dev_info, bits);
748 else 748 else
749 ret &= bdi_congested(&q->backing_dev_info, bits); 749 ret &= bdi_congested(q->backing_dev_info, bits);
750 } 750 }
751 } 751 }
752 rcu_read_unlock(); 752 rcu_read_unlock();
@@ -1170,10 +1170,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1170 int i, disks; 1170 int i, disks;
1171 struct bitmap *bitmap = mddev->bitmap; 1171 struct bitmap *bitmap = mddev->bitmap;
1172 unsigned long flags; 1172 unsigned long flags;
1173 const int op = bio_op(bio);
1174 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1175 const unsigned long do_flush_fua = (bio->bi_opf &
1176 (REQ_PREFLUSH | REQ_FUA));
1177 struct md_rdev *blocked_rdev; 1173 struct md_rdev *blocked_rdev;
1178 struct blk_plug_cb *cb; 1174 struct blk_plug_cb *cb;
1179 struct raid1_plug_cb *plug = NULL; 1175 struct raid1_plug_cb *plug = NULL;
@@ -1389,7 +1385,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1389 conf->mirrors[i].rdev->data_offset); 1385 conf->mirrors[i].rdev->data_offset);
1390 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1386 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1391 mbio->bi_end_io = raid1_end_write_request; 1387 mbio->bi_end_io = raid1_end_write_request;
1392 bio_set_op_attrs(mbio, op, do_flush_fua | do_sync); 1388 mbio->bi_opf = bio_op(bio) |
1389 (bio->bi_opf & (REQ_SYNC | REQ_PREFLUSH | REQ_FUA));
1393 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) && 1390 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
1394 !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) && 1391 !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
1395 conf->raid_disks - mddev->degraded > 1) 1392 conf->raid_disks - mddev->degraded > 1)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1920756828df..6bc5c2a85160 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -860,7 +860,7 @@ static int raid10_congested(struct mddev *mddev, int bits)
860 if (rdev && !test_bit(Faulty, &rdev->flags)) { 860 if (rdev && !test_bit(Faulty, &rdev->flags)) {
861 struct request_queue *q = bdev_get_queue(rdev->bdev); 861 struct request_queue *q = bdev_get_queue(rdev->bdev);
862 862
863 ret |= bdi_congested(&q->backing_dev_info, bits); 863 ret |= bdi_congested(q->backing_dev_info, bits);
864 } 864 }
865 } 865 }
866 rcu_read_unlock(); 866 rcu_read_unlock();
@@ -3841,8 +3841,8 @@ static int raid10_run(struct mddev *mddev)
3841 * maybe... 3841 * maybe...
3842 */ 3842 */
3843 stripe /= conf->geo.near_copies; 3843 stripe /= conf->geo.near_copies;
3844 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 3844 if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
3845 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 3845 mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
3846 } 3846 }
3847 3847
3848 if (md_integrity_register(mddev)) 3848 if (md_integrity_register(mddev))
@@ -4643,8 +4643,8 @@ static void end_reshape(struct r10conf *conf)
4643 int stripe = conf->geo.raid_disks * 4643 int stripe = conf->geo.raid_disks *
4644 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); 4644 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
4645 stripe /= conf->geo.near_copies; 4645 stripe /= conf->geo.near_copies;
4646 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4646 if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
4647 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4647 conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
4648 } 4648 }
4649 conf->fullsync = 0; 4649 conf->fullsync = 0;
4650} 4650}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3c7e106c12a2..6214e699342c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6331,10 +6331,10 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
6331 mddev_suspend(mddev); 6331 mddev_suspend(mddev);
6332 conf->skip_copy = new; 6332 conf->skip_copy = new;
6333 if (new) 6333 if (new)
6334 mddev->queue->backing_dev_info.capabilities |= 6334 mddev->queue->backing_dev_info->capabilities |=
6335 BDI_CAP_STABLE_WRITES; 6335 BDI_CAP_STABLE_WRITES;
6336 else 6336 else
6337 mddev->queue->backing_dev_info.capabilities &= 6337 mddev->queue->backing_dev_info->capabilities &=
6338 ~BDI_CAP_STABLE_WRITES; 6338 ~BDI_CAP_STABLE_WRITES;
6339 mddev_resume(mddev); 6339 mddev_resume(mddev);
6340 } 6340 }
@@ -7153,8 +7153,8 @@ static int raid5_run(struct mddev *mddev)
7153 int data_disks = conf->previous_raid_disks - conf->max_degraded; 7153 int data_disks = conf->previous_raid_disks - conf->max_degraded;
7154 int stripe = data_disks * 7154 int stripe = data_disks *
7155 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 7155 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
7156 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 7156 if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
7157 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 7157 mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
7158 7158
7159 chunk_size = mddev->chunk_sectors << 9; 7159 chunk_size = mddev->chunk_sectors << 9;
7160 blk_queue_io_min(mddev->queue, chunk_size); 7160 blk_queue_io_min(mddev->queue, chunk_size);
@@ -7763,8 +7763,8 @@ static void end_reshape(struct r5conf *conf)
7763 int data_disks = conf->raid_disks - conf->max_degraded; 7763 int data_disks = conf->raid_disks - conf->max_degraded;
7764 int stripe = data_disks * ((conf->chunk_sectors << 9) 7764 int stripe = data_disks * ((conf->chunk_sectors << 9)
7765 / PAGE_SIZE); 7765 / PAGE_SIZE);
7766 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 7766 if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
7767 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 7767 conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
7768 } 7768 }
7769 } 7769 }
7770} 7770}
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index f3512404bc52..99e651c27fb7 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2000,16 +2000,6 @@ static int msb_bd_getgeo(struct block_device *bdev,
2000 return 0; 2000 return 0;
2001} 2001}
2002 2002
2003static int msb_prepare_req(struct request_queue *q, struct request *req)
2004{
2005 if (req->cmd_type != REQ_TYPE_FS) {
2006 blk_dump_rq_flags(req, "MS unsupported request");
2007 return BLKPREP_KILL;
2008 }
2009 req->rq_flags |= RQF_DONTPREP;
2010 return BLKPREP_OK;
2011}
2012
2013static void msb_submit_req(struct request_queue *q) 2003static void msb_submit_req(struct request_queue *q)
2014{ 2004{
2015 struct memstick_dev *card = q->queuedata; 2005 struct memstick_dev *card = q->queuedata;
@@ -2132,7 +2122,6 @@ static int msb_init_disk(struct memstick_dev *card)
2132 } 2122 }
2133 2123
2134 msb->queue->queuedata = card; 2124 msb->queue->queuedata = card;
2135 blk_queue_prep_rq(msb->queue, msb_prepare_req);
2136 2125
2137 blk_queue_bounce_limit(msb->queue, limit); 2126 blk_queue_bounce_limit(msb->queue, limit);
2138 blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES); 2127 blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index fa0746d182ff..c00d8a266878 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -827,18 +827,6 @@ static void mspro_block_start(struct memstick_dev *card)
827 spin_unlock_irqrestore(&msb->q_lock, flags); 827 spin_unlock_irqrestore(&msb->q_lock, flags);
828} 828}
829 829
830static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
831{
832 if (req->cmd_type != REQ_TYPE_FS) {
833 blk_dump_rq_flags(req, "MSPro unsupported request");
834 return BLKPREP_KILL;
835 }
836
837 req->rq_flags |= RQF_DONTPREP;
838
839 return BLKPREP_OK;
840}
841
842static void mspro_block_submit_req(struct request_queue *q) 830static void mspro_block_submit_req(struct request_queue *q)
843{ 831{
844 struct memstick_dev *card = q->queuedata; 832 struct memstick_dev *card = q->queuedata;
@@ -1228,7 +1216,6 @@ static int mspro_block_init_disk(struct memstick_dev *card)
1228 } 1216 }
1229 1217
1230 msb->queue->queuedata = card; 1218 msb->queue->queuedata = card;
1231 blk_queue_prep_rq(msb->queue, mspro_block_prepare_req);
1232 1219
1233 blk_queue_bounce_limit(msb->queue, limit); 1220 blk_queue_bounce_limit(msb->queue, limit);
1234 blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES); 1221 blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7ee1667acde4..b8c4b2ba7519 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2320,10 +2320,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2320 SmpPassthroughReply_t *smprep; 2320 SmpPassthroughReply_t *smprep;
2321 2321
2322 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; 2322 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
2323 memcpy(req->sense, smprep, sizeof(*smprep)); 2323 memcpy(scsi_req(req)->sense, smprep, sizeof(*smprep));
2324 req->sense_len = sizeof(*smprep); 2324 scsi_req(req)->sense_len = sizeof(*smprep);
2325 req->resid_len = 0; 2325 scsi_req(req)->resid_len = 0;
2326 rsp->resid_len -= smprep->ResponseDataLength; 2326 scsi_req(rsp)->resid_len -= smprep->ResponseDataLength;
2327 } else { 2327 } else {
2328 printk(MYIOC_s_ERR_FMT 2328 printk(MYIOC_s_ERR_FMT
2329 "%s: smp passthru reply failed to be returned\n", 2329 "%s: smp passthru reply failed to be returned\n",
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index a6496d8027bc..033f641eb8b7 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -30,15 +30,6 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
30{ 30{
31 struct mmc_queue *mq = q->queuedata; 31 struct mmc_queue *mq = q->queuedata;
32 32
33 /*
34 * We only like normal block requests and discards.
35 */
36 if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
37 req_op(req) != REQ_OP_SECURE_ERASE) {
38 blk_dump_rq_flags(req, "MMC bad request");
39 return BLKPREP_KILL;
40 }
41
42 if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) 33 if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
43 return BLKPREP_KILL; 34 return BLKPREP_KILL;
44 35
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index df8a5ef334c0..6b8d5cd7dbf6 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -84,9 +84,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
84 nsect = blk_rq_cur_bytes(req) >> tr->blkshift; 84 nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
85 buf = bio_data(req->bio); 85 buf = bio_data(req->bio);
86 86
87 if (req->cmd_type != REQ_TYPE_FS)
88 return -EIO;
89
90 if (req_op(req) == REQ_OP_FLUSH) 87 if (req_op(req) == REQ_OP_FLUSH)
91 return tr->flush(dev); 88 return tr->flush(dev);
92 89
@@ -94,16 +91,16 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
94 get_capacity(req->rq_disk)) 91 get_capacity(req->rq_disk))
95 return -EIO; 92 return -EIO;
96 93
97 if (req_op(req) == REQ_OP_DISCARD) 94 switch (req_op(req)) {
95 case REQ_OP_DISCARD:
98 return tr->discard(dev, block, nsect); 96 return tr->discard(dev, block, nsect);
99 97 case REQ_OP_READ:
100 if (rq_data_dir(req) == READ) {
101 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 98 for (; nsect > 0; nsect--, block++, buf += tr->blksize)
102 if (tr->readsect(dev, block, buf)) 99 if (tr->readsect(dev, block, buf))
103 return -EIO; 100 return -EIO;
104 rq_flush_dcache_pages(req); 101 rq_flush_dcache_pages(req);
105 return 0; 102 return 0;
106 } else { 103 case REQ_OP_WRITE:
107 if (!tr->writesect) 104 if (!tr->writesect)
108 return -EIO; 105 return -EIO;
109 106
@@ -112,6 +109,8 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
112 if (tr->writesect(dev, block, buf)) 109 if (tr->writesect(dev, block, buf))
113 return -EIO; 110 return -EIO;
114 return 0; 111 return 0;
112 default:
113 return -EIO;
115 } 114 }
116} 115}
117 116
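
The REQ_TYPE_FS guard disappears because passthrough requests can no longer reach these drivers; classification is now purely by operation. The dispatch skeleton do_blktrans_request() adopts, reduced to its shape:

    switch (req_op(req)) {
    case REQ_OP_FLUSH:
        return tr->flush(dev);
    case REQ_OP_DISCARD:
        return tr->discard(dev, block, nsect);
    case REQ_OP_READ:
        /* readsect loop, then rq_flush_dcache_pages(), as above */
        return 0;
    case REQ_OP_WRITE:
        /* rq_flush_dcache_pages(), then writesect loop */
        return 0;
    default:
        return -EIO;    /* anything unrecognized is rejected */
    }
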
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index d1e6931c132f..c80869e60909 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -323,16 +323,15 @@ static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
323 struct ubiblock *dev = hctx->queue->queuedata; 323 struct ubiblock *dev = hctx->queue->queuedata;
324 struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req); 324 struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
325 325
326 if (req->cmd_type != REQ_TYPE_FS) 326 switch (req_op(req)) {
327 case REQ_OP_READ:
328 ubi_sgl_init(&pdu->usgl);
329 queue_work(dev->wq, &pdu->work);
330 return BLK_MQ_RQ_QUEUE_OK;
331 default:
327 return BLK_MQ_RQ_QUEUE_ERROR; 332 return BLK_MQ_RQ_QUEUE_ERROR;
333 }
328 334
329 if (rq_data_dir(req) != READ)
330 return BLK_MQ_RQ_QUEUE_ERROR; /* Write not implemented */
331
332 ubi_sgl_init(&pdu->usgl);
333 queue_work(dev->wq, &pdu->work);
334
335 return BLK_MQ_RQ_QUEUE_OK;
336} 335}
337 336
338static int ubiblock_init_request(void *data, struct request *req, 337static int ubiblock_init_request(void *data, struct request *req,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 138c6fa00cd5..44a1a257e0b5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -208,18 +208,18 @@ EXPORT_SYMBOL_GPL(nvme_requeue_req);
208struct request *nvme_alloc_request(struct request_queue *q, 208struct request *nvme_alloc_request(struct request_queue *q,
209 struct nvme_command *cmd, unsigned int flags, int qid) 209 struct nvme_command *cmd, unsigned int flags, int qid)
210{ 210{
211 unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
211 struct request *req; 212 struct request *req;
212 213
213 if (qid == NVME_QID_ANY) { 214 if (qid == NVME_QID_ANY) {
214 req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags); 215 req = blk_mq_alloc_request(q, op, flags);
215 } else { 216 } else {
216 req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags, 217 req = blk_mq_alloc_request_hctx(q, op, flags,
217 qid ? qid - 1 : 0); 218 qid ? qid - 1 : 0);
218 } 219 }
219 if (IS_ERR(req)) 220 if (IS_ERR(req))
220 return req; 221 return req;
221 222
222 req->cmd_type = REQ_TYPE_DRV_PRIV;
223 req->cmd_flags |= REQ_FAILFAST_DRIVER; 223 req->cmd_flags |= REQ_FAILFAST_DRIVER;
224 nvme_req(req)->cmd = cmd; 224 nvme_req(req)->cmd = cmd;
225 225
@@ -238,26 +238,38 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
238static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req, 238static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
239 struct nvme_command *cmnd) 239 struct nvme_command *cmnd)
240{ 240{
241 unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
241 struct nvme_dsm_range *range; 242 struct nvme_dsm_range *range;
242 unsigned int nr_bytes = blk_rq_bytes(req); 243 struct bio *bio;
243 244
244 range = kmalloc(sizeof(*range), GFP_ATOMIC); 245 range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
245 if (!range) 246 if (!range)
246 return BLK_MQ_RQ_QUEUE_BUSY; 247 return BLK_MQ_RQ_QUEUE_BUSY;
247 248
248 range->cattr = cpu_to_le32(0); 249 __rq_for_each_bio(bio, req) {
249 range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift); 250 u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
250 range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); 251 u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
252
253 range[n].cattr = cpu_to_le32(0);
254 range[n].nlb = cpu_to_le32(nlb);
255 range[n].slba = cpu_to_le64(slba);
256 n++;
257 }
258
259 if (WARN_ON_ONCE(n != segments)) {
260 kfree(range);
261 return BLK_MQ_RQ_QUEUE_ERROR;
262 }
251 263
252 memset(cmnd, 0, sizeof(*cmnd)); 264 memset(cmnd, 0, sizeof(*cmnd));
253 cmnd->dsm.opcode = nvme_cmd_dsm; 265 cmnd->dsm.opcode = nvme_cmd_dsm;
254 cmnd->dsm.nsid = cpu_to_le32(ns->ns_id); 266 cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
255 cmnd->dsm.nr = 0; 267 cmnd->dsm.nr = segments - 1;
256 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); 268 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
257 269
258 req->special_vec.bv_page = virt_to_page(range); 270 req->special_vec.bv_page = virt_to_page(range);
259 req->special_vec.bv_offset = offset_in_page(range); 271 req->special_vec.bv_offset = offset_in_page(range);
260 req->special_vec.bv_len = sizeof(*range); 272 req->special_vec.bv_len = sizeof(*range) * segments;
261 req->rq_flags |= RQF_SPECIAL_PAYLOAD; 273 req->rq_flags |= RQF_SPECIAL_PAYLOAD;
262 274
263 return BLK_MQ_RQ_QUEUE_OK; 275 return BLK_MQ_RQ_QUEUE_OK;
@@ -309,17 +321,27 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
309{ 321{
310 int ret = BLK_MQ_RQ_QUEUE_OK; 322 int ret = BLK_MQ_RQ_QUEUE_OK;
311 323
312 if (req->cmd_type == REQ_TYPE_DRV_PRIV) 324 switch (req_op(req)) {
325 case REQ_OP_DRV_IN:
326 case REQ_OP_DRV_OUT:
313 memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd)); 327 memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
314 else if (req_op(req) == REQ_OP_FLUSH) 328 break;
329 case REQ_OP_FLUSH:
315 nvme_setup_flush(ns, cmd); 330 nvme_setup_flush(ns, cmd);
316 else if (req_op(req) == REQ_OP_DISCARD) 331 break;
332 case REQ_OP_DISCARD:
317 ret = nvme_setup_discard(ns, req, cmd); 333 ret = nvme_setup_discard(ns, req, cmd);
318 else 334 break;
335 case REQ_OP_READ:
336 case REQ_OP_WRITE:
319 nvme_setup_rw(ns, req, cmd); 337 nvme_setup_rw(ns, req, cmd);
338 break;
339 default:
340 WARN_ON_ONCE(1);
341 return BLK_MQ_RQ_QUEUE_ERROR;
342 }
320 343
321 cmd->common.command_id = req->tag; 344 cmd->common.command_id = req->tag;
322
323 return ret; 345 return ret;
324} 346}
325EXPORT_SYMBOL_GPL(nvme_setup_cmd); 347EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -868,6 +890,9 @@ static void nvme_config_discard(struct nvme_ns *ns)
868 struct nvme_ctrl *ctrl = ns->ctrl; 890 struct nvme_ctrl *ctrl = ns->ctrl;
869 u32 logical_block_size = queue_logical_block_size(ns->queue); 891 u32 logical_block_size = queue_logical_block_size(ns->queue);
870 892
893 BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
894 NVME_DSM_MAX_RANGES);
895
871 if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES) 896 if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
872 ns->queue->limits.discard_zeroes_data = 1; 897 ns->queue->limits.discard_zeroes_data = 1;
873 else 898 else
@@ -876,6 +901,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
876 ns->queue->limits.discard_alignment = logical_block_size; 901 ns->queue->limits.discard_alignment = logical_block_size;
877 ns->queue->limits.discard_granularity = logical_block_size; 902 ns->queue->limits.discard_granularity = logical_block_size;
878 blk_queue_max_discard_sectors(ns->queue, UINT_MAX); 903 blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
904 blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
879 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); 905 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
880} 906}
881 907
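
With blk_queue_max_discard_segments(q, NVME_DSM_MAX_RANGES) advertised, a single discard request may now carry several discontiguous bios, and nvme_setup_discard() emits one DSM range per bio. The heart of the loop as merged above; the BUILD_BUG_ON guarantees the whole range array fits in the single page referenced by req->special_vec:

    unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
    struct nvme_dsm_range *range;
    struct bio *bio;

    range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
    if (!range)
        return BLK_MQ_RQ_QUEUE_BUSY;

    __rq_for_each_bio(bio, req) {   /* one range per merged bio */
        range[n].cattr = cpu_to_le32(0);
        range[n].nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
        range[n].slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
        n++;
    }
    cmnd->dsm.nr = segments - 1;    /* NVMe counts DSM ranges zero-based */
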
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index e65041c640cb..fb51a8de9b29 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1937,7 +1937,7 @@ nvme_fc_complete_rq(struct request *rq)
1937 return; 1937 return;
1938 } 1938 }
1939 1939
1940 if (rq->cmd_type == REQ_TYPE_DRV_PRIV) 1940 if (blk_rq_is_passthrough(rq))
1941 error = rq->errors; 1941 error = rq->errors;
1942 else 1942 else
1943 error = nvme_error_status(rq->errors); 1943 error = nvme_error_status(rq->errors);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d67d0d0a3bc0..ddc51adb594d 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -589,7 +589,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
589 */ 589 */
590 if (ns && ns->ms && !blk_integrity_rq(req)) { 590 if (ns && ns->ms && !blk_integrity_rq(req)) {
591 if (!(ns->pi_type && ns->ms == 8) && 591 if (!(ns->pi_type && ns->ms == 8) &&
592 req->cmd_type != REQ_TYPE_DRV_PRIV) { 592 !blk_rq_is_passthrough(req)) {
593 blk_mq_end_request(req, -EFAULT); 593 blk_mq_end_request(req, -EFAULT);
594 return BLK_MQ_RQ_QUEUE_OK; 594 return BLK_MQ_RQ_QUEUE_OK;
595 } 595 }
@@ -646,7 +646,7 @@ static void nvme_complete_rq(struct request *req)
646 return; 646 return;
647 } 647 }
648 648
649 if (req->cmd_type == REQ_TYPE_DRV_PRIV) 649 if (blk_rq_is_passthrough(req))
650 error = req->errors; 650 error = req->errors;
651 else 651 else
652 error = nvme_error_status(req->errors); 652 error = nvme_error_status(req->errors);
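
All four NVMe transports replace the `cmd_type == REQ_TYPE_DRV_PRIV` test with blk_rq_is_passthrough(), which is true for passthrough opcodes such as the REQ_OP_DRV_IN/REQ_OP_DRV_OUT that nvme_alloc_request() now picks. The completion idiom repeated in fc.c, pci.c, rdma.c and loop.c:

    if (blk_rq_is_passthrough(req))
        error = req->errors;                    /* raw status for passthrough */
    else
        error = nvme_error_status(req->errors); /* translated for FS I/O */
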
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 557f29b1f1bb..a75e95d42b3f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1423,7 +1423,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
1423 if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { 1423 if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
1424 struct nvme_command *cmd = nvme_req(rq)->cmd; 1424 struct nvme_command *cmd = nvme_req(rq)->cmd;
1425 1425
1426 if (rq->cmd_type != REQ_TYPE_DRV_PRIV || 1426 if (!blk_rq_is_passthrough(rq) ||
1427 cmd->common.opcode != nvme_fabrics_command || 1427 cmd->common.opcode != nvme_fabrics_command ||
1428 cmd->fabrics.fctype != nvme_fabrics_type_connect) 1428 cmd->fabrics.fctype != nvme_fabrics_type_connect)
1429 return false; 1429 return false;
@@ -1471,7 +1471,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1471 ib_dma_sync_single_for_device(dev, sqe->dma, 1471 ib_dma_sync_single_for_device(dev, sqe->dma,
1472 sizeof(struct nvme_command), DMA_TO_DEVICE); 1472 sizeof(struct nvme_command), DMA_TO_DEVICE);
1473 1473
1474 if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH) 1474 if (req_op(rq) == REQ_OP_FLUSH)
1475 flush = true; 1475 flush = true;
1476 ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, 1476 ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
1477 req->mr->need_inval ? &req->reg_wr.wr : NULL, flush); 1477 req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
@@ -1522,7 +1522,7 @@ static void nvme_rdma_complete_rq(struct request *rq)
1522 return; 1522 return;
1523 } 1523 }
1524 1524
1525 if (rq->cmd_type == REQ_TYPE_DRV_PRIV) 1525 if (blk_rq_is_passthrough(rq))
1526 error = rq->errors; 1526 error = rq->errors;
1527 else 1527 else
1528 error = nvme_error_status(rq->errors); 1528 error = nvme_error_status(rq->errors);
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index a5c09e703bd8..f49ae2758bb7 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -43,6 +43,7 @@
43#include <asm/unaligned.h> 43#include <asm/unaligned.h>
44#include <scsi/sg.h> 44#include <scsi/sg.h>
45#include <scsi/scsi.h> 45#include <scsi/scsi.h>
46#include <scsi/scsi_request.h>
46 47
47#include "nvme.h" 48#include "nvme.h"
48 49
@@ -2347,12 +2348,14 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2347 2348
2348static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr) 2349static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
2349{ 2350{
2350 u8 cmd[BLK_MAX_CDB]; 2351 u8 cmd[16];
2351 int retcode; 2352 int retcode;
2352 unsigned int opcode; 2353 unsigned int opcode;
2353 2354
2354 if (hdr->cmdp == NULL) 2355 if (hdr->cmdp == NULL)
2355 return -EMSGSIZE; 2356 return -EMSGSIZE;
2357 if (hdr->cmd_len > sizeof(cmd))
2358 return -EINVAL;
2356 if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len)) 2359 if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
2357 return -EFAULT; 2360 return -EFAULT;
2358 2361
@@ -2451,8 +2454,6 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
2451 return -EFAULT; 2454 return -EFAULT;
2452 if (hdr.interface_id != 'S') 2455 if (hdr.interface_id != 'S')
2453 return -EINVAL; 2456 return -EINVAL;
2454 if (hdr.cmd_len > BLK_MAX_CDB)
2455 return -EINVAL;
2456 2457
2457 /* 2458 /*
2458 * A positive return code means a NVMe status, which has been 2459 * A positive return code means a NVMe status, which has been
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9aaa70071ae5..f3862e38f574 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -104,7 +104,7 @@ static void nvme_loop_complete_rq(struct request *req)
104 return; 104 return;
105 } 105 }
106 106
107 if (req->cmd_type == REQ_TYPE_DRV_PRIV) 107 if (blk_rq_is_passthrough(req))
108 error = req->errors; 108 error = req->errors;
109 else 109 else
110 error = nvme_error_status(req->errors); 110 error = nvme_error_status(req->errors);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 9f16ea6964ec..152de6817875 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -300,13 +300,6 @@ static void scm_blk_request(struct request_queue *rq)
300 struct request *req; 300 struct request *req;
301 301
302 while ((req = blk_peek_request(rq))) { 302 while ((req = blk_peek_request(rq))) {
303 if (req->cmd_type != REQ_TYPE_FS) {
304 blk_start_request(req);
305 blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
306 __blk_end_request_all(req, -EIO);
307 continue;
308 }
309
310 if (!scm_permit_request(bdev, req)) 303 if (!scm_permit_request(bdev, req))
311 goto out; 304 goto out;
312 305
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a4f6b0d95515..d4023bf1e739 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -18,6 +18,7 @@ config SCSI
18 depends on BLOCK 18 depends on BLOCK
19 select SCSI_DMA if HAS_DMA 19 select SCSI_DMA if HAS_DMA
20 select SG_POOL 20 select SG_POOL
21 select BLK_SCSI_REQUEST
21 ---help--- 22 ---help---
22 If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or 23 If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
23 any other SCSI device under Linux, say Y and make sure that you know 24 any other SCSI device under Linux, say Y and make sure that you know
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 5b80746980b8..4a7679f6c73d 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -88,12 +88,6 @@ struct clariion_dh_data {
88 */ 88 */
89 unsigned char buffer[CLARIION_BUFFER_SIZE]; 89 unsigned char buffer[CLARIION_BUFFER_SIZE];
90 /* 90 /*
91 * SCSI sense buffer for commands -- assumes serial issuance
92 * and completion sequence of all commands for same multipath.
93 */
94 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
95 unsigned int senselen;
96 /*
97 * LUN state 91 * LUN state
98 */ 92 */
99 int lun_state; 93 int lun_state;
@@ -116,44 +110,38 @@ struct clariion_dh_data {
116/* 110/*
117 * Parse MODE_SELECT cmd reply. 111 * Parse MODE_SELECT cmd reply.
118 */ 112 */
119static int trespass_endio(struct scsi_device *sdev, char *sense) 113static int trespass_endio(struct scsi_device *sdev,
114 struct scsi_sense_hdr *sshdr)
120{ 115{
121 int err = SCSI_DH_IO; 116 int err = SCSI_DH_IO;
122 struct scsi_sense_hdr sshdr;
123
124 if (!scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
125 sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, "
126 "0x%2x, 0x%2x while sending CLARiiON trespass "
127 "command.\n", CLARIION_NAME, sshdr.sense_key,
128 sshdr.asc, sshdr.ascq);
129 117
130 if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) && 118 sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, "
131 (sshdr.ascq == 0x00)) { 119 "0x%2x, 0x%2x while sending CLARiiON trespass "
132 /* 120 "command.\n", CLARIION_NAME, sshdr->sense_key,
133 * Array based copy in progress -- do not send 121 sshdr->asc, sshdr->ascq);
134 * mode_select or copy will be aborted mid-stream. 122
135 */ 123 if (sshdr->sense_key == 0x05 && sshdr->asc == 0x04 &&
136 sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in " 124 sshdr->ascq == 0x00) {
137 "progress while sending CLARiiON trespass " 125 /*
138 "command.\n", CLARIION_NAME); 126 * Array based copy in progress -- do not send
139 err = SCSI_DH_DEV_TEMP_BUSY; 127 * mode_select or copy will be aborted mid-stream.
140 } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) && 128 */
141 (sshdr.ascq == 0x03)) { 129 sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in "
142 /* 130 "progress while sending CLARiiON trespass "
143 * LUN Not Ready - Manual Intervention Required 131 "command.\n", CLARIION_NAME);
144 * indicates in-progress ucode upgrade (NDU). 132 err = SCSI_DH_DEV_TEMP_BUSY;
145 */ 133 } else if (sshdr->sense_key == 0x02 && sshdr->asc == 0x04 &&
146 sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress " 134 sshdr->ascq == 0x03) {
147 "ucode upgrade NDU operation while sending " 135 /*
148 "CLARiiON trespass command.\n", CLARIION_NAME); 136 * LUN Not Ready - Manual Intervention Required
149 err = SCSI_DH_DEV_TEMP_BUSY; 137 * indicates in-progress ucode upgrade (NDU).
150 } else 138 */
151 err = SCSI_DH_DEV_FAILED; 139 sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress "
152 } else { 140 "ucode upgrade NDU operation while sending "
153 sdev_printk(KERN_INFO, sdev, 141 "CLARiiON trespass command.\n", CLARIION_NAME);
154 "%s: failed to send MODE SELECT, no sense available\n", 142 err = SCSI_DH_DEV_TEMP_BUSY;
155 CLARIION_NAME); 143 } else
156 } 144 err = SCSI_DH_DEV_FAILED;
157 return err; 145 return err;
158} 146}
159 147
@@ -257,103 +245,15 @@ out:
257 return sp_model; 245 return sp_model;
258} 246}
259 247
260/*
261 * Get block request for REQ_BLOCK_PC command issued to path. Currently
262 * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
263 *
264 * Uses data and sense buffers in hardware handler context structure and
265 * assumes serial servicing of commands, both issuance and completion.
266 */
267static struct request *get_req(struct scsi_device *sdev, int cmd,
268 unsigned char *buffer)
269{
270 struct request *rq;
271 int len = 0;
272
273 rq = blk_get_request(sdev->request_queue,
274 (cmd != INQUIRY) ? WRITE : READ, GFP_NOIO);
275 if (IS_ERR(rq)) {
276 sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
277 return NULL;
278 }
279
280 blk_rq_set_block_pc(rq);
281 rq->cmd_len = COMMAND_SIZE(cmd);
282 rq->cmd[0] = cmd;
283
284 switch (cmd) {
285 case MODE_SELECT:
286 len = sizeof(short_trespass);
287 rq->cmd[1] = 0x10;
288 rq->cmd[4] = len;
289 break;
290 case MODE_SELECT_10:
291 len = sizeof(long_trespass);
292 rq->cmd[1] = 0x10;
293 rq->cmd[8] = len;
294 break;
295 case INQUIRY:
296 len = CLARIION_BUFFER_SIZE;
297 rq->cmd[4] = len;
298 memset(buffer, 0, len);
299 break;
300 default:
301 BUG_ON(1);
302 break;
303 }
304
305 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
306 REQ_FAILFAST_DRIVER;
307 rq->timeout = CLARIION_TIMEOUT;
308 rq->retries = CLARIION_RETRIES;
309
310 if (blk_rq_map_kern(rq->q, rq, buffer, len, GFP_NOIO)) {
311 blk_put_request(rq);
312 return NULL;
313 }
314
315 return rq;
316}
317
318static int send_inquiry_cmd(struct scsi_device *sdev, int page,
319 struct clariion_dh_data *csdev)
320{
321 struct request *rq = get_req(sdev, INQUIRY, csdev->buffer);
322 int err;
323
324 if (!rq)
325 return SCSI_DH_RES_TEMP_UNAVAIL;
326
327 rq->sense = csdev->sense;
328 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
329 rq->sense_len = csdev->senselen = 0;
330
331 rq->cmd[0] = INQUIRY;
332 if (page != 0) {
333 rq->cmd[1] = 1;
334 rq->cmd[2] = page;
335 }
336 err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
337 if (err == -EIO) {
338 sdev_printk(KERN_INFO, sdev,
339 "%s: failed to send %s INQUIRY: %x\n",
340 CLARIION_NAME, page?"EVPD":"standard",
341 rq->errors);
342 csdev->senselen = rq->sense_len;
343 err = SCSI_DH_IO;
344 }
345
346 blk_put_request(rq);
347
348 return err;
349}
350
351static int send_trespass_cmd(struct scsi_device *sdev, 248static int send_trespass_cmd(struct scsi_device *sdev,
352 struct clariion_dh_data *csdev) 249 struct clariion_dh_data *csdev)
353{ 250{
354 struct request *rq;
355 unsigned char *page22; 251 unsigned char *page22;
356 int err, len, cmd; 252 unsigned char cdb[COMMAND_SIZE(MODE_SELECT)];
253 int err, res = SCSI_DH_OK, len;
254 struct scsi_sense_hdr sshdr;
255 u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
256 REQ_FAILFAST_DRIVER;
357 257
358 if (csdev->flags & CLARIION_SHORT_TRESPASS) { 258 if (csdev->flags & CLARIION_SHORT_TRESPASS) {
359 page22 = short_trespass; 259 page22 = short_trespass;
@@ -361,40 +261,37 @@ static int send_trespass_cmd(struct scsi_device *sdev,
361 /* Set Honor Reservations bit */ 261 /* Set Honor Reservations bit */
362 page22[6] |= 0x80; 262 page22[6] |= 0x80;
363 len = sizeof(short_trespass); 263 len = sizeof(short_trespass);
364 cmd = MODE_SELECT; 264 cdb[0] = MODE_SELECT;
265 cdb[1] = 0x10;
266 cdb[4] = len;
365 } else { 267 } else {
366 page22 = long_trespass; 268 page22 = long_trespass;
367 if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS)) 269 if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
368 /* Set Honor Reservations bit */ 270 /* Set Honor Reservations bit */
369 page22[10] |= 0x80; 271 page22[10] |= 0x80;
370 len = sizeof(long_trespass); 272 len = sizeof(long_trespass);
371 cmd = MODE_SELECT_10; 273 cdb[0] = MODE_SELECT_10;
274 cdb[8] = len;
372 } 275 }
373 BUG_ON((len > CLARIION_BUFFER_SIZE)); 276 BUG_ON((len > CLARIION_BUFFER_SIZE));
374 memcpy(csdev->buffer, page22, len); 277 memcpy(csdev->buffer, page22, len);
375 278
376 rq = get_req(sdev, cmd, csdev->buffer); 279 err = scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
377 if (!rq) 280 csdev->buffer, len, &sshdr,
378 return SCSI_DH_RES_TEMP_UNAVAIL; 281 CLARIION_TIMEOUT * HZ, CLARIION_RETRIES,
379 282 NULL, req_flags, 0);
380 rq->sense = csdev->sense; 283 if (err) {
381 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); 284 if (scsi_sense_valid(&sshdr))
382 rq->sense_len = csdev->senselen = 0; 285 res = trespass_endio(sdev, &sshdr);
383 286 else {
384 err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
385 if (err == -EIO) {
386 if (rq->sense_len) {
387 err = trespass_endio(sdev, csdev->sense);
388 } else {
389 sdev_printk(KERN_INFO, sdev, 287 sdev_printk(KERN_INFO, sdev,
390 "%s: failed to send MODE SELECT: %x\n", 288 "%s: failed to send MODE SELECT: %x\n",
391 CLARIION_NAME, rq->errors); 289 CLARIION_NAME, err);
290 res = SCSI_DH_IO;
392 } 291 }
393 } 292 }
394 293
395 blk_put_request(rq); 294 return res;
396
397 return err;
398} 295}
399 296
400static int clariion_check_sense(struct scsi_device *sdev, 297static int clariion_check_sense(struct scsi_device *sdev,
@@ -464,21 +361,7 @@ static int clariion_std_inquiry(struct scsi_device *sdev,
464 int err; 361 int err;
465 char *sp_model; 362 char *sp_model;
466 363
467 err = send_inquiry_cmd(sdev, 0, csdev); 364 sp_model = parse_sp_model(sdev, sdev->inquiry);
468 if (err != SCSI_DH_OK && csdev->senselen) {
469 struct scsi_sense_hdr sshdr;
470
471 if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
472 &sshdr)) {
473 sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
474 "%02x/%02x/%02x\n", CLARIION_NAME,
475 sshdr.sense_key, sshdr.asc, sshdr.ascq);
476 }
477 err = SCSI_DH_IO;
478 goto out;
479 }
480
481 sp_model = parse_sp_model(sdev, csdev->buffer);
482 if (!sp_model) { 365 if (!sp_model) {
483 err = SCSI_DH_DEV_UNSUPP; 366 err = SCSI_DH_DEV_UNSUPP;
484 goto out; 367 goto out;
@@ -500,30 +383,12 @@ out:
500static int clariion_send_inquiry(struct scsi_device *sdev, 383static int clariion_send_inquiry(struct scsi_device *sdev,
501 struct clariion_dh_data *csdev) 384 struct clariion_dh_data *csdev)
502{ 385{
503 int err, retry = CLARIION_RETRIES; 386 int err = SCSI_DH_IO;
504 387
505retry: 388 if (!scsi_get_vpd_page(sdev, 0xC0, csdev->buffer,
506 err = send_inquiry_cmd(sdev, 0xC0, csdev); 389 CLARIION_BUFFER_SIZE))
507 if (err != SCSI_DH_OK && csdev->senselen) {
508 struct scsi_sense_hdr sshdr;
509
510 err = scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
511 &sshdr);
512 if (!err)
513 return SCSI_DH_IO;
514
515 err = clariion_check_sense(sdev, &sshdr);
516 if (retry > 0 && err == ADD_TO_MLQUEUE) {
517 retry--;
518 goto retry;
519 }
520 sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
521 "%02x/%02x/%02x\n", CLARIION_NAME,
522 sshdr.sense_key, sshdr.asc, sshdr.ascq);
523 err = SCSI_DH_IO;
524 } else {
525 err = parse_sp_info_reply(sdev, csdev); 390 err = parse_sp_info_reply(sdev, csdev);
526 } 391
527 return err; 392 return err;
528} 393}
529 394
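
The EMC handler (and hp_sw below) stops hand-building REQ_BLOCK_PC requests; scsi_execute_req_flags() allocates the request, copies in the CDB, supplies a sense buffer, and waits for completion. The shared calling pattern, with buf, len, timeout, retries and decode() standing in for the handler-specific values and sense parsing:

    struct scsi_sense_hdr sshdr;
    u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
                    REQ_FAILFAST_DRIVER;
    int res;

    res = scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, buf, len, &sshdr,
                                 timeout, retries, NULL, req_flags, 0);
    if (res && scsi_sense_valid(&sshdr))
        decode(&sshdr);     /* e.g. trespass_endio() or tur_done() above */
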
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 308e87195dc1..be43c940636d 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -38,13 +38,10 @@
38#define HP_SW_PATH_PASSIVE 1 38#define HP_SW_PATH_PASSIVE 1
39 39
40struct hp_sw_dh_data { 40struct hp_sw_dh_data {
41 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
42 int path_state; 41 int path_state;
43 int retries; 42 int retries;
44 int retry_cnt; 43 int retry_cnt;
45 struct scsi_device *sdev; 44 struct scsi_device *sdev;
46 activate_complete callback_fn;
47 void *callback_data;
48}; 45};
49 46
50static int hp_sw_start_stop(struct hp_sw_dh_data *); 47static int hp_sw_start_stop(struct hp_sw_dh_data *);
@@ -56,43 +53,34 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *);
56 * 53 *
57 * Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path 54 * Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path
58 */ 55 */
59static int tur_done(struct scsi_device *sdev, unsigned char *sense) 56static int tur_done(struct scsi_device *sdev, struct hp_sw_dh_data *h,
57 struct scsi_sense_hdr *sshdr)
60{ 58{
61 struct scsi_sense_hdr sshdr; 59 int ret = SCSI_DH_IO;
62 int ret;
63 60
64 ret = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr); 61 switch (sshdr->sense_key) {
65 if (!ret) {
66 sdev_printk(KERN_WARNING, sdev,
67 "%s: sending tur failed, no sense available\n",
68 HP_SW_NAME);
69 ret = SCSI_DH_IO;
70 goto done;
71 }
72 switch (sshdr.sense_key) {
73 case UNIT_ATTENTION: 62 case UNIT_ATTENTION:
74 ret = SCSI_DH_IMM_RETRY; 63 ret = SCSI_DH_IMM_RETRY;
75 break; 64 break;
76 case NOT_READY: 65 case NOT_READY:
77 if ((sshdr.asc == 0x04) && (sshdr.ascq == 2)) { 66 if (sshdr->asc == 0x04 && sshdr->ascq == 2) {
78 /* 67 /*
79 * LUN not ready - Initialization command required 68 * LUN not ready - Initialization command required
80 * 69 *
81 * This is the passive path 70 * This is the passive path
82 */ 71 */
83 ret = SCSI_DH_DEV_OFFLINED; 72 h->path_state = HP_SW_PATH_PASSIVE;
73 ret = SCSI_DH_OK;
84 break; 74 break;
85 } 75 }
86 /* Fallthrough */ 76 /* Fallthrough */
87 default: 77 default:
88 sdev_printk(KERN_WARNING, sdev, 78 sdev_printk(KERN_WARNING, sdev,
89 "%s: sending tur failed, sense %x/%x/%x\n", 79 "%s: sending tur failed, sense %x/%x/%x\n",
90 HP_SW_NAME, sshdr.sense_key, sshdr.asc, 80 HP_SW_NAME, sshdr->sense_key, sshdr->asc,
91 sshdr.ascq); 81 sshdr->ascq);
92 break; 82 break;
93 } 83 }
94
95done:
96 return ret; 84 return ret;
97} 85}
98 86
@@ -105,131 +93,36 @@ done:
105 */ 93 */
106static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h) 94static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
107{ 95{
108 struct request *req; 96 unsigned char cmd[6] = { TEST_UNIT_READY };
109 int ret; 97 struct scsi_sense_hdr sshdr;
98 int ret = SCSI_DH_OK, res;
99 u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
100 REQ_FAILFAST_DRIVER;
110 101
111retry: 102retry:
112 req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO); 103 res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
113 if (IS_ERR(req)) 104 HP_SW_TIMEOUT, HP_SW_RETRIES,
114 return SCSI_DH_RES_TEMP_UNAVAIL; 105 NULL, req_flags, 0);
115 106 if (res) {
116 blk_rq_set_block_pc(req); 107 if (scsi_sense_valid(&sshdr))
117 req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | 108 ret = tur_done(sdev, h, &sshdr);
118 REQ_FAILFAST_DRIVER; 109 else {
119 req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
120 req->cmd[0] = TEST_UNIT_READY;
121 req->timeout = HP_SW_TIMEOUT;
122 req->sense = h->sense;
123 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
124 req->sense_len = 0;
125
126 ret = blk_execute_rq(req->q, NULL, req, 1);
127 if (ret == -EIO) {
128 if (req->sense_len > 0) {
129 ret = tur_done(sdev, h->sense);
130 } else {
131 sdev_printk(KERN_WARNING, sdev, 110 sdev_printk(KERN_WARNING, sdev,
132 "%s: sending tur failed with %x\n", 111 "%s: sending tur failed with %x\n",
133 HP_SW_NAME, req->errors); 112 HP_SW_NAME, res);
134 ret = SCSI_DH_IO; 113 ret = SCSI_DH_IO;
135 } 114 }
136 } else { 115 } else {
137 h->path_state = HP_SW_PATH_ACTIVE; 116 h->path_state = HP_SW_PATH_ACTIVE;
138 ret = SCSI_DH_OK; 117 ret = SCSI_DH_OK;
139 } 118 }
140 if (ret == SCSI_DH_IMM_RETRY) { 119 if (ret == SCSI_DH_IMM_RETRY)
141 blk_put_request(req);
142 goto retry; 120 goto retry;
143 }
144 if (ret == SCSI_DH_DEV_OFFLINED) {
145 h->path_state = HP_SW_PATH_PASSIVE;
146 ret = SCSI_DH_OK;
147 }
148
149 blk_put_request(req);
150 121
151 return ret; 122 return ret;
152} 123}
153 124
154/* 125/*
155 * start_done - Handle START STOP UNIT return status
156 * @sdev: sdev the command has been sent to
157 * @errors: blk error code
158 */
159static int start_done(struct scsi_device *sdev, unsigned char *sense)
160{
161 struct scsi_sense_hdr sshdr;
162 int rc;
163
164 rc = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
165 if (!rc) {
166 sdev_printk(KERN_WARNING, sdev,
167 "%s: sending start_stop_unit failed, "
168 "no sense available\n",
169 HP_SW_NAME);
170 return SCSI_DH_IO;
171 }
172 switch (sshdr.sense_key) {
173 case NOT_READY:
174 if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
175 /*
176 * LUN not ready - manual intervention required
177 *
178 * Switch-over in progress, retry.
179 */
180 rc = SCSI_DH_RETRY;
181 break;
182 }
183 /* fall through */
184 default:
185 sdev_printk(KERN_WARNING, sdev,
186 "%s: sending start_stop_unit failed, sense %x/%x/%x\n",
187 HP_SW_NAME, sshdr.sense_key, sshdr.asc,
188 sshdr.ascq);
189 rc = SCSI_DH_IO;
190 }
191
192 return rc;
193}
194
195static void start_stop_endio(struct request *req, int error)
196{
197 struct hp_sw_dh_data *h = req->end_io_data;
198 unsigned err = SCSI_DH_OK;
199
200 if (error || host_byte(req->errors) != DID_OK ||
201 msg_byte(req->errors) != COMMAND_COMPLETE) {
202 sdev_printk(KERN_WARNING, h->sdev,
203 "%s: sending start_stop_unit failed with %x\n",
204 HP_SW_NAME, req->errors);
205 err = SCSI_DH_IO;
206 goto done;
207 }
208
209 if (req->sense_len > 0) {
210 err = start_done(h->sdev, h->sense);
211 if (err == SCSI_DH_RETRY) {
212 err = SCSI_DH_IO;
213 if (--h->retry_cnt) {
214 blk_put_request(req);
215 err = hp_sw_start_stop(h);
216 if (err == SCSI_DH_OK)
217 return;
218 }
219 }
220 }
221done:
222 req->end_io_data = NULL;
223 __blk_put_request(req->q, req);
224 if (h->callback_fn) {
225 h->callback_fn(h->callback_data, err);
226 h->callback_fn = h->callback_data = NULL;
227 }
228 return;
229
230}
231
232/*
233 * hp_sw_start_stop - Send START STOP UNIT command 126 * hp_sw_start_stop - Send START STOP UNIT command
234 * @sdev: sdev command should be sent to 127 * @sdev: sdev command should be sent to
235 * 128 *
@@ -237,26 +130,48 @@ done:
237 */ 130 */
238static int hp_sw_start_stop(struct hp_sw_dh_data *h) 131static int hp_sw_start_stop(struct hp_sw_dh_data *h)
239{ 132{
240 struct request *req; 133 unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1, 0 };
241 134 struct scsi_sense_hdr sshdr;
242 req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC); 135 struct scsi_device *sdev = h->sdev;
243 if (IS_ERR(req)) 136 int res, rc = SCSI_DH_OK;
244 return SCSI_DH_RES_TEMP_UNAVAIL; 137 int retry_cnt = HP_SW_RETRIES;
245 138 u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
246 blk_rq_set_block_pc(req); 139 REQ_FAILFAST_DRIVER;
247 req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
248 REQ_FAILFAST_DRIVER;
249 req->cmd_len = COMMAND_SIZE(START_STOP);
250 req->cmd[0] = START_STOP;
251 req->cmd[4] = 1; /* Start spin cycle */
252 req->timeout = HP_SW_TIMEOUT;
253 req->sense = h->sense;
254 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
255 req->sense_len = 0;
256 req->end_io_data = h;
257 140
258 blk_execute_rq_nowait(req->q, NULL, req, 1, start_stop_endio); 141retry:
259 return SCSI_DH_OK; 142 res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
143 HP_SW_TIMEOUT, HP_SW_RETRIES,
144 NULL, req_flags, 0);
145 if (res) {
146 if (!scsi_sense_valid(&sshdr)) {
147 sdev_printk(KERN_WARNING, sdev,
148 "%s: sending start_stop_unit failed, "
149 "no sense available\n", HP_SW_NAME);
150 return SCSI_DH_IO;
151 }
152 switch (sshdr.sense_key) {
153 case NOT_READY:
154 if (sshdr.asc == 0x04 && sshdr.ascq == 3) {
155 /*
156 * LUN not ready - manual intervention required
157 *
158 * Switch-over in progress, retry.
159 */
160 if (--retry_cnt)
161 goto retry;
162 rc = SCSI_DH_RETRY;
163 break;
164 }
165 /* fall through */
166 default:
167 sdev_printk(KERN_WARNING, sdev,
168 "%s: sending start_stop_unit failed, "
169 "sense %x/%x/%x\n", HP_SW_NAME,
170 sshdr.sense_key, sshdr.asc, sshdr.ascq);
171 rc = SCSI_DH_IO;
172 }
173 }
174 return rc;
260} 175}
261 176
262static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) 177static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
@@ -290,15 +205,8 @@ static int hp_sw_activate(struct scsi_device *sdev,
290 205
291 ret = hp_sw_tur(sdev, h); 206 ret = hp_sw_tur(sdev, h);
292 207
293 if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) { 208 if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE)
294 h->retry_cnt = h->retries;
295 h->callback_fn = fn;
296 h->callback_data = data;
297 ret = hp_sw_start_stop(h); 209 ret = hp_sw_start_stop(h);
298 if (ret == SCSI_DH_OK)
299 return 0;
300 h->callback_fn = h->callback_data = NULL;
301 }
302 210
303 if (fn) 211 if (fn)
304 fn(data, ret); 212 fn(data, ret);
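
Besides the same request-building conversion, the hp_sw hunks collapse the old asynchronous START STOP UNIT path (blk_execute_rq_nowait() with the start_stop_endio()/start_done() callback pair) into one synchronous retry loop. A condensed view of that control flow, assuming the sense policy shown above; my_send_start_unit is a hypothetical wrapper around scsi_execute_req_flags():

    static int my_start_stop_sync(struct scsi_device *sdev)
    {
            struct scsi_sense_hdr sshdr;
            int retry_cnt = HP_SW_RETRIES;
            int res;

    retry:
            res = my_send_start_unit(sdev, &sshdr);
            if (!res)
                    return SCSI_DH_OK;
            if (scsi_sense_valid(&sshdr) &&
                sshdr.sense_key == NOT_READY &&
                sshdr.asc == 0x04 && sshdr.ascq == 3) {
                    /* Switch-over in progress: retry inline instead of
                     * re-queueing through an end_io callback. */
                    if (--retry_cnt)
                            goto retry;
                    return SCSI_DH_RETRY;
            }
            return SCSI_DH_IO;
    }
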
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 00d9c326158e..b64eaae8533d 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -205,7 +205,6 @@ struct rdac_dh_data {
205#define RDAC_NON_PREFERRED 1 205#define RDAC_NON_PREFERRED 1
206 char preferred; 206 char preferred;
207 207
208 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
209 union { 208 union {
210 struct c2_inquiry c2; 209 struct c2_inquiry c2;
211 struct c4_inquiry c4; 210 struct c4_inquiry c4;
@@ -262,40 +261,12 @@ do { \
262 sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \ 261 sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
263} while (0); 262} while (0);
264 263
265static struct request *get_rdac_req(struct scsi_device *sdev, 264static unsigned int rdac_failover_get(struct rdac_controller *ctlr,
266 void *buffer, unsigned buflen, int rw) 265 struct list_head *list,
266 unsigned char *cdb)
267{ 267{
268 struct request *rq; 268 struct scsi_device *sdev = ctlr->ms_sdev;
269 struct request_queue *q = sdev->request_queue; 269 struct rdac_dh_data *h = sdev->handler_data;
270
271 rq = blk_get_request(q, rw, GFP_NOIO);
272
273 if (IS_ERR(rq)) {
274 sdev_printk(KERN_INFO, sdev,
275 "get_rdac_req: blk_get_request failed.\n");
276 return NULL;
277 }
278 blk_rq_set_block_pc(rq);
279
280 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
281 blk_put_request(rq);
282 sdev_printk(KERN_INFO, sdev,
283 "get_rdac_req: blk_rq_map_kern failed.\n");
284 return NULL;
285 }
286
287 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
288 REQ_FAILFAST_DRIVER;
289 rq->retries = RDAC_RETRIES;
290 rq->timeout = RDAC_TIMEOUT;
291
292 return rq;
293}
294
295static struct request *rdac_failover_get(struct scsi_device *sdev,
296 struct rdac_dh_data *h, struct list_head *list)
297{
298 struct request *rq;
299 struct rdac_mode_common *common; 270 struct rdac_mode_common *common;
300 unsigned data_size; 271 unsigned data_size;
301 struct rdac_queue_data *qdata; 272 struct rdac_queue_data *qdata;
@@ -332,27 +303,17 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
332 lun_table[qdata->h->lun] = 0x81; 303 lun_table[qdata->h->lun] = 0x81;
333 } 304 }
334 305
335 /* get request for block layer packet command */
336 rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
337 if (!rq)
338 return NULL;
339
340 /* Prepare the command. */ 306 /* Prepare the command. */
341 if (h->ctlr->use_ms10) { 307 if (h->ctlr->use_ms10) {
342 rq->cmd[0] = MODE_SELECT_10; 308 cdb[0] = MODE_SELECT_10;
343 rq->cmd[7] = data_size >> 8; 309 cdb[7] = data_size >> 8;
344 rq->cmd[8] = data_size & 0xff; 310 cdb[8] = data_size & 0xff;
345 } else { 311 } else {
346 rq->cmd[0] = MODE_SELECT; 312 cdb[0] = MODE_SELECT;
347 rq->cmd[4] = data_size; 313 cdb[4] = data_size;
348 } 314 }
349 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
350
351 rq->sense = h->sense;
352 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
353 rq->sense_len = 0;
354 315
355 return rq; 316 return data_size;
356} 317}
357 318
358static void release_controller(struct kref *kref) 319static void release_controller(struct kref *kref)
@@ -400,46 +361,14 @@ static struct rdac_controller *get_controller(int index, char *array_name,
400 return ctlr; 361 return ctlr;
401} 362}
402 363
403static int submit_inquiry(struct scsi_device *sdev, int page_code,
404 unsigned int len, struct rdac_dh_data *h)
405{
406 struct request *rq;
407 struct request_queue *q = sdev->request_queue;
408 int err = SCSI_DH_RES_TEMP_UNAVAIL;
409
410 rq = get_rdac_req(sdev, &h->inq, len, READ);
411 if (!rq)
412 goto done;
413
414 /* Prepare the command. */
415 rq->cmd[0] = INQUIRY;
416 rq->cmd[1] = 1;
417 rq->cmd[2] = page_code;
418 rq->cmd[4] = len;
419 rq->cmd_len = COMMAND_SIZE(INQUIRY);
420
421 rq->sense = h->sense;
422 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
423 rq->sense_len = 0;
424
425 err = blk_execute_rq(q, NULL, rq, 1);
426 if (err == -EIO)
427 err = SCSI_DH_IO;
428
429 blk_put_request(rq);
430done:
431 return err;
432}
433
434static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h, 364static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
435 char *array_name, u8 *array_id) 365 char *array_name, u8 *array_id)
436{ 366{
437 int err, i; 367 int err = SCSI_DH_IO, i;
438 struct c8_inquiry *inqp; 368 struct c8_inquiry *inqp = &h->inq.c8;
439 369
440 err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h); 370 if (!scsi_get_vpd_page(sdev, 0xC8, (unsigned char *)inqp,
441 if (err == SCSI_DH_OK) { 371 sizeof(struct c8_inquiry))) {
442 inqp = &h->inq.c8;
443 if (inqp->page_code != 0xc8) 372 if (inqp->page_code != 0xc8)
444 return SCSI_DH_NOSYS; 373 return SCSI_DH_NOSYS;
445 if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' || 374 if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
@@ -453,20 +382,20 @@ static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
453 *(array_name+ARRAY_LABEL_LEN-1) = '\0'; 382 *(array_name+ARRAY_LABEL_LEN-1) = '\0';
454 memset(array_id, 0, UNIQUE_ID_LEN); 383 memset(array_id, 0, UNIQUE_ID_LEN);
455 memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len); 384 memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
385 err = SCSI_DH_OK;
456 } 386 }
457 return err; 387 return err;
458} 388}
459 389
460static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h) 390static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
461{ 391{
462 int err, access_state; 392 int err = SCSI_DH_IO, access_state;
463 struct rdac_dh_data *tmp; 393 struct rdac_dh_data *tmp;
464 struct c9_inquiry *inqp; 394 struct c9_inquiry *inqp = &h->inq.c9;
465 395
466 h->state = RDAC_STATE_ACTIVE; 396 h->state = RDAC_STATE_ACTIVE;
467 err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h); 397 if (!scsi_get_vpd_page(sdev, 0xC9, (unsigned char *)inqp,
468 if (err == SCSI_DH_OK) { 398 sizeof(struct c9_inquiry))) {
469 inqp = &h->inq.c9;
470 /* detect the operating mode */ 399 /* detect the operating mode */
471 if ((inqp->avte_cvp >> 5) & 0x1) 400 if ((inqp->avte_cvp >> 5) & 0x1)
472 h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */ 401 h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
@@ -501,6 +430,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
501 tmp->sdev->access_state = access_state; 430 tmp->sdev->access_state = access_state;
502 } 431 }
503 rcu_read_unlock(); 432 rcu_read_unlock();
433 err = SCSI_DH_OK;
504 } 434 }
505 435
506 return err; 436 return err;
@@ -509,12 +439,11 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
509static int initialize_controller(struct scsi_device *sdev, 439static int initialize_controller(struct scsi_device *sdev,
510 struct rdac_dh_data *h, char *array_name, u8 *array_id) 440 struct rdac_dh_data *h, char *array_name, u8 *array_id)
511{ 441{
512 int err, index; 442 int err = SCSI_DH_IO, index;
513 struct c4_inquiry *inqp; 443 struct c4_inquiry *inqp = &h->inq.c4;
514 444
515 err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h); 445 if (!scsi_get_vpd_page(sdev, 0xC4, (unsigned char *)inqp,
516 if (err == SCSI_DH_OK) { 446 sizeof(struct c4_inquiry))) {
517 inqp = &h->inq.c4;
518 /* get the controller index */ 447 /* get the controller index */
519 if (inqp->slot_id[1] == 0x31) 448 if (inqp->slot_id[1] == 0x31)
520 index = 0; 449 index = 0;
@@ -530,18 +459,18 @@ static int initialize_controller(struct scsi_device *sdev,
530 h->sdev = sdev; 459 h->sdev = sdev;
531 } 460 }
532 spin_unlock(&list_lock); 461 spin_unlock(&list_lock);
462 err = SCSI_DH_OK;
533 } 463 }
534 return err; 464 return err;
535} 465}
536 466
537static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h) 467static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
538{ 468{
539 int err; 469 int err = SCSI_DH_IO;
540 struct c2_inquiry *inqp; 470 struct c2_inquiry *inqp = &h->inq.c2;
541 471
542 err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h); 472 if (!scsi_get_vpd_page(sdev, 0xC2, (unsigned char *)inqp,
543 if (err == SCSI_DH_OK) { 473 sizeof(struct c2_inquiry))) {
544 inqp = &h->inq.c2;
545 /* 474 /*
546 * If more than MODE6_MAX_LUN luns are supported, use 475 * If more than MODE6_MAX_LUN luns are supported, use
547 * mode select 10 476 * mode select 10
@@ -550,36 +479,35 @@ static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
550 h->ctlr->use_ms10 = 1; 479 h->ctlr->use_ms10 = 1;
551 else 480 else
552 h->ctlr->use_ms10 = 0; 481 h->ctlr->use_ms10 = 0;
482 err = SCSI_DH_OK;
553 } 483 }
554 return err; 484 return err;
555} 485}
556 486
557static int mode_select_handle_sense(struct scsi_device *sdev, 487static int mode_select_handle_sense(struct scsi_device *sdev,
558 unsigned char *sensebuf) 488 struct scsi_sense_hdr *sense_hdr)
559{ 489{
560 struct scsi_sense_hdr sense_hdr; 490 int err = SCSI_DH_IO;
561 int err = SCSI_DH_IO, ret;
562 struct rdac_dh_data *h = sdev->handler_data; 491 struct rdac_dh_data *h = sdev->handler_data;
563 492
564 ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr); 493 if (!scsi_sense_valid(sense_hdr))
565 if (!ret)
566 goto done; 494 goto done;
567 495
568 switch (sense_hdr.sense_key) { 496 switch (sense_hdr->sense_key) {
569 case NO_SENSE: 497 case NO_SENSE:
570 case ABORTED_COMMAND: 498 case ABORTED_COMMAND:
571 case UNIT_ATTENTION: 499 case UNIT_ATTENTION:
572 err = SCSI_DH_RETRY; 500 err = SCSI_DH_RETRY;
573 break; 501 break;
574 case NOT_READY: 502 case NOT_READY:
575 if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01) 503 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
576 /* LUN Not Ready and is in the Process of Becoming 504 /* LUN Not Ready and is in the Process of Becoming
577 * Ready 505 * Ready
578 */ 506 */
579 err = SCSI_DH_RETRY; 507 err = SCSI_DH_RETRY;
580 break; 508 break;
581 case ILLEGAL_REQUEST: 509 case ILLEGAL_REQUEST:
582 if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36) 510 if (sense_hdr->asc == 0x91 && sense_hdr->ascq == 0x36)
583 /* 511 /*
584 * Command Lock contention 512 * Command Lock contention
585 */ 513 */
@@ -592,7 +520,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
592 RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " 520 RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
593 "MODE_SELECT returned with sense %02x/%02x/%02x", 521 "MODE_SELECT returned with sense %02x/%02x/%02x",
594 (char *) h->ctlr->array_name, h->ctlr->index, 522 (char *) h->ctlr->array_name, h->ctlr->index,
595 sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq); 523 sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);
596 524
597done: 525done:
598 return err; 526 return err;
@@ -602,13 +530,16 @@ static void send_mode_select(struct work_struct *work)
602{ 530{
603 struct rdac_controller *ctlr = 531 struct rdac_controller *ctlr =
604 container_of(work, struct rdac_controller, ms_work); 532 container_of(work, struct rdac_controller, ms_work);
605 struct request *rq;
606 struct scsi_device *sdev = ctlr->ms_sdev; 533 struct scsi_device *sdev = ctlr->ms_sdev;
607 struct rdac_dh_data *h = sdev->handler_data; 534 struct rdac_dh_data *h = sdev->handler_data;
608 struct request_queue *q = sdev->request_queue; 535 int err = SCSI_DH_OK, retry_cnt = RDAC_RETRY_COUNT;
609 int err, retry_cnt = RDAC_RETRY_COUNT;
610 struct rdac_queue_data *tmp, *qdata; 536 struct rdac_queue_data *tmp, *qdata;
611 LIST_HEAD(list); 537 LIST_HEAD(list);
538 unsigned char cdb[COMMAND_SIZE(MODE_SELECT_10)];
539 struct scsi_sense_hdr sshdr;
540 unsigned int data_size;
541 u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
542 REQ_FAILFAST_DRIVER;
612 543
613 spin_lock(&ctlr->ms_lock); 544 spin_lock(&ctlr->ms_lock);
614 list_splice_init(&ctlr->ms_head, &list); 545 list_splice_init(&ctlr->ms_head, &list);
@@ -616,21 +547,19 @@ static void send_mode_select(struct work_struct *work)
616 ctlr->ms_sdev = NULL; 547 ctlr->ms_sdev = NULL;
617 spin_unlock(&ctlr->ms_lock); 548 spin_unlock(&ctlr->ms_lock);
618 549
619retry: 550 retry:
620 err = SCSI_DH_RES_TEMP_UNAVAIL; 551 data_size = rdac_failover_get(ctlr, &list, cdb);
621 rq = rdac_failover_get(sdev, h, &list);
622 if (!rq)
623 goto done;
624 552
625 RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " 553 RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
626 "%s MODE_SELECT command", 554 "%s MODE_SELECT command",
627 (char *) h->ctlr->array_name, h->ctlr->index, 555 (char *) h->ctlr->array_name, h->ctlr->index,
628 (retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying"); 556 (retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
629 557
630 err = blk_execute_rq(q, NULL, rq, 1); 558 if (scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
631 blk_put_request(rq); 559 &h->ctlr->mode_select, data_size, &sshdr,
632 if (err != SCSI_DH_OK) { 560 RDAC_TIMEOUT * HZ,
633 err = mode_select_handle_sense(sdev, h->sense); 561 RDAC_RETRIES, NULL, req_flags, 0)) {
562 err = mode_select_handle_sense(sdev, &sshdr);
634 if (err == SCSI_DH_RETRY && retry_cnt--) 563 if (err == SCSI_DH_RETRY && retry_cnt--)
635 goto retry; 564 goto retry;
636 if (err == SCSI_DH_IMM_RETRY) 565 if (err == SCSI_DH_IMM_RETRY)
@@ -643,7 +572,6 @@ retry:
643 (char *) h->ctlr->array_name, h->ctlr->index); 572 (char *) h->ctlr->array_name, h->ctlr->index);
644 } 573 }
645 574
646done:
647 list_for_each_entry_safe(qdata, tmp, &list, entry) { 575 list_for_each_entry_safe(qdata, tmp, &list, entry) {
648 list_del(&qdata->entry); 576 list_del(&qdata->entry);
649 if (err == SCSI_DH_OK) 577 if (err == SCSI_DH_OK)
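
In the RDAC handler the recurring change is that the vendor INQUIRY pages (0xC2, 0xC4, 0xC8, 0xC9) are fetched with scsi_get_vpd_page(), which returns 0 on success, instead of the removed submit_inquiry() passthrough. A sketch of the fetch-and-validate shape used by get_lun_info() and its siblings, reduced to one page:

    static int my_read_c8_page(struct scsi_device *sdev,
                               struct c8_inquiry *inqp)
    {
            if (scsi_get_vpd_page(sdev, 0xC8, (unsigned char *)inqp,
                                  sizeof(*inqp)))
                    return SCSI_DH_IO;      /* command failed */
            if (inqp->page_code != 0xc8)
                    return SCSI_DH_NOSYS;   /* wrong page came back */
            return SCSI_DH_OK;
    }
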
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 258a3f9a2519..831a1c8b9f89 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -213,6 +213,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
213 goto fail; 213 goto fail;
214 } 214 }
215 215
216 error = scsi_init_sense_cache(shost);
217 if (error)
218 goto fail;
219
216 if (shost_use_blk_mq(shost)) { 220 if (shost_use_blk_mq(shost)) {
217 error = scsi_mq_setup_tags(shost); 221 error = scsi_mq_setup_tags(shost);
218 if (error) 222 if (error)
@@ -226,19 +230,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
226 } 230 }
227 } 231 }
228 232
229 /*
230 * Note that we allocate the freelist even for the MQ case for now,
231 * as we need a command set aside for scsi_reset_provider. Having
232 * the full host freelist and one command available for that is a
233 * little heavy-handed, but avoids introducing a special allocator
234 * just for this. Eventually the structure of scsi_reset_provider
235 * will need a major overhaul.
236 */
237 error = scsi_setup_command_freelist(shost);
238 if (error)
239 goto out_destroy_tags;
240
241
242 if (!shost->shost_gendev.parent) 233 if (!shost->shost_gendev.parent)
243 shost->shost_gendev.parent = dev ? dev : &platform_bus; 234 shost->shost_gendev.parent = dev ? dev : &platform_bus;
244 if (!dma_dev) 235 if (!dma_dev)
@@ -258,7 +249,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
258 249
259 error = device_add(&shost->shost_gendev); 250 error = device_add(&shost->shost_gendev);
260 if (error) 251 if (error)
261 goto out_destroy_freelist; 252 goto out_disable_runtime_pm;
262 253
263 scsi_host_set_state(shost, SHOST_RUNNING); 254 scsi_host_set_state(shost, SHOST_RUNNING);
264 get_device(shost->shost_gendev.parent); 255 get_device(shost->shost_gendev.parent);
@@ -308,13 +299,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
308 device_del(&shost->shost_dev); 299 device_del(&shost->shost_dev);
309 out_del_gendev: 300 out_del_gendev:
310 device_del(&shost->shost_gendev); 301 device_del(&shost->shost_gendev);
311 out_destroy_freelist: 302 out_disable_runtime_pm:
312 device_disable_async_suspend(&shost->shost_gendev); 303 device_disable_async_suspend(&shost->shost_gendev);
313 pm_runtime_disable(&shost->shost_gendev); 304 pm_runtime_disable(&shost->shost_gendev);
314 pm_runtime_set_suspended(&shost->shost_gendev); 305 pm_runtime_set_suspended(&shost->shost_gendev);
315 pm_runtime_put_noidle(&shost->shost_gendev); 306 pm_runtime_put_noidle(&shost->shost_gendev);
316 scsi_destroy_command_freelist(shost);
317 out_destroy_tags:
318 if (shost_use_blk_mq(shost)) 307 if (shost_use_blk_mq(shost))
319 scsi_mq_destroy_tags(shost); 308 scsi_mq_destroy_tags(shost);
320 fail: 309 fail:
@@ -355,7 +344,6 @@ static void scsi_host_dev_release(struct device *dev)
355 kfree(dev_name(&shost->shost_dev)); 344 kfree(dev_name(&shost->shost_dev));
356 } 345 }
357 346
358 scsi_destroy_command_freelist(shost);
359 if (shost_use_blk_mq(shost)) { 347 if (shost_use_blk_mq(shost)) {
360 if (shost->tag_set.tags) 348 if (shost->tag_set.tags)
361 scsi_mq_destroy_tags(shost); 349 scsi_mq_destroy_tags(shost);
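
In scsi_add_host_with_dma() the per-host command freelist is gone; a single scsi_init_sense_cache() call before tag-set allocation takes its place, and the out_destroy_freelist unwind label becomes out_disable_runtime_pm. Condensed, with error paths abbreviated, the new ordering looks roughly like this (illustrative only, not the literal function body):

    error = scsi_init_sense_cache(shost);   /* shared sense kmem_cache */
    if (error)
            goto fail;
    if (shost_use_blk_mq(shost)) {
            error = scsi_mq_setup_tags(shost);
            if (error)
                    goto fail;
    }
    /* device_add() failures now unwind straight to runtime-PM
     * teardown; there is no freelist left to destroy. */
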
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index cbc0c5fe5a60..c611412a8de9 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5539,8 +5539,8 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5539 * Retries always go down the normal I/O path. 5539 * Retries always go down the normal I/O path.
5540 */ 5540 */
5541 if (likely(cmd->retries == 0 && 5541 if (likely(cmd->retries == 0 &&
5542 cmd->request->cmd_type == REQ_TYPE_FS && 5542 !blk_rq_is_passthrough(cmd->request) &&
5543 h->acciopath_status)) { 5543 h->acciopath_status)) {
5544 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr); 5544 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
5545 if (rc == 0) 5545 if (rc == 0)
5546 return 0; 5546 return 0;
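
The hpsa hunk (and the mpt3sas one further down) shows the idiom that replaces REQ_TYPE_FS tests across this series: a request is normal filesystem I/O exactly when blk_rq_is_passthrough() is false. In isolation, with a hypothetical fast-path helper:

    /* Old: cmd->request->cmd_type == REQ_TYPE_FS
     * New: negate the passthrough test. */
    if (!blk_rq_is_passthrough(cmd->request))
            my_try_accel_path(cmd);         /* hypothetical fast path */
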
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 919736a74ffa..aa76f36abe03 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -2095,7 +2095,7 @@ int fc_lport_bsg_request(struct bsg_job *job)
2095 2095
2096 bsg_reply->reply_payload_rcv_len = 0; 2096 bsg_reply->reply_payload_rcv_len = 0;
2097 if (rsp) 2097 if (rsp)
2098 rsp->resid_len = job->reply_payload.payload_len; 2098 scsi_req(rsp)->resid_len = job->reply_payload.payload_len;
2099 2099
2100 mutex_lock(&lport->lp_mutex); 2100 mutex_lock(&lport->lp_mutex);
2101 2101
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 022bb6e10d98..570b2cb2da43 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2174,12 +2174,12 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2174 bio_data(rsp->bio), blk_rq_bytes(rsp)); 2174 bio_data(rsp->bio), blk_rq_bytes(rsp));
2175 if (ret > 0) { 2175 if (ret > 0) {
2176 /* positive number is the untransferred residual */ 2176 /* positive number is the untransferred residual */
2177 rsp->resid_len = ret; 2177 scsi_req(rsp)->resid_len = ret;
2178 req->resid_len = 0; 2178 scsi_req(req)->resid_len = 0;
2179 ret = 0; 2179 ret = 0;
2180 } else if (ret == 0) { 2180 } else if (ret == 0) {
2181 rsp->resid_len = 0; 2181 scsi_req(rsp)->resid_len = 0;
2182 req->resid_len = 0; 2182 scsi_req(req)->resid_len = 0;
2183 } 2183 }
2184 2184
2185 return ret; 2185 return ret;
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index d24792575169..45cbbc44f4d7 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -274,15 +274,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
274 274
275 switch (req_data[1]) { 275 switch (req_data[1]) {
276 case SMP_REPORT_GENERAL: 276 case SMP_REPORT_GENERAL:
277 req->resid_len -= 8; 277 scsi_req(req)->resid_len -= 8;
278 rsp->resid_len -= 32; 278 scsi_req(rsp)->resid_len -= 32;
279 resp_data[2] = SMP_RESP_FUNC_ACC; 279 resp_data[2] = SMP_RESP_FUNC_ACC;
280 resp_data[9] = sas_ha->num_phys; 280 resp_data[9] = sas_ha->num_phys;
281 break; 281 break;
282 282
283 case SMP_REPORT_MANUF_INFO: 283 case SMP_REPORT_MANUF_INFO:
284 req->resid_len -= 8; 284 scsi_req(req)->resid_len -= 8;
285 rsp->resid_len -= 64; 285 scsi_req(rsp)->resid_len -= 64;
286 resp_data[2] = SMP_RESP_FUNC_ACC; 286 resp_data[2] = SMP_RESP_FUNC_ACC;
287 memcpy(resp_data + 12, shost->hostt->name, 287 memcpy(resp_data + 12, shost->hostt->name,
288 SAS_EXPANDER_VENDOR_ID_LEN); 288 SAS_EXPANDER_VENDOR_ID_LEN);
@@ -295,13 +295,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
295 break; 295 break;
296 296
297 case SMP_DISCOVER: 297 case SMP_DISCOVER:
298 req->resid_len -= 16; 298 scsi_req(req)->resid_len -= 16;
299 if ((int)req->resid_len < 0) { 299 if ((int)scsi_req(req)->resid_len < 0) {
300 req->resid_len = 0; 300 scsi_req(req)->resid_len = 0;
301 error = -EINVAL; 301 error = -EINVAL;
302 goto out; 302 goto out;
303 } 303 }
304 rsp->resid_len -= 56; 304 scsi_req(rsp)->resid_len -= 56;
305 sas_host_smp_discover(sas_ha, resp_data, req_data[9]); 305 sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
306 break; 306 break;
307 307
@@ -311,13 +311,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
311 break; 311 break;
312 312
313 case SMP_REPORT_PHY_SATA: 313 case SMP_REPORT_PHY_SATA:
314 req->resid_len -= 16; 314 scsi_req(req)->resid_len -= 16;
315 if ((int)req->resid_len < 0) { 315 if ((int)scsi_req(req)->resid_len < 0) {
316 req->resid_len = 0; 316 scsi_req(req)->resid_len = 0;
317 error = -EINVAL; 317 error = -EINVAL;
318 goto out; 318 goto out;
319 } 319 }
320 rsp->resid_len -= 60; 320 scsi_req(rsp)->resid_len -= 60;
321 sas_report_phy_sata(sas_ha, resp_data, req_data[9]); 321 sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
322 break; 322 break;
323 323
@@ -331,15 +331,15 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
331 int to_write = req_data[4]; 331 int to_write = req_data[4];
332 332
333 if (blk_rq_bytes(req) < base_frame_size + to_write * 4 || 333 if (blk_rq_bytes(req) < base_frame_size + to_write * 4 ||
334 req->resid_len < base_frame_size + to_write * 4) { 334 scsi_req(req)->resid_len < base_frame_size + to_write * 4) {
335 resp_data[2] = SMP_RESP_INV_FRM_LEN; 335 resp_data[2] = SMP_RESP_INV_FRM_LEN;
336 break; 336 break;
337 } 337 }
338 338
339 to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2], 339 to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2],
340 req_data[3], to_write, &req_data[8]); 340 req_data[3], to_write, &req_data[8]);
341 req->resid_len -= base_frame_size + to_write * 4; 341 scsi_req(req)->resid_len -= base_frame_size + to_write * 4;
342 rsp->resid_len -= 8; 342 scsi_req(rsp)->resid_len -= 8;
343 break; 343 break;
344 } 344 }
345 345
@@ -348,13 +348,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
348 break; 348 break;
349 349
350 case SMP_PHY_CONTROL: 350 case SMP_PHY_CONTROL:
351 req->resid_len -= 44; 351 scsi_req(req)->resid_len -= 44;
352 if ((int)req->resid_len < 0) { 352 if ((int)scsi_req(req)->resid_len < 0) {
353 req->resid_len = 0; 353 scsi_req(req)->resid_len = 0;
354 error = -EINVAL; 354 error = -EINVAL;
355 goto out; 355 goto out;
356 } 356 }
357 rsp->resid_len -= 8; 357 scsi_req(rsp)->resid_len -= 8;
358 sas_phy_control(sas_ha, req_data[9], req_data[10], 358 sas_phy_control(sas_ha, req_data[9], req_data[10],
359 req_data[32] >> 4, req_data[33] >> 4, 359 req_data[32] >> 4, req_data[33] >> 4,
360 resp_data); 360 resp_data);
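
In the libsas SMP handlers the residual counters move from struct request into struct scsi_request, so each req->resid_len access becomes scsi_req(req)->resid_len. A sketch of the accessor together with the existing underflow guard, assuming a consumed-byte count named used:

    static int my_consume_req_bytes(struct request *req, unsigned int used)
    {
            struct scsi_request *srq = scsi_req(req);

            srq->resid_len -= used;
            if ((int)srq->resid_len < 0) {  /* frame shorter than claimed */
                    srq->resid_len = 0;
                    return -EINVAL;
            }
            return 0;
    }
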
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 0b5b423b1db0..c6d550551504 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4723,7 +4723,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4723 * then scsi-ml does not need to handle this misbehavior. 4723 * then scsi-ml does not need to handle this misbehavior.
4724 */ 4724 */
4725 sector_sz = scmd->device->sector_size; 4725 sector_sz = scmd->device->sector_size;
4726 if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz && 4726 if (unlikely(!blk_rq_is_passthrough(scmd->request) && sector_sz &&
4727 xfer_cnt % sector_sz)) { 4727 xfer_cnt % sector_sz)) {
4728 sdev_printk(KERN_INFO, scmd->device, 4728 sdev_printk(KERN_INFO, scmd->device,
4729 "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n", 4729 "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 7f1d5785bc30..e7a7a704a315 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -2057,10 +2057,10 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2057 ioc->name, __func__, 2057 ioc->name, __func__,
2058 le16_to_cpu(mpi_reply->ResponseDataLength))); 2058 le16_to_cpu(mpi_reply->ResponseDataLength)));
2059 2059
2060 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply)); 2060 memcpy(scsi_req(req)->sense, mpi_reply, sizeof(*mpi_reply));
2061 req->sense_len = sizeof(*mpi_reply); 2061 scsi_req(req)->sense_len = sizeof(*mpi_reply);
2062 req->resid_len = 0; 2062 scsi_req(req)->resid_len = 0;
2063 rsp->resid_len -= 2063 scsi_req(rsp)->resid_len -=
2064 le16_to_cpu(mpi_reply->ResponseDataLength); 2064 le16_to_cpu(mpi_reply->ResponseDataLength);
2065 2065
2066 /* check if the resp needs to be copied from the allocated 2066 /* check if the resp needs to be copied from the allocated
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index ef99f62831fb..30b905080c61 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -48,6 +48,7 @@
48#include <scsi/osd_sense.h> 48#include <scsi/osd_sense.h>
49 49
50#include <scsi/scsi_device.h> 50#include <scsi/scsi_device.h>
51#include <scsi/scsi_request.h>
51 52
52#include "osd_debug.h" 53#include "osd_debug.h"
53 54
@@ -477,11 +478,13 @@ static void _set_error_resid(struct osd_request *or, struct request *req,
477{ 478{
478 or->async_error = error; 479 or->async_error = error;
479 or->req_errors = req->errors ? : error; 480 or->req_errors = req->errors ? : error;
480 or->sense_len = req->sense_len; 481 or->sense_len = scsi_req(req)->sense_len;
482 if (or->sense_len)
483 memcpy(or->sense, scsi_req(req)->sense, or->sense_len);
481 if (or->out.req) 484 if (or->out.req)
482 or->out.residual = or->out.req->resid_len; 485 or->out.residual = scsi_req(or->out.req)->resid_len;
483 if (or->in.req) 486 if (or->in.req)
484 or->in.residual = or->in.req->resid_len; 487 or->in.residual = scsi_req(or->in.req)->resid_len;
485} 488}
486 489
487int osd_execute_request(struct osd_request *or) 490int osd_execute_request(struct osd_request *or)
@@ -1562,10 +1565,11 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
1562 struct bio *bio = oii->bio; 1565 struct bio *bio = oii->bio;
1563 int ret; 1566 int ret;
1564 1567
1565 req = blk_get_request(q, has_write ? WRITE : READ, flags); 1568 req = blk_get_request(q, has_write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
1569 flags);
1566 if (IS_ERR(req)) 1570 if (IS_ERR(req))
1567 return req; 1571 return req;
1568 blk_rq_set_block_pc(req); 1572 scsi_req_init(req);
1569 1573
1570 for_each_bio(bio) { 1574 for_each_bio(bio) {
1571 struct bio *bounce_bio = bio; 1575 struct bio *bounce_bio = bio;
@@ -1599,8 +1603,6 @@ static int _init_blk_request(struct osd_request *or,
1599 1603
1600 req->timeout = or->timeout; 1604 req->timeout = or->timeout;
1601 req->retries = or->retries; 1605 req->retries = or->retries;
1602 req->sense = or->sense;
1603 req->sense_len = 0;
1604 1606
1605 if (has_out) { 1607 if (has_out) {
1606 or->out.req = req; 1608 or->out.req = req;
@@ -1612,7 +1614,7 @@ static int _init_blk_request(struct osd_request *or,
1612 ret = PTR_ERR(req); 1614 ret = PTR_ERR(req);
1613 goto out; 1615 goto out;
1614 } 1616 }
1615 blk_rq_set_block_pc(req); 1617 scsi_req_init(req);
1616 or->in.req = or->request->next_rq = req; 1618 or->in.req = or->request->next_rq = req;
1617 } 1619 }
1618 } else if (has_in) 1620 } else if (has_in)
@@ -1699,8 +1701,8 @@ int osd_finalize_request(struct osd_request *or,
1699 1701
1700 osd_sec_sign_cdb(&or->cdb, cap_key); 1702 osd_sec_sign_cdb(&or->cdb, cap_key);
1701 1703
1702 or->request->cmd = or->cdb.buff; 1704 scsi_req(or->request)->cmd = or->cdb.buff;
1703 or->request->cmd_len = _osd_req_cdb_len(or); 1705 scsi_req(or->request)->cmd_len = _osd_req_cdb_len(or);
1704 1706
1705 return 0; 1707 return 0;
1706} 1708}
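
Since callers can no longer point req->sense at their own buffer, osd_initiator harvests sense bytes from scsi_req(req)->sense when the request completes instead of pre-wiring or->sense into it. A generic completion-side copy under that assumption, with a hypothetical my_cmd holding the driver's own sense storage:

    static void my_harvest_sense(struct request *req, struct my_cmd *mc)
    {
            struct scsi_request *srq = scsi_req(req);

            mc->sense_len = srq->sense_len;
            if (mc->sense_len)
                    memcpy(mc->sense, srq->sense,
                           min_t(unsigned int, mc->sense_len,
                                 SCSI_SENSE_BUFFERSIZE));
    }
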
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index e8196c55b633..451de6c5e3c9 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -322,6 +322,7 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt)
322/* Wakeup from interrupt */ 322/* Wakeup from interrupt */
323static void osst_end_async(struct request *req, int update) 323static void osst_end_async(struct request *req, int update)
324{ 324{
325 struct scsi_request *rq = scsi_req(req);
325 struct osst_request *SRpnt = req->end_io_data; 326 struct osst_request *SRpnt = req->end_io_data;
326 struct osst_tape *STp = SRpnt->stp; 327 struct osst_tape *STp = SRpnt->stp;
327 struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data; 328 struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
@@ -330,6 +331,8 @@ static void osst_end_async(struct request *req, int update)
330#if DEBUG 331#if DEBUG
331 STp->write_pending = 0; 332 STp->write_pending = 0;
332#endif 333#endif
334 if (rq->sense_len)
335 memcpy(SRpnt->sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
333 if (SRpnt->waiting) 336 if (SRpnt->waiting)
334 complete(SRpnt->waiting); 337 complete(SRpnt->waiting);
335 338
@@ -357,17 +360,20 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
357 int use_sg, int timeout, int retries) 360 int use_sg, int timeout, int retries)
358{ 361{
359 struct request *req; 362 struct request *req;
363 struct scsi_request *rq;
360 struct page **pages = NULL; 364 struct page **pages = NULL;
361 struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data; 365 struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
362 366
363 int err = 0; 367 int err = 0;
364 int write = (data_direction == DMA_TO_DEVICE); 368 int write = (data_direction == DMA_TO_DEVICE);
365 369
366 req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL); 370 req = blk_get_request(SRpnt->stp->device->request_queue,
371 write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
367 if (IS_ERR(req)) 372 if (IS_ERR(req))
368 return DRIVER_ERROR << 24; 373 return DRIVER_ERROR << 24;
369 374
370 blk_rq_set_block_pc(req); 375 rq = scsi_req(req);
376 scsi_req_init(req);
371 req->rq_flags |= RQF_QUIET; 377 req->rq_flags |= RQF_QUIET;
372 378
373 SRpnt->bio = NULL; 379 SRpnt->bio = NULL;
@@ -404,11 +410,9 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
404 goto free_req; 410 goto free_req;
405 } 411 }
406 412
407 req->cmd_len = cmd_len; 413 rq->cmd_len = cmd_len;
408 memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */ 414 memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
409 memcpy(req->cmd, cmd, req->cmd_len); 415 memcpy(rq->cmd, cmd, rq->cmd_len);
410 req->sense = SRpnt->sense;
411 req->sense_len = 0;
412 req->timeout = timeout; 416 req->timeout = timeout;
413 req->retries = retries; 417 req->retries = retries;
414 req->end_io_data = SRpnt; 418 req->end_io_data = SRpnt;
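
osst_execute() shows the full allocation-side recipe for the new passthrough model: the data direction is encoded in the request opcode (REQ_OP_SCSI_OUT vs REQ_OP_SCSI_IN), scsi_req_init() replaces blk_rq_set_block_pc(), and the CDB lives in scsi_req(req)->cmd. Boiled down to a helper with hypothetical parameters:

    static struct request *my_prep_pt_req(struct request_queue *q, bool write,
                                          const unsigned char *cdb, int cdb_len,
                                          int timeout, int retries)
    {
            struct request *req;
            struct scsi_request *rq;

            req = blk_get_request(q, write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
                                  GFP_KERNEL);
            if (IS_ERR(req))
                    return req;

            rq = scsi_req(req);
            scsi_req_init(req);                 /* reset cmd/sense fields */
            rq->cmd_len = cdb_len;
            memset(rq->cmd, 0, BLK_MAX_CDB);    /* no garbage after the CDB */
            memcpy(rq->cmd, cdb, cdb_len);
            req->timeout = timeout;
            req->retries = retries;             /* still on struct request here */
            return req;
    }
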
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 1bf8061ff803..40ca75bbcb9d 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -921,7 +921,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
921 921
922 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + 922 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
923 sizeof(response) + sizeof(uint8_t); 923 sizeof(response) + sizeof(uint8_t);
924 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + 924 fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
925 sizeof(struct fc_bsg_reply); 925 sizeof(struct fc_bsg_reply);
926 memcpy(fw_sts_ptr, response, sizeof(response)); 926 memcpy(fw_sts_ptr, response, sizeof(response));
927 fw_sts_ptr += sizeof(response); 927 fw_sts_ptr += sizeof(response);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index a94b0b6bd030..9281bf47cbed 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1468,7 +1468,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1468 type, sp->handle, comp_status, fw_status[1], fw_status[2], 1468 type, sp->handle, comp_status, fw_status[1], fw_status[2],
1469 le16_to_cpu(((struct els_sts_entry_24xx *) 1469 le16_to_cpu(((struct els_sts_entry_24xx *)
1470 pkt)->total_byte_count)); 1470 pkt)->total_byte_count));
1471 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1471 fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) +
1472 sizeof(struct fc_bsg_reply);
1472 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1473 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1473 } 1474 }
1474 else { 1475 else {
@@ -1482,7 +1483,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1482 pkt)->error_subcode_2)); 1483 pkt)->error_subcode_2));
1483 res = DID_ERROR << 16; 1484 res = DID_ERROR << 16;
1484 bsg_reply->reply_payload_rcv_len = 0; 1485 bsg_reply->reply_payload_rcv_len = 0;
1485 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1486 fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) +
1487 sizeof(struct fc_bsg_reply);
1486 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1488 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1487 } 1489 }
1488 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, 1490 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 02f1de18bc2b..96c33e292eba 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -2244,7 +2244,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
2244 memcpy(fstatus.reserved_3, 2244 memcpy(fstatus.reserved_3,
2245 pkt->reserved_2, 20 * sizeof(uint8_t)); 2245 pkt->reserved_2, 20 * sizeof(uint8_t));
2246 2246
2247 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + 2247 fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
2248 sizeof(struct fc_bsg_reply); 2248 sizeof(struct fc_bsg_reply);
2249 2249
2250 memcpy(fw_sts_ptr, (uint8_t *)&fstatus, 2250 memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
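
The three qla2xxx hunks are one repeated edit: bsg jobs stash firmware status words just past the fc_bsg_reply inside the request's sense buffer, and that buffer is now reached through scsi_req(). The addressing in isolation, assuming a fw_status array as in the surrounding code:

    uint8_t *fw_sts_ptr = (uint8_t *)scsi_req(bsg_job->req)->sense +
                          sizeof(struct fc_bsg_reply);
    memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
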
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 75455d4dab68..7bfbcfa7af40 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -98,176 +98,6 @@ EXPORT_SYMBOL(scsi_sd_probe_domain);
98ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain); 98ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
99EXPORT_SYMBOL(scsi_sd_pm_domain); 99EXPORT_SYMBOL(scsi_sd_pm_domain);
100 100
101struct scsi_host_cmd_pool {
102 struct kmem_cache *cmd_slab;
103 struct kmem_cache *sense_slab;
104 unsigned int users;
105 char *cmd_name;
106 char *sense_name;
107 unsigned int slab_flags;
108 gfp_t gfp_mask;
109};
110
111static struct scsi_host_cmd_pool scsi_cmd_pool = {
112 .cmd_name = "scsi_cmd_cache",
113 .sense_name = "scsi_sense_cache",
114 .slab_flags = SLAB_HWCACHE_ALIGN,
115};
116
117static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
118 .cmd_name = "scsi_cmd_cache(DMA)",
119 .sense_name = "scsi_sense_cache(DMA)",
120 .slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
121 .gfp_mask = __GFP_DMA,
122};
123
124static DEFINE_MUTEX(host_cmd_pool_mutex);
125
126/**
127 * scsi_host_free_command - internal function to release a command
128 * @shost: host to free the command for
129 * @cmd: command to release
130 *
131 * the command must previously have been allocated by
132 * scsi_host_alloc_command.
133 */
134static void
135scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
136{
137 struct scsi_host_cmd_pool *pool = shost->cmd_pool;
138
139 if (cmd->prot_sdb)
140 kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
141 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
142 kmem_cache_free(pool->cmd_slab, cmd);
143}
144
145/**
146 * scsi_host_alloc_command - internal function to allocate command
147 * @shost: SCSI host whose pool to allocate from
148 * @gfp_mask: mask for the allocation
149 *
150 * Returns a fully allocated command with sense buffer and protection
151 * data buffer (where applicable) or NULL on failure
152 */
153static struct scsi_cmnd *
154scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
155{
156 struct scsi_host_cmd_pool *pool = shost->cmd_pool;
157 struct scsi_cmnd *cmd;
158
159 cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
160 if (!cmd)
161 goto fail;
162
163 cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
164 gfp_mask | pool->gfp_mask);
165 if (!cmd->sense_buffer)
166 goto fail_free_cmd;
167
168 if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
169 cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
170 if (!cmd->prot_sdb)
171 goto fail_free_sense;
172 }
173
174 return cmd;
175
176fail_free_sense:
177 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
178fail_free_cmd:
179 kmem_cache_free(pool->cmd_slab, cmd);
180fail:
181 return NULL;
182}
183
184/**
185 * __scsi_get_command - Allocate a struct scsi_cmnd
186 * @shost: host to transmit command
187 * @gfp_mask: allocation mask
188 *
189 * Description: allocate a struct scsi_cmd from host's slab, recycling from the
190 * host's free_list if necessary.
191 */
192static struct scsi_cmnd *
193__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
194{
195 struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);
196
197 if (unlikely(!cmd)) {
198 unsigned long flags;
199
200 spin_lock_irqsave(&shost->free_list_lock, flags);
201 if (likely(!list_empty(&shost->free_list))) {
202 cmd = list_entry(shost->free_list.next,
203 struct scsi_cmnd, list);
204 list_del_init(&cmd->list);
205 }
206 spin_unlock_irqrestore(&shost->free_list_lock, flags);
207
208 if (cmd) {
209 void *buf, *prot;
210
211 buf = cmd->sense_buffer;
212 prot = cmd->prot_sdb;
213
214 memset(cmd, 0, sizeof(*cmd));
215
216 cmd->sense_buffer = buf;
217 cmd->prot_sdb = prot;
218 }
219 }
220
221 return cmd;
222}
223
224/**
225 * scsi_get_command - Allocate and setup a scsi command block
226 * @dev: parent scsi device
227 * @gfp_mask: allocator flags
228 *
229 * Returns: The allocated scsi command structure.
230 */
231struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
232{
233 struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
234 unsigned long flags;
235
236 if (unlikely(cmd == NULL))
237 return NULL;
238
239 cmd->device = dev;
240 INIT_LIST_HEAD(&cmd->list);
241 INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
242 spin_lock_irqsave(&dev->list_lock, flags);
243 list_add_tail(&cmd->list, &dev->cmd_list);
244 spin_unlock_irqrestore(&dev->list_lock, flags);
245 cmd->jiffies_at_alloc = jiffies;
246 return cmd;
247}
248
249/**
250 * __scsi_put_command - Free a struct scsi_cmnd
251 * @shost: dev->host
252 * @cmd: Command to free
253 */
254static void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
255{
256 unsigned long flags;
257
258 if (unlikely(list_empty(&shost->free_list))) {
259 spin_lock_irqsave(&shost->free_list_lock, flags);
260 if (list_empty(&shost->free_list)) {
261 list_add(&cmd->list, &shost->free_list);
262 cmd = NULL;
263 }
264 spin_unlock_irqrestore(&shost->free_list_lock, flags);
265 }
266
267 if (likely(cmd != NULL))
268 scsi_host_free_command(shost, cmd);
269}
270
271/** 101/**
272 * scsi_put_command - Free a scsi command block 102 * scsi_put_command - Free a scsi command block
273 * @cmd: command block to free 103 * @cmd: command block to free
@@ -287,188 +117,6 @@ void scsi_put_command(struct scsi_cmnd *cmd)
287 spin_unlock_irqrestore(&cmd->device->list_lock, flags); 117 spin_unlock_irqrestore(&cmd->device->list_lock, flags);
288 118
289 BUG_ON(delayed_work_pending(&cmd->abort_work)); 119 BUG_ON(delayed_work_pending(&cmd->abort_work));
290
291 __scsi_put_command(cmd->device->host, cmd);
292}
293
294static struct scsi_host_cmd_pool *
295scsi_find_host_cmd_pool(struct Scsi_Host *shost)
296{
297 if (shost->hostt->cmd_size)
298 return shost->hostt->cmd_pool;
299 if (shost->unchecked_isa_dma)
300 return &scsi_cmd_dma_pool;
301 return &scsi_cmd_pool;
302}
303
304static void
305scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
306{
307 kfree(pool->sense_name);
308 kfree(pool->cmd_name);
309 kfree(pool);
310}
311
312static struct scsi_host_cmd_pool *
313scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
314{
315 struct scsi_host_template *hostt = shost->hostt;
316 struct scsi_host_cmd_pool *pool;
317
318 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
319 if (!pool)
320 return NULL;
321
322 pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->proc_name);
323 pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->proc_name);
324 if (!pool->cmd_name || !pool->sense_name) {
325 scsi_free_host_cmd_pool(pool);
326 return NULL;
327 }
328
329 pool->slab_flags = SLAB_HWCACHE_ALIGN;
330 if (shost->unchecked_isa_dma) {
331 pool->slab_flags |= SLAB_CACHE_DMA;
332 pool->gfp_mask = __GFP_DMA;
333 }
334
335 if (hostt->cmd_size)
336 hostt->cmd_pool = pool;
337
338 return pool;
339}
340
341static struct scsi_host_cmd_pool *
342scsi_get_host_cmd_pool(struct Scsi_Host *shost)
343{
344 struct scsi_host_template *hostt = shost->hostt;
345 struct scsi_host_cmd_pool *retval = NULL, *pool;
346 size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;
347
348 /*
349 * Select a command slab for this host and create it if not
350 * yet existent.
351 */
352 mutex_lock(&host_cmd_pool_mutex);
353 pool = scsi_find_host_cmd_pool(shost);
354 if (!pool) {
355 pool = scsi_alloc_host_cmd_pool(shost);
356 if (!pool)
357 goto out;
358 }
359
360 if (!pool->users) {
361 pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
362 pool->slab_flags, NULL);
363 if (!pool->cmd_slab)
364 goto out_free_pool;
365
366 pool->sense_slab = kmem_cache_create(pool->sense_name,
367 SCSI_SENSE_BUFFERSIZE, 0,
368 pool->slab_flags, NULL);
369 if (!pool->sense_slab)
370 goto out_free_slab;
371 }
372
373 pool->users++;
374 retval = pool;
375out:
376 mutex_unlock(&host_cmd_pool_mutex);
377 return retval;
378
379out_free_slab:
380 kmem_cache_destroy(pool->cmd_slab);
381out_free_pool:
382 if (hostt->cmd_size) {
383 scsi_free_host_cmd_pool(pool);
384 hostt->cmd_pool = NULL;
385 }
386 goto out;
387}
388
389static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
390{
391 struct scsi_host_template *hostt = shost->hostt;
392 struct scsi_host_cmd_pool *pool;
393
394 mutex_lock(&host_cmd_pool_mutex);
395 pool = scsi_find_host_cmd_pool(shost);
396
397 /*
398 * This may happen if a driver has a mismatched get and put
399 * of the command pool; the driver should be implicated in
400 * the stack trace
401 */
402 BUG_ON(pool->users == 0);
403
404 if (!--pool->users) {
405 kmem_cache_destroy(pool->cmd_slab);
406 kmem_cache_destroy(pool->sense_slab);
407 if (hostt->cmd_size) {
408 scsi_free_host_cmd_pool(pool);
409 hostt->cmd_pool = NULL;
410 }
411 }
412 mutex_unlock(&host_cmd_pool_mutex);
413}
414
415/**
416 * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
417 * @shost: host to allocate the freelist for.
418 *
419 * Description: The command freelist protects against system-wide out of memory
420 * deadlock by preallocating one SCSI command structure for each host, so the
421 * system can always write to a swap file on a device associated with that host.
422 *
423 * Returns: Nothing.
424 */
425int scsi_setup_command_freelist(struct Scsi_Host *shost)
426{
427 const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
428 struct scsi_cmnd *cmd;
429
430 spin_lock_init(&shost->free_list_lock);
431 INIT_LIST_HEAD(&shost->free_list);
432
433 shost->cmd_pool = scsi_get_host_cmd_pool(shost);
434 if (!shost->cmd_pool)
435 return -ENOMEM;
436
437 /*
438 * Get one backup command for this host.
439 */
440 cmd = scsi_host_alloc_command(shost, gfp_mask);
441 if (!cmd) {
442 scsi_put_host_cmd_pool(shost);
443 shost->cmd_pool = NULL;
444 return -ENOMEM;
445 }
446 list_add(&cmd->list, &shost->free_list);
447 return 0;
448}
449
450/**
451 * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
452 * @shost: host whose freelist is going to be destroyed
453 */
454void scsi_destroy_command_freelist(struct Scsi_Host *shost)
455{
456 /*
457 * If cmd_pool is NULL the free list was not initialized, so
458 * do not attempt to release resources.
459 */
460 if (!shost->cmd_pool)
461 return;
462
463 while (!list_empty(&shost->free_list)) {
464 struct scsi_cmnd *cmd;
465
466 cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
467 list_del_init(&cmd->list);
468 scsi_host_free_command(shost, cmd);
469 }
470 shost->cmd_pool = NULL;
471 scsi_put_host_cmd_pool(shost);
472} 120}
473 121
474#ifdef CONFIG_SCSI_LOGGING 122#ifdef CONFIG_SCSI_LOGGING
@@ -590,7 +238,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
590 "(result %x)\n", cmd->result)); 238 "(result %x)\n", cmd->result));
591 239
592 good_bytes = scsi_bufflen(cmd); 240 good_bytes = scsi_bufflen(cmd);
593 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { 241 if (!blk_rq_is_passthrough(cmd->request)) {
594 int old_good_bytes = good_bytes; 242 int old_good_bytes = good_bytes;
595 drv = scsi_cmd_to_driver(cmd); 243 drv = scsi_cmd_to_driver(cmd);
596 if (drv->done) 244 if (drv->done)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 996e134d79fa..9e82fa5715bc 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1106,7 +1106,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
1106 1106
1107static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn) 1107static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
1108{ 1108{
1109 if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { 1109 if (!blk_rq_is_passthrough(scmd->request)) {
1110 struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd); 1110 struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
1111 if (sdrv->eh_action) 1111 if (sdrv->eh_action)
1112 rtn = sdrv->eh_action(scmd, rtn); 1112 rtn = sdrv->eh_action(scmd, rtn);
@@ -1746,7 +1746,7 @@ check_type:
1746 * the check condition was retryable. 1746 * the check condition was retryable.
1747 */ 1747 */
1748 if (scmd->request->cmd_flags & REQ_FAILFAST_DEV || 1748 if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
1749 scmd->request->cmd_type == REQ_TYPE_BLOCK_PC) 1749 blk_rq_is_passthrough(scmd->request))
1750 return 1; 1750 return 1;
1751 else 1751 else
1752 return 0; 1752 return 0;
@@ -1968,25 +1968,25 @@ static void eh_lock_door_done(struct request *req, int uptodate)
1968static void scsi_eh_lock_door(struct scsi_device *sdev) 1968static void scsi_eh_lock_door(struct scsi_device *sdev)
1969{ 1969{
1970 struct request *req; 1970 struct request *req;
1971 struct scsi_request *rq;
1971 1972
1972 /* 1973 /*
1973 * blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a 1974 * blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a
1974 * request becomes available 1975 * request becomes available
1975 */ 1976 */
1976 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL); 1977 req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, GFP_KERNEL);
1977 if (IS_ERR(req)) 1978 if (IS_ERR(req))
1978 return; 1979 return;
1980 rq = scsi_req(req);
1981 scsi_req_init(req);
1979 1982
1980 blk_rq_set_block_pc(req); 1983 rq->cmd[0] = ALLOW_MEDIUM_REMOVAL;
1981 1984 rq->cmd[1] = 0;
1982 req->cmd[0] = ALLOW_MEDIUM_REMOVAL; 1985 rq->cmd[2] = 0;
1983 req->cmd[1] = 0; 1986 rq->cmd[3] = 0;
1984 req->cmd[2] = 0; 1987 rq->cmd[4] = SCSI_REMOVAL_PREVENT;
1985 req->cmd[3] = 0; 1988 rq->cmd[5] = 0;
1986 req->cmd[4] = SCSI_REMOVAL_PREVENT; 1989 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
1987 req->cmd[5] = 0;
1988
1989 req->cmd_len = COMMAND_SIZE(req->cmd[0]);
1990 1990
1991 req->rq_flags |= RQF_QUIET; 1991 req->rq_flags |= RQF_QUIET;
1992 req->timeout = 10 * HZ; 1992 req->timeout = 10 * HZ;
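Note: the hunk above is the canonical shape of the scsi_request conversion — the CDB, its length, and the sense bookkeeping move out of struct request into the scsi_request reached via scsi_req(). A minimal sketch of the same pattern, assuming a caller like the one above (my_prevent_removal() is illustrative, not kernel API; error handling elided):

	static void my_prevent_removal(struct scsi_device *sdev)
	{
		struct request *req;
		struct scsi_request *rq;

		/* REQ_OP_SCSI_IN/OUT replace the old READ/WRITE direction arg */
		req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN,
				      GFP_KERNEL);
		if (IS_ERR(req))
			return;
		rq = scsi_req(req);
		scsi_req_init(req);

		/* the CDB is now filled in through scsi_req(), not req->cmd */
		rq->cmd[0] = ALLOW_MEDIUM_REMOVAL;
		rq->cmd[4] = SCSI_REMOVAL_PREVENT;
		rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

		req->timeout = 10 * HZ;
		blk_execute_rq(req->q, NULL, req, 1);
		blk_put_request(req);
	}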
@@ -2331,7 +2331,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
2331{ 2331{
2332 struct scsi_cmnd *scmd; 2332 struct scsi_cmnd *scmd;
2333 struct Scsi_Host *shost = dev->host; 2333 struct Scsi_Host *shost = dev->host;
2334 struct request req; 2334 struct request *rq;
2335 unsigned long flags; 2335 unsigned long flags;
2336 int error = 0, rtn, val; 2336 int error = 0, rtn, val;
2337 2337
@@ -2346,14 +2346,16 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
2346 return -EIO; 2346 return -EIO;
2347 2347
2348 error = -EIO; 2348 error = -EIO;
2349 scmd = scsi_get_command(dev, GFP_KERNEL); 2349 rq = kzalloc(sizeof(struct request) + sizeof(struct scsi_cmnd) +
2350 if (!scmd) 2350 shost->hostt->cmd_size, GFP_KERNEL);
2351 if (!rq)
2351 goto out_put_autopm_host; 2352 goto out_put_autopm_host;
2353 blk_rq_init(NULL, rq);
2352 2354
2353 blk_rq_init(NULL, &req); 2355 scmd = (struct scsi_cmnd *)(rq + 1);
2354 scmd->request = &req; 2356 scsi_init_command(dev, scmd);
2355 2357 scmd->request = rq;
2356 scmd->cmnd = req.cmd; 2358 scmd->cmnd = scsi_req(rq)->cmd;
2357 2359
2358 scmd->scsi_done = scsi_reset_provider_done_command; 2360 scmd->scsi_done = scsi_reset_provider_done_command;
2359 memset(&scmd->sdb, 0, sizeof(scmd->sdb)); 2361 memset(&scmd->sdb, 0, sizeof(scmd->sdb));
@@ -2413,6 +2415,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
2413 scsi_run_host_queues(shost); 2415 scsi_run_host_queues(shost);
2414 2416
2415 scsi_put_command(scmd); 2417 scsi_put_command(scmd);
2418 kfree(rq);
2416 2419
2417out_put_autopm_host: 2420out_put_autopm_host:
2418 scsi_autopm_put_host(shost); 2421 scsi_autopm_put_host(shost);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e9e1e141af9c..90f65c8f487a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -37,8 +37,59 @@
37#include "scsi_priv.h" 37#include "scsi_priv.h"
38#include "scsi_logging.h" 38#include "scsi_logging.h"
39 39
40static struct kmem_cache *scsi_sdb_cache;
41static struct kmem_cache *scsi_sense_cache;
42static struct kmem_cache *scsi_sense_isadma_cache;
43static DEFINE_MUTEX(scsi_sense_cache_mutex);
40 44
41struct kmem_cache *scsi_sdb_cache; 45static inline struct kmem_cache *
46scsi_select_sense_cache(struct Scsi_Host *shost)
47{
48 return shost->unchecked_isa_dma ?
49 scsi_sense_isadma_cache : scsi_sense_cache;
50}
51
52static void scsi_free_sense_buffer(struct Scsi_Host *shost,
53 unsigned char *sense_buffer)
54{
55 kmem_cache_free(scsi_select_sense_cache(shost), sense_buffer);
56}
57
58static unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost,
59 gfp_t gfp_mask, int numa_node)
60{
61 return kmem_cache_alloc_node(scsi_select_sense_cache(shost), gfp_mask,
62 numa_node);
63}
64
65int scsi_init_sense_cache(struct Scsi_Host *shost)
66{
67 struct kmem_cache *cache;
68 int ret = 0;
69
70 cache = scsi_select_sense_cache(shost);
71 if (cache)
72 return 0;
73
74 mutex_lock(&scsi_sense_cache_mutex);
75 if (shost->unchecked_isa_dma) {
76 scsi_sense_isadma_cache =
77 kmem_cache_create("scsi_sense_cache(DMA)",
78 SCSI_SENSE_BUFFERSIZE, 0,
79 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
80 if (!scsi_sense_isadma_cache)
81 ret = -ENOMEM;
82 } else {
83 scsi_sense_cache =
84 kmem_cache_create("scsi_sense_cache",
85 SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
86 if (!scsi_sense_cache)
87 ret = -ENOMEM;
88 }
89
90 mutex_unlock(&scsi_sense_cache_mutex);
91 return ret;
92}
42 93
43/* 94/*
44 * When to reinvoke queueing after a resource shortage. It's 3 msecs to 95 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
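Note: scsi_init_sense_cache() above keys the cache choice off shost->unchecked_isa_dma, giving ISA-DMA hosts a SLAB_CACHE_DMA-backed cache while all other hosts share one. A hedged sketch of the intended pairing, assuming a host setup path calls it before any per-command allocation (my_setup_host() is illustrative; scsi_alloc_sense_buffer() is the file-local helper above):

	static int my_setup_host(struct Scsi_Host *shost)
	{
		int ret;

		/* safe to call repeatedly: returns early once the variant exists */
		ret = scsi_init_sense_cache(shost);
		if (ret)
			return ret;

		/* per-command buffers then come from the matching cache, e.g.
		 * scsi_alloc_sense_buffer(shost, GFP_KERNEL, NUMA_NO_NODE) */
		return 0;
	}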
@@ -168,22 +219,23 @@ static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
168 req_flags_t rq_flags, int *resid) 219 req_flags_t rq_flags, int *resid)
169{ 220{
170 struct request *req; 221 struct request *req;
171 int write = (data_direction == DMA_TO_DEVICE); 222 struct scsi_request *rq;
172 int ret = DRIVER_ERROR << 24; 223 int ret = DRIVER_ERROR << 24;
173 224
174 req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM); 225 req = blk_get_request(sdev->request_queue,
226 data_direction == DMA_TO_DEVICE ?
227 REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
175 if (IS_ERR(req)) 228 if (IS_ERR(req))
176 return ret; 229 return ret;
177 blk_rq_set_block_pc(req); 230 rq = scsi_req(req);
231 scsi_req_init(req);
178 232
179 if (bufflen && blk_rq_map_kern(sdev->request_queue, req, 233 if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
180 buffer, bufflen, __GFP_RECLAIM)) 234 buffer, bufflen, __GFP_RECLAIM))
181 goto out; 235 goto out;
182 236
183 req->cmd_len = COMMAND_SIZE(cmd[0]); 237 rq->cmd_len = COMMAND_SIZE(cmd[0]);
184 memcpy(req->cmd, cmd, req->cmd_len); 238 memcpy(rq->cmd, cmd, rq->cmd_len);
185 req->sense = sense;
186 req->sense_len = 0;
187 req->retries = retries; 239 req->retries = retries;
188 req->timeout = timeout; 240 req->timeout = timeout;
189 req->cmd_flags |= flags; 241 req->cmd_flags |= flags;
@@ -200,11 +252,13 @@ static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
200 * is invalid. Prevent the garbage from being misinterpreted 252 * is invalid. Prevent the garbage from being misinterpreted
201 * and prevent security leaks by zeroing out the excess data. 253 * and prevent security leaks by zeroing out the excess data.
202 */ 254 */
203 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen)) 255 if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
204 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len); 256 memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);
205 257
206 if (resid) 258 if (resid)
207 *resid = req->resid_len; 259 *resid = rq->resid_len;
260 if (sense && rq->sense_len)
261 memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
208 ret = req->errors; 262 ret = req->errors;
209 out: 263 out:
210 blk_put_request(req); 264 blk_put_request(req);
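Note: after the conversion above, __scsi_execute() no longer points the request at the caller's sense buffer up front; it copies scsi_req(req)->sense back once the request completes. Callers keep their existing interface — a hedged sketch (my_test_unit_ready() is illustrative, not kernel API; needs <scsi/scsi_device.h>):

	static int my_test_unit_ready(struct scsi_device *sdev)
	{
		unsigned char cmd[6] = { TEST_UNIT_READY };
		struct scsi_sense_hdr sshdr;

		/* sense data, if any, arrives via the post-completion memcpy */
		return scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
					30 * HZ, 3, NULL);
	}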
@@ -529,7 +583,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
529 583
530static void scsi_uninit_cmd(struct scsi_cmnd *cmd) 584static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
531{ 585{
532 if (cmd->request->cmd_type == REQ_TYPE_FS) { 586 if (!blk_rq_is_passthrough(cmd->request)) {
533 struct scsi_driver *drv = scsi_cmd_to_driver(cmd); 587 struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
534 588
535 if (drv->uninit_command) 589 if (drv->uninit_command)
@@ -645,14 +699,13 @@ static bool scsi_end_request(struct request *req, int error,
645 699
646 if (bidi_bytes) 700 if (bidi_bytes)
647 scsi_release_bidi_buffers(cmd); 701 scsi_release_bidi_buffers(cmd);
702 scsi_release_buffers(cmd);
703 scsi_put_command(cmd);
648 704
649 spin_lock_irqsave(q->queue_lock, flags); 705 spin_lock_irqsave(q->queue_lock, flags);
650 blk_finish_request(req, error); 706 blk_finish_request(req, error);
651 spin_unlock_irqrestore(q->queue_lock, flags); 707 spin_unlock_irqrestore(q->queue_lock, flags);
652 708
653 scsi_release_buffers(cmd);
654
655 scsi_put_command(cmd);
656 scsi_run_queue(q); 709 scsi_run_queue(q);
657 } 710 }
658 711
@@ -754,18 +807,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
754 sense_deferred = scsi_sense_is_deferred(&sshdr); 807 sense_deferred = scsi_sense_is_deferred(&sshdr);
755 } 808 }
756 809
757 if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ 810 if (blk_rq_is_passthrough(req)) {
758 if (result) { 811 if (result) {
759 if (sense_valid && req->sense) { 812 if (sense_valid) {
760 /* 813 /*
761 * SG_IO wants current and deferred errors 814 * SG_IO wants current and deferred errors
762 */ 815 */
763 int len = 8 + cmd->sense_buffer[7]; 816 scsi_req(req)->sense_len =
764 817 min(8 + cmd->sense_buffer[7],
765 if (len > SCSI_SENSE_BUFFERSIZE) 818 SCSI_SENSE_BUFFERSIZE);
766 len = SCSI_SENSE_BUFFERSIZE;
767 memcpy(req->sense, cmd->sense_buffer, len);
768 req->sense_len = len;
769 } 819 }
770 if (!sense_deferred) 820 if (!sense_deferred)
771 error = __scsi_error_from_host_byte(cmd, result); 821 error = __scsi_error_from_host_byte(cmd, result);
@@ -775,14 +825,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
775 */ 825 */
776 req->errors = cmd->result; 826 req->errors = cmd->result;
777 827
778 req->resid_len = scsi_get_resid(cmd); 828 scsi_req(req)->resid_len = scsi_get_resid(cmd);
779 829
780 if (scsi_bidi_cmnd(cmd)) { 830 if (scsi_bidi_cmnd(cmd)) {
781 /* 831 /*
782 * Bidi commands must be completed as a whole, 832 * Bidi commands must be completed as a whole,
783 * both sides at once. 833 * both sides at once.
784 */ 834 */
785 req->next_rq->resid_len = scsi_in(cmd)->resid; 835 scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
786 if (scsi_end_request(req, 0, blk_rq_bytes(req), 836 if (scsi_end_request(req, 0, blk_rq_bytes(req),
787 blk_rq_bytes(req->next_rq))) 837 blk_rq_bytes(req->next_rq)))
788 BUG(); 838 BUG();
@@ -790,15 +840,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
790 } 840 }
791 } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) { 841 } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
792 /* 842 /*
793 * Certain non BLOCK_PC requests are commands that don't 843 * Flush commands do not transfer any data, and thus cannot use
794 * actually transfer anything (FLUSH), so cannot use
795 * good_bytes != blk_rq_bytes(req) as the signal for an error. 844 * good_bytes != blk_rq_bytes(req) as the signal for an error.
796 * This sets the error explicitly for the problem case. 845 * This sets the error explicitly for the problem case.
797 */ 846 */
798 error = __scsi_error_from_host_byte(cmd, result); 847 error = __scsi_error_from_host_byte(cmd, result);
799 } 848 }
800 849
801 /* no bidi support for !REQ_TYPE_BLOCK_PC yet */ 850 /* no bidi support for !blk_rq_is_passthrough yet */
802 BUG_ON(blk_bidi_rq(req)); 851 BUG_ON(blk_bidi_rq(req));
803 852
804 /* 853 /*
@@ -810,8 +859,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
810 blk_rq_sectors(req), good_bytes)); 859 blk_rq_sectors(req), good_bytes));
811 860
812 /* 861 /*
813 * Recovered errors need reporting, but they're always treated 862 * Recovered errors need reporting, but they're always treated as
814 * as success, so fiddle the result code here. For BLOCK_PC 863 * success, so fiddle the result code here. For passthrough requests
815 * we already took a copy of the original into rq->errors which 864 * we already took a copy of the original into rq->errors which
816 * is what gets returned to the user 865 * is what gets returned to the user
817 */ 866 */
@@ -825,7 +874,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
825 else if (!(req->rq_flags & RQF_QUIET)) 874 else if (!(req->rq_flags & RQF_QUIET))
826 scsi_print_sense(cmd); 875 scsi_print_sense(cmd);
827 result = 0; 876 result = 0;
828 /* BLOCK_PC may have set error */ 877 /* for passthrough requests, error may already be set */
829 error = 0; 878 error = 0;
830 } 879 }
831 880
@@ -1109,42 +1158,33 @@ err_exit:
1109} 1158}
1110EXPORT_SYMBOL(scsi_init_io); 1159EXPORT_SYMBOL(scsi_init_io);
1111 1160
1112static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, 1161void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
1113 struct request *req)
1114{ 1162{
1115 struct scsi_cmnd *cmd; 1163 void *buf = cmd->sense_buffer;
1116 1164 void *prot = cmd->prot_sdb;
1117 if (!req->special) { 1165 unsigned long flags;
1118 /* Bail if we can't get a reference to the device */
1119 if (!get_device(&sdev->sdev_gendev))
1120 return NULL;
1121
1122 cmd = scsi_get_command(sdev, GFP_ATOMIC);
1123 if (unlikely(!cmd)) {
1124 put_device(&sdev->sdev_gendev);
1125 return NULL;
1126 }
1127 req->special = cmd;
1128 } else {
1129 cmd = req->special;
1130 }
1131 1166
1132 /* pull a tag out of the request if we have one */ 1167 /* zero out the cmd, except for the embedded scsi_request */
1133 cmd->tag = req->tag; 1168 memset((char *)cmd + sizeof(cmd->req), 0,
1134 cmd->request = req; 1169 sizeof(*cmd) - sizeof(cmd->req));
1135 1170
1136 cmd->cmnd = req->cmd; 1171 cmd->device = dev;
1137 cmd->prot_op = SCSI_PROT_NORMAL; 1172 cmd->sense_buffer = buf;
1173 cmd->prot_sdb = prot;
1174 INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
1175 cmd->jiffies_at_alloc = jiffies;
1138 1176
1139 return cmd; 1177 spin_lock_irqsave(&dev->list_lock, flags);
1178 list_add_tail(&cmd->list, &dev->cmd_list);
1179 spin_unlock_irqrestore(&dev->list_lock, flags);
1140} 1180}
1141 1181
1142static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) 1182static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
1143{ 1183{
1144 struct scsi_cmnd *cmd = req->special; 1184 struct scsi_cmnd *cmd = req->special;
1145 1185
1146 /* 1186 /*
1147 * BLOCK_PC requests may transfer data, in which case they must have 1187 * Passthrough requests may transfer data, in which case they must have
1148 * a bio attached to them. Or they might contain a SCSI command 1188 * a bio attached to them. Or they might contain a SCSI command
1149 * that does not transfer data, in which case they may optionally 1189 * that does not transfer data, in which case they may optionally
1150 * submit a request without an attached bio. 1190 * submit a request without an attached bio.
@@ -1159,14 +1199,15 @@ static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1159 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1199 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1160 } 1200 }
1161 1201
1162 cmd->cmd_len = req->cmd_len; 1202 cmd->cmd_len = scsi_req(req)->cmd_len;
1203 cmd->cmnd = scsi_req(req)->cmd;
1163 cmd->transfersize = blk_rq_bytes(req); 1204 cmd->transfersize = blk_rq_bytes(req);
1164 cmd->allowed = req->retries; 1205 cmd->allowed = req->retries;
1165 return BLKPREP_OK; 1206 return BLKPREP_OK;
1166} 1207}
1167 1208
1168/* 1209/*
1169 * Setup a REQ_TYPE_FS command. These are simple requests from filesystems 1210 * Setup a normal block command. These are simple requests from filesystems
1170 * that still need to be translated to SCSI CDBs from the ULD. 1211 * that still need to be translated to SCSI CDBs from the ULD.
1171 */ 1212 */
1172static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) 1213static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
@@ -1179,6 +1220,7 @@ static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1179 return ret; 1220 return ret;
1180 } 1221 }
1181 1222
1223 cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
1182 memset(cmd->cmnd, 0, BLK_MAX_CDB); 1224 memset(cmd->cmnd, 0, BLK_MAX_CDB);
1183 return scsi_cmd_to_driver(cmd)->init_command(cmd); 1225 return scsi_cmd_to_driver(cmd)->init_command(cmd);
1184} 1226}
@@ -1194,14 +1236,10 @@ static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
1194 else 1236 else
1195 cmd->sc_data_direction = DMA_FROM_DEVICE; 1237 cmd->sc_data_direction = DMA_FROM_DEVICE;
1196 1238
1197 switch (req->cmd_type) { 1239 if (blk_rq_is_scsi(req))
1198 case REQ_TYPE_FS: 1240 return scsi_setup_scsi_cmnd(sdev, req);
1241 else
1199 return scsi_setup_fs_cmnd(sdev, req); 1242 return scsi_setup_fs_cmnd(sdev, req);
1200 case REQ_TYPE_BLOCK_PC:
1201 return scsi_setup_blk_pc_cmnd(sdev, req);
1202 default:
1203 return BLKPREP_KILL;
1204 }
1205} 1243}
1206 1244
1207static int 1245static int
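Note: scsi_init_command() above depends on struct scsi_cmnd embedding its scsi_request as the first member: the memset can then skip exactly sizeof(cmd->req) bytes and clear everything else while preserving the CDB/sense state across reuse. A sketch of that layout invariant (field names beyond req are abbreviated):

	/* assumed layout: the scsi_request must remain the first member */
	struct scsi_cmnd_sketch {
		struct scsi_request req;	/* preserved across reuse */
		struct scsi_device *device;	/* this and all later fields are zeroed */
		/* ... remaining fields ... */
	};

	static inline void reinit_cmd(struct scsi_cmnd *cmd)
	{
		memset((char *)cmd + sizeof(cmd->req), 0,
		       sizeof(*cmd) - sizeof(cmd->req));
	}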
@@ -1297,19 +1335,28 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1297static int scsi_prep_fn(struct request_queue *q, struct request *req) 1335static int scsi_prep_fn(struct request_queue *q, struct request *req)
1298{ 1336{
1299 struct scsi_device *sdev = q->queuedata; 1337 struct scsi_device *sdev = q->queuedata;
1300 struct scsi_cmnd *cmd; 1338 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1301 int ret; 1339 int ret;
1302 1340
1303 ret = scsi_prep_state_check(sdev, req); 1341 ret = scsi_prep_state_check(sdev, req);
1304 if (ret != BLKPREP_OK) 1342 if (ret != BLKPREP_OK)
1305 goto out; 1343 goto out;
1306 1344
1307 cmd = scsi_get_cmd_from_req(sdev, req); 1345 if (!req->special) {
1308 if (unlikely(!cmd)) { 1346 /* Bail if we can't get a reference to the device */
1309 ret = BLKPREP_DEFER; 1347 if (unlikely(!get_device(&sdev->sdev_gendev))) {
1310 goto out; 1348 ret = BLKPREP_DEFER;
1349 goto out;
1350 }
1351
1352 scsi_init_command(sdev, cmd);
1353 req->special = cmd;
1311 } 1354 }
1312 1355
1356 cmd->tag = req->tag;
1357 cmd->request = req;
1358 cmd->prot_op = SCSI_PROT_NORMAL;
1359
1313 ret = scsi_setup_cmnd(sdev, req); 1360 ret = scsi_setup_cmnd(sdev, req);
1314out: 1361out:
1315 return scsi_prep_return(q, req, ret); 1362 return scsi_prep_return(q, req, ret);
@@ -1826,7 +1873,9 @@ static int scsi_mq_prep_fn(struct request *req)
1826 unsigned char *sense_buf = cmd->sense_buffer; 1873 unsigned char *sense_buf = cmd->sense_buffer;
1827 struct scatterlist *sg; 1874 struct scatterlist *sg;
1828 1875
1829 memset(cmd, 0, sizeof(struct scsi_cmnd)); 1876 /* zero out the cmd, except for the embedded scsi_request */
1877 memset((char *)cmd + sizeof(cmd->req), 0,
1878 sizeof(*cmd) - sizeof(cmd->req));
1830 1879
1831 req->special = cmd; 1880 req->special = cmd;
1832 1881
@@ -1836,7 +1885,6 @@ static int scsi_mq_prep_fn(struct request *req)
1836 1885
1837 cmd->tag = req->tag; 1886 cmd->tag = req->tag;
1838 1887
1839 cmd->cmnd = req->cmd;
1840 cmd->prot_op = SCSI_PROT_NORMAL; 1888 cmd->prot_op = SCSI_PROT_NORMAL;
1841 1889
1842 INIT_LIST_HEAD(&cmd->list); 1890 INIT_LIST_HEAD(&cmd->list);
@@ -1911,7 +1959,6 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
1911 if (!scsi_host_queue_ready(q, shost, sdev)) 1959 if (!scsi_host_queue_ready(q, shost, sdev))
1912 goto out_dec_target_busy; 1960 goto out_dec_target_busy;
1913 1961
1914
1915 if (!(req->rq_flags & RQF_DONTPREP)) { 1962 if (!(req->rq_flags & RQF_DONTPREP)) {
1916 ret = prep_to_mq(scsi_mq_prep_fn(req)); 1963 ret = prep_to_mq(scsi_mq_prep_fn(req));
1917 if (ret != BLK_MQ_RQ_QUEUE_OK) 1964 if (ret != BLK_MQ_RQ_QUEUE_OK)
@@ -1981,21 +2028,24 @@ static int scsi_init_request(void *data, struct request *rq,
1981 unsigned int hctx_idx, unsigned int request_idx, 2028 unsigned int hctx_idx, unsigned int request_idx,
1982 unsigned int numa_node) 2029 unsigned int numa_node)
1983{ 2030{
2031 struct Scsi_Host *shost = data;
1984 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 2032 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1985 2033
1986 cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL, 2034 cmd->sense_buffer =
1987 numa_node); 2035 scsi_alloc_sense_buffer(shost, GFP_KERNEL, numa_node);
1988 if (!cmd->sense_buffer) 2036 if (!cmd->sense_buffer)
1989 return -ENOMEM; 2037 return -ENOMEM;
2038 cmd->req.sense = cmd->sense_buffer;
1990 return 0; 2039 return 0;
1991} 2040}
1992 2041
1993static void scsi_exit_request(void *data, struct request *rq, 2042static void scsi_exit_request(void *data, struct request *rq,
1994 unsigned int hctx_idx, unsigned int request_idx) 2043 unsigned int hctx_idx, unsigned int request_idx)
1995{ 2044{
2045 struct Scsi_Host *shost = data;
1996 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 2046 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1997 2047
1998 kfree(cmd->sense_buffer); 2048 scsi_free_sense_buffer(shost, cmd->sense_buffer);
1999} 2049}
2000 2050
2001static int scsi_map_queues(struct blk_mq_tag_set *set) 2051static int scsi_map_queues(struct blk_mq_tag_set *set)
@@ -2028,7 +2078,7 @@ static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
2028 return bounce_limit; 2078 return bounce_limit;
2029} 2079}
2030 2080
2031static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) 2081void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
2032{ 2082{
2033 struct device *dev = shost->dma_dev; 2083 struct device *dev = shost->dma_dev;
2034 2084
@@ -2063,28 +2113,64 @@ static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
2063 */ 2113 */
2064 blk_queue_dma_alignment(q, 0x03); 2114 blk_queue_dma_alignment(q, 0x03);
2065} 2115}
2116EXPORT_SYMBOL_GPL(__scsi_init_queue);
2066 2117
2067struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, 2118static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
2068 request_fn_proc *request_fn)
2069{ 2119{
2070 struct request_queue *q; 2120 struct Scsi_Host *shost = q->rq_alloc_data;
2121 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
2071 2122
2072 q = blk_init_queue(request_fn, NULL); 2123 memset(cmd, 0, sizeof(*cmd));
2073 if (!q) 2124
2074 return NULL; 2125 cmd->sense_buffer = scsi_alloc_sense_buffer(shost, gfp, NUMA_NO_NODE);
2075 __scsi_init_queue(shost, q); 2126 if (!cmd->sense_buffer)
2076 return q; 2127 goto fail;
2128 cmd->req.sense = cmd->sense_buffer;
2129
2130 if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
2131 cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp);
2132 if (!cmd->prot_sdb)
2133 goto fail_free_sense;
2134 }
2135
2136 return 0;
2137
2138fail_free_sense:
2139 scsi_free_sense_buffer(shost, cmd->sense_buffer);
2140fail:
2141 return -ENOMEM;
2142}
2143
2144static void scsi_exit_rq(struct request_queue *q, struct request *rq)
2145{
2146 struct Scsi_Host *shost = q->rq_alloc_data;
2147 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
2148
2149 if (cmd->prot_sdb)
2150 kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
2151 scsi_free_sense_buffer(shost, cmd->sense_buffer);
2077} 2152}
2078EXPORT_SYMBOL(__scsi_alloc_queue);
2079 2153
2080struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) 2154struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
2081{ 2155{
2156 struct Scsi_Host *shost = sdev->host;
2082 struct request_queue *q; 2157 struct request_queue *q;
2083 2158
2084 q = __scsi_alloc_queue(sdev->host, scsi_request_fn); 2159 q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
2085 if (!q) 2160 if (!q)
2086 return NULL; 2161 return NULL;
2162 q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
2163 q->rq_alloc_data = shost;
2164 q->request_fn = scsi_request_fn;
2165 q->init_rq_fn = scsi_init_rq;
2166 q->exit_rq_fn = scsi_exit_rq;
2167
2168 if (blk_init_allocated_queue(q) < 0) {
2169 blk_cleanup_queue(q);
2170 return NULL;
2171 }
2087 2172
2173 __scsi_init_queue(shost, q);
2088 blk_queue_prep_rq(q, scsi_prep_fn); 2174 blk_queue_prep_rq(q, scsi_prep_fn);
2089 blk_queue_unprep_rq(q, scsi_unprep_fn); 2175 blk_queue_unprep_rq(q, scsi_unprep_fn);
2090 blk_queue_softirq_done(q, scsi_softirq_done); 2176 blk_queue_softirq_done(q, scsi_softirq_done);
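Note: the rewritten scsi_alloc_queue() above puts the legacy (non-mq) path on the same per-request payload model as blk-mq: q->cmd_size reserves the scsi_cmnd behind each request, and the block core drives scsi_init_rq()/scsi_exit_rq() around every allocation. Condensed from the hunk, the contract looks like:

	q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	q->rq_alloc_data = shost;	/* handed back to the init/exit hooks */
	q->init_rq_fn = scsi_init_rq;	/* allocates sense + protection buffers */
	q->exit_rq_fn = scsi_exit_rq;	/* frees them again */

	/* either path can then locate the command behind the request: */
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);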
@@ -2208,6 +2294,8 @@ int __init scsi_init_queue(void)
2208 2294
2209void scsi_exit_queue(void) 2295void scsi_exit_queue(void)
2210{ 2296{
2297 kmem_cache_destroy(scsi_sense_cache);
2298 kmem_cache_destroy(scsi_sense_isadma_cache);
2211 kmem_cache_destroy(scsi_sdb_cache); 2299 kmem_cache_destroy(scsi_sdb_cache);
2212} 2300}
2213 2301
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 193636a59adf..99bfc985e190 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -30,8 +30,8 @@ extern void scsi_exit_hosts(void);
30 30
31/* scsi.c */ 31/* scsi.c */
32extern bool scsi_use_blk_mq; 32extern bool scsi_use_blk_mq;
33extern int scsi_setup_command_freelist(struct Scsi_Host *shost); 33int scsi_init_sense_cache(struct Scsi_Host *shost);
34extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); 34void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd);
35#ifdef CONFIG_SCSI_LOGGING 35#ifdef CONFIG_SCSI_LOGGING
36void scsi_log_send(struct scsi_cmnd *cmd); 36void scsi_log_send(struct scsi_cmnd *cmd);
37void scsi_log_completion(struct scsi_cmnd *cmd, int disposition); 37void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -96,7 +96,6 @@ extern void scsi_exit_queue(void);
96extern void scsi_evt_thread(struct work_struct *work); 96extern void scsi_evt_thread(struct work_struct *work);
97struct request_queue; 97struct request_queue;
98struct request; 98struct request;
99extern struct kmem_cache *scsi_sdb_cache;
100 99
101/* scsi_proc.c */ 100/* scsi_proc.c */
102#ifdef CONFIG_SCSI_PROC_FS 101#ifdef CONFIG_SCSI_PROC_FS
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 03577bde6ac5..13dcb9ba823c 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3765,7 +3765,6 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
3765 struct device *dev = &shost->shost_gendev; 3765 struct device *dev = &shost->shost_gendev;
3766 struct fc_internal *i = to_fc_internal(shost->transportt); 3766 struct fc_internal *i = to_fc_internal(shost->transportt);
3767 struct request_queue *q; 3767 struct request_queue *q;
3768 int err;
3769 char bsg_name[20]; 3768 char bsg_name[20];
3770 3769
3771 fc_host->rqst_q = NULL; 3770 fc_host->rqst_q = NULL;
@@ -3776,23 +3775,14 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
3776 snprintf(bsg_name, sizeof(bsg_name), 3775 snprintf(bsg_name, sizeof(bsg_name),
3777 "fc_host%d", shost->host_no); 3776 "fc_host%d", shost->host_no);
3778 3777
3779 q = __scsi_alloc_queue(shost, bsg_request_fn); 3778 q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, i->f->dd_bsg_size);
3780 if (!q) { 3779 if (IS_ERR(q)) {
3781 dev_err(dev,
3782 "fc_host%d: bsg interface failed to initialize - no request queue\n",
3783 shost->host_no);
3784 return -ENOMEM;
3785 }
3786
3787 err = bsg_setup_queue(dev, q, bsg_name, fc_bsg_dispatch,
3788 i->f->dd_bsg_size);
3789 if (err) {
3790 dev_err(dev, 3780 dev_err(dev,
3791 "fc_host%d: bsg interface failed to initialize - setup queue\n", 3781 "fc_host%d: bsg interface failed to initialize - setup queue\n",
3792 shost->host_no); 3782 shost->host_no);
3793 blk_cleanup_queue(q); 3783 return PTR_ERR(q);
3794 return err;
3795 } 3784 }
3785 __scsi_init_queue(shost, q);
3796 blk_queue_rq_timed_out(q, fc_bsg_job_timeout); 3786 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3797 blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT); 3787 blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
3798 fc_host->rqst_q = q; 3788 fc_host->rqst_q = q;
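Note: bsg_setup_queue() now allocates and returns the queue itself (an ERR_PTR on failure), so the transport classes stop pre-allocating via __scsi_alloc_queue() and only apply host limits afterwards. The resulting attach pattern, sketched with placeholder names ("my_bsg", my_dispatch):

	q = bsg_setup_queue(dev, "my_bsg", my_dispatch, 0 /* dd_job_size */);
	if (IS_ERR(q))
		return PTR_ERR(q);
	__scsi_init_queue(shost, q);	/* DMA limits, segment counts, etc. */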
@@ -3824,26 +3814,18 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
3824 struct device *dev = &rport->dev; 3814 struct device *dev = &rport->dev;
3825 struct fc_internal *i = to_fc_internal(shost->transportt); 3815 struct fc_internal *i = to_fc_internal(shost->transportt);
3826 struct request_queue *q; 3816 struct request_queue *q;
3827 int err;
3828 3817
3829 rport->rqst_q = NULL; 3818 rport->rqst_q = NULL;
3830 3819
3831 if (!i->f->bsg_request) 3820 if (!i->f->bsg_request)
3832 return -ENOTSUPP; 3821 return -ENOTSUPP;
3833 3822
3834 q = __scsi_alloc_queue(shost, bsg_request_fn); 3823 q = bsg_setup_queue(dev, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
3835 if (!q) { 3824 if (IS_ERR(q)) {
3836 dev_err(dev, "bsg interface failed to initialize - no request queue\n");
3837 return -ENOMEM;
3838 }
3839
3840 err = bsg_setup_queue(dev, q, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
3841 if (err) {
3842 dev_err(dev, "failed to setup bsg queue\n"); 3825 dev_err(dev, "failed to setup bsg queue\n");
3843 blk_cleanup_queue(q); 3826 return PTR_ERR(q);
3844 return err;
3845 } 3827 }
3846 3828 __scsi_init_queue(shost, q);
3847 blk_queue_prep_rq(q, fc_bsg_rport_prep); 3829 blk_queue_prep_rq(q, fc_bsg_rport_prep);
3848 blk_queue_rq_timed_out(q, fc_bsg_job_timeout); 3830 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3849 blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); 3831 blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 42bca619f854..568c9f26a561 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1537,24 +1537,18 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
1537 struct iscsi_internal *i = to_iscsi_internal(shost->transportt); 1537 struct iscsi_internal *i = to_iscsi_internal(shost->transportt);
1538 struct request_queue *q; 1538 struct request_queue *q;
1539 char bsg_name[20]; 1539 char bsg_name[20];
1540 int ret;
1541 1540
1542 if (!i->iscsi_transport->bsg_request) 1541 if (!i->iscsi_transport->bsg_request)
1543 return -ENOTSUPP; 1542 return -ENOTSUPP;
1544 1543
1545 snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no); 1544 snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
1546 1545 q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0);
1547 q = __scsi_alloc_queue(shost, bsg_request_fn); 1546 if (IS_ERR(q)) {
1548 if (!q)
1549 return -ENOMEM;
1550
1551 ret = bsg_setup_queue(dev, q, bsg_name, iscsi_bsg_host_dispatch, 0);
1552 if (ret) {
1553 shost_printk(KERN_ERR, shost, "bsg interface failed to " 1547 shost_printk(KERN_ERR, shost, "bsg interface failed to "
1554 "initialize - no request queue\n"); 1548 "initialize - no request queue\n");
1555 blk_cleanup_queue(q); 1549 return PTR_ERR(q);
1556 return ret;
1557 } 1550 }
1551 __scsi_init_queue(shost, q);
1558 1552
1559 ihost->bsg_q = q; 1553 ihost->bsg_q = q;
1560 return 0; 1554 return 0;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 60b651bfaa01..126a5ee00987 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -33,6 +33,7 @@
33#include <linux/bsg.h> 33#include <linux/bsg.h>
34 34
35#include <scsi/scsi.h> 35#include <scsi/scsi.h>
36#include <scsi/scsi_request.h>
36#include <scsi/scsi_device.h> 37#include <scsi/scsi_device.h>
37#include <scsi/scsi_host.h> 38#include <scsi/scsi_host.h>
38#include <scsi/scsi_transport.h> 39#include <scsi/scsi_transport.h>
@@ -177,6 +178,10 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
177 while ((req = blk_fetch_request(q)) != NULL) { 178 while ((req = blk_fetch_request(q)) != NULL) {
178 spin_unlock_irq(q->queue_lock); 179 spin_unlock_irq(q->queue_lock);
179 180
181 scsi_req(req)->resid_len = blk_rq_bytes(req);
182 if (req->next_rq)
183 scsi_req(req->next_rq)->resid_len =
184 blk_rq_bytes(req->next_rq);
180 handler = to_sas_internal(shost->transportt)->f->smp_handler; 185 handler = to_sas_internal(shost->transportt)->f->smp_handler;
181 ret = handler(shost, rphy, req); 186 ret = handler(shost, rphy, req);
182 req->errors = ret; 187 req->errors = ret;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 1f5d92a25a49..40b4038c019e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -781,7 +781,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
781 rq->special_vec.bv_len = len; 781 rq->special_vec.bv_len = len;
782 782
783 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; 783 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
784 rq->resid_len = len; 784 scsi_req(rq)->resid_len = len;
785 785
786 ret = scsi_init_io(cmd); 786 ret = scsi_init_io(cmd);
787out: 787out:
@@ -1179,7 +1179,7 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
1179 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) 1179 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1180 __free_page(rq->special_vec.bv_page); 1180 __free_page(rq->special_vec.bv_page);
1181 1181
1182 if (SCpnt->cmnd != rq->cmd) { 1182 if (SCpnt->cmnd != scsi_req(rq)->cmd) {
1183 mempool_free(SCpnt->cmnd, sd_cdb_pool); 1183 mempool_free(SCpnt->cmnd, sd_cdb_pool);
1184 SCpnt->cmnd = NULL; 1184 SCpnt->cmnd = NULL;
1185 SCpnt->cmd_len = 0; 1185 SCpnt->cmd_len = 0;
@@ -1750,9 +1750,6 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
1750 unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd); 1750 unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
1751 unsigned int good_bytes; 1751 unsigned int good_bytes;
1752 1752
1753 if (scmd->request->cmd_type != REQ_TYPE_FS)
1754 return 0;
1755
1756 info_valid = scsi_get_sense_info_fld(scmd->sense_buffer, 1753 info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
1757 SCSI_SENSE_BUFFERSIZE, 1754 SCSI_SENSE_BUFFERSIZE,
1758 &bad_lba); 1755 &bad_lba);
@@ -3082,6 +3079,23 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
3082 put_device(&sdkp->dev); 3079 put_device(&sdkp->dev);
3083} 3080}
3084 3081
3082struct sd_devt {
3083 int idx;
3084 struct disk_devt disk_devt;
3085};
3086
3087void sd_devt_release(struct disk_devt *disk_devt)
3088{
3089 struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt,
3090 disk_devt);
3091
3092 spin_lock(&sd_index_lock);
3093 ida_remove(&sd_index_ida, sd_devt->idx);
3094 spin_unlock(&sd_index_lock);
3095
3096 kfree(sd_devt);
3097}
3098
3085/** 3099/**
3086 * sd_probe - called during driver initialization and whenever a 3100 * sd_probe - called during driver initialization and whenever a
3087 * new scsi device is attached to the system. It is called once 3101 * new scsi device is attached to the system. It is called once
@@ -3103,6 +3117,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
3103static int sd_probe(struct device *dev) 3117static int sd_probe(struct device *dev)
3104{ 3118{
3105 struct scsi_device *sdp = to_scsi_device(dev); 3119 struct scsi_device *sdp = to_scsi_device(dev);
3120 struct sd_devt *sd_devt;
3106 struct scsi_disk *sdkp; 3121 struct scsi_disk *sdkp;
3107 struct gendisk *gd; 3122 struct gendisk *gd;
3108 int index; 3123 int index;
@@ -3128,9 +3143,13 @@ static int sd_probe(struct device *dev)
3128 if (!sdkp) 3143 if (!sdkp)
3129 goto out; 3144 goto out;
3130 3145
3146 sd_devt = kzalloc(sizeof(*sd_devt), GFP_KERNEL);
3147 if (!sd_devt)
3148 goto out_free;
3149
3131 gd = alloc_disk(SD_MINORS); 3150 gd = alloc_disk(SD_MINORS);
3132 if (!gd) 3151 if (!gd)
3133 goto out_free; 3152 goto out_free_devt;
3134 3153
3135 do { 3154 do {
3136 if (!ida_pre_get(&sd_index_ida, GFP_KERNEL)) 3155 if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
@@ -3146,6 +3165,11 @@ static int sd_probe(struct device *dev)
3146 goto out_put; 3165 goto out_put;
3147 } 3166 }
3148 3167
3168 atomic_set(&sd_devt->disk_devt.count, 1);
3169 sd_devt->disk_devt.release = sd_devt_release;
3170 sd_devt->idx = index;
3171 gd->disk_devt = &sd_devt->disk_devt;
3172
3149 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); 3173 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
3150 if (error) { 3174 if (error) {
3151 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n"); 3175 sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
@@ -3185,13 +3209,14 @@ static int sd_probe(struct device *dev)
3185 return 0; 3209 return 0;
3186 3210
3187 out_free_index: 3211 out_free_index:
3188 spin_lock(&sd_index_lock); 3212 put_disk_devt(&sd_devt->disk_devt);
3189 ida_remove(&sd_index_ida, index); 3213 sd_devt = NULL;
3190 spin_unlock(&sd_index_lock);
3191 out_put: 3214 out_put:
3192 put_disk(gd); 3215 put_disk(gd);
3193 out_free: 3216 out_free:
3194 kfree(sdkp); 3217 kfree(sdkp);
3218 out_free_devt:
3219 kfree(sd_devt);
3195 out: 3220 out:
3196 scsi_autopm_put_device(sdp); 3221 scsi_autopm_put_device(sdp);
3197 return error; 3222 return error;
@@ -3250,10 +3275,7 @@ static void scsi_disk_release(struct device *dev)
3250 struct scsi_disk *sdkp = to_scsi_disk(dev); 3275 struct scsi_disk *sdkp = to_scsi_disk(dev);
3251 struct gendisk *disk = sdkp->disk; 3276 struct gendisk *disk = sdkp->disk;
3252 3277
3253 spin_lock(&sd_index_lock); 3278 put_disk_devt(disk->disk_devt);
3254 ida_remove(&sd_index_ida, sdkp->index);
3255 spin_unlock(&sd_index_lock);
3256
3257 disk->private_data = NULL; 3279 disk->private_data = NULL;
3258 put_disk(disk); 3280 put_disk(disk);
3259 put_device(&sdkp->device->sdev_gendev); 3281 put_device(&sdkp->device->sdev_gendev);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index dbe5b4b95df0..e0e308b7e01a 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -781,9 +781,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
781 } 781 }
782 if (atomic_read(&sdp->detaching)) { 782 if (atomic_read(&sdp->detaching)) {
783 if (srp->bio) { 783 if (srp->bio) {
784 if (srp->rq->cmd != srp->rq->__cmd) 784 scsi_req_free_cmd(scsi_req(srp->rq));
785 kfree(srp->rq->cmd);
786
787 blk_end_request_all(srp->rq, -EIO); 785 blk_end_request_all(srp->rq, -EIO);
788 srp->rq = NULL; 786 srp->rq = NULL;
789 } 787 }
@@ -1279,6 +1277,7 @@ static void
1279sg_rq_end_io(struct request *rq, int uptodate) 1277sg_rq_end_io(struct request *rq, int uptodate)
1280{ 1278{
1281 struct sg_request *srp = rq->end_io_data; 1279 struct sg_request *srp = rq->end_io_data;
1280 struct scsi_request *req = scsi_req(rq);
1282 Sg_device *sdp; 1281 Sg_device *sdp;
1283 Sg_fd *sfp; 1282 Sg_fd *sfp;
1284 unsigned long iflags; 1283 unsigned long iflags;
@@ -1297,9 +1296,9 @@ sg_rq_end_io(struct request *rq, int uptodate)
1297 if (unlikely(atomic_read(&sdp->detaching))) 1296 if (unlikely(atomic_read(&sdp->detaching)))
1298 pr_info("%s: device detaching\n", __func__); 1297 pr_info("%s: device detaching\n", __func__);
1299 1298
1300 sense = rq->sense; 1299 sense = req->sense;
1301 result = rq->errors; 1300 result = rq->errors;
1302 resid = rq->resid_len; 1301 resid = req->resid_len;
1303 1302
1304 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, 1303 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
1305 "sg_cmd_done: pack_id=%d, res=0x%x\n", 1304 "sg_cmd_done: pack_id=%d, res=0x%x\n",
@@ -1333,6 +1332,10 @@ sg_rq_end_io(struct request *rq, int uptodate)
1333 sdp->device->changed = 1; 1332 sdp->device->changed = 1;
1334 } 1333 }
1335 } 1334 }
1335
1336 if (req->sense_len)
1337 memcpy(srp->sense_b, req->sense, SCSI_SENSE_BUFFERSIZE);
1338
1336 /* Rely on write phase to clean out srp status values, so no "else" */ 1339 /* Rely on write phase to clean out srp status values, so no "else" */
1337 1340
1338 /* 1341 /*
@@ -1342,8 +1345,7 @@ sg_rq_end_io(struct request *rq, int uptodate)
1342 * blk_rq_unmap_user() can be called from user context. 1345 * blk_rq_unmap_user() can be called from user context.
1343 */ 1346 */
1344 srp->rq = NULL; 1347 srp->rq = NULL;
1345 if (rq->cmd != rq->__cmd) 1348 scsi_req_free_cmd(scsi_req(rq));
1346 kfree(rq->cmd);
1347 __blk_put_request(rq->q, rq); 1349 __blk_put_request(rq->q, rq);
1348 1350
1349 write_lock_irqsave(&sfp->rq_list_lock, iflags); 1351 write_lock_irqsave(&sfp->rq_list_lock, iflags);
@@ -1658,6 +1660,7 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
1658{ 1660{
1659 int res; 1661 int res;
1660 struct request *rq; 1662 struct request *rq;
1663 struct scsi_request *req;
1661 Sg_fd *sfp = srp->parentfp; 1664 Sg_fd *sfp = srp->parentfp;
1662 sg_io_hdr_t *hp = &srp->header; 1665 sg_io_hdr_t *hp = &srp->header;
1663 int dxfer_len = (int) hp->dxfer_len; 1666 int dxfer_len = (int) hp->dxfer_len;
@@ -1695,22 +1698,23 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
1695 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually 1698 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
1696 * does not sleep except under memory pressure. 1699 * does not sleep except under memory pressure.
1697 */ 1700 */
1698 rq = blk_get_request(q, rw, GFP_KERNEL); 1701 rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
1702 REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
1699 if (IS_ERR(rq)) { 1703 if (IS_ERR(rq)) {
1700 kfree(long_cmdp); 1704 kfree(long_cmdp);
1701 return PTR_ERR(rq); 1705 return PTR_ERR(rq);
1702 } 1706 }
1707 req = scsi_req(rq);
1703 1708
1704 blk_rq_set_block_pc(rq); 1709 scsi_req_init(rq);
1705 1710
1706 if (hp->cmd_len > BLK_MAX_CDB) 1711 if (hp->cmd_len > BLK_MAX_CDB)
1707 rq->cmd = long_cmdp; 1712 req->cmd = long_cmdp;
1708 memcpy(rq->cmd, cmd, hp->cmd_len); 1713 memcpy(req->cmd, cmd, hp->cmd_len);
1709 rq->cmd_len = hp->cmd_len; 1714 req->cmd_len = hp->cmd_len;
1710 1715
1711 srp->rq = rq; 1716 srp->rq = rq;
1712 rq->end_io_data = srp; 1717 rq->end_io_data = srp;
1713 rq->sense = srp->sense_b;
1714 rq->retries = SG_DEFAULT_RETRIES; 1718 rq->retries = SG_DEFAULT_RETRIES;
1715 1719
1716 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) 1720 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
@@ -1786,8 +1790,7 @@ sg_finish_rem_req(Sg_request *srp)
1786 ret = blk_rq_unmap_user(srp->bio); 1790 ret = blk_rq_unmap_user(srp->bio);
1787 1791
1788 if (srp->rq) { 1792 if (srp->rq) {
1789 if (srp->rq->cmd != srp->rq->__cmd) 1793 scsi_req_free_cmd(scsi_req(srp->rq));
1790 kfree(srp->rq->cmd);
1791 blk_put_request(srp->rq); 1794 blk_put_request(srp->rq);
1792 } 1795 }
1793 1796
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 8702d9cf8040..11c0dfb3dfa3 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -4499,7 +4499,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4499 if (pqi_is_logical_device(device)) { 4499 if (pqi_is_logical_device(device)) {
4500 raid_bypassed = false; 4500 raid_bypassed = false;
4501 if (device->offload_enabled && 4501 if (device->offload_enabled &&
4502 scmd->request->cmd_type == REQ_TYPE_FS) { 4502 !blk_rq_is_passthrough(scmd->request)) {
4503 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, 4503 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4504 scmd, queue_group); 4504 scmd, queue_group);
4505 if (rc == 0 || 4505 if (rc == 0 ||
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 013bfe049a48..0b29b9329b1c 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -437,14 +437,17 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
437 goto out; 437 goto out;
438 } 438 }
439 439
440 if (rq_data_dir(rq) == WRITE) { 440 switch (req_op(rq)) {
441 case REQ_OP_WRITE:
441 if (!cd->writeable) 442 if (!cd->writeable)
442 goto out; 443 goto out;
443 SCpnt->cmnd[0] = WRITE_10; 444 SCpnt->cmnd[0] = WRITE_10;
444 cd->cdi.media_written = 1; 445 cd->cdi.media_written = 1;
445 } else if (rq_data_dir(rq) == READ) { 446 break;
447 case REQ_OP_READ:
446 SCpnt->cmnd[0] = READ_10; 448 SCpnt->cmnd[0] = READ_10;
447 } else { 449 break;
450 default:
448 blk_dump_rq_flags(rq, "Unknown sr command"); 451 blk_dump_rq_flags(rq, "Unknown sr command");
449 goto out; 452 goto out;
450 } 453 }
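Note: the sr change above follows the same theme as the rest of the series — the operation now lives in the request itself, so drivers dispatch on req_op() instead of inferring direction from rq_data_dir(). The general shape of such a prep-time dispatch, sketched (error path illustrative):

	switch (req_op(rq)) {
	case REQ_OP_READ:
		/* build a READ_10 CDB */
		break;
	case REQ_OP_WRITE:
		/* check writeability, then build a WRITE_10 CDB */
		break;
	default:
		blk_dump_rq_flags(rq, "unsupported op");
		return BLKPREP_KILL;
	}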
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 5f35b863e1a7..81212d4bd9bf 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -475,7 +475,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
475 ktime_t now; 475 ktime_t now;
476 476
477 now = ktime_get(); 477 now = ktime_get();
478 if (req->cmd[0] == WRITE_6) { 478 if (scsi_req(req)->cmd[0] == WRITE_6) {
479 now = ktime_sub(now, STp->stats->write_time); 479 now = ktime_sub(now, STp->stats->write_time);
480 atomic64_add(ktime_to_ns(now), &STp->stats->tot_write_time); 480 atomic64_add(ktime_to_ns(now), &STp->stats->tot_write_time);
481 atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time); 481 atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
@@ -489,7 +489,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
489 } else 489 } else
490 atomic64_add(atomic_read(&STp->stats->last_write_size), 490 atomic64_add(atomic_read(&STp->stats->last_write_size),
491 &STp->stats->write_byte_cnt); 491 &STp->stats->write_byte_cnt);
492 } else if (req->cmd[0] == READ_6) { 492 } else if (scsi_req(req)->cmd[0] == READ_6) {
493 now = ktime_sub(now, STp->stats->read_time); 493 now = ktime_sub(now, STp->stats->read_time);
494 atomic64_add(ktime_to_ns(now), &STp->stats->tot_read_time); 494 atomic64_add(ktime_to_ns(now), &STp->stats->tot_read_time);
495 atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time); 495 atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
@@ -514,15 +514,18 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
514static void st_scsi_execute_end(struct request *req, int uptodate) 514static void st_scsi_execute_end(struct request *req, int uptodate)
515{ 515{
516 struct st_request *SRpnt = req->end_io_data; 516 struct st_request *SRpnt = req->end_io_data;
517 struct scsi_request *rq = scsi_req(req);
517 struct scsi_tape *STp = SRpnt->stp; 518 struct scsi_tape *STp = SRpnt->stp;
518 struct bio *tmp; 519 struct bio *tmp;
519 520
520 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors; 521 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
521 STp->buffer->cmdstat.residual = req->resid_len; 522 STp->buffer->cmdstat.residual = rq->resid_len;
522 523
523 st_do_stats(STp, req); 524 st_do_stats(STp, req);
524 525
525 tmp = SRpnt->bio; 526 tmp = SRpnt->bio;
527 if (rq->sense_len)
528 memcpy(SRpnt->sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
526 if (SRpnt->waiting) 529 if (SRpnt->waiting)
527 complete(SRpnt->waiting); 530 complete(SRpnt->waiting);
528 531
@@ -535,17 +538,18 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
535 int timeout, int retries) 538 int timeout, int retries)
536{ 539{
537 struct request *req; 540 struct request *req;
541 struct scsi_request *rq;
538 struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data; 542 struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
539 int err = 0; 543 int err = 0;
540 int write = (data_direction == DMA_TO_DEVICE);
541 struct scsi_tape *STp = SRpnt->stp; 544 struct scsi_tape *STp = SRpnt->stp;
542 545
543 req = blk_get_request(SRpnt->stp->device->request_queue, write, 546 req = blk_get_request(SRpnt->stp->device->request_queue,
544 GFP_KERNEL); 547 data_direction == DMA_TO_DEVICE ?
548 REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL);
545 if (IS_ERR(req)) 549 if (IS_ERR(req))
546 return DRIVER_ERROR << 24; 550 return DRIVER_ERROR << 24;
547 551 rq = scsi_req(req);
548 blk_rq_set_block_pc(req); 552 scsi_req_init(req);
549 req->rq_flags |= RQF_QUIET; 553 req->rq_flags |= RQF_QUIET;
550 554
551 mdata->null_mapped = 1; 555 mdata->null_mapped = 1;
@@ -571,11 +575,9 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
571 } 575 }
572 576
573 SRpnt->bio = req->bio; 577 SRpnt->bio = req->bio;
574 req->cmd_len = COMMAND_SIZE(cmd[0]); 578 rq->cmd_len = COMMAND_SIZE(cmd[0]);
575 memset(req->cmd, 0, BLK_MAX_CDB); 579 memset(rq->cmd, 0, BLK_MAX_CDB);
576 memcpy(req->cmd, cmd, req->cmd_len); 580 memcpy(rq->cmd, cmd, rq->cmd_len);
577 req->sense = SRpnt->sense;
578 req->sense_len = 0;
579 req->timeout = timeout; 581 req->timeout = timeout;
580 req->retries = retries; 582 req->retries = retries;
581 req->end_io_data = SRpnt; 583 req->end_io_data = SRpnt;
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 88db6992420e..bcf7d05d1aab 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -260,7 +260,7 @@ static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
260{ 260{
261 int wanted_len = cmd->SCp.this_residual; 261 int wanted_len = cmd->SCp.this_residual;
262 262
263 if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS) 263 if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(cmd->request))
264 return 0; 264 return 0;
265 265
266 return wanted_len; 266 return wanted_len;
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 257361280510..e2bc99980f75 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -4,6 +4,7 @@ menuconfig TARGET_CORE
4 depends on SCSI && BLOCK 4 depends on SCSI && BLOCK
5 select CONFIGFS_FS 5 select CONFIGFS_FS
6 select CRC_T10DIF 6 select CRC_T10DIF
7 select BLK_SCSI_REQUEST # only for scsi_command_size_tbl..
7 default n 8 default n
8 help 9 help
9 Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled 10 Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 04d7aa7390d0..a8f8e53f2f57 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1005,7 +1005,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
1005 scsi_command_size(cmd->t_task_cdb)); 1005 scsi_command_size(cmd->t_task_cdb));
1006 1006
1007 req = blk_get_request(pdv->pdv_sd->request_queue, 1007 req = blk_get_request(pdv->pdv_sd->request_queue,
1008 (cmd->data_direction == DMA_TO_DEVICE), 1008 cmd->data_direction == DMA_TO_DEVICE ?
1009 REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
1009 GFP_KERNEL); 1010 GFP_KERNEL);
1010 if (IS_ERR(req)) { 1011 if (IS_ERR(req)) {
1011 pr_err("PSCSI: blk_get_request() failed\n"); 1012 pr_err("PSCSI: blk_get_request() failed\n");
@@ -1013,7 +1014,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
1013 goto fail; 1014 goto fail;
1014 } 1015 }
1015 1016
1016 blk_rq_set_block_pc(req); 1017 scsi_req_init(req);
1017 1018
1018 if (sgl) { 1019 if (sgl) {
1019 ret = pscsi_map_sg(cmd, sgl, sgl_nents, req); 1020 ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
@@ -1023,10 +1024,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
1023 1024
1024 req->end_io = pscsi_req_done; 1025 req->end_io = pscsi_req_done;
1025 req->end_io_data = cmd; 1026 req->end_io_data = cmd;
1026 req->cmd_len = scsi_command_size(pt->pscsi_cdb); 1027 scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb);
1027 req->cmd = &pt->pscsi_cdb[0]; 1028 scsi_req(req)->cmd = &pt->pscsi_cdb[0];
1028 req->sense = &pt->pscsi_sense[0];
1029 req->sense_len = 0;
1030 if (pdv->pdv_sd->type == TYPE_DISK) 1029 if (pdv->pdv_sd->type == TYPE_DISK)
1031 req->timeout = PS_TIMEOUT_DISK; 1030 req->timeout = PS_TIMEOUT_DISK;
1032 else 1031 else
@@ -1075,7 +1074,7 @@ static void pscsi_req_done(struct request *req, int uptodate)
1075 struct pscsi_plugin_task *pt = cmd->priv; 1074 struct pscsi_plugin_task *pt = cmd->priv;
1076 1075
1077 pt->pscsi_result = req->errors; 1076 pt->pscsi_result = req->errors;
1078 pt->pscsi_resid = req->resid_len; 1077 pt->pscsi_resid = scsi_req(req)->resid_len;
1079 1078
1080 cmd->scsi_status = status_byte(pt->pscsi_result) << 1; 1079 cmd->scsi_status = status_byte(pt->pscsi_result) << 1;
1081 if (cmd->scsi_status) { 1080 if (cmd->scsi_status) {
@@ -1096,6 +1095,7 @@ static void pscsi_req_done(struct request *req, int uptodate)
1096 break; 1095 break;
1097 } 1096 }
1098 1097
1098 memcpy(pt->pscsi_sense, scsi_req(req)->sense, TRANSPORT_SENSE_BUFFER);
1099 __blk_put_request(req->q, req); 1099 __blk_put_request(req->q, req);
1100 kfree(pt); 1100 kfree(pt);
1101} 1101}