Diffstat (limited to 'drivers/message')
 -rw-r--r--  drivers/message/fusion/mptsas.c | 22
 -rw-r--r--  drivers/message/i2o/i2o_block.c | 43
2 files changed, 31 insertions, 34 deletions
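Both drivers stop reading struct request fields directly (req->sector, req->nr_sectors, req->data_len, req->hard_nr_sectors) and switch to the block-layer accessor helpers. Below is a minimal sketch of those helpers in use; the function name show_request is only illustrative and is not part of the patch:

#include <linux/kernel.h>
#include <linux/blkdev.h>

/* Illustration only: print a request's position and size using the
 * accessors this patch converts both drivers to. */
static void show_request(struct request *req)
{
	pr_info("req: sector %llu, %u sectors, %u bytes\n",
		(unsigned long long)blk_rq_pos(req),	/* replaces req->sector */
		blk_rq_sectors(req),			/* replaces req->nr_sectors */
		blk_rq_bytes(req));			/* replaces req->data_len */
}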
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index a9019f081b97..79f5433359f9 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1277,8 +1277,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	/* do we need to support multiple segments? */
 	if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
 		printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
-		    ioc->name, __func__, req->bio->bi_vcnt, req->data_len,
-		    rsp->bio->bi_vcnt, rsp->data_len);
+		    ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
+		    rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
 		return -EINVAL;
 	}
 
@@ -1295,7 +1295,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	smpreq = (SmpPassthroughRequest_t *)mf;
 	memset(smpreq, 0, sizeof(*smpreq));
 
-	smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4);
+	smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
 	smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
 
 	if (rphy)
@@ -1321,10 +1321,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 		       MPI_SGE_FLAGS_END_OF_BUFFER |
 		       MPI_SGE_FLAGS_DIRECTION |
 		       mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT;
-	flagsLength |= (req->data_len - 4);
+	flagsLength |= (blk_rq_bytes(req) - 4);
 
 	dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
-				      req->data_len, PCI_DMA_BIDIRECTIONAL);
+				      blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
 	if (!dma_addr_out)
 		goto put_mf;
 	mpt_add_sge(psge, flagsLength, dma_addr_out);
@@ -1332,9 +1332,9 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
 	/* response */
 	flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
-	flagsLength |= rsp->data_len + 4;
+	flagsLength |= blk_rq_bytes(rsp) + 4;
 	dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
-				     rsp->data_len, PCI_DMA_BIDIRECTIONAL);
+				     blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
 	if (!dma_addr_in)
 		goto unmap;
 	mpt_add_sge(psge, flagsLength, dma_addr_in);
@@ -1357,8 +1357,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 		smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
 		memcpy(req->sense, smprep, sizeof(*smprep));
 		req->sense_len = sizeof(*smprep);
-		req->data_len = 0;
-		rsp->data_len -= smprep->ResponseDataLength;
+		req->resid_len = 0;
+		rsp->resid_len -= smprep->ResponseDataLength;
 	} else {
 		printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
 		    ioc->name, __func__);
@@ -1366,10 +1366,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	}
 unmap:
 	if (dma_addr_out)
-		pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len,
+		pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
 				 PCI_DMA_BIDIRECTIONAL);
 	if (dma_addr_in)
-		pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len,
+		pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
 				 PCI_DMA_BIDIRECTIONAL);
 put_mf:
 	if (mf)
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index a443e136dc41..335d4c78a775 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -426,15 +426,9 @@ static void i2o_block_end_request(struct request *req, int error,
 	struct request_queue *q = req->q;
 	unsigned long flags;
 
-	if (blk_end_request(req, error, nr_bytes)) {
-		int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
-
-		if (blk_pc_request(req))
-			leftover = req->data_len;
-
+	if (blk_end_request(req, error, nr_bytes))
 		if (error)
-			blk_end_request(req, -EIO, leftover);
-	}
+			blk_end_request_all(req, -EIO);
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
@@ -761,7 +755,7 @@ static int i2o_block_transfer(struct request *req)
 			break;
 
 		case CACHE_SMARTFETCH:
-			if (req->nr_sectors > 16)
+			if (blk_rq_sectors(req) > 16)
 				ctl_flags = 0x201F0008;
 			else
 				ctl_flags = 0x001F0000;
@@ -781,13 +775,13 @@ static int i2o_block_transfer(struct request *req)
 			ctl_flags = 0x001F0010;
 			break;
 		case CACHE_SMARTBACK:
-			if (req->nr_sectors > 16)
+			if (blk_rq_sectors(req) > 16)
 				ctl_flags = 0x001F0004;
 			else
 				ctl_flags = 0x001F0010;
 			break;
 		case CACHE_SMARTTHROUGH:
-			if (req->nr_sectors > 16)
+			if (blk_rq_sectors(req) > 16)
 				ctl_flags = 0x001F0004;
 			else
 				ctl_flags = 0x001F0010;
@@ -800,8 +794,9 @@ static int i2o_block_transfer(struct request *req)
 	if (c->adaptec) {
 		u8 cmd[10];
 		u32 scsi_flags;
-		u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT;
+		u16 hwsec;
 
+		hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
 		memset(cmd, 0, 10);
 
 		sgl_offset = SGL_OFFSET_12;
@@ -827,22 +822,22 @@ static int i2o_block_transfer(struct request *req)
 
 		*mptr++ = cpu_to_le32(scsi_flags);
 
-		*((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
-		*((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
+		*((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
+		*((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
 
 		memcpy(mptr, cmd, 10);
 		mptr += 4;
-		*mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+		*mptr++ = cpu_to_le32(blk_rq_bytes(req));
 	} else
 #endif
 	{
 		msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
 		*mptr++ = cpu_to_le32(ctl_flags);
-		*mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+		*mptr++ = cpu_to_le32(blk_rq_bytes(req));
 		*mptr++ =
-		    cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
+		    cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
 		*mptr++ =
-		    cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
+		    cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
 	}
 
 	if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -883,7 +878,7 @@ static void i2o_block_request_fn(struct request_queue *q)
 	struct request *req;
 
 	while (!blk_queue_plugged(q)) {
-		req = elv_next_request(q);
+		req = blk_peek_request(q);
 		if (!req)
 			break;
 
@@ -896,7 +891,7 @@ static void i2o_block_request_fn(struct request_queue *q)
 
 			if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
 				if (!i2o_block_transfer(req)) {
-					blkdev_dequeue_request(req);
+					blk_start_request(req);
 					continue;
 				} else
 					osm_info("transfer error\n");
@@ -922,8 +917,10 @@ static void i2o_block_request_fn(struct request_queue *q)
 				blk_stop_queue(q);
 				break;
 			}
-		} else
-			end_request(req, 0);
+		} else {
+			blk_start_request(req);
+			__blk_end_request_all(req, -EIO);
+		}
 	}
 };
 
@@ -1082,7 +1079,7 @@ static int i2o_block_probe(struct device *dev)
 	 */
 	if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
 	    !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
-		blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
+		blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
 	} else
 		osm_warn("unable to get blocksize of %s\n", gd->disk_name);
 
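The i2o_block_request_fn() change follows the block layer's peek/start pattern: the driver peeks at the head of the queue with blk_peek_request() and only dequeues the request with blk_start_request() once it has been accepted (or is being failed outright with __blk_end_request_all()). A rough sketch of that loop shape; my_request_fn and my_try_to_issue are hypothetical names, not taken from the patch:

#include <linux/blkdev.h>

/* Hypothetical stand-in for the driver's real submission path
 * (i2o_block_transfer() in the patch): return 0 once the request
 * has been handed to the hardware. */
static int my_try_to_issue(struct request *req)
{
	return 0;
}

/* Sketch of the peek/start loop the driver is converted to. */
static void my_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (my_try_to_issue(req))
			break;			/* cannot issue now; leave it queued */
		blk_start_request(req);		/* accepted: take it off the queue */
	}
}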