diff options
author | James Bottomley <James.Bottomley@HansenPartnership.com> | 2009-06-12 11:02:03 -0400 |
---|---|---|
committer | James Bottomley <James.Bottomley@HansenPartnership.com> | 2009-06-12 11:02:03 -0400 |
commit | 82681a318f9f028ea64e61f24bbd9ac535531921 (patch) | |
tree | 529b6a5b4fd040fb54b7672b1a224ebd47445876 /drivers/message | |
parent | 3860c97bd60a4525bb62eb90e3e7d2f02662ac59 (diff) | |
parent | 8ebf975608aaebd7feb33d77f07ba21a6380e086 (diff) |
[SCSI] Merge branch 'linus'
Conflicts:
drivers/message/fusion/mptsas.c
fixed up conflict between req->data_len accessors and mptsas driver updates.
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/message')
-rw-r--r-- | drivers/message/fusion/mptsas.c | 22 | ||||
-rw-r--r-- | drivers/message/i2o/i2o_block.c | 43 |
2 files changed, 31 insertions(+), 34 deletions(-)
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 14c490a767a4..20e0b447e8e8 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c | |||
@@ -2135,8 +2135,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
2135 | /* do we need to support multiple segments? */ | 2135 | /* do we need to support multiple segments? */ |
2136 | if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { | 2136 | if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { |
2137 | printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n", | 2137 | printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n", |
2138 | ioc->name, __func__, req->bio->bi_vcnt, req->data_len, | 2138 | ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req), |
2139 | rsp->bio->bi_vcnt, rsp->data_len); | 2139 | rsp->bio->bi_vcnt, blk_rq_bytes(rsp)); |
2140 | return -EINVAL; | 2140 | return -EINVAL; |
2141 | } | 2141 | } |
2142 | 2142 | ||
@@ -2153,7 +2153,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
2153 | smpreq = (SmpPassthroughRequest_t *)mf; | 2153 | smpreq = (SmpPassthroughRequest_t *)mf; |
2154 | memset(smpreq, 0, sizeof(*smpreq)); | 2154 | memset(smpreq, 0, sizeof(*smpreq)); |
2155 | 2155 | ||
2156 | smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4); | 2156 | smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); |
2157 | smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH; | 2157 | smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH; |
2158 | 2158 | ||
2159 | if (rphy) | 2159 | if (rphy) |
@@ -2179,10 +2179,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
2179 | MPI_SGE_FLAGS_END_OF_BUFFER | | 2179 | MPI_SGE_FLAGS_END_OF_BUFFER | |
2180 | MPI_SGE_FLAGS_DIRECTION) | 2180 | MPI_SGE_FLAGS_DIRECTION) |
2181 | << MPI_SGE_FLAGS_SHIFT; | 2181 | << MPI_SGE_FLAGS_SHIFT; |
2182 | flagsLength |= (req->data_len - 4); | 2182 | flagsLength |= (blk_rq_bytes(req) - 4); |
2183 | 2183 | ||
2184 | dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), | 2184 | dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), |
2185 | req->data_len, PCI_DMA_BIDIRECTIONAL); | 2185 | blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); |
2186 | if (!dma_addr_out) | 2186 | if (!dma_addr_out) |
2187 | goto put_mf; | 2187 | goto put_mf; |
2188 | ioc->add_sge(psge, flagsLength, dma_addr_out); | 2188 | ioc->add_sge(psge, flagsLength, dma_addr_out); |
@@ -2195,9 +2195,9 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
2195 | MPI_SGE_FLAGS_END_OF_BUFFER; | 2195 | MPI_SGE_FLAGS_END_OF_BUFFER; |
2196 | 2196 | ||
2197 | flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT; | 2197 | flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT; |
2198 | flagsLength |= rsp->data_len + 4; | 2198 | flagsLength |= blk_rq_bytes(rsp) + 4; |
2199 | dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), | 2199 | dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), |
2200 | rsp->data_len, PCI_DMA_BIDIRECTIONAL); | 2200 | blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); |
2201 | if (!dma_addr_in) | 2201 | if (!dma_addr_in) |
2202 | goto unmap; | 2202 | goto unmap; |
2203 | ioc->add_sge(psge, flagsLength, dma_addr_in); | 2203 | ioc->add_sge(psge, flagsLength, dma_addr_in); |
@@ -2221,8 +2221,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
2221 | smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; | 2221 | smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; |
2222 | memcpy(req->sense, smprep, sizeof(*smprep)); | 2222 | memcpy(req->sense, smprep, sizeof(*smprep)); |
2223 | req->sense_len = sizeof(*smprep); | 2223 | req->sense_len = sizeof(*smprep); |
2224 | req->data_len = 0; | 2224 | req->resid_len = 0; |
2225 | rsp->data_len -= smprep->ResponseDataLength; | 2225 | rsp->resid_len -= smprep->ResponseDataLength; |
2226 | } else { | 2226 | } else { |
2227 | printk(MYIOC_s_ERR_FMT | 2227 | printk(MYIOC_s_ERR_FMT |
2228 | "%s: smp passthru reply failed to be returned\n", | 2228 | "%s: smp passthru reply failed to be returned\n", |
@@ -2231,10 +2231,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
2231 | } | 2231 | } |
2232 | unmap: | 2232 | unmap: |
2233 | if (dma_addr_out) | 2233 | if (dma_addr_out) |
2234 | pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len, | 2234 | pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req), |
2235 | PCI_DMA_BIDIRECTIONAL); | 2235 | PCI_DMA_BIDIRECTIONAL); |
2236 | if (dma_addr_in) | 2236 | if (dma_addr_in) |
2237 | pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len, | 2237 | pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp), |
2238 | PCI_DMA_BIDIRECTIONAL); | 2238 | PCI_DMA_BIDIRECTIONAL); |
2239 | put_mf: | 2239 | put_mf: |
2240 | if (mf) | 2240 | if (mf) |
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index a443e136dc41..335d4c78a775 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c | |||
@@ -426,15 +426,9 @@ static void i2o_block_end_request(struct request *req, int error, | |||
426 | struct request_queue *q = req->q; | 426 | struct request_queue *q = req->q; |
427 | unsigned long flags; | 427 | unsigned long flags; |
428 | 428 | ||
429 | if (blk_end_request(req, error, nr_bytes)) { | 429 | if (blk_end_request(req, error, nr_bytes)) |
430 | int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT); | ||
431 | |||
432 | if (blk_pc_request(req)) | ||
433 | leftover = req->data_len; | ||
434 | |||
435 | if (error) | 430 | if (error) |
436 | blk_end_request(req, -EIO, leftover); | 431 | blk_end_request_all(req, -EIO); |
437 | } | ||
438 | 432 | ||
439 | spin_lock_irqsave(q->queue_lock, flags); | 433 | spin_lock_irqsave(q->queue_lock, flags); |
440 | 434 | ||
@@ -761,7 +755,7 @@ static int i2o_block_transfer(struct request *req) | |||
761 | break; | 755 | break; |
762 | 756 | ||
763 | case CACHE_SMARTFETCH: | 757 | case CACHE_SMARTFETCH: |
764 | if (req->nr_sectors > 16) | 758 | if (blk_rq_sectors(req) > 16) |
765 | ctl_flags = 0x201F0008; | 759 | ctl_flags = 0x201F0008; |
766 | else | 760 | else |
767 | ctl_flags = 0x001F0000; | 761 | ctl_flags = 0x001F0000; |
@@ -781,13 +775,13 @@ static int i2o_block_transfer(struct request *req) | |||
781 | ctl_flags = 0x001F0010; | 775 | ctl_flags = 0x001F0010; |
782 | break; | 776 | break; |
783 | case CACHE_SMARTBACK: | 777 | case CACHE_SMARTBACK: |
784 | if (req->nr_sectors > 16) | 778 | if (blk_rq_sectors(req) > 16) |
785 | ctl_flags = 0x001F0004; | 779 | ctl_flags = 0x001F0004; |
786 | else | 780 | else |
787 | ctl_flags = 0x001F0010; | 781 | ctl_flags = 0x001F0010; |
788 | break; | 782 | break; |
789 | case CACHE_SMARTTHROUGH: | 783 | case CACHE_SMARTTHROUGH: |
790 | if (req->nr_sectors > 16) | 784 | if (blk_rq_sectors(req) > 16) |
791 | ctl_flags = 0x001F0004; | 785 | ctl_flags = 0x001F0004; |
792 | else | 786 | else |
793 | ctl_flags = 0x001F0010; | 787 | ctl_flags = 0x001F0010; |
@@ -800,8 +794,9 @@ static int i2o_block_transfer(struct request *req) | |||
800 | if (c->adaptec) { | 794 | if (c->adaptec) { |
801 | u8 cmd[10]; | 795 | u8 cmd[10]; |
802 | u32 scsi_flags; | 796 | u32 scsi_flags; |
803 | u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT; | 797 | u16 hwsec; |
804 | 798 | ||
799 | hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT; | ||
805 | memset(cmd, 0, 10); | 800 | memset(cmd, 0, 10); |
806 | 801 | ||
807 | sgl_offset = SGL_OFFSET_12; | 802 | sgl_offset = SGL_OFFSET_12; |
@@ -827,22 +822,22 @@ static int i2o_block_transfer(struct request *req) | |||
827 | 822 | ||
828 | *mptr++ = cpu_to_le32(scsi_flags); | 823 | *mptr++ = cpu_to_le32(scsi_flags); |
829 | 824 | ||
830 | *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); | 825 | *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec); |
831 | *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); | 826 | *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec); |
832 | 827 | ||
833 | memcpy(mptr, cmd, 10); | 828 | memcpy(mptr, cmd, 10); |
834 | mptr += 4; | 829 | mptr += 4; |
835 | *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); | 830 | *mptr++ = cpu_to_le32(blk_rq_bytes(req)); |
836 | } else | 831 | } else |
837 | #endif | 832 | #endif |
838 | { | 833 | { |
839 | msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); | 834 | msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); |
840 | *mptr++ = cpu_to_le32(ctl_flags); | 835 | *mptr++ = cpu_to_le32(ctl_flags); |
841 | *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); | 836 | *mptr++ = cpu_to_le32(blk_rq_bytes(req)); |
842 | *mptr++ = | 837 | *mptr++ = |
843 | cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT)); | 838 | cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT)); |
844 | *mptr++ = | 839 | *mptr++ = |
845 | cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT)); | 840 | cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT)); |
846 | } | 841 | } |
847 | 842 | ||
848 | if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { | 843 | if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { |
@@ -883,7 +878,7 @@ static void i2o_block_request_fn(struct request_queue *q) | |||
883 | struct request *req; | 878 | struct request *req; |
884 | 879 | ||
885 | while (!blk_queue_plugged(q)) { | 880 | while (!blk_queue_plugged(q)) { |
886 | req = elv_next_request(q); | 881 | req = blk_peek_request(q); |
887 | if (!req) | 882 | if (!req) |
888 | break; | 883 | break; |
889 | 884 | ||
@@ -896,7 +891,7 @@ static void i2o_block_request_fn(struct request_queue *q) | |||
896 | 891 | ||
897 | if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) { | 892 | if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) { |
898 | if (!i2o_block_transfer(req)) { | 893 | if (!i2o_block_transfer(req)) { |
899 | blkdev_dequeue_request(req); | 894 | blk_start_request(req); |
900 | continue; | 895 | continue; |
901 | } else | 896 | } else |
902 | osm_info("transfer error\n"); | 897 | osm_info("transfer error\n"); |
@@ -922,8 +917,10 @@ static void i2o_block_request_fn(struct request_queue *q) | |||
922 | blk_stop_queue(q); | 917 | blk_stop_queue(q); |
923 | break; | 918 | break; |
924 | } | 919 | } |
925 | } else | 920 | } else { |
926 | end_request(req, 0); | 921 | blk_start_request(req); |
922 | __blk_end_request_all(req, -EIO); | ||
923 | } | ||
927 | } | 924 | } |
928 | }; | 925 | }; |
929 | 926 | ||
@@ -1082,7 +1079,7 @@ static int i2o_block_probe(struct device *dev) | |||
1082 | */ | 1079 | */ |
1083 | if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || | 1080 | if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || |
1084 | !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { | 1081 | !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { |
1085 | blk_queue_hardsect_size(queue, le32_to_cpu(blocksize)); | 1082 | blk_queue_logical_block_size(queue, le32_to_cpu(blocksize)); |
1086 | } else | 1083 | } else |
1087 | osm_warn("unable to get blocksize of %s\n", gd->disk_name); | 1084 | osm_warn("unable to get blocksize of %s\n", gd->disk_name); |
1088 | 1085 | ||