aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/ipr.c
diff options
context:
space:
mode:
authorFUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>2007-05-25 11:26:59 -0400
committerJames Bottomley <jejb@mulgrave.il.steeleye.com>2007-05-29 17:54:22 -0400
commit63015bc9333907725f90a1691d0ade44e51cdcbf (patch)
tree0e5eb8d8e348dcd72e1bf60ab2c9786ef30b0ccc /drivers/scsi/ipr.c
parentc13e5566471d90ff2858f5cacaf27021d158e037 (diff)
[SCSI] ipr: convert to use the data buffer accessors
- remove the unnecessary map_single path. - convert to use the new accessors for the sg lists and the parameters. Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> Acked-by: Brian King <brking@linux.vnet.ibm.com> Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--drivers/scsi/ipr.c144
1 file changed, 40 insertions, 104 deletions
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 4baa79e6867..6411c458d33 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -540,32 +540,6 @@ struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
540} 540}
541 541
542/** 542/**
543 * ipr_unmap_sglist - Unmap scatterlist if mapped
544 * @ioa_cfg: ioa config struct
545 * @ipr_cmd: ipr command struct
546 *
547 * Return value:
548 * nothing
549 **/
550static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
551 struct ipr_cmnd *ipr_cmd)
552{
553 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
554
555 if (ipr_cmd->dma_use_sg) {
556 if (scsi_cmd->use_sg > 0) {
557 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
558 scsi_cmd->use_sg,
559 scsi_cmd->sc_data_direction);
560 } else {
561 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
562 scsi_cmd->request_bufflen,
563 scsi_cmd->sc_data_direction);
564 }
565 }
566}
567
568/**
569 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts 543 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
570 * @ioa_cfg: ioa config struct 544 * @ioa_cfg: ioa config struct
571 * @clr_ints: interrupts to clear 545 * @clr_ints: interrupts to clear
@@ -677,7 +651,7 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
677 651
678 scsi_cmd->result |= (DID_ERROR << 16); 652 scsi_cmd->result |= (DID_ERROR << 16);
679 653
680 ipr_unmap_sglist(ioa_cfg, ipr_cmd); 654 scsi_dma_unmap(ipr_cmd->scsi_cmd);
681 scsi_cmd->scsi_done(scsi_cmd); 655 scsi_cmd->scsi_done(scsi_cmd);
682 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 656 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
683} 657}
@@ -4285,93 +4259,55 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4285static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, 4259static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4286 struct ipr_cmnd *ipr_cmd) 4260 struct ipr_cmnd *ipr_cmd)
4287{ 4261{
4288 int i; 4262 int i, nseg;
4289 struct scatterlist *sglist; 4263 struct scatterlist *sg;
4290 u32 length; 4264 u32 length;
4291 u32 ioadl_flags = 0; 4265 u32 ioadl_flags = 0;
4292 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 4266 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4293 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 4267 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4294 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 4268 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4295 4269
4296 length = scsi_cmd->request_bufflen; 4270 length = scsi_bufflen(scsi_cmd);
4297 4271 if (!length)
4298 if (length == 0)
4299 return 0; 4272 return 0;
4300 4273
4301 if (scsi_cmd->use_sg) { 4274 nseg = scsi_dma_map(scsi_cmd);
4302 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, 4275 if (nseg < 0) {
4303 scsi_cmd->request_buffer, 4276 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4304 scsi_cmd->use_sg, 4277 return -1;
4305 scsi_cmd->sc_data_direction); 4278 }
4306
4307 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4308 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4309 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4310 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4311 ioarcb->write_ioadl_len =
4312 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4313 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4314 ioadl_flags = IPR_IOADL_FLAGS_READ;
4315 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4316 ioarcb->read_ioadl_len =
4317 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4318 }
4319
4320 sglist = scsi_cmd->request_buffer;
4321 4279
4322 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) { 4280 ipr_cmd->dma_use_sg = nseg;
4323 ioadl = ioarcb->add_data.u.ioadl;
4324 ioarcb->write_ioadl_addr =
4325 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4326 offsetof(struct ipr_ioarcb, add_data));
4327 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4328 }
4329 4281
4330 for (i = 0; i < ipr_cmd->dma_use_sg; i++) { 4282 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4331 ioadl[i].flags_and_data_len = 4283 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4332 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i])); 4284 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4333 ioadl[i].address = 4285 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4334 cpu_to_be32(sg_dma_address(&sglist[i])); 4286 ioarcb->write_ioadl_len =
4335 } 4287 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4288 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4289 ioadl_flags = IPR_IOADL_FLAGS_READ;
4290 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4291 ioarcb->read_ioadl_len =
4292 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4293 }
4336 4294
4337 if (likely(ipr_cmd->dma_use_sg)) { 4295 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
4338 ioadl[i-1].flags_and_data_len |= 4296 ioadl = ioarcb->add_data.u.ioadl;
4339 cpu_to_be32(IPR_IOADL_FLAGS_LAST); 4297 ioarcb->write_ioadl_addr =
4340 return 0; 4298 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4341 } else 4299 offsetof(struct ipr_ioarcb, add_data));
4342 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); 4300 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4343 } else { 4301 }
4344 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4345 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4346 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4347 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4348 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4349 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4350 ioadl_flags = IPR_IOADL_FLAGS_READ;
4351 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4352 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4353 }
4354 4302
4355 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev, 4303 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
4356 scsi_cmd->request_buffer, length, 4304 ioadl[i].flags_and_data_len =
4357 scsi_cmd->sc_data_direction); 4305 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
4358 4306 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
4359 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
4360 ioadl = ioarcb->add_data.u.ioadl;
4361 ioarcb->write_ioadl_addr =
4362 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4363 offsetof(struct ipr_ioarcb, add_data));
4364 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4365 ipr_cmd->dma_use_sg = 1;
4366 ioadl[0].flags_and_data_len =
4367 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
4368 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
4369 return 0;
4370 } else
4371 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
4372 } 4307 }
4373 4308
4374 return -1; 4309 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4310 return 0;
4375} 4311}
4376 4312
4377/** 4313/**
@@ -4434,7 +4370,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4434 res->needs_sync_complete = 1; 4370 res->needs_sync_complete = 1;
4435 res->in_erp = 0; 4371 res->in_erp = 0;
4436 } 4372 }
4437 ipr_unmap_sglist(ioa_cfg, ipr_cmd); 4373 scsi_dma_unmap(ipr_cmd->scsi_cmd);
4438 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 4374 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4439 scsi_cmd->scsi_done(scsi_cmd); 4375 scsi_cmd->scsi_done(scsi_cmd);
4440} 4376}
@@ -4812,7 +4748,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4812 break; 4748 break;
4813 } 4749 }
4814 4750
4815 ipr_unmap_sglist(ioa_cfg, ipr_cmd); 4751 scsi_dma_unmap(ipr_cmd->scsi_cmd);
4816 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 4752 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4817 scsi_cmd->scsi_done(scsi_cmd); 4753 scsi_cmd->scsi_done(scsi_cmd);
4818} 4754}
@@ -4833,10 +4769,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4833 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 4769 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4834 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 4770 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4835 4771
4836 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len); 4772 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
4837 4773
4838 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 4774 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4839 ipr_unmap_sglist(ioa_cfg, ipr_cmd); 4775 scsi_dma_unmap(ipr_cmd->scsi_cmd);
4840 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 4776 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4841 scsi_cmd->scsi_done(scsi_cmd); 4777 scsi_cmd->scsi_done(scsi_cmd);
4842 } else 4778 } else