Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--  drivers/scsi/ipr.c   144
1 files changed, 40 insertions, 104 deletions
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b3bf77f1ec0..f142eafb6fc 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -540,32 +540,6 @@ struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 }
 
 /**
- * ipr_unmap_sglist - Unmap scatterlist if mapped
- * @ioa_cfg:	ioa config struct
- * @ipr_cmd:	ipr command struct
- *
- * Return value:
- *	nothing
- **/
-static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
-			     struct ipr_cmnd *ipr_cmd)
-{
-	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
-
-	if (ipr_cmd->dma_use_sg) {
-		if (scsi_cmd->use_sg > 0) {
-			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
-				     scsi_cmd->use_sg,
-				     scsi_cmd->sc_data_direction);
-		} else {
-			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
-					 scsi_cmd->request_bufflen,
-					 scsi_cmd->sc_data_direction);
-		}
-	}
-}
-
-/**
  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
  * @ioa_cfg:	ioa config struct
  * @clr_ints:	interrupts to clear
@@ -677,7 +651,7 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 
 	scsi_cmd->result |= (DID_ERROR << 16);
 
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	scsi_cmd->scsi_done(scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 }
@@ -4298,93 +4272,55 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
 			   struct ipr_cmnd *ipr_cmd)
 {
-	int i;
-	struct scatterlist *sglist;
+	int i, nseg;
+	struct scatterlist *sg;
 	u32 length;
 	u32 ioadl_flags = 0;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
 
-	length = scsi_cmd->request_bufflen;
-
-	if (length == 0)
+	length = scsi_bufflen(scsi_cmd);
+	if (!length)
 		return 0;
 
-	if (scsi_cmd->use_sg) {
-		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
-						 scsi_cmd->request_buffer,
-						 scsi_cmd->use_sg,
-						 scsi_cmd->sc_data_direction);
-
-		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
-			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
-			ioarcb->write_data_transfer_length = cpu_to_be32(length);
-			ioarcb->write_ioadl_len =
-				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
-		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_READ;
-			ioarcb->read_data_transfer_length = cpu_to_be32(length);
-			ioarcb->read_ioadl_len =
-				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
-		}
-
-		sglist = scsi_cmd->request_buffer;
+	nseg = scsi_dma_map(scsi_cmd);
+	if (nseg < 0) {
+		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
+		return -1;
+	}
 
-		if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
-			ioadl = ioarcb->add_data.u.ioadl;
-			ioarcb->write_ioadl_addr =
-				cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
-					    offsetof(struct ipr_ioarcb, add_data));
-			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
-		}
+	ipr_cmd->dma_use_sg = nseg;
 
-		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
-			ioadl[i].flags_and_data_len =
-				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
-			ioadl[i].address =
-				cpu_to_be32(sg_dma_address(&sglist[i]));
-		}
+	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
+		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+		ioarcb->write_data_transfer_length = cpu_to_be32(length);
+		ioarcb->write_ioadl_len =
+			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		ioadl_flags = IPR_IOADL_FLAGS_READ;
+		ioarcb->read_data_transfer_length = cpu_to_be32(length);
+		ioarcb->read_ioadl_len =
+			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+	}
 
-		if (likely(ipr_cmd->dma_use_sg)) {
-			ioadl[i-1].flags_and_data_len |=
-				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
-			return 0;
-		} else
-			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
-	} else {
-		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
-			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
-			ioarcb->write_data_transfer_length = cpu_to_be32(length);
-			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_READ;
-			ioarcb->read_data_transfer_length = cpu_to_be32(length);
-			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-		}
+	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
+		ioadl = ioarcb->add_data.u.ioadl;
+		ioarcb->write_ioadl_addr =
+			cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
+				    offsetof(struct ipr_ioarcb, add_data));
+		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+	}
 
-		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
-						     scsi_cmd->request_buffer, length,
-						     scsi_cmd->sc_data_direction);
-
-		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
-			ioadl = ioarcb->add_data.u.ioadl;
-			ioarcb->write_ioadl_addr =
-				cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
-					    offsetof(struct ipr_ioarcb, add_data));
-			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
-			ipr_cmd->dma_use_sg = 1;
-			ioadl[0].flags_and_data_len =
-				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
-			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
-			return 0;
-		} else
-			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
+	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
+		ioadl[i].flags_and_data_len =
+			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
+		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
 	}
 
-	return -1;
+	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+	return 0;
 }
 
 /**
@@ -4447,7 +4383,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
 			res->needs_sync_complete = 1;
 		res->in_erp = 0;
 	}
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 	scsi_cmd->scsi_done(scsi_cmd);
 }
@@ -4825,7 +4761,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
 		break;
 	}
 
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 	scsi_cmd->scsi_done(scsi_cmd);
 }
@@ -4846,10 +4782,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
 
-	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
+	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
 
 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
-		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+		scsi_dma_unmap(ipr_cmd->scsi_cmd);
 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 		scsi_cmd->scsi_done(scsi_cmd);
 	} else
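
For readers less familiar with the SCSI data buffer accessors this diff converts ipr to, the sketch below shows the mapping and completion pattern in isolation. It is illustrative only: example_desc, example_build_sgl() and example_complete() are hypothetical names, not code from ipr.c, and 32-bit DMA addressing is assumed as in ipr. Only scsi_bufflen(), scsi_dma_map(), scsi_for_each_sg(), sg_dma_len(), sg_dma_address(), scsi_set_resid() and scsi_dma_unmap() are the real midlayer helpers the patch uses.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical 32-bit DMA descriptor, standing in for ipr's ioadl entries. */
struct example_desc {
	__be32 flags_and_len;
	__be32 address;
};

/*
 * Map the command's data buffer and fill one descriptor per DMA segment.
 * scsi_dma_map() replaces the old pci_map_sg()/pci_map_single() split: it
 * returns the number of mapped segments, 0 when there is no data to
 * transfer, or a negative value on failure.
 */
static int example_build_sgl(struct scsi_cmnd *scsi_cmd,
			     struct example_desc *desc)
{
	struct scatterlist *sg;
	int i, nseg;

	if (!scsi_bufflen(scsi_cmd))	/* no data phase */
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0)
		return -EIO;

	scsi_for_each_sg(scsi_cmd, sg, nseg, i) {
		desc[i].flags_and_len = cpu_to_be32(sg_dma_len(sg));
		desc[i].address = cpu_to_be32(sg_dma_address(sg));
	}
	return nseg;
}

/*
 * Completion side: report the residual count and undo the DMA mapping.
 * scsi_dma_unmap() replaces the driver-private ipr_unmap_sglist().
 */
static void example_complete(struct scsi_cmnd *scsi_cmd, u32 residual)
{
	scsi_set_resid(scsi_cmd, residual);
	scsi_dma_unmap(scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
}

The reduction visible in the diffstat comes from the midlayer now always presenting the data buffer as a scatterlist, so the separate use_sg and single-buffer code paths in ipr_build_ioadl(), along with the driver-private ipr_unmap_sglist(), collapse into one path.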