Diffstat (limited to 'drivers/scsi/ipr.c')
 drivers/scsi/ipr.c | 144 ++++++------------
 1 file changed, 40 insertions(+), 104 deletions(-)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index fa6ff295e568..072f57715658 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -540,32 +540,6 @@ struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 }
 
 /**
- * ipr_unmap_sglist - Unmap scatterlist if mapped
- * @ioa_cfg:	ioa config struct
- * @ipr_cmd:	ipr command struct
- *
- * Return value:
- * 	nothing
- **/
-static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
-			     struct ipr_cmnd *ipr_cmd)
-{
-	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
-
-	if (ipr_cmd->dma_use_sg) {
-		if (scsi_cmd->use_sg > 0) {
-			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
-				     scsi_cmd->use_sg,
-				     scsi_cmd->sc_data_direction);
-		} else {
-			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
-					 scsi_cmd->request_bufflen,
-					 scsi_cmd->sc_data_direction);
-		}
-	}
-}
-
-/**
  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
  * @ioa_cfg:	ioa config struct
  * @clr_ints:	interrupts to clear
@@ -677,7 +651,7 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 
 	scsi_cmd->result |= (DID_ERROR << 16);
 
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	scsi_cmd->scsi_done(scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 }
@@ -4292,93 +4266,55 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
 			   struct ipr_cmnd *ipr_cmd)
 {
-	int i;
-	struct scatterlist *sglist;
+	int i, nseg;
+	struct scatterlist *sg;
 	u32 length;
 	u32 ioadl_flags = 0;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
 
-	length = scsi_cmd->request_bufflen;
-
-	if (length == 0)
+	length = scsi_bufflen(scsi_cmd);
+	if (!length)
 		return 0;
 
-	if (scsi_cmd->use_sg) {
-		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
-						 scsi_cmd->request_buffer,
-						 scsi_cmd->use_sg,
-						 scsi_cmd->sc_data_direction);
-
-		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
-			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
-			ioarcb->write_data_transfer_length = cpu_to_be32(length);
-			ioarcb->write_ioadl_len =
-				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
-		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_READ;
-			ioarcb->read_data_transfer_length = cpu_to_be32(length);
-			ioarcb->read_ioadl_len =
-				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
-		}
-
-		sglist = scsi_cmd->request_buffer;
+	nseg = scsi_dma_map(scsi_cmd);
+	if (nseg < 0) {
+		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
+		return -1;
+	}
 
-		if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
-			ioadl = ioarcb->add_data.u.ioadl;
-			ioarcb->write_ioadl_addr =
-				cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
-					    offsetof(struct ipr_ioarcb, add_data));
-			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
-		}
+	ipr_cmd->dma_use_sg = nseg;
 
-		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
-			ioadl[i].flags_and_data_len =
-				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
-			ioadl[i].address =
-				cpu_to_be32(sg_dma_address(&sglist[i]));
-		}
+	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
+		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+		ioarcb->write_data_transfer_length = cpu_to_be32(length);
+		ioarcb->write_ioadl_len =
+			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		ioadl_flags = IPR_IOADL_FLAGS_READ;
+		ioarcb->read_data_transfer_length = cpu_to_be32(length);
+		ioarcb->read_ioadl_len =
+			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+	}
 
-		if (likely(ipr_cmd->dma_use_sg)) {
-			ioadl[i-1].flags_and_data_len |=
-				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
-			return 0;
-		} else
-			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
-	} else {
-		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
-			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
-			ioarcb->write_data_transfer_length = cpu_to_be32(length);
-			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_READ;
-			ioarcb->read_data_transfer_length = cpu_to_be32(length);
-			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-		}
+	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
+		ioadl = ioarcb->add_data.u.ioadl;
+		ioarcb->write_ioadl_addr =
+			cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
+				    offsetof(struct ipr_ioarcb, add_data));
+		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+	}
 
-		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
-						     scsi_cmd->request_buffer, length,
-						     scsi_cmd->sc_data_direction);
-
-		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
-			ioadl = ioarcb->add_data.u.ioadl;
-			ioarcb->write_ioadl_addr =
-				cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
-					    offsetof(struct ipr_ioarcb, add_data));
-			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
-			ipr_cmd->dma_use_sg = 1;
-			ioadl[0].flags_and_data_len =
-				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
-			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
-			return 0;
-		} else
-			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
+	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
+		ioadl[i].flags_and_data_len =
+			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
+		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
 	}
 
-	return -1;
+	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+	return 0;
 }
 
 /**
@@ -4441,7 +4377,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
 		res->needs_sync_complete = 1;
 		res->in_erp = 0;
 	}
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 	scsi_cmd->scsi_done(scsi_cmd);
 }
@@ -4819,7 +4755,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
 		break;
 	}
 
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 	scsi_cmd->scsi_done(scsi_cmd);
 }
@@ -4840,10 +4776,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
 
-	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
+	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
 
 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
-		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+		scsi_dma_unmap(ipr_cmd->scsi_cmd);
 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 		scsi_cmd->scsi_done(scsi_cmd);
 	} else
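
For context, the pattern this patch adopts is the generic SCSI data buffer accessor API (scsi_bufflen, scsi_dma_map, scsi_for_each_sg, scsi_dma_unmap, scsi_set_resid), which replaces the driver's direct use of pci_map_sg()/pci_map_single() and the scsi_cmnd use_sg/request_buffer/request_bufflen fields. The following is a minimal sketch of that pattern, not ipr's actual code: the helper names and the descriptor struct are hypothetical stand-ins; only the scsi_* and sg_* calls are the real mid-layer API.

#include <linux/types.h>
#include <linux/errno.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical descriptor, standing in for ipr's struct ipr_ioadl_desc. */
struct example_sg_desc {
	__be32 flags_and_len;
	__be32 addr;
};

/*
 * Sketch of the accessor-based mapping flow: scsi_dma_map() maps the
 * command's scatterlist (returning the number of DMA segments, 0 for a
 * command with no data, or a negative value on failure), and
 * scsi_for_each_sg() walks the mapped entries.  "example_build_sg_list"
 * is a made-up helper name for illustration only.
 */
static int example_build_sg_list(struct scsi_cmnd *scsi_cmd,
				 struct example_sg_desc *desc)
{
	struct scatterlist *sg;
	int i, nseg;

	if (!scsi_bufflen(scsi_cmd))	/* no data phase */
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0)
		return -EIO;

	scsi_for_each_sg(scsi_cmd, sg, nseg, i) {
		desc[i].addr = cpu_to_be32(sg_dma_address(sg));
		desc[i].flags_and_len = cpu_to_be32(sg_dma_len(sg));
	}
	return nseg;
}

/*
 * On completion the mapping is torn down with scsi_dma_unmap() and any
 * residual byte count is reported with scsi_set_resid(), mirroring the
 * completion paths changed above.
 */
static void example_complete_cmd(struct scsi_cmnd *scsi_cmd, u32 resid)
{
	scsi_set_resid(scsi_cmd, resid);
	scsi_dma_unmap(scsi_cmd);
}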