Diffstat (limited to 'drivers')
-rw-r--r--   drivers/ata/ahci.c              5
-rw-r--r--   drivers/ata/libata-core.c     145
-rw-r--r--   drivers/ata/libata-scsi.c      23
-rw-r--r--   drivers/ata/pata_icside.c       8
-rw-r--r--   drivers/ata/sata_fsl.c         13
-rw-r--r--   drivers/ata/sata_mv.c           6
-rw-r--r--   drivers/ata/sata_sil24.c        5
-rw-r--r--   drivers/scsi/ipr.c              4
-rw-r--r--   drivers/scsi/libsas/sas_ata.c   4
9 files changed, 20 insertions(+), 193 deletions(-)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 29e71bddd6ff..3c06e457b4dc 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1975,16 +1975,11 @@ static int ahci_port_start(struct ata_port *ap)
 	struct ahci_port_priv *pp;
 	void *mem;
 	dma_addr_t mem_dma;
-	int rc;
 
 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 
-	rc = ata_pad_alloc(ap, dev);
-	if (rc)
-		return rc;
-
 	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
 				  GFP_KERNEL);
 	if (!mem)
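With the ata_pad_alloc() call gone, ahci_port_start() is left with only device-managed allocations, so the rc variable and its early return disappear as well. A rough sketch of the resulting shape follows; the function name, the dev = ap->host->dev assignment, and the trailing comment are illustrative assumptions, not the verbatim driver code.

static int ahci_port_start_sketch(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;	/* assumed host device */
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;

	/* devm_/dmam_ allocations are released automatically when the
	 * device goes away, so there is nothing to unwind by hand.
	 */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/* remaining command-list/FIS setup elided in this sketch */
	return 0;
}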
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a109ccbda9ca..3587ac3fe3f3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4493,30 +4493,13 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct scatterlist *sg = qc->sg;
 	int dir = qc->dma_dir;
-	void *pad_buf = NULL;
 
 	WARN_ON(sg == NULL);
 
-	VPRINTK("unmapping %u sg elements\n", qc->mapped_n_elem);
+	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
 
-	/* if we padded the buffer out to 32-bit bound, and data
-	 * xfer direction is from-device, we must copy from the
-	 * pad buffer back into the supplied buffer
-	 */
-	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
-		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
-
-	if (qc->mapped_n_elem)
-		dma_unmap_sg(ap->dev, sg, qc->mapped_n_elem, dir);
-	/* restore last sg */
-	if (qc->last_sg)
-		*qc->last_sg = qc->saved_last_sg;
-	if (pad_buf) {
-		struct scatterlist *psg = &qc->extra_sg[1];
-		void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
-		memcpy(addr + psg->offset, pad_buf, qc->pad_len);
-		kunmap_atomic(addr, KM_IRQ0);
-	}
+	if (qc->n_elem)
+		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
 
 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
 	qc->sg = NULL;
@@ -4767,97 +4750,6 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
 	qc->cursg = qc->sg;
 }
 
-static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
-				       unsigned int *n_elem_extra,
-				       unsigned int *nbytes_extra)
-{
-	struct ata_port *ap = qc->ap;
-	unsigned int n_elem = qc->n_elem;
-	struct scatterlist *lsg, *copy_lsg = NULL, *tsg = NULL, *esg = NULL;
-
-	*n_elem_extra = 0;
-	*nbytes_extra = 0;
-
-	/* needs padding? */
-	qc->pad_len = qc->nbytes & 3;
-
-	if (likely(!qc->pad_len))
-		return n_elem;
-
-	/* locate last sg and save it */
-	lsg = sg_last(qc->sg, n_elem);
-	qc->last_sg = lsg;
-	qc->saved_last_sg = *lsg;
-
-	sg_init_table(qc->extra_sg, ARRAY_SIZE(qc->extra_sg));
-
-	if (qc->pad_len) {
-		struct scatterlist *psg = &qc->extra_sg[1];
-		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
-		unsigned int offset;
-
-		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
-
-		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
-
-		/* psg->page/offset are used to copy to-be-written
-		 * data in this function or read data in ata_sg_clean.
-		 */
-		offset = lsg->offset + lsg->length - qc->pad_len;
-		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
-			    qc->pad_len, offset_in_page(offset));
-
-		if (qc->tf.flags & ATA_TFLAG_WRITE) {
-			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
-			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
-			kunmap_atomic(addr, KM_IRQ0);
-		}
-
-		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
-		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
-
-		/* Trim the last sg entry and chain the original and
-		 * padding sg lists.
-		 *
-		 * Because chaining consumes one sg entry, one extra
-		 * sg entry is allocated and the last sg entry is
-		 * copied to it if the length isn't zero after padded
-		 * amount is removed.
-		 *
-		 * If the last sg entry is completely replaced by
-		 * padding sg entry, the first sg entry is skipped
-		 * while chaining.
-		 */
-		lsg->length -= qc->pad_len;
-		if (lsg->length) {
-			copy_lsg = &qc->extra_sg[0];
-			tsg = &qc->extra_sg[0];
-		} else {
-			n_elem--;
-			tsg = &qc->extra_sg[1];
-		}
-
-		esg = &qc->extra_sg[1];
-
-		(*n_elem_extra)++;
-		(*nbytes_extra) += 4 - qc->pad_len;
-	}
-
-	if (copy_lsg)
-		sg_set_page(copy_lsg, sg_page(lsg), lsg->length, lsg->offset);
-
-	sg_chain(lsg, 1, tsg);
-	sg_mark_end(esg);
-
-	/* sglist can't start with chaining sg entry, fast forward */
-	if (qc->sg == lsg) {
-		qc->sg = tsg;
-		qc->cursg = tsg;
-	}
-
-	return n_elem;
-}
-
 /**
  * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
  * @qc: Command with scatter-gather table to be mapped.
@@ -4874,26 +4766,17 @@ static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
 static int ata_sg_setup(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	unsigned int n_elem, n_elem_extra, nbytes_extra;
+	unsigned int n_elem;
 
 	VPRINTK("ENTER, ata%u\n", ap->print_id);
 
-	n_elem = ata_sg_setup_extra(qc, &n_elem_extra, &nbytes_extra);
+	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
+	if (n_elem < 1)
+		return -1;
 
-	if (n_elem) {
-		n_elem = dma_map_sg(ap->dev, qc->sg, n_elem, qc->dma_dir);
-		if (n_elem < 1) {
-			/* restore last sg */
-			if (qc->last_sg)
-				*qc->last_sg = qc->saved_last_sg;
-			return -1;
-		}
-		DPRINTK("%d sg elements mapped\n", n_elem);
-	}
+	DPRINTK("%d sg elements mapped\n", n_elem);
 
-	qc->n_elem = qc->mapped_n_elem = n_elem;
-	qc->n_elem += n_elem_extra;
-	qc->nbytes += nbytes_extra;
+	qc->n_elem = n_elem;
 	qc->flags |= ATA_QCFLAG_DMAMAP;
 
 	return 0;
@@ -5962,9 +5845,6 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 	 */
 	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
 
-	/* ata_sg_setup() may update nbytes */
-	qc->raw_nbytes = qc->nbytes;
-
 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
 				 (ap->flags & ATA_FLAG_PIO_DMA)))
 		if (ata_sg_setup(qc))
@@ -6573,19 +6453,12 @@ void ata_host_resume(struct ata_host *host)
 int ata_port_start(struct ata_port *ap)
 {
 	struct device *dev = ap->dev;
-	int rc;
 
 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
 				      GFP_KERNEL);
 	if (!ap->prd)
 		return -ENOMEM;
 
-	rc = ata_pad_alloc(ap, dev);
-	if (rc)
-		return rc;
-
-	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
-		(unsigned long long)ap->prd_dma);
 	return 0;
 }
 
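Taken together, the libata-core hunks leave a symmetric mapping pair: ata_sg_setup() hands the caller's sg table straight to dma_map_sg(), and ata_sg_clean() undoes it, with no extra_sg chaining, pad buffer, or saved last-sg to restore. A condensed sketch of the two paths as they stand after this patch; all names come from the hunks above, only the surrounding qc bookkeeping and debug printks are trimmed.

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	/* map the sg table exactly as supplied; no pad entry is appended */
	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)
		return -1;

	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;
	return 0;
}

void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* unmap what ata_sg_setup() mapped, then drop the DMAMAP state */
	if (qc->n_elem)
		dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}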
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1cea18f62abc..6e15c5ddae6d 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -832,24 +832,16 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
 	/* configure max sectors */
 	blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
 
-	/* SATA DMA transfers must be multiples of 4 byte, so
-	 * we need to pad ATAPI transfers using an extra sg.
-	 * Decrement max hw segments accordingly.
-	 */
-	if (dev->class == ATA_DEV_ATAPI) {
-		struct request_queue *q = sdev->request_queue;
-		blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
-
+	if (dev->class == ATA_DEV_ATAPI)
 		/* set the min alignment */
 		blk_queue_update_dma_alignment(sdev->request_queue,
 					       ATA_DMA_PAD_SZ - 1);
-	} else
+	else {
 		/* ATA devices must be sector aligned */
 		blk_queue_update_dma_alignment(sdev->request_queue,
 					       ATA_SECT_SIZE - 1);
-
-	if (dev->class == ATA_DEV_ATA)
 		sdev->manage_start_stop = 1;
+	}
 
 	if (dev->flags & ATA_DFLAG_AN)
 		set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
@@ -2500,7 +2492,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
 	 * want to set it properly, and for DMA where it is
 	 * effectively meaningless.
 	 */
-	nbytes = min(qc->nbytes, (unsigned int)63 * 1024);
+	nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024);
 
 	/* Most ATAPI devices which honor transfer chunk size don't
 	 * behave according to the spec when odd chunk size which
@@ -3555,7 +3547,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
  * @ap: Port to initialize
  *
  * Called just after data structures for each port are
- * initialized.  Allocates DMA pad.
+ * initialized.
  *
  * May be used as the port_start() entry in ata_port_operations.
  *
@@ -3564,7 +3556,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
  */
 int ata_sas_port_start(struct ata_port *ap)
 {
-	return ata_pad_alloc(ap, ap->dev);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_start);
 
@@ -3572,8 +3564,6 @@ EXPORT_SYMBOL_GPL(ata_sas_port_start);
  * ata_port_stop - Undo ata_sas_port_start()
  * @ap: Port to shut down
  *
- * Frees the DMA pad.
- *
  * May be used as the port_stop() entry in ata_port_operations.
  *
  * LOCKING:
@@ -3582,7 +3572,6 @@ EXPORT_SYMBOL_GPL(ata_sas_port_start);
 
 void ata_sas_port_stop(struct ata_port *ap)
 {
-	ata_pad_free(ap, ap->dev);
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_stop);
 
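The ata_scsi_dev_config() hunk above boils down to choosing the block-layer DMA alignment per device class: ATAPI queues only need ATA_DMA_PAD_SZ (4-byte) alignment, everything else stays sector aligned and additionally gets manage_start_stop. A small illustrative helper follows; the function name is hypothetical, but the calls and constants are taken from the hunk.

static void ata_scsi_set_dma_alignment(struct scsi_device *sdev,
				       struct ata_device *dev)
{
	if (dev->class == ATA_DEV_ATAPI)
		/* ATAPI buffers only need 4-byte (ATA_DMA_PAD_SZ) alignment */
		blk_queue_update_dma_alignment(sdev->request_queue,
					       ATA_DMA_PAD_SZ - 1);
	else {
		/* ATA devices must be sector aligned */
		blk_queue_update_dma_alignment(sdev->request_queue,
					       ATA_SECT_SIZE - 1);
		sdev->manage_start_stop = 1;
	}
}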
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 5b8586dac63b..f97068be2d79 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -304,12 +304,6 @@ static int icside_dma_init(struct pata_icside_info *info)
 }
 
 
-static int pata_icside_port_start(struct ata_port *ap)
-{
-	/* No PRD to alloc */
-	return ata_pad_alloc(ap, ap->dev);
-}
-
 static struct scsi_host_template pata_icside_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
@@ -389,8 +383,6 @@ static struct ata_port_operations pata_icside_port_ops = {
 	.irq_clear		= ata_dummy_noret,
 	.irq_on			= ata_irq_on,
 
-	.port_start		= pata_icside_port_start,
-
 	.bmdma_stop		= pata_icside_bmdma_stop,
 	.bmdma_status		= pata_icside_bmdma_status,
 };
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index efcb66b6ccef..9323dd0c7d8d 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -601,21 +601,9 @@ static int sata_fsl_port_start(struct ata_port *ap)
 	if (!pp)
 		return -ENOMEM;
 
-	/*
-	 * allocate per command dma alignment pad buffer, which is used
-	 * internally by libATA to ensure that all transfers ending on
-	 * unaligned boundaries are padded, to align on Dword boundaries
-	 */
-	retval = ata_pad_alloc(ap, dev);
-	if (retval) {
-		kfree(pp);
-		return retval;
-	}
-
 	mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
 				 GFP_KERNEL);
 	if (!mem) {
-		ata_pad_free(ap, dev);
 		kfree(pp);
 		return -ENOMEM;
 	}
@@ -694,7 +682,6 @@ static void sata_fsl_port_stop(struct ata_port *ap)
 	dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ,
 			  pp->cmdslot, pp->cmdslot_paddr);
 
-	ata_pad_free(ap, dev);
 	kfree(pp);
 }
 
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 2ecd44db4142..1c1fbf375d9a 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1158,17 +1158,13 @@ static int mv_port_start(struct ata_port *ap)
 	struct mv_port_priv *pp;
 	void __iomem *port_mmio = mv_ap_base(ap);
 	unsigned long flags;
-	int tag, rc;
+	int tag;
 
 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	ap->private_data = pp;
 
-	rc = ata_pad_alloc(ap, dev);
-	if (rc)
-		return rc;
-
 	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
 	if (!pp->crqb)
 		return -ENOMEM;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index b4b1f91ea693..df7988df7908 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -1234,7 +1234,6 @@ static int sil24_port_start(struct ata_port *ap)
 	union sil24_cmd_block *cb;
 	size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
 	dma_addr_t cb_dma;
-	int rc;
 
 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
@@ -1247,10 +1246,6 @@ static int sil24_port_start(struct ata_port *ap)
 		return -ENOMEM;
 	memset(cb, 0, cb_size);
 
-	rc = ata_pad_alloc(ap, dev);
-	if (rc)
-		return rc;
-
 	pp->cmd_block = cb;
 	pp->cmd_block_dma = cb_dma;
 
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2074701f7e76..c72014a3e7d4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5140,7 +5140,7 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
 	struct ipr_ioadl_desc *last_ioadl = NULL;
-	int len = qc->nbytes + qc->pad_len;
+	int len = qc->nbytes;
 	struct scatterlist *sg;
 	unsigned int si;
 
@@ -5206,7 +5206,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
-	ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
+	ipr_cmd->dma_use_sg = qc->n_elem;
 
 	ipr_build_ata_ioadl(ipr_cmd, qc);
 	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 0996f866f14c..7cd05b599a12 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -178,8 +178,8 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
 	task->uldd_task = qc;
 	if (ata_is_atapi(qc->tf.protocol)) {
 		memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
-		task->total_xfer_len = qc->nbytes + qc->pad_len;
-		task->num_scatter = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
+		task->total_xfer_len = qc->nbytes;
+		task->num_scatter = qc->n_elem;
 	} else {
 		for_each_sg(qc->sg, sg, qc->n_elem, si)
 			xfer += sg->length;