author     Tejun Heo <htejun@gmail.com>      2007-12-05 02:43:11 -0500
committer  Jeff Garzik <jeff@garzik.org>     2008-01-23 05:24:14 -0500
commit     ff2aeb1eb64c8a4770a6304f9addbae9f9828646 (patch)
tree       c6febbec290ec6c40bf3abc7bcdb7188f5039443 /drivers
parent     f92a26365a72333f418abe82700c6030d4a1a807 (diff)
libata: convert to chained sg
libata used private sg iterator to handle padding sg. Now that sg can
be chained, padding can be handled using standard sg ops. Convert to
chained sg.
* s/qc->__sg/qc->sg/
* s/qc->pad_sgent/qc->extra_sg[]/. Because chaining consumes one sg
entry, there need to be two extra sg entries. The renaming is also
for future addition of other extra sg entries.
* Padding setup is moved into ata_sg_setup_extra() which is organized
in a way that future addition of other extra sg entries is easy.
* qc->orig_n_elem is unused and removed.
* qc->n_elem now contains the number of sg entries that LLDs should
map. qc->mapped_n_elem is added to carry the original number of
mapped sgs for unmapping.
* The last sg of the original sg list is used to chain to extra sg
list. The original last sg is pointed to by qc->last_sg and the
content is stored in qc->saved_last_sg. It's restored during
ata_sg_clean().
* All sg walking code has been updated. Unnecessary assertions and
checks for conditions the core layer already guarantees are removed.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
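
For illustration only -- the sketch below is not part of the commit. It shows how a
single padding entry can be spliced onto the tail of an sg list with the standard
chaining helpers, which is the scheme ata_sg_setup_extra() follows in the libata-core
hunks further down. The helper chain_pad_entry() and its parameters are hypothetical
names, and the save/restore of the original last entry (qc->last_sg / qc->saved_last_sg)
is left out.

#include <linux/scatterlist.h>

/*
 * Hypothetical helper, not code from this commit: splice one "pad"
 * entry onto the end of an existing sg list.  Chaining consumes one
 * entry, so two extra entries are reserved: extra[0] may receive a
 * copy of the trimmed original last entry, extra[1] always holds the
 * pad data.
 */
static unsigned int chain_pad_entry(struct scatterlist *sgl,
                                    unsigned int n_elem,
                                    struct scatterlist extra[2],
                                    struct page *pad_page,
                                    unsigned int pad_len)
{
        struct scatterlist *lsg = sg_last(sgl, n_elem);
        struct scatterlist *tsg;

        sg_init_table(extra, 2);

        /* the pad data always lives in extra[1] */
        sg_set_page(&extra[1], pad_page, pad_len, 0);

        lsg->length -= pad_len;
        if (lsg->length) {
                /* last entry still carries data: keep a trimmed copy */
                sg_set_page(&extra[0], sg_page(lsg), lsg->length,
                            lsg->offset);
                tsg = &extra[0];
        } else {
                /* last entry is completely replaced by the pad entry */
                n_elem--;
                tsg = &extra[1];
        }

        /* turn the original last entry into a chain link */
        sg_chain(lsg, 1, tsg);
        sg_mark_end(&extra[1]);

        /*
         * Caveat the real code handles: if lsg is also the first entry,
         * the list would now start with a chain link, so the caller must
         * fast-forward its head pointer to tsg.
         */
        return n_elem + 1;      /* entries a driver should now walk */
}
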
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/ahci.c             |  18
-rw-r--r--  drivers/ata/libata-core.c      | 201
-rw-r--r--  drivers/ata/libata-scsi.c      |   2
-rw-r--r--  drivers/ata/pata_bf54x.c       |  13
-rw-r--r--  drivers/ata/pata_icside.c      |   3
-rw-r--r--  drivers/ata/pdc_adma.c         |   3
-rw-r--r--  drivers/ata/sata_fsl.c         |   4
-rw-r--r--  drivers/ata/sata_mv.c          |   3
-rw-r--r--  drivers/ata/sata_nv.c          |  25
-rw-r--r--  drivers/ata/sata_promise.c     |  40
-rw-r--r--  drivers/ata/sata_qstor.c       |  13
-rw-r--r--  drivers/ata/sata_sil24.c       |   6
-rw-r--r--  drivers/ata/sata_sx4.c         |   4
-rw-r--r--  drivers/scsi/ipr.c             |   3
-rw-r--r--  drivers/scsi/libsas/sas_ata.c  |  10
15 files changed, 185 insertions, 163 deletions
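
The driver hunks below all make the same mechanical change: the private ata_for_each_sg()
walker becomes the generic for_each_sg() iterator over qc->sg and qc->n_elem. As a rough,
hedged sketch of the resulting shape (compare the converted ahci_fill_sg() that follows),
with example_prd and example_fill_sg() as made-up stand-ins and 32-bit DMA addresses
assumed for brevity:

#include <linux/libata.h>
#include <linux/scatterlist.h>

/* Hypothetical PRD-style descriptor, for illustration only. */
struct example_prd {
        __le32 addr;
        __le32 len;
};

static unsigned int example_fill_sg(struct ata_queued_cmd *qc,
                                    struct example_prd *prd)
{
        struct scatterlist *sg;
        unsigned int si;

        /*
         * Walk qc->sg for qc->n_elem entries with the generic iterator;
         * any padding entry has already been chained in by the core, so
         * no driver-side special casing is needed.
         */
        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                prd[si].addr = cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
                prd[si].len  = cpu_to_le32(sg_dma_len(sg));
        }

        return si;      /* number of descriptors filled */
}
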
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5eee91c73c90..cffad07c65bf 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1483,28 +1483,24 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
 {
         struct scatterlist *sg;
-        struct ahci_sg *ahci_sg;
-        unsigned int n_sg = 0;
+        struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
+        unsigned int si;
 
         VPRINTK("ENTER\n");
 
         /*
          * Next, the S/G list.
          */
-        ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
-        ata_for_each_sg(sg, qc) {
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 dma_addr_t addr = sg_dma_address(sg);
                 u32 sg_len = sg_dma_len(sg);
 
-                ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
-                ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
-                ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
-
-                ahci_sg++;
-                n_sg++;
+                ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
+                ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
+                ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
         }
 
-        return n_sg;
+        return si;
 }
 
 static void ahci_qc_prep(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 865428a64de3..e998028302da 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4471,13 +4471,13 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
 void ata_sg_clean(struct ata_queued_cmd *qc)
 {
         struct ata_port *ap = qc->ap;
-        struct scatterlist *sg = qc->__sg;
+        struct scatterlist *sg = qc->sg;
         int dir = qc->dma_dir;
         void *pad_buf = NULL;
 
         WARN_ON(sg == NULL);
 
-        VPRINTK("unmapping %u sg elements\n", qc->n_elem);
+        VPRINTK("unmapping %u sg elements\n", qc->mapped_n_elem);
 
         /* if we padded the buffer out to 32-bit bound, and data
          * xfer direction is from-device, we must copy from the
@@ -4486,19 +4486,20 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
         if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
                 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
 
-        if (qc->n_elem)
-                dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
+        if (qc->mapped_n_elem)
+                dma_unmap_sg(ap->dev, sg, qc->mapped_n_elem, dir);
         /* restore last sg */
-        sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
+        if (qc->last_sg)
+                *qc->last_sg = qc->saved_last_sg;
         if (pad_buf) {
-                struct scatterlist *psg = &qc->pad_sgent;
+                struct scatterlist *psg = &qc->extra_sg[1];
                 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
                 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
                 kunmap_atomic(addr, KM_IRQ0);
         }
 
         qc->flags &= ~ATA_QCFLAG_DMAMAP;
-        qc->__sg = NULL;
+        qc->sg = NULL;
 }
 
 /**
@@ -4516,13 +4517,10 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
 {
         struct ata_port *ap = qc->ap;
         struct scatterlist *sg;
-        unsigned int idx;
+        unsigned int si, pi;
 
-        WARN_ON(qc->__sg == NULL);
-        WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
-
-        idx = 0;
-        ata_for_each_sg(sg, qc) {
+        pi = 0;
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 u32 addr, offset;
                 u32 sg_len, len;
 
@@ -4539,18 +4537,17 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
                         if ((offset + sg_len) > 0x10000)
                                 len = 0x10000 - offset;
 
-                        ap->prd[idx].addr = cpu_to_le32(addr);
-                        ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
-                        VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+                        ap->prd[pi].addr = cpu_to_le32(addr);
+                        ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
+                        VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
 
-                        idx++;
+                        pi++;
                         sg_len -= len;
                         addr += len;
                 }
         }
 
-        if (idx)
-                ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+        ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }
 
 /**
@@ -4570,13 +4567,10 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
 {
         struct ata_port *ap = qc->ap;
         struct scatterlist *sg;
-        unsigned int idx;
-
-        WARN_ON(qc->__sg == NULL);
-        WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
+        unsigned int si, pi;
 
-        idx = 0;
-        ata_for_each_sg(sg, qc) {
+        pi = 0;
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 u32 addr, offset;
                 u32 sg_len, len, blen;
 
@@ -4594,25 +4588,24 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
                                 len = 0x10000 - offset;
 
                         blen = len & 0xffff;
-                        ap->prd[idx].addr = cpu_to_le32(addr);
+                        ap->prd[pi].addr = cpu_to_le32(addr);
                         if (blen == 0) {
                                 /* Some PATA chipsets like the CS5530 can't
                                    cope with 0x0000 meaning 64K as the spec says */
-                                ap->prd[idx].flags_len = cpu_to_le32(0x8000);
+                                ap->prd[pi].flags_len = cpu_to_le32(0x8000);
                                 blen = 0x8000;
-                                ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
+                                ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
                         }
-                        ap->prd[idx].flags_len = cpu_to_le32(blen);
-                        VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+                        ap->prd[pi].flags_len = cpu_to_le32(blen);
+                        VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
 
-                        idx++;
+                        pi++;
                         sg_len -= len;
                         addr += len;
                 }
         }
 
-        if (idx)
-                ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+        ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }
 
 /**
@@ -4764,54 +4757,48 @@ void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
                  unsigned int n_elem)
 {
-        qc->__sg = sg;
+        qc->sg = sg;
         qc->n_elem = n_elem;
-        qc->orig_n_elem = n_elem;
-        qc->cursg = qc->__sg;
+        qc->cursg = qc->sg;
 }
 
-/**
- *      ata_sg_setup - DMA-map the scatter-gather table associated with a command.
- *      @qc: Command with scatter-gather table to be mapped.
- *
- *      DMA-map the scatter-gather table associated with queued_cmd @qc.
- *
- *      LOCKING:
- *      spin_lock_irqsave(host lock)
- *
- *      RETURNS:
- *      Zero on success, negative on error.
- *
- */
-
-static int ata_sg_setup(struct ata_queued_cmd *qc)
+static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
+                                       unsigned int *n_elem_extra)
 {
         struct ata_port *ap = qc->ap;
-        struct scatterlist *sg = qc->__sg;
-        struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
-        int n_elem, pre_n_elem, dir, trim_sg = 0;
+        unsigned int n_elem = qc->n_elem;
+        struct scatterlist *lsg, *copy_lsg = NULL, *tsg = NULL, *esg = NULL;
 
-        VPRINTK("ENTER, ata%u\n", ap->print_id);
+        *n_elem_extra = 0;
+
+        /* needs padding? */
+        qc->pad_len = qc->nbytes & 3;
+
+        if (likely(!qc->pad_len))
+                return n_elem;
+
+        /* locate last sg and save it */
+        lsg = sg_last(qc->sg, n_elem);
+        qc->last_sg = lsg;
+        qc->saved_last_sg = *lsg;
+
+        sg_init_table(qc->extra_sg, ARRAY_SIZE(qc->extra_sg));
 
-        /* we must lengthen transfers to end on a 32-bit boundary */
-        qc->pad_len = lsg->length & 3;
         if (qc->pad_len) {
+                struct scatterlist *psg = &qc->extra_sg[1];
                 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
-                struct scatterlist *psg = &qc->pad_sgent;
                 unsigned int offset;
 
                 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
 
                 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
 
-                /*
-                 * psg->page/offset are used to copy to-be-written
+                /* psg->page/offset are used to copy to-be-written
                  * data in this function or read data in ata_sg_clean.
                  */
                 offset = lsg->offset + lsg->length - qc->pad_len;
-                sg_init_table(psg, 1);
                 sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
                             qc->pad_len, offset_in_page(offset));
 
                 if (qc->tf.flags & ATA_TFLAG_WRITE) {
                         void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
@@ -4821,36 +4808,84 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 
                 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
                 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
-                /* trim last sg */
+
+                /* Trim the last sg entry and chain the original and
+                 * padding sg lists.
+                 *
+                 * Because chaining consumes one sg entry, one extra
+                 * sg entry is allocated and the last sg entry is
+                 * copied to it if the length isn't zero after padded
+                 * amount is removed.
+                 *
+                 * If the last sg entry is completely replaced by
+                 * padding sg entry, the first sg entry is skipped
+                 * while chaining.
+                 */
                 lsg->length -= qc->pad_len;
-                if (lsg->length == 0)
-                        trim_sg = 1;
+                if (lsg->length) {
+                        copy_lsg = &qc->extra_sg[0];
+                        tsg = &qc->extra_sg[0];
+                } else {
+                        n_elem--;
+                        tsg = &qc->extra_sg[1];
+                }
+
+                esg = &qc->extra_sg[1];
 
-                DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
-                        qc->n_elem - 1, lsg->length, qc->pad_len);
+                (*n_elem_extra)++;
         }
 
-        pre_n_elem = qc->n_elem;
-        if (trim_sg && pre_n_elem)
-                pre_n_elem--;
+        if (copy_lsg)
+                sg_set_page(copy_lsg, sg_page(lsg), lsg->length, lsg->offset);
 
-        if (!pre_n_elem) {
-                n_elem = 0;
-                goto skip_map;
+        sg_chain(lsg, 1, tsg);
+        sg_mark_end(esg);
+
+        /* sglist can't start with chaining sg entry, fast forward */
+        if (qc->sg == lsg) {
+                qc->sg = tsg;
+                qc->cursg = tsg;
         }
 
-        dir = qc->dma_dir;
-        n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
-        if (n_elem < 1) {
-                /* restore last sg */
-                lsg->length += qc->pad_len;
-                return -1;
+        return n_elem;
+}
+
+/**
+ *      ata_sg_setup - DMA-map the scatter-gather table associated with a command.
+ *      @qc: Command with scatter-gather table to be mapped.
+ *
+ *      DMA-map the scatter-gather table associated with queued_cmd @qc.
+ *
+ *      LOCKING:
+ *      spin_lock_irqsave(host lock)
+ *
+ *      RETURNS:
+ *      Zero on success, negative on error.
+ *
+ */
+static int ata_sg_setup(struct ata_queued_cmd *qc)
+{
+        struct ata_port *ap = qc->ap;
+        unsigned int n_elem, n_elem_extra;
+
+        VPRINTK("ENTER, ata%u\n", ap->print_id);
+
+        n_elem = ata_sg_setup_extra(qc, &n_elem_extra);
+
+        if (n_elem) {
+                n_elem = dma_map_sg(ap->dev, qc->sg, n_elem, qc->dma_dir);
+                if (n_elem < 1) {
+                        /* restore last sg */
+                        if (qc->last_sg)
+                                *qc->last_sg = qc->saved_last_sg;
+                        return -1;
+                }
+                DPRINTK("%d sg elements mapped\n", n_elem);
         }
 
-        DPRINTK("%d sg elements mapped\n", n_elem);
+        qc->n_elem = qc->mapped_n_elem = n_elem;
+        qc->n_elem += n_elem_extra;
 
-skip_map:
-        qc->n_elem = n_elem;
         qc->flags |= ATA_QCFLAG_DMAMAP;
 
         return 0;
@@ -5912,7 +5947,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
         /* We guarantee to LLDs that they will have at least one
          * non-zero sg if the command is a data command.
          */
-        BUG_ON(ata_is_data(prot) && (!qc->__sg || !qc->n_elem || !qc->nbytes));
+        BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
 
         if (ata_is_dma(prot) || (ata_is_pio(prot) &&
                                  (ap->flags & ATA_FLAG_PIO_DMA)))
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 5fd780e509d4..42bf61599730 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -517,7 +517,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
                 qc->scsicmd = cmd;
                 qc->scsidone = done;
 
-                qc->__sg = scsi_sglist(cmd);
+                qc->sg = scsi_sglist(cmd);
                 qc->n_elem = scsi_sg_count(cmd);
         } else {
                 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 41cd921082ba..a32e3c44a606 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -832,6 +832,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
 {
         unsigned short config = WDSIZE_16;
         struct scatterlist *sg;
+        unsigned int si;
 
         pr_debug("in atapi dma setup\n");
         /* Program the ATA_CTRL register with dir */
@@ -839,7 +840,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
                 /* fill the ATAPI DMA controller */
                 set_dma_config(CH_ATAPI_TX, config);
                 set_dma_x_modify(CH_ATAPI_TX, 2);
-                ata_for_each_sg(sg, qc) {
+                for_each_sg(qc->sg, sg, qc->n_elem, si) {
                         set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
                         set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
                 }
@@ -848,7 +849,7 @@ static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
                 /* fill the ATAPI DMA controller */
                 set_dma_config(CH_ATAPI_RX, config);
                 set_dma_x_modify(CH_ATAPI_RX, 2);
-                ata_for_each_sg(sg, qc) {
+                for_each_sg(qc->sg, sg, qc->n_elem, si) {
                         set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
                         set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
                 }
@@ -867,6 +868,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
         struct ata_port *ap = qc->ap;
         void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
         struct scatterlist *sg;
+        unsigned int si;
 
         pr_debug("in atapi dma start\n");
         if (!(ap->udma_mask || ap->mwdma_mask))
@@ -881,7 +883,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
          * data cache is enabled. Otherwise, this loop
          * is an empty loop and optimized out.
          */
-        ata_for_each_sg(sg, qc) {
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 flush_dcache_range(sg_dma_address(sg),
                         sg_dma_address(sg) + sg_dma_len(sg));
         }
@@ -910,7 +912,7 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
         ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);
 
         /* Set transfer length to buffer len */
-        ata_for_each_sg(sg, qc) {
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
         }
 
@@ -932,6 +934,7 @@ static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
 {
         struct ata_port *ap = qc->ap;
         struct scatterlist *sg;
+        unsigned int si;
 
         pr_debug("in atapi dma stop\n");
         if (!(ap->udma_mask || ap->mwdma_mask))
@@ -950,7 +953,7 @@ static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
          * data cache is enabled. Otherwise, this loop
          * is an empty loop and optimized out.
          */
-        ata_for_each_sg(sg, qc) {
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 invalidate_dcache_range(
                         sg_dma_address(sg),
                         sg_dma_address(sg)
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 842fe08a3c13..5b8586dac63b 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -224,6 +224,7 @@ static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
         struct pata_icside_state *state = ap->host->private_data;
         struct scatterlist *sg, *rsg = state->sg;
         unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;
+        unsigned int si;
 
         /*
          * We are simplex; BUG if we try to fiddle with DMA
@@ -234,7 +235,7 @@ static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
         /*
          * Copy ATAs scattered sg list into a contiguous array of sg
          */
-        ata_for_each_sg(sg, qc) {
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 memcpy(rsg, sg, sizeof(*sg));
                 rsg++;
         }
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 459cb7bb7d7f..8e1b7e9c0ae4 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -321,8 +321,9 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
         u8 *buf = pp->pkt, *last_buf = NULL;
         int i = (2 + buf[3]) * 8;
         u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
+        unsigned int si;
 
-        ata_for_each_sg(sg, qc) {
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 u32 addr;
                 u32 len;
 
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index a3c33f165427..d041709dee1a 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -323,6 +323,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
         struct scatterlist *sg;
         unsigned int num_prde = 0;
         u32 ttl_dwords = 0;
+        unsigned int si;
 
         /*
          * NOTE : direct & indirect prdt's are contigiously allocated
@@ -333,13 +334,14 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
         struct prde *prd_ptr_to_indirect_ext = NULL;
         unsigned indirect_ext_segment_sz = 0;
         dma_addr_t indirect_ext_segment_paddr;
+        unsigned int si;
 
         VPRINTK("SATA FSL : cd = 0x%x, prd = 0x%x\n", cmd_desc, prd);
 
         indirect_ext_segment_paddr = cmd_desc_paddr +
                 SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16;
 
-        ata_for_each_sg(sg, qc) {
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 dma_addr_t sg_addr = sg_dma_address(sg);
                 u32 sg_len = sg_dma_len(sg);
 
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 37b850ae0845..7e72463a90eb 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1136,9 +1136,10 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
         struct mv_port_priv *pp = qc->ap->private_data;
         struct scatterlist *sg;
         struct mv_sg *mv_sg, *last_sg = NULL;
+        unsigned int si;
 
         mv_sg = pp->sg_tbl;
-        ata_for_each_sg(sg, qc) {
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 dma_addr_t addr = sg_dma_address(sg);
                 u32 sg_len = sg_dma_len(sg);
 
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index ed5dc7cb50cd..a0f98fdab7a0 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1336,21 +1336,18 @@ static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
 {
         struct nv_adma_port_priv *pp = qc->ap->private_data;
-        unsigned int idx;
         struct nv_adma_prd *aprd;
         struct scatterlist *sg;
+        unsigned int si;
 
         VPRINTK("ENTER\n");
 
-        idx = 0;
-
-        ata_for_each_sg(sg, qc) {
-                aprd = (idx < 5) ? &cpb->aprd[idx] :
-                        &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
-                nv_adma_fill_aprd(qc, sg, idx, aprd);
-                idx++;
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
+                aprd = (si < 5) ? &cpb->aprd[si] :
+                        &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
+                nv_adma_fill_aprd(qc, sg, si, aprd);
         }
-        if (idx > 5)
+        if (si > 5)
                 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
         else
                 cpb->next_aprd = cpu_to_le64(0);
@@ -1995,17 +1992,14 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
 {
         struct ata_port *ap = qc->ap;
         struct scatterlist *sg;
-        unsigned int idx;
         struct nv_swncq_port_priv *pp = ap->private_data;
         struct ata_prd *prd;
-
-        WARN_ON(qc->__sg == NULL);
-        WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
+        unsigned int si, idx;
 
         prd = pp->prd + ATA_MAX_PRD * qc->tag;
 
         idx = 0;
-        ata_for_each_sg(sg, qc) {
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 u32 addr, offset;
                 u32 sg_len, len;
 
@@ -2027,8 +2021,7 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
                 }
         }
 
-        if (idx)
-                prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+        prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }
 
 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 01738d736d44..a07d319f6e8c 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -533,17 +533,15 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
 {
         struct ata_port *ap = qc->ap;
         struct scatterlist *sg;
-        unsigned int idx;
         const u32 SG_COUNT_ASIC_BUG = 41*4;
+        unsigned int si, idx;
+        u32 len;
 
         if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                 return;
 
-        WARN_ON(qc->__sg == NULL);
-        WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
-
         idx = 0;
-        ata_for_each_sg(sg, qc) {
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 u32 addr, offset;
                 u32 sg_len, len;
 
@@ -570,29 +568,27 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
                 }
         }
 
-        if (idx) {
-                u32 len = le32_to_cpu(ap->prd[idx - 1].flags_len);
+        len = le32_to_cpu(ap->prd[idx - 1].flags_len);
 
-                if (len > SG_COUNT_ASIC_BUG) {
-                        u32 addr;
+        if (len > SG_COUNT_ASIC_BUG) {
+                u32 addr;
 
-                        VPRINTK("Splitting last PRD.\n");
+                VPRINTK("Splitting last PRD.\n");
 
-                        addr = le32_to_cpu(ap->prd[idx - 1].addr);
-                        ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
-                        VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
+                addr = le32_to_cpu(ap->prd[idx - 1].addr);
+                ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
+                VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
 
-                        addr = addr + len - SG_COUNT_ASIC_BUG;
-                        len = SG_COUNT_ASIC_BUG;
-                        ap->prd[idx].addr = cpu_to_le32(addr);
-                        ap->prd[idx].flags_len = cpu_to_le32(len);
-                        VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+                addr = addr + len - SG_COUNT_ASIC_BUG;
+                len = SG_COUNT_ASIC_BUG;
+                ap->prd[idx].addr = cpu_to_le32(addr);
+                ap->prd[idx].flags_len = cpu_to_le32(len);
+                VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
 
-                        idx++;
-                }
-
-                ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+                idx++;
         }
+
+        ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }
 
 static void pdc_qc_prep(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 4e5f07bdd069..91cc12c82040 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -287,14 +287,10 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
         struct scatterlist *sg;
         struct ata_port *ap = qc->ap;
         struct qs_port_priv *pp = ap->private_data;
-        unsigned int nelem;
         u8 *prd = pp->pkt + QS_CPB_BYTES;
+        unsigned int si;
 
-        WARN_ON(qc->__sg == NULL);
-        WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
-
-        nelem = 0;
-        ata_for_each_sg(sg, qc) {
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 u64 addr;
                 u32 len;
 
@@ -306,12 +302,11 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
                 *(__le32 *)prd = cpu_to_le32(len);
                 prd += sizeof(u64);
 
-                VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem,
+                VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si,
                                 (unsigned long long)addr, len);
-                nelem++;
         }
 
-        return nelem;
+        return si;
 }
 
 static void qs_qc_prep(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index fdd3ceac329b..b4b1f91ea693 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -813,8 +813,9 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
 {
         struct scatterlist *sg;
         struct sil24_sge *last_sge = NULL;
+        unsigned int si;
 
-        ata_for_each_sg(sg, qc) {
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 sge->addr = cpu_to_le64(sg_dma_address(sg));
                 sge->cnt = cpu_to_le32(sg_dma_len(sg));
                 sge->flags = 0;
@@ -823,8 +824,7 @@ static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
                 sge++;
         }
 
-        if (likely(last_sge))
-                last_sge->flags = cpu_to_le32(SGE_TRM);
+        last_sge->flags = cpu_to_le32(SGE_TRM);
 }
 
 static int sil24_qc_defer(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 3de0c27caf53..211ba8da64f4 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -473,7 +473,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
         void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
         void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
         unsigned int portno = ap->port_no;
-        unsigned int i, idx, total_len = 0, sgt_len;
+        unsigned int i, si, idx, total_len = 0, sgt_len;
         u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
 
         WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
@@ -487,7 +487,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
          * Build S/G table
          */
         idx = 0;
-        ata_for_each_sg(sg, qc) {
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
                 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
                 total_len += sg_dma_len(sg);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 3e78bc2d9170..aa0df0a4b22a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5142,6 +5142,7 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
         struct ipr_ioadl_desc *last_ioadl = NULL;
         int len = qc->nbytes + qc->pad_len;
         struct scatterlist *sg;
+        unsigned int si;
 
         if (len == 0)
                 return;
@@ -5159,7 +5160,7 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
         }
 
-        ata_for_each_sg(sg, qc) {
+        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
 
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index f78d0605747d..827cfb132f21 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -158,8 +158,8 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
         struct Scsi_Host *host = sas_ha->core.shost;
         struct sas_internal *i = to_sas_internal(host->transportt);
         struct scatterlist *sg;
-        unsigned int num = 0;
         unsigned int xfer = 0;
+        unsigned int si;
 
         task = sas_alloc_task(GFP_ATOMIC);
         if (!task)
@@ -181,17 +181,15 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
                 task->total_xfer_len = qc->nbytes + qc->pad_len;
                 task->num_scatter = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
         } else {
-                ata_for_each_sg(sg, qc) {
-                        num++;
+                for_each_sg(qc->sg, sg, qc->n_elem, si)
                         xfer += sg->length;
-                }
 
                 task->total_xfer_len = xfer;
-                task->num_scatter = num;
+                task->num_scatter = si;
         }
 
         task->data_dir = qc->dma_dir;
-        task->scatter = qc->__sg;
+        task->scatter = qc->sg;
         task->ata_task.retry_count = 1;
         task->task_state_flags = SAS_TASK_STATE_PENDING;
         qc->lldd_task = task;