author     Tejun Heo <htejun@gmail.com>    2007-12-05 02:43:11 -0500
committer  Jeff Garzik <jeff@garzik.org>   2008-01-23 05:24:14 -0500
commit     ff2aeb1eb64c8a4770a6304f9addbae9f9828646 (patch)
tree       c6febbec290ec6c40bf3abc7bcdb7188f5039443 /drivers/ata/libata-core.c
parent     f92a26365a72333f418abe82700c6030d4a1a807 (diff)
libata: convert to chained sg
libata used a private sg iterator to handle the padding sg. Now that
sg tables can be chained, padding can be handled with the standard sg
ops. Convert to chained sg.

* s/qc->__sg/qc->sg/

* s/qc->pad_sgent/qc->extra_sg[]/. Because chaining consumes one sg
  entry, two extra sg entries are needed. The renaming also allows for
  future addition of other extra sg entries.

* Padding setup is moved into ata_sg_setup_extra(), which is organized
  so that future addition of other extra sg entries is easy.

* qc->orig_n_elem is unused and removed.

* qc->n_elem now contains the number of sg entries that LLDs should
  map. qc->mapped_n_elem is added to carry the original number of
  mapped sgs for unmapping.

* The last sg of the original sg list is used to chain to the extra sg
  list. The original last sg is pointed to by qc->last_sg and its
  content is saved in qc->saved_last_sg; it is restored during
  ata_sg_clean().

* All sg walking code has been updated. Unnecessary assertions and
  checks for conditions the core layer already guarantees are removed.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
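As background for the conversion described above, here is a minimal, illustrative kernel-style sketch of the chained-sg pattern the patch adopts. It is not code from this patch: the function chain_extra_sg() and the names main_sg, extra_sg, pad_page and pad_len are invented for the example; only the scatterlist helpers (sg_init_table, sg_set_page, sg_chain, sg_mark_end, for_each_sg) are real kernel APIs. The idea is that the last entry of the original table is copied into a small extra table, the original last slot becomes a chain link, and the combined list is then walked with the standard iterator instead of a driver-private one.

#include <linux/scatterlist.h>

/*
 * Illustrative sketch only: append a padding buffer to an existing
 * scatterlist by chaining a two-entry "extra" table after it.  One of
 * the two entries is consumed by the chain link, which is why a single
 * padding entry still needs two extra slots.
 */
static unsigned int chain_extra_sg(struct scatterlist *main_sg,
				   unsigned int n_elem,
				   struct scatterlist *extra_sg,
				   struct page *pad_page,
				   unsigned int pad_len)
{
	struct scatterlist *lsg = &main_sg[n_elem - 1];
	struct scatterlist *sg;
	unsigned int si, total = 0;

	sg_init_table(extra_sg, 2);

	/* carry a copy of the original last entry, then the padding */
	sg_set_page(&extra_sg[0], sg_page(lsg), lsg->length, lsg->offset);
	sg_set_page(&extra_sg[1], pad_page, pad_len, 0);
	sg_mark_end(&extra_sg[1]);

	/* the original last slot becomes the chain link */
	sg_chain(main_sg, n_elem, extra_sg);

	/* the standard iterator follows the chain transparently */
	for_each_sg(main_sg, sg, n_elem + 1, si)
		total += sg->length;

	return total;
}

The real ata_sg_setup_extra() below differs in that it copies the trimmed last entry only when its length stays non-zero after the padded bytes are removed, and skips it (decrementing n_elem) otherwise.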
Diffstat (limited to 'drivers/ata/libata-core.c')
-rw-r--r--  drivers/ata/libata-core.c  201
1 file changed, 118 insertions, 83 deletions
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 865428a64de3..e998028302da 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4471,13 +4471,13 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
 void ata_sg_clean(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg = qc->__sg;
+	struct scatterlist *sg = qc->sg;
 	int dir = qc->dma_dir;
 	void *pad_buf = NULL;
 
 	WARN_ON(sg == NULL);
 
-	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
+	VPRINTK("unmapping %u sg elements\n", qc->mapped_n_elem);
 
 	/* if we padded the buffer out to 32-bit bound, and data
 	 * xfer direction is from-device, we must copy from the
@@ -4486,19 +4486,20 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
 
-	if (qc->n_elem)
-		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
+	if (qc->mapped_n_elem)
+		dma_unmap_sg(ap->dev, sg, qc->mapped_n_elem, dir);
 	/* restore last sg */
-	sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
+	if (qc->last_sg)
+		*qc->last_sg = qc->saved_last_sg;
 	if (pad_buf) {
-		struct scatterlist *psg = &qc->pad_sgent;
+		struct scatterlist *psg = &qc->extra_sg[1];
 		void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
 		memcpy(addr + psg->offset, pad_buf, qc->pad_len);
 		kunmap_atomic(addr, KM_IRQ0);
 	}
 
 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
-	qc->__sg = NULL;
+	qc->sg = NULL;
 }
 
 /**
@@ -4516,13 +4517,10 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct scatterlist *sg;
-	unsigned int idx;
+	unsigned int si, pi;
 
-	WARN_ON(qc->__sg == NULL);
-	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
-
-	idx = 0;
-	ata_for_each_sg(sg, qc) {
+	pi = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		u32 addr, offset;
 		u32 sg_len, len;
 
@@ -4539,18 +4537,17 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
 			if ((offset + sg_len) > 0x10000)
 				len = 0x10000 - offset;
 
-			ap->prd[idx].addr = cpu_to_le32(addr);
-			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+			ap->prd[pi].addr = cpu_to_le32(addr);
+			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
 
-			idx++;
+			pi++;
 			sg_len -= len;
 			addr += len;
 		}
 	}
 
-	if (idx)
-		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }
 
 /**
@@ -4570,13 +4567,10 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct scatterlist *sg;
-	unsigned int idx;
-
-	WARN_ON(qc->__sg == NULL);
-	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
+	unsigned int si, pi;
 
-	idx = 0;
-	ata_for_each_sg(sg, qc) {
+	pi = 0;
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		u32 addr, offset;
 		u32 sg_len, len, blen;
 
@@ -4594,25 +4588,24 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
 				len = 0x10000 - offset;
 
 			blen = len & 0xffff;
-			ap->prd[idx].addr = cpu_to_le32(addr);
+			ap->prd[pi].addr = cpu_to_le32(addr);
 			if (blen == 0) {
 				/* Some PATA chipsets like the CS5530 can't
 				   cope with 0x0000 meaning 64K as the spec says */
-				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
+				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
 				blen = 0x8000;
-				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
+				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
 			}
-			ap->prd[idx].flags_len = cpu_to_le32(blen);
-			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+			ap->prd[pi].flags_len = cpu_to_le32(blen);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
 
-			idx++;
+			pi++;
 			sg_len -= len;
 			addr += len;
 		}
 	}
 
-	if (idx)
-		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }
 
 /**
@@ -4764,54 +4757,48 @@ void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
 		 unsigned int n_elem)
 {
-	qc->__sg = sg;
+	qc->sg = sg;
 	qc->n_elem = n_elem;
-	qc->orig_n_elem = n_elem;
-	qc->cursg = qc->__sg;
+	qc->cursg = qc->sg;
 }
 
-/**
- *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
- *	@qc: Command with scatter-gather table to be mapped.
- *
- *	DMA-map the scatter-gather table associated with queued_cmd @qc.
- *
- *	LOCKING:
- *	spin_lock_irqsave(host lock)
- *
- *	RETURNS:
- *	Zero on success, negative on error.
- *
- */
-
-static int ata_sg_setup(struct ata_queued_cmd *qc)
+static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
+				       unsigned int *n_elem_extra)
 {
 	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg = qc->__sg;
-	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
-	int n_elem, pre_n_elem, dir, trim_sg = 0;
+	unsigned int n_elem = qc->n_elem;
+	struct scatterlist *lsg, *copy_lsg = NULL, *tsg = NULL, *esg = NULL;
 
-	VPRINTK("ENTER, ata%u\n", ap->print_id);
+	*n_elem_extra = 0;
+
+	/* needs padding? */
+	qc->pad_len = qc->nbytes & 3;
+
+	if (likely(!qc->pad_len))
+		return n_elem;
+
+	/* locate last sg and save it */
+	lsg = sg_last(qc->sg, n_elem);
+	qc->last_sg = lsg;
+	qc->saved_last_sg = *lsg;
+
+	sg_init_table(qc->extra_sg, ARRAY_SIZE(qc->extra_sg));
 
-	/* we must lengthen transfers to end on a 32-bit boundary */
-	qc->pad_len = lsg->length & 3;
 	if (qc->pad_len) {
+		struct scatterlist *psg = &qc->extra_sg[1];
 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
-		struct scatterlist *psg = &qc->pad_sgent;
 		unsigned int offset;
 
 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
 
 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
 
-		/*
-		 * psg->page/offset are used to copy to-be-written
+		/* psg->page/offset are used to copy to-be-written
 		 * data in this function or read data in ata_sg_clean.
 		 */
 		offset = lsg->offset + lsg->length - qc->pad_len;
-		sg_init_table(psg, 1);
 		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
 			    qc->pad_len, offset_in_page(offset));
 
 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
 			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
@@ -4821,36 +4808,84 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 
 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
-		/* trim last sg */
+
+		/* Trim the last sg entry and chain the original and
+		 * padding sg lists.
+		 *
+		 * Because chaining consumes one sg entry, one extra
+		 * sg entry is allocated and the last sg entry is
+		 * copied to it if the length isn't zero after padded
+		 * amount is removed.
+		 *
+		 * If the last sg entry is completely replaced by
+		 * padding sg entry, the first sg entry is skipped
+		 * while chaining.
+		 */
 		lsg->length -= qc->pad_len;
-		if (lsg->length == 0)
-			trim_sg = 1;
+		if (lsg->length) {
+			copy_lsg = &qc->extra_sg[0];
+			tsg = &qc->extra_sg[0];
+		} else {
+			n_elem--;
+			tsg = &qc->extra_sg[1];
+		}
+
+		esg = &qc->extra_sg[1];
 
-		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
-			qc->n_elem - 1, lsg->length, qc->pad_len);
+		(*n_elem_extra)++;
 	}
 
-	pre_n_elem = qc->n_elem;
-	if (trim_sg && pre_n_elem)
-		pre_n_elem--;
+	if (copy_lsg)
+		sg_set_page(copy_lsg, sg_page(lsg), lsg->length, lsg->offset);
 
-	if (!pre_n_elem) {
-		n_elem = 0;
-		goto skip_map;
+	sg_chain(lsg, 1, tsg);
+	sg_mark_end(esg);
+
+	/* sglist can't start with chaining sg entry, fast forward */
+	if (qc->sg == lsg) {
+		qc->sg = tsg;
+		qc->cursg = tsg;
 	}
 
-	dir = qc->dma_dir;
-	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
-	if (n_elem < 1) {
-		/* restore last sg */
-		lsg->length += qc->pad_len;
-		return -1;
+	return n_elem;
+}
+
+/**
+ *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
+ *	@qc: Command with scatter-gather table to be mapped.
+ *
+ *	DMA-map the scatter-gather table associated with queued_cmd @qc.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ *
+ *	RETURNS:
+ *	Zero on success, negative on error.
+ *
+ */
+static int ata_sg_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int n_elem, n_elem_extra;
+
+	VPRINTK("ENTER, ata%u\n", ap->print_id);
+
+	n_elem = ata_sg_setup_extra(qc, &n_elem_extra);
+
+	if (n_elem) {
+		n_elem = dma_map_sg(ap->dev, qc->sg, n_elem, qc->dma_dir);
+		if (n_elem < 1) {
+			/* restore last sg */
+			if (qc->last_sg)
+				*qc->last_sg = qc->saved_last_sg;
+			return -1;
+		}
+		DPRINTK("%d sg elements mapped\n", n_elem);
 	}
 
-	DPRINTK("%d sg elements mapped\n", n_elem);
+	qc->n_elem = qc->mapped_n_elem = n_elem;
+	qc->n_elem += n_elem_extra;
 
-skip_map:
-	qc->n_elem = n_elem;
 	qc->flags |= ATA_QCFLAG_DMAMAP;
 
 	return 0;
@@ -5912,7 +5947,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 	/* We guarantee to LLDs that they will have at least one
 	 * non-zero sg if the command is a data command.
 	 */
-	BUG_ON(ata_is_data(prot) && (!qc->__sg || !qc->n_elem || !qc->nbytes));
+	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
 
 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
 	    (ap->flags & ATA_FLAG_PIO_DMA)))