diff options
author | James Bottomley <James.Bottomley@HansenPartnership.com> | 2008-02-19 05:36:56 -0500 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-02-19 05:36:56 -0500 |
commit | dde2020754aeb14e17052d61784dcb37f252aac2 (patch) | |
tree | 1b6d57c6eff2024fd13e4b3b115d0a6770d8cb80 /drivers/ata/libata-core.c | |
parent | db0a2e0099be3a1cff55879225881465f16c67d3 (diff) |
libata: eliminate the home grown dma padding in favour of
that provided by the block layer
ATA requires that all DMA transfers begin and end on word boundaries.
Because of this, a large amount of machinery grew up in ide to adjust
scatterlists on this basis. However, as of 2.5, the block layer has a
dma_alignment variable which ensures both the beginning and length of a
DMA transfer are aligned on the dma_alignment boundary. Although the
block layer does adjust the beginning of the transfer to ensure this
happens, it doesn't actually adjust the length, it merely makes sure
that space is allocated for transfers beyond the declared length. The
upshot of this is that scatterlists may be padded to any size between
the actual length and the length adjusted to the dma_alignment safely
knowing that memory is allocated in this region.
Right at the moment, SCSI takes the default dma_alignment which is on a
512 byte boundary. Note that this alignment only applies to transfers
coming in from user space. However, since all kernel allocations are
automatically aligned on a minimum of 32 byte boundaries, it is safe to
adjust them in this manner as well.
tj: * Adjusting sg after padding is done in block layer. Make libata
set queue alignment correctly for ATAPI devices and drop broken
sg mangling from ata_sg_setup().
* Use request->raw_data_len for ATAPI transfer chunk size.
* Killed qc->raw_nbytes.
* Separated out killing qc->n_iter.
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers/ata/libata-core.c')
-rw-r--r-- | drivers/ata/libata-core.c | 145 |
1 file changed, 9 insertions, 136 deletions
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index a109ccbda9ca..3587ac3fe3f3 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4493,30 +4493,13 @@ void ata_sg_clean(struct ata_queued_cmd *qc) | |||
4493 | struct ata_port *ap = qc->ap; | 4493 | struct ata_port *ap = qc->ap; |
4494 | struct scatterlist *sg = qc->sg; | 4494 | struct scatterlist *sg = qc->sg; |
4495 | int dir = qc->dma_dir; | 4495 | int dir = qc->dma_dir; |
4496 | void *pad_buf = NULL; | ||
4497 | 4496 | ||
4498 | WARN_ON(sg == NULL); | 4497 | WARN_ON(sg == NULL); |
4499 | 4498 | ||
4500 | VPRINTK("unmapping %u sg elements\n", qc->mapped_n_elem); | 4499 | VPRINTK("unmapping %u sg elements\n", qc->n_elem); |
4501 | 4500 | ||
4502 | /* if we padded the buffer out to 32-bit bound, and data | 4501 | if (qc->n_elem) |
4503 | * xfer direction is from-device, we must copy from the | 4502 | dma_unmap_sg(ap->dev, sg, qc->n_elem, dir); |
4504 | * pad buffer back into the supplied buffer | ||
4505 | */ | ||
4506 | if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE)) | ||
4507 | pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); | ||
4508 | |||
4509 | if (qc->mapped_n_elem) | ||
4510 | dma_unmap_sg(ap->dev, sg, qc->mapped_n_elem, dir); | ||
4511 | /* restore last sg */ | ||
4512 | if (qc->last_sg) | ||
4513 | *qc->last_sg = qc->saved_last_sg; | ||
4514 | if (pad_buf) { | ||
4515 | struct scatterlist *psg = &qc->extra_sg[1]; | ||
4516 | void *addr = kmap_atomic(sg_page(psg), KM_IRQ0); | ||
4517 | memcpy(addr + psg->offset, pad_buf, qc->pad_len); | ||
4518 | kunmap_atomic(addr, KM_IRQ0); | ||
4519 | } | ||
4520 | 4503 | ||
4521 | qc->flags &= ~ATA_QCFLAG_DMAMAP; | 4504 | qc->flags &= ~ATA_QCFLAG_DMAMAP; |
4522 | qc->sg = NULL; | 4505 | qc->sg = NULL; |
@@ -4767,97 +4750,6 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, | |||
4767 | qc->cursg = qc->sg; | 4750 | qc->cursg = qc->sg; |
4768 | } | 4751 | } |
4769 | 4752 | ||
4770 | static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc, | ||
4771 | unsigned int *n_elem_extra, | ||
4772 | unsigned int *nbytes_extra) | ||
4773 | { | ||
4774 | struct ata_port *ap = qc->ap; | ||
4775 | unsigned int n_elem = qc->n_elem; | ||
4776 | struct scatterlist *lsg, *copy_lsg = NULL, *tsg = NULL, *esg = NULL; | ||
4777 | |||
4778 | *n_elem_extra = 0; | ||
4779 | *nbytes_extra = 0; | ||
4780 | |||
4781 | /* needs padding? */ | ||
4782 | qc->pad_len = qc->nbytes & 3; | ||
4783 | |||
4784 | if (likely(!qc->pad_len)) | ||
4785 | return n_elem; | ||
4786 | |||
4787 | /* locate last sg and save it */ | ||
4788 | lsg = sg_last(qc->sg, n_elem); | ||
4789 | qc->last_sg = lsg; | ||
4790 | qc->saved_last_sg = *lsg; | ||
4791 | |||
4792 | sg_init_table(qc->extra_sg, ARRAY_SIZE(qc->extra_sg)); | ||
4793 | |||
4794 | if (qc->pad_len) { | ||
4795 | struct scatterlist *psg = &qc->extra_sg[1]; | ||
4796 | void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); | ||
4797 | unsigned int offset; | ||
4798 | |||
4799 | WARN_ON(qc->dev->class != ATA_DEV_ATAPI); | ||
4800 | |||
4801 | memset(pad_buf, 0, ATA_DMA_PAD_SZ); | ||
4802 | |||
4803 | /* psg->page/offset are used to copy to-be-written | ||
4804 | * data in this function or read data in ata_sg_clean. | ||
4805 | */ | ||
4806 | offset = lsg->offset + lsg->length - qc->pad_len; | ||
4807 | sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT), | ||
4808 | qc->pad_len, offset_in_page(offset)); | ||
4809 | |||
4810 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | ||
4811 | void *addr = kmap_atomic(sg_page(psg), KM_IRQ0); | ||
4812 | memcpy(pad_buf, addr + psg->offset, qc->pad_len); | ||
4813 | kunmap_atomic(addr, KM_IRQ0); | ||
4814 | } | ||
4815 | |||
4816 | sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); | ||
4817 | sg_dma_len(psg) = ATA_DMA_PAD_SZ; | ||
4818 | |||
4819 | /* Trim the last sg entry and chain the original and | ||
4820 | * padding sg lists. | ||
4821 | * | ||
4822 | * Because chaining consumes one sg entry, one extra | ||
4823 | * sg entry is allocated and the last sg entry is | ||
4824 | * copied to it if the length isn't zero after padded | ||
4825 | * amount is removed. | ||
4826 | * | ||
4827 | * If the last sg entry is completely replaced by | ||
4828 | * padding sg entry, the first sg entry is skipped | ||
4829 | * while chaining. | ||
4830 | */ | ||
4831 | lsg->length -= qc->pad_len; | ||
4832 | if (lsg->length) { | ||
4833 | copy_lsg = &qc->extra_sg[0]; | ||
4834 | tsg = &qc->extra_sg[0]; | ||
4835 | } else { | ||
4836 | n_elem--; | ||
4837 | tsg = &qc->extra_sg[1]; | ||
4838 | } | ||
4839 | |||
4840 | esg = &qc->extra_sg[1]; | ||
4841 | |||
4842 | (*n_elem_extra)++; | ||
4843 | (*nbytes_extra) += 4 - qc->pad_len; | ||
4844 | } | ||
4845 | |||
4846 | if (copy_lsg) | ||
4847 | sg_set_page(copy_lsg, sg_page(lsg), lsg->length, lsg->offset); | ||
4848 | |||
4849 | sg_chain(lsg, 1, tsg); | ||
4850 | sg_mark_end(esg); | ||
4851 | |||
4852 | /* sglist can't start with chaining sg entry, fast forward */ | ||
4853 | if (qc->sg == lsg) { | ||
4854 | qc->sg = tsg; | ||
4855 | qc->cursg = tsg; | ||
4856 | } | ||
4857 | |||
4858 | return n_elem; | ||
4859 | } | ||
4860 | |||
4861 | /** | 4753 | /** |
4862 | * ata_sg_setup - DMA-map the scatter-gather table associated with a command. | 4754 | * ata_sg_setup - DMA-map the scatter-gather table associated with a command. |
4863 | * @qc: Command with scatter-gather table to be mapped. | 4755 | * @qc: Command with scatter-gather table to be mapped. |
@@ -4874,26 +4766,17 @@ static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc, | |||
4874 | static int ata_sg_setup(struct ata_queued_cmd *qc) | 4766 | static int ata_sg_setup(struct ata_queued_cmd *qc) |
4875 | { | 4767 | { |
4876 | struct ata_port *ap = qc->ap; | 4768 | struct ata_port *ap = qc->ap; |
4877 | unsigned int n_elem, n_elem_extra, nbytes_extra; | 4769 | unsigned int n_elem; |
4878 | 4770 | ||
4879 | VPRINTK("ENTER, ata%u\n", ap->print_id); | 4771 | VPRINTK("ENTER, ata%u\n", ap->print_id); |
4880 | 4772 | ||
4881 | n_elem = ata_sg_setup_extra(qc, &n_elem_extra, &nbytes_extra); | 4773 | n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); |
4774 | if (n_elem < 1) | ||
4775 | return -1; | ||
4882 | 4776 | ||
4883 | if (n_elem) { | 4777 | DPRINTK("%d sg elements mapped\n", n_elem); |
4884 | n_elem = dma_map_sg(ap->dev, qc->sg, n_elem, qc->dma_dir); | ||
4885 | if (n_elem < 1) { | ||
4886 | /* restore last sg */ | ||
4887 | if (qc->last_sg) | ||
4888 | *qc->last_sg = qc->saved_last_sg; | ||
4889 | return -1; | ||
4890 | } | ||
4891 | DPRINTK("%d sg elements mapped\n", n_elem); | ||
4892 | } | ||
4893 | 4778 | ||
4894 | qc->n_elem = qc->mapped_n_elem = n_elem; | 4779 | qc->n_elem = n_elem; |
4895 | qc->n_elem += n_elem_extra; | ||
4896 | qc->nbytes += nbytes_extra; | ||
4897 | qc->flags |= ATA_QCFLAG_DMAMAP; | 4780 | qc->flags |= ATA_QCFLAG_DMAMAP; |
4898 | 4781 | ||
4899 | return 0; | 4782 | return 0; |
@@ -5962,9 +5845,6 @@ void ata_qc_issue(struct ata_queued_cmd *qc) | |||
5962 | */ | 5845 | */ |
5963 | BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)); | 5846 | BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)); |
5964 | 5847 | ||
5965 | /* ata_sg_setup() may update nbytes */ | ||
5966 | qc->raw_nbytes = qc->nbytes; | ||
5967 | |||
5968 | if (ata_is_dma(prot) || (ata_is_pio(prot) && | 5848 | if (ata_is_dma(prot) || (ata_is_pio(prot) && |
5969 | (ap->flags & ATA_FLAG_PIO_DMA))) | 5849 | (ap->flags & ATA_FLAG_PIO_DMA))) |
5970 | if (ata_sg_setup(qc)) | 5850 | if (ata_sg_setup(qc)) |
@@ -6573,19 +6453,12 @@ void ata_host_resume(struct ata_host *host) | |||
6573 | int ata_port_start(struct ata_port *ap) | 6453 | int ata_port_start(struct ata_port *ap) |
6574 | { | 6454 | { |
6575 | struct device *dev = ap->dev; | 6455 | struct device *dev = ap->dev; |
6576 | int rc; | ||
6577 | 6456 | ||
6578 | ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, | 6457 | ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, |
6579 | GFP_KERNEL); | 6458 | GFP_KERNEL); |
6580 | if (!ap->prd) | 6459 | if (!ap->prd) |
6581 | return -ENOMEM; | 6460 | return -ENOMEM; |
6582 | 6461 | ||
6583 | rc = ata_pad_alloc(ap, dev); | ||
6584 | if (rc) | ||
6585 | return rc; | ||
6586 | |||
6587 | DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, | ||
6588 | (unsigned long long)ap->prd_dma); | ||
6589 | return 0; | 6462 | return 0; |
6590 | } | 6463 | } |
6591 | 6464 | ||