aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-19 11:03:02 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-19 11:03:02 -0500
commitf6c427663a158056cd8ca71c01f30653e4b313f7 (patch)
tree7756753c37db84c1dd6cde1c99487f10e8a63878 /drivers
parent9ef38eaf4289a99beb3bc13d1ded220a68cc8877 (diff)
parentfa2fc7f4813bfec1ae3232d49e3befbd601e8a6f (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block: libata: implement drain buffers libata: eliminate the home grown dma padding in favour of block: clear drain buffer if draining for write command block: implement request_queue->dma_drain_needed block: add request->raw_data_len block: update bio according to DMA alignment padding libata: update ATAPI overflow draining elevator: make elevator_get() attempt to load the appropriate module cfq-iosched: add hlist for browsing parallel to the radix tree block: make blk_rq_map_user() clear ->bio if it unmaps it fs/block_dev.c: remove #if 0'ed code make struct def_blk_aops static make blk_settings_init() static make blk_ioc_init() static make blk-core.c:request_cachep static again
Diffstat (limited to 'drivers')
-rw-r--r--drivers/ata/ahci.c5
-rw-r--r--drivers/ata/libata-core.c249
-rw-r--r--drivers/ata/libata-scsi.c72
-rw-r--r--drivers/ata/pata_icside.c8
-rw-r--r--drivers/ata/sata_fsl.c13
-rw-r--r--drivers/ata/sata_mv.c6
-rw-r--r--drivers/ata/sata_sil24.c5
-rw-r--r--drivers/scsi/ipr.c4
-rw-r--r--drivers/scsi/libsas/sas_ata.c4
-rw-r--r--drivers/scsi/scsi_lib.c8
10 files changed, 95 insertions, 279 deletions
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 29e71bddd6ff..3c06e457b4dc 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1975,16 +1975,11 @@ static int ahci_port_start(struct ata_port *ap)
1975 struct ahci_port_priv *pp; 1975 struct ahci_port_priv *pp;
1976 void *mem; 1976 void *mem;
1977 dma_addr_t mem_dma; 1977 dma_addr_t mem_dma;
1978 int rc;
1979 1978
1980 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1979 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1981 if (!pp) 1980 if (!pp)
1982 return -ENOMEM; 1981 return -ENOMEM;
1983 1982
1984 rc = ata_pad_alloc(ap, dev);
1985 if (rc)
1986 return rc;
1987
1988 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, 1983 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
1989 GFP_KERNEL); 1984 GFP_KERNEL);
1990 if (!mem) 1985 if (!mem)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f46eb6f6dc9f..def3682f416a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4493,30 +4493,13 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
4493 struct ata_port *ap = qc->ap; 4493 struct ata_port *ap = qc->ap;
4494 struct scatterlist *sg = qc->sg; 4494 struct scatterlist *sg = qc->sg;
4495 int dir = qc->dma_dir; 4495 int dir = qc->dma_dir;
4496 void *pad_buf = NULL;
4497 4496
4498 WARN_ON(sg == NULL); 4497 WARN_ON(sg == NULL);
4499 4498
4500 VPRINTK("unmapping %u sg elements\n", qc->mapped_n_elem); 4499 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4501 4500
4502 /* if we padded the buffer out to 32-bit bound, and data 4501 if (qc->n_elem)
4503 * xfer direction is from-device, we must copy from the 4502 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4504 * pad buffer back into the supplied buffer
4505 */
4506 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4507 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4508
4509 if (qc->mapped_n_elem)
4510 dma_unmap_sg(ap->dev, sg, qc->mapped_n_elem, dir);
4511 /* restore last sg */
4512 if (qc->last_sg)
4513 *qc->last_sg = qc->saved_last_sg;
4514 if (pad_buf) {
4515 struct scatterlist *psg = &qc->extra_sg[1];
4516 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4517 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4518 kunmap_atomic(addr, KM_IRQ0);
4519 }
4520 4503
4521 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4504 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4522 qc->sg = NULL; 4505 qc->sg = NULL;
@@ -4659,43 +4642,6 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4659} 4642}
4660 4643
4661/** 4644/**
4662 * atapi_qc_may_overflow - Check whether data transfer may overflow
4663 * @qc: ATA command in question
4664 *
4665 * ATAPI commands which transfer variable length data to host
4666 * might overflow due to application error or hardware bug. This
4667 * function checks whether overflow should be drained and ignored
4668 * for @qc.
4669 *
4670 * LOCKING:
4671 * None.
4672 *
4673 * RETURNS:
4674 * 1 if @qc may overflow; otherwise, 0.
4675 */
4676static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
4677{
4678 if (qc->tf.protocol != ATAPI_PROT_PIO &&
4679 qc->tf.protocol != ATAPI_PROT_DMA)
4680 return 0;
4681
4682 if (qc->tf.flags & ATA_TFLAG_WRITE)
4683 return 0;
4684
4685 switch (qc->cdb[0]) {
4686 case READ_10:
4687 case READ_12:
4688 case WRITE_10:
4689 case WRITE_12:
4690 case GPCMD_READ_CD:
4691 case GPCMD_READ_CD_MSF:
4692 return 0;
4693 }
4694
4695 return 1;
4696}
4697
4698/**
4699 * ata_std_qc_defer - Check whether a qc needs to be deferred 4645 * ata_std_qc_defer - Check whether a qc needs to be deferred
4700 * @qc: ATA command in question 4646 * @qc: ATA command in question
4701 * 4647 *
@@ -4782,97 +4728,6 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4782 qc->cursg = qc->sg; 4728 qc->cursg = qc->sg;
4783} 4729}
4784 4730
4785static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
4786 unsigned int *n_elem_extra,
4787 unsigned int *nbytes_extra)
4788{
4789 struct ata_port *ap = qc->ap;
4790 unsigned int n_elem = qc->n_elem;
4791 struct scatterlist *lsg, *copy_lsg = NULL, *tsg = NULL, *esg = NULL;
4792
4793 *n_elem_extra = 0;
4794 *nbytes_extra = 0;
4795
4796 /* needs padding? */
4797 qc->pad_len = qc->nbytes & 3;
4798
4799 if (likely(!qc->pad_len))
4800 return n_elem;
4801
4802 /* locate last sg and save it */
4803 lsg = sg_last(qc->sg, n_elem);
4804 qc->last_sg = lsg;
4805 qc->saved_last_sg = *lsg;
4806
4807 sg_init_table(qc->extra_sg, ARRAY_SIZE(qc->extra_sg));
4808
4809 if (qc->pad_len) {
4810 struct scatterlist *psg = &qc->extra_sg[1];
4811 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4812 unsigned int offset;
4813
4814 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4815
4816 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4817
4818 /* psg->page/offset are used to copy to-be-written
4819 * data in this function or read data in ata_sg_clean.
4820 */
4821 offset = lsg->offset + lsg->length - qc->pad_len;
4822 sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
4823 qc->pad_len, offset_in_page(offset));
4824
4825 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4826 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4827 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4828 kunmap_atomic(addr, KM_IRQ0);
4829 }
4830
4831 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4832 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4833
4834 /* Trim the last sg entry and chain the original and
4835 * padding sg lists.
4836 *
4837 * Because chaining consumes one sg entry, one extra
4838 * sg entry is allocated and the last sg entry is
4839 * copied to it if the length isn't zero after padded
4840 * amount is removed.
4841 *
4842 * If the last sg entry is completely replaced by
4843 * padding sg entry, the first sg entry is skipped
4844 * while chaining.
4845 */
4846 lsg->length -= qc->pad_len;
4847 if (lsg->length) {
4848 copy_lsg = &qc->extra_sg[0];
4849 tsg = &qc->extra_sg[0];
4850 } else {
4851 n_elem--;
4852 tsg = &qc->extra_sg[1];
4853 }
4854
4855 esg = &qc->extra_sg[1];
4856
4857 (*n_elem_extra)++;
4858 (*nbytes_extra) += 4 - qc->pad_len;
4859 }
4860
4861 if (copy_lsg)
4862 sg_set_page(copy_lsg, sg_page(lsg), lsg->length, lsg->offset);
4863
4864 sg_chain(lsg, 1, tsg);
4865 sg_mark_end(esg);
4866
4867 /* sglist can't start with chaining sg entry, fast forward */
4868 if (qc->sg == lsg) {
4869 qc->sg = tsg;
4870 qc->cursg = tsg;
4871 }
4872
4873 return n_elem;
4874}
4875
4876/** 4731/**
4877 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4732 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4878 * @qc: Command with scatter-gather table to be mapped. 4733 * @qc: Command with scatter-gather table to be mapped.
@@ -4889,26 +4744,17 @@ static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
4889static int ata_sg_setup(struct ata_queued_cmd *qc) 4744static int ata_sg_setup(struct ata_queued_cmd *qc)
4890{ 4745{
4891 struct ata_port *ap = qc->ap; 4746 struct ata_port *ap = qc->ap;
4892 unsigned int n_elem, n_elem_extra, nbytes_extra; 4747 unsigned int n_elem;
4893 4748
4894 VPRINTK("ENTER, ata%u\n", ap->print_id); 4749 VPRINTK("ENTER, ata%u\n", ap->print_id);
4895 4750
4896 n_elem = ata_sg_setup_extra(qc, &n_elem_extra, &nbytes_extra); 4751 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4752 if (n_elem < 1)
4753 return -1;
4897 4754
4898 if (n_elem) { 4755 DPRINTK("%d sg elements mapped\n", n_elem);
4899 n_elem = dma_map_sg(ap->dev, qc->sg, n_elem, qc->dma_dir);
4900 if (n_elem < 1) {
4901 /* restore last sg */
4902 if (qc->last_sg)
4903 *qc->last_sg = qc->saved_last_sg;
4904 return -1;
4905 }
4906 DPRINTK("%d sg elements mapped\n", n_elem);
4907 }
4908 4756
4909 qc->n_elem = qc->mapped_n_elem = n_elem; 4757 qc->n_elem = n_elem;
4910 qc->n_elem += n_elem_extra;
4911 qc->nbytes += nbytes_extra;
4912 qc->flags |= ATA_QCFLAG_DMAMAP; 4758 qc->flags |= ATA_QCFLAG_DMAMAP;
4913 4759
4914 return 0; 4760 return 0;
@@ -5146,46 +4992,22 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5146 */ 4992 */
5147static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) 4993static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
5148{ 4994{
5149 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 4995 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
5150 struct ata_port *ap = qc->ap; 4996 struct ata_port *ap = qc->ap;
5151 struct ata_eh_info *ehi = &qc->dev->link->eh_info; 4997 struct ata_device *dev = qc->dev;
4998 struct ata_eh_info *ehi = &dev->link->eh_info;
5152 struct scatterlist *sg; 4999 struct scatterlist *sg;
5153 struct page *page; 5000 struct page *page;
5154 unsigned char *buf; 5001 unsigned char *buf;
5155 unsigned int offset, count; 5002 unsigned int offset, count, consumed;
5156 5003
5157next_sg: 5004next_sg:
5158 sg = qc->cursg; 5005 sg = qc->cursg;
5159 if (unlikely(!sg)) { 5006 if (unlikely(!sg)) {
5160 /* 5007 ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
5161 * The end of qc->sg is reached and the device expects 5008 "buf=%u cur=%u bytes=%u",
5162 * more data to transfer. In order not to overrun qc->sg 5009 qc->nbytes, qc->curbytes, bytes);
5163 * and fulfill length specified in the byte count register, 5010 return -1;
5164 * - for read case, discard trailing data from the device
5165 * - for write case, padding zero data to the device
5166 */
5167 u16 pad_buf[1] = { 0 };
5168 unsigned int i;
5169
5170 if (bytes > qc->curbytes - qc->nbytes + ATAPI_MAX_DRAIN) {
5171 ata_ehi_push_desc(ehi, "too much trailing data "
5172 "buf=%u cur=%u bytes=%u",
5173 qc->nbytes, qc->curbytes, bytes);
5174 return -1;
5175 }
5176
5177 /* overflow is expected for misc ATAPI commands */
5178 if (bytes && !atapi_qc_may_overflow(qc))
5179 ata_dev_printk(qc->dev, KERN_WARNING, "ATAPI %u bytes "
5180 "trailing data (cdb=%02x nbytes=%u)\n",
5181 bytes, qc->cdb[0], qc->nbytes);
5182
5183 for (i = 0; i < (bytes + 1) / 2; i++)
5184 ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
5185
5186 qc->curbytes += bytes;
5187
5188 return 0;
5189 } 5011 }
5190 5012
5191 page = sg_page(sg); 5013 page = sg_page(sg);
@@ -5211,18 +5033,16 @@ next_sg:
5211 buf = kmap_atomic(page, KM_IRQ0); 5033 buf = kmap_atomic(page, KM_IRQ0);
5212 5034
5213 /* do the actual data transfer */ 5035 /* do the actual data transfer */
5214 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write); 5036 consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
5215 5037
5216 kunmap_atomic(buf, KM_IRQ0); 5038 kunmap_atomic(buf, KM_IRQ0);
5217 local_irq_restore(flags); 5039 local_irq_restore(flags);
5218 } else { 5040 } else {
5219 buf = page_address(page); 5041 buf = page_address(page);
5220 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write); 5042 consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
5221 } 5043 }
5222 5044
5223 bytes -= count; 5045 bytes -= min(bytes, consumed);
5224 if ((count & 1) && bytes)
5225 bytes--;
5226 qc->curbytes += count; 5046 qc->curbytes += count;
5227 qc->cursg_ofs += count; 5047 qc->cursg_ofs += count;
5228 5048
@@ -5231,9 +5051,11 @@ next_sg:
5231 qc->cursg_ofs = 0; 5051 qc->cursg_ofs = 0;
5232 } 5052 }
5233 5053
5054 /* consumed can be larger than count only for the last transfer */
5055 WARN_ON(qc->cursg && count != consumed);
5056
5234 if (bytes) 5057 if (bytes)
5235 goto next_sg; 5058 goto next_sg;
5236
5237 return 0; 5059 return 0;
5238} 5060}
5239 5061
@@ -5251,6 +5073,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5251{ 5073{
5252 struct ata_port *ap = qc->ap; 5074 struct ata_port *ap = qc->ap;
5253 struct ata_device *dev = qc->dev; 5075 struct ata_device *dev = qc->dev;
5076 struct ata_eh_info *ehi = &dev->link->eh_info;
5254 unsigned int ireason, bc_lo, bc_hi, bytes; 5077 unsigned int ireason, bc_lo, bc_hi, bytes;
5255 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; 5078 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5256 5079
@@ -5268,26 +5091,28 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5268 5091
5269 /* shall be cleared to zero, indicating xfer of data */ 5092 /* shall be cleared to zero, indicating xfer of data */
5270 if (unlikely(ireason & (1 << 0))) 5093 if (unlikely(ireason & (1 << 0)))
5271 goto err_out; 5094 goto atapi_check;
5272 5095
5273 /* make sure transfer direction matches expected */ 5096 /* make sure transfer direction matches expected */
5274 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; 5097 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5275 if (unlikely(do_write != i_write)) 5098 if (unlikely(do_write != i_write))
5276 goto err_out; 5099 goto atapi_check;
5277 5100
5278 if (unlikely(!bytes)) 5101 if (unlikely(!bytes))
5279 goto err_out; 5102 goto atapi_check;
5280 5103
5281 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); 5104 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
5282 5105
5283 if (__atapi_pio_bytes(qc, bytes)) 5106 if (unlikely(__atapi_pio_bytes(qc, bytes)))
5284 goto err_out; 5107 goto err_out;
5285 ata_altstatus(ap); /* flush */ 5108 ata_altstatus(ap); /* flush */
5286 5109
5287 return; 5110 return;
5288 5111
5289err_out: 5112 atapi_check:
5290 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n"); 5113 ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
5114 ireason, bytes);
5115 err_out:
5291 qc->err_mask |= AC_ERR_HSM; 5116 qc->err_mask |= AC_ERR_HSM;
5292 ap->hsm_task_state = HSM_ST_ERR; 5117 ap->hsm_task_state = HSM_ST_ERR;
5293} 5118}
@@ -5972,9 +5797,6 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
5972 */ 5797 */
5973 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)); 5798 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
5974 5799
5975 /* ata_sg_setup() may update nbytes */
5976 qc->raw_nbytes = qc->nbytes;
5977
5978 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5800 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5979 (ap->flags & ATA_FLAG_PIO_DMA))) 5801 (ap->flags & ATA_FLAG_PIO_DMA)))
5980 if (ata_sg_setup(qc)) 5802 if (ata_sg_setup(qc))
@@ -6583,19 +6405,12 @@ void ata_host_resume(struct ata_host *host)
6583int ata_port_start(struct ata_port *ap) 6405int ata_port_start(struct ata_port *ap)
6584{ 6406{
6585 struct device *dev = ap->dev; 6407 struct device *dev = ap->dev;
6586 int rc;
6587 6408
6588 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, 6409 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6589 GFP_KERNEL); 6410 GFP_KERNEL);
6590 if (!ap->prd) 6411 if (!ap->prd)
6591 return -ENOMEM; 6412 return -ENOMEM;
6592 6413
6593 rc = ata_pad_alloc(ap, dev);
6594 if (rc)
6595 return rc;
6596
6597 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6598 (unsigned long long)ap->prd_dma);
6599 return 0; 6414 return 0;
6600} 6415}
6601 6416
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1cea18f62abc..dd41b1a1b304 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -826,30 +826,61 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
826 sdev->max_device_blocked = 1; 826 sdev->max_device_blocked = 1;
827} 827}
828 828
829static void ata_scsi_dev_config(struct scsi_device *sdev, 829/**
830 struct ata_device *dev) 830 * atapi_drain_needed - Check whether data transfer may overflow
831 * @request: request to be checked
832 *
833 * ATAPI commands which transfer variable length data to host
834 * might overflow due to application error or hardware bug. This
835 * function checks whether overflow should be drained and ignored
836 * for @request.
837 *
838 * LOCKING:
839 * None.
840 *
841 * RETURNS:
842 * 1 if the request may overflow and needs draining; otherwise, 0.
843 */
844static int atapi_drain_needed(struct request *rq)
845{
846 if (likely(!blk_pc_request(rq)))
847 return 0;
848
849 if (!rq->data_len || (rq->cmd_flags & REQ_RW))
850 return 0;
851
852 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
853}
854
855static int ata_scsi_dev_config(struct scsi_device *sdev,
856 struct ata_device *dev)
831{ 857{
832 /* configure max sectors */ 858 /* configure max sectors */
833 blk_queue_max_sectors(sdev->request_queue, dev->max_sectors); 859 blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
834 860
835 /* SATA DMA transfers must be multiples of 4 byte, so
836 * we need to pad ATAPI transfers using an extra sg.
837 * Decrement max hw segments accordingly.
838 */
839 if (dev->class == ATA_DEV_ATAPI) { 861 if (dev->class == ATA_DEV_ATAPI) {
840 struct request_queue *q = sdev->request_queue; 862 struct request_queue *q = sdev->request_queue;
841 blk_queue_max_hw_segments(q, q->max_hw_segments - 1); 863 void *buf;
842 864
843 /* set the min alignment */ 865 /* set the min alignment */
844 blk_queue_update_dma_alignment(sdev->request_queue, 866 blk_queue_update_dma_alignment(sdev->request_queue,
845 ATA_DMA_PAD_SZ - 1); 867 ATA_DMA_PAD_SZ - 1);
846 } else 868
869 /* configure draining */
870 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
871 if (!buf) {
872 ata_dev_printk(dev, KERN_ERR,
873 "drain buffer allocation failed\n");
874 return -ENOMEM;
875 }
876
877 blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
878 } else {
847 /* ATA devices must be sector aligned */ 879 /* ATA devices must be sector aligned */
848 blk_queue_update_dma_alignment(sdev->request_queue, 880 blk_queue_update_dma_alignment(sdev->request_queue,
849 ATA_SECT_SIZE - 1); 881 ATA_SECT_SIZE - 1);
850
851 if (dev->class == ATA_DEV_ATA)
852 sdev->manage_start_stop = 1; 882 sdev->manage_start_stop = 1;
883 }
853 884
854 if (dev->flags & ATA_DFLAG_AN) 885 if (dev->flags & ATA_DFLAG_AN)
855 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); 886 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
@@ -861,6 +892,8 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
861 depth = min(ATA_MAX_QUEUE - 1, depth); 892 depth = min(ATA_MAX_QUEUE - 1, depth);
862 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); 893 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
863 } 894 }
895
896 return 0;
864} 897}
865 898
866/** 899/**
@@ -879,13 +912,14 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
879{ 912{
880 struct ata_port *ap = ata_shost_to_port(sdev->host); 913 struct ata_port *ap = ata_shost_to_port(sdev->host);
881 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); 914 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
915 int rc = 0;
882 916
883 ata_scsi_sdev_config(sdev); 917 ata_scsi_sdev_config(sdev);
884 918
885 if (dev) 919 if (dev)
886 ata_scsi_dev_config(sdev, dev); 920 rc = ata_scsi_dev_config(sdev, dev);
887 921
888 return 0; 922 return rc;
889} 923}
890 924
891/** 925/**
@@ -905,6 +939,7 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
905void ata_scsi_slave_destroy(struct scsi_device *sdev) 939void ata_scsi_slave_destroy(struct scsi_device *sdev)
906{ 940{
907 struct ata_port *ap = ata_shost_to_port(sdev->host); 941 struct ata_port *ap = ata_shost_to_port(sdev->host);
942 struct request_queue *q = sdev->request_queue;
908 unsigned long flags; 943 unsigned long flags;
909 struct ata_device *dev; 944 struct ata_device *dev;
910 945
@@ -920,6 +955,10 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
920 ata_port_schedule_eh(ap); 955 ata_port_schedule_eh(ap);
921 } 956 }
922 spin_unlock_irqrestore(ap->lock, flags); 957 spin_unlock_irqrestore(ap->lock, flags);
958
959 kfree(q->dma_drain_buffer);
960 q->dma_drain_buffer = NULL;
961 q->dma_drain_size = 0;
923} 962}
924 963
925/** 964/**
@@ -2500,7 +2539,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
2500 * want to set it properly, and for DMA where it is 2539 * want to set it properly, and for DMA where it is
2501 * effectively meaningless. 2540 * effectively meaningless.
2502 */ 2541 */
2503 nbytes = min(qc->nbytes, (unsigned int)63 * 1024); 2542 nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024);
2504 2543
2505 /* Most ATAPI devices which honor transfer chunk size don't 2544 /* Most ATAPI devices which honor transfer chunk size don't
2506 * behave according to the spec when odd chunk size which 2545 * behave according to the spec when odd chunk size which
@@ -3555,7 +3594,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
3555 * @ap: Port to initialize 3594 * @ap: Port to initialize
3556 * 3595 *
3557 * Called just after data structures for each port are 3596 * Called just after data structures for each port are
3558 * initialized. Allocates DMA pad. 3597 * initialized.
3559 * 3598 *
3560 * May be used as the port_start() entry in ata_port_operations. 3599 * May be used as the port_start() entry in ata_port_operations.
3561 * 3600 *
@@ -3564,7 +3603,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
3564 */ 3603 */
3565int ata_sas_port_start(struct ata_port *ap) 3604int ata_sas_port_start(struct ata_port *ap)
3566{ 3605{
3567 return ata_pad_alloc(ap, ap->dev); 3606 return 0;
3568} 3607}
3569EXPORT_SYMBOL_GPL(ata_sas_port_start); 3608EXPORT_SYMBOL_GPL(ata_sas_port_start);
3570 3609
@@ -3572,8 +3611,6 @@ EXPORT_SYMBOL_GPL(ata_sas_port_start);
3572 * ata_port_stop - Undo ata_sas_port_start() 3611 * ata_port_stop - Undo ata_sas_port_start()
3573 * @ap: Port to shut down 3612 * @ap: Port to shut down
3574 * 3613 *
3575 * Frees the DMA pad.
3576 *
3577 * May be used as the port_stop() entry in ata_port_operations. 3614 * May be used as the port_stop() entry in ata_port_operations.
3578 * 3615 *
3579 * LOCKING: 3616 * LOCKING:
@@ -3582,7 +3619,6 @@ EXPORT_SYMBOL_GPL(ata_sas_port_start);
3582 3619
3583void ata_sas_port_stop(struct ata_port *ap) 3620void ata_sas_port_stop(struct ata_port *ap)
3584{ 3621{
3585 ata_pad_free(ap, ap->dev);
3586} 3622}
3587EXPORT_SYMBOL_GPL(ata_sas_port_stop); 3623EXPORT_SYMBOL_GPL(ata_sas_port_stop);
3588 3624
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 5b8586dac63b..f97068be2d79 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -304,12 +304,6 @@ static int icside_dma_init(struct pata_icside_info *info)
304} 304}
305 305
306 306
307static int pata_icside_port_start(struct ata_port *ap)
308{
309 /* No PRD to alloc */
310 return ata_pad_alloc(ap, ap->dev);
311}
312
313static struct scsi_host_template pata_icside_sht = { 307static struct scsi_host_template pata_icside_sht = {
314 .module = THIS_MODULE, 308 .module = THIS_MODULE,
315 .name = DRV_NAME, 309 .name = DRV_NAME,
@@ -389,8 +383,6 @@ static struct ata_port_operations pata_icside_port_ops = {
389 .irq_clear = ata_dummy_noret, 383 .irq_clear = ata_dummy_noret,
390 .irq_on = ata_irq_on, 384 .irq_on = ata_irq_on,
391 385
392 .port_start = pata_icside_port_start,
393
394 .bmdma_stop = pata_icside_bmdma_stop, 386 .bmdma_stop = pata_icside_bmdma_stop,
395 .bmdma_status = pata_icside_bmdma_status, 387 .bmdma_status = pata_icside_bmdma_status,
396}; 388};
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index efcb66b6ccef..9323dd0c7d8d 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -601,21 +601,9 @@ static int sata_fsl_port_start(struct ata_port *ap)
601 if (!pp) 601 if (!pp)
602 return -ENOMEM; 602 return -ENOMEM;
603 603
604 /*
605 * allocate per command dma alignment pad buffer, which is used
606 * internally by libATA to ensure that all transfers ending on
607 * unaligned boundaries are padded, to align on Dword boundaries
608 */
609 retval = ata_pad_alloc(ap, dev);
610 if (retval) {
611 kfree(pp);
612 return retval;
613 }
614
615 mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma, 604 mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
616 GFP_KERNEL); 605 GFP_KERNEL);
617 if (!mem) { 606 if (!mem) {
618 ata_pad_free(ap, dev);
619 kfree(pp); 607 kfree(pp);
620 return -ENOMEM; 608 return -ENOMEM;
621 } 609 }
@@ -694,7 +682,6 @@ static void sata_fsl_port_stop(struct ata_port *ap)
694 dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, 682 dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ,
695 pp->cmdslot, pp->cmdslot_paddr); 683 pp->cmdslot, pp->cmdslot_paddr);
696 684
697 ata_pad_free(ap, dev);
698 kfree(pp); 685 kfree(pp);
699} 686}
700 687
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 2ecd44db4142..1c1fbf375d9a 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1158,17 +1158,13 @@ static int mv_port_start(struct ata_port *ap)
1158 struct mv_port_priv *pp; 1158 struct mv_port_priv *pp;
1159 void __iomem *port_mmio = mv_ap_base(ap); 1159 void __iomem *port_mmio = mv_ap_base(ap);
1160 unsigned long flags; 1160 unsigned long flags;
1161 int tag, rc; 1161 int tag;
1162 1162
1163 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1163 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1164 if (!pp) 1164 if (!pp)
1165 return -ENOMEM; 1165 return -ENOMEM;
1166 ap->private_data = pp; 1166 ap->private_data = pp;
1167 1167
1168 rc = ata_pad_alloc(ap, dev);
1169 if (rc)
1170 return rc;
1171
1172 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); 1168 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1173 if (!pp->crqb) 1169 if (!pp->crqb)
1174 return -ENOMEM; 1170 return -ENOMEM;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index b4b1f91ea693..df7988df7908 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -1234,7 +1234,6 @@ static int sil24_port_start(struct ata_port *ap)
1234 union sil24_cmd_block *cb; 1234 union sil24_cmd_block *cb;
1235 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS; 1235 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
1236 dma_addr_t cb_dma; 1236 dma_addr_t cb_dma;
1237 int rc;
1238 1237
1239 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1238 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1240 if (!pp) 1239 if (!pp)
@@ -1247,10 +1246,6 @@ static int sil24_port_start(struct ata_port *ap)
1247 return -ENOMEM; 1246 return -ENOMEM;
1248 memset(cb, 0, cb_size); 1247 memset(cb, 0, cb_size);
1249 1248
1250 rc = ata_pad_alloc(ap, dev);
1251 if (rc)
1252 return rc;
1253
1254 pp->cmd_block = cb; 1249 pp->cmd_block = cb;
1255 pp->cmd_block_dma = cb_dma; 1250 pp->cmd_block_dma = cb_dma;
1256 1251
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2074701f7e76..c72014a3e7d4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5140,7 +5140,7 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5140 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5140 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5141 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 5141 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5142 struct ipr_ioadl_desc *last_ioadl = NULL; 5142 struct ipr_ioadl_desc *last_ioadl = NULL;
5143 int len = qc->nbytes + qc->pad_len; 5143 int len = qc->nbytes;
5144 struct scatterlist *sg; 5144 struct scatterlist *sg;
5145 unsigned int si; 5145 unsigned int si;
5146 5146
@@ -5206,7 +5206,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5206 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; 5206 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5207 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 5207 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5208 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 5208 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5209 ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem; 5209 ipr_cmd->dma_use_sg = qc->n_elem;
5210 5210
5211 ipr_build_ata_ioadl(ipr_cmd, qc); 5211 ipr_build_ata_ioadl(ipr_cmd, qc);
5212 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 5212 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 0996f866f14c..7cd05b599a12 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -178,8 +178,8 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
178 task->uldd_task = qc; 178 task->uldd_task = qc;
179 if (ata_is_atapi(qc->tf.protocol)) { 179 if (ata_is_atapi(qc->tf.protocol)) {
180 memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); 180 memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
181 task->total_xfer_len = qc->nbytes + qc->pad_len; 181 task->total_xfer_len = qc->nbytes;
182 task->num_scatter = qc->pad_len ? qc->n_elem + 1 : qc->n_elem; 182 task->num_scatter = qc->n_elem;
183 } else { 183 } else {
184 for_each_sg(qc->sg, sg, qc->n_elem, si) 184 for_each_sg(qc->sg, sg, qc->n_elem, si)
185 xfer += sg->length; 185 xfer += sg->length;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 135c1d054701..ba21d97d1855 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1014,10 +1014,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
1014 } 1014 }
1015 1015
1016 req->buffer = NULL; 1016 req->buffer = NULL;
1017 if (blk_pc_request(req))
1018 sdb->length = req->data_len;
1019 else
1020 sdb->length = req->nr_sectors << 9;
1021 1017
1022 /* 1018 /*
1023 * Next, walk the list, and fill in the addresses and sizes of 1019 * Next, walk the list, and fill in the addresses and sizes of
@@ -1026,6 +1022,10 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
1026 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 1022 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
1027 BUG_ON(count > sdb->table.nents); 1023 BUG_ON(count > sdb->table.nents);
1028 sdb->table.nents = count; 1024 sdb->table.nents = count;
1025 if (blk_pc_request(req))
1026 sdb->length = req->data_len;
1027 else
1028 sdb->length = req->nr_sectors << 9;
1029 return BLKPREP_OK; 1029 return BLKPREP_OK;
1030} 1030}
1031 1031