diff options
author | Jeff Garzik <jgarzik@pobox.com> | 2005-10-05 07:13:30 -0400 |
---|---|---|
committer | Jeff Garzik <jgarzik@pobox.com> | 2005-10-05 07:13:30 -0400 |
commit | cedc9a478d8c6265879dc3839ef3d4849a709184 (patch) | |
tree | 0c8e0fbffdb6081381c01b8cfd93c95b168acb44 /drivers/scsi/libata-core.c | |
parent | ed39f731ab2e77e58122232f6e27333331d7793d (diff) |
libata: fix ATAPI DMA alignment issues
ATAPI DMA transfers need to be padded to the next 4-byte boundary, if misaligned.
Original work by me, many fixes from Tejun Heo.
Diffstat (limited to 'drivers/scsi/libata-core.c')
-rw-r--r-- | drivers/scsi/libata-core.c | 123 |
1 file changed, 106 insertions, 17 deletions
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index e5b01997117a..943b44c3c16f 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c | |||
@@ -2156,8 +2156,9 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) | |||
2156 | static void ata_sg_clean(struct ata_queued_cmd *qc) | 2156 | static void ata_sg_clean(struct ata_queued_cmd *qc) |
2157 | { | 2157 | { |
2158 | struct ata_port *ap = qc->ap; | 2158 | struct ata_port *ap = qc->ap; |
2159 | struct scatterlist *sg = qc->sg; | 2159 | struct scatterlist *sg = qc->__sg; |
2160 | int dir = qc->dma_dir; | 2160 | int dir = qc->dma_dir; |
2161 | void *pad_buf = NULL; | ||
2161 | 2162 | ||
2162 | assert(qc->flags & ATA_QCFLAG_DMAMAP); | 2163 | assert(qc->flags & ATA_QCFLAG_DMAMAP); |
2163 | assert(sg != NULL); | 2164 | assert(sg != NULL); |
@@ -2167,14 +2168,35 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) | |||
2167 | 2168 | ||
2168 | DPRINTK("unmapping %u sg elements\n", qc->n_elem); | 2169 | DPRINTK("unmapping %u sg elements\n", qc->n_elem); |
2169 | 2170 | ||
2170 | if (qc->flags & ATA_QCFLAG_SG) | 2171 | /* if we padded the buffer out to 32-bit bound, and data |
2172 | * xfer direction is from-device, we must copy from the | ||
2173 | * pad buffer back into the supplied buffer | ||
2174 | */ | ||
2175 | if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE)) | ||
2176 | pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); | ||
2177 | |||
2178 | if (qc->flags & ATA_QCFLAG_SG) { | ||
2171 | dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir); | 2179 | dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir); |
2172 | else | 2180 | /* restore last sg */ |
2181 | sg[qc->orig_n_elem - 1].length += qc->pad_len; | ||
2182 | if (pad_buf) { | ||
2183 | struct scatterlist *psg = &qc->pad_sgent; | ||
2184 | void *addr = kmap_atomic(psg->page, KM_IRQ0); | ||
2185 | memcpy(addr + psg->offset, pad_buf, qc->pad_len); | ||
2186 | kunmap_atomic(psg->page, KM_IRQ0); | ||
2187 | } | ||
2188 | } else { | ||
2173 | dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]), | 2189 | dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]), |
2174 | sg_dma_len(&sg[0]), dir); | 2190 | sg_dma_len(&sg[0]), dir); |
2191 | /* restore sg */ | ||
2192 | sg->length += qc->pad_len; | ||
2193 | if (pad_buf) | ||
2194 | memcpy(qc->buf_virt + sg->length - qc->pad_len, | ||
2195 | pad_buf, qc->pad_len); | ||
2196 | } | ||
2175 | 2197 | ||
2176 | qc->flags &= ~ATA_QCFLAG_DMAMAP; | 2198 | qc->flags &= ~ATA_QCFLAG_DMAMAP; |
2177 | qc->sg = NULL; | 2199 | qc->__sg = NULL; |
2178 | } | 2200 | } |
2179 | 2201 | ||
2180 | /** | 2202 | /** |
@@ -2190,15 +2212,15 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) | |||
2190 | */ | 2212 | */ |
2191 | static void ata_fill_sg(struct ata_queued_cmd *qc) | 2213 | static void ata_fill_sg(struct ata_queued_cmd *qc) |
2192 | { | 2214 | { |
2193 | struct scatterlist *sg = qc->sg; | ||
2194 | struct ata_port *ap = qc->ap; | 2215 | struct ata_port *ap = qc->ap; |
2195 | unsigned int idx, nelem; | 2216 | struct scatterlist *sg; |
2217 | unsigned int idx; | ||
2196 | 2218 | ||
2197 | assert(sg != NULL); | 2219 | assert(qc->__sg != NULL); |
2198 | assert(qc->n_elem > 0); | 2220 | assert(qc->n_elem > 0); |
2199 | 2221 | ||
2200 | idx = 0; | 2222 | idx = 0; |
2201 | for (nelem = qc->n_elem; nelem; nelem--,sg++) { | 2223 | ata_for_each_sg(sg, qc) { |
2202 | u32 addr, offset; | 2224 | u32 addr, offset; |
2203 | u32 sg_len, len; | 2225 | u32 sg_len, len; |
2204 | 2226 | ||
@@ -2289,11 +2311,12 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) | |||
2289 | qc->flags |= ATA_QCFLAG_SINGLE; | 2311 | qc->flags |= ATA_QCFLAG_SINGLE; |
2290 | 2312 | ||
2291 | memset(&qc->sgent, 0, sizeof(qc->sgent)); | 2313 | memset(&qc->sgent, 0, sizeof(qc->sgent)); |
2292 | qc->sg = &qc->sgent; | 2314 | qc->__sg = &qc->sgent; |
2293 | qc->n_elem = 1; | 2315 | qc->n_elem = 1; |
2316 | qc->orig_n_elem = 1; | ||
2294 | qc->buf_virt = buf; | 2317 | qc->buf_virt = buf; |
2295 | 2318 | ||
2296 | sg = qc->sg; | 2319 | sg = qc->__sg; |
2297 | sg->page = virt_to_page(buf); | 2320 | sg->page = virt_to_page(buf); |
2298 | sg->offset = (unsigned long) buf & ~PAGE_MASK; | 2321 | sg->offset = (unsigned long) buf & ~PAGE_MASK; |
2299 | sg->length = buflen; | 2322 | sg->length = buflen; |
@@ -2317,8 +2340,9 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, | |||
2317 | unsigned int n_elem) | 2340 | unsigned int n_elem) |
2318 | { | 2341 | { |
2319 | qc->flags |= ATA_QCFLAG_SG; | 2342 | qc->flags |= ATA_QCFLAG_SG; |
2320 | qc->sg = sg; | 2343 | qc->__sg = sg; |
2321 | qc->n_elem = n_elem; | 2344 | qc->n_elem = n_elem; |
2345 | qc->orig_n_elem = n_elem; | ||
2322 | } | 2346 | } |
2323 | 2347 | ||
2324 | /** | 2348 | /** |
@@ -2338,9 +2362,32 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc) | |||
2338 | { | 2362 | { |
2339 | struct ata_port *ap = qc->ap; | 2363 | struct ata_port *ap = qc->ap; |
2340 | int dir = qc->dma_dir; | 2364 | int dir = qc->dma_dir; |
2341 | struct scatterlist *sg = qc->sg; | 2365 | struct scatterlist *sg = qc->__sg; |
2342 | dma_addr_t dma_address; | 2366 | dma_addr_t dma_address; |
2343 | 2367 | ||
2368 | /* we must lengthen transfers to end on a 32-bit boundary */ | ||
2369 | qc->pad_len = sg->length & 3; | ||
2370 | if (qc->pad_len) { | ||
2371 | void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); | ||
2372 | struct scatterlist *psg = &qc->pad_sgent; | ||
2373 | |||
2374 | assert(qc->dev->class == ATA_DEV_ATAPI); | ||
2375 | |||
2376 | memset(pad_buf, 0, ATA_DMA_PAD_SZ); | ||
2377 | |||
2378 | if (qc->tf.flags & ATA_TFLAG_WRITE) | ||
2379 | memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len, | ||
2380 | qc->pad_len); | ||
2381 | |||
2382 | sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); | ||
2383 | sg_dma_len(psg) = ATA_DMA_PAD_SZ; | ||
2384 | /* trim sg */ | ||
2385 | sg->length -= qc->pad_len; | ||
2386 | |||
2387 | DPRINTK("padding done, sg->length=%u pad_len=%u\n", | ||
2388 | sg->length, qc->pad_len); | ||
2389 | } | ||
2390 | |||
2344 | dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt, | 2391 | dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt, |
2345 | sg->length, dir); | 2392 | sg->length, dir); |
2346 | if (dma_mapping_error(dma_address)) | 2393 | if (dma_mapping_error(dma_address)) |
@@ -2372,12 +2419,47 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc) | |||
2372 | static int ata_sg_setup(struct ata_queued_cmd *qc) | 2419 | static int ata_sg_setup(struct ata_queued_cmd *qc) |
2373 | { | 2420 | { |
2374 | struct ata_port *ap = qc->ap; | 2421 | struct ata_port *ap = qc->ap; |
2375 | struct scatterlist *sg = qc->sg; | 2422 | struct scatterlist *sg = qc->__sg; |
2423 | struct scatterlist *lsg = &sg[qc->n_elem - 1]; | ||
2376 | int n_elem, dir; | 2424 | int n_elem, dir; |
2377 | 2425 | ||
2378 | VPRINTK("ENTER, ata%u\n", ap->id); | 2426 | VPRINTK("ENTER, ata%u\n", ap->id); |
2379 | assert(qc->flags & ATA_QCFLAG_SG); | 2427 | assert(qc->flags & ATA_QCFLAG_SG); |
2380 | 2428 | ||
2429 | /* we must lengthen transfers to end on a 32-bit boundary */ | ||
2430 | qc->pad_len = lsg->length & 3; | ||
2431 | if (qc->pad_len) { | ||
2432 | void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); | ||
2433 | struct scatterlist *psg = &qc->pad_sgent; | ||
2434 | unsigned int offset; | ||
2435 | |||
2436 | assert(qc->dev->class == ATA_DEV_ATAPI); | ||
2437 | |||
2438 | memset(pad_buf, 0, ATA_DMA_PAD_SZ); | ||
2439 | |||
2440 | /* | ||
2441 | * psg->page/offset are used to copy to-be-written | ||
2442 | * data in this function or read data in ata_sg_clean. | ||
2443 | */ | ||
2444 | offset = lsg->offset + lsg->length - qc->pad_len; | ||
2445 | psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT); | ||
2446 | psg->offset = offset_in_page(offset); | ||
2447 | |||
2448 | if (qc->tf.flags & ATA_TFLAG_WRITE) { | ||
2449 | void *addr = kmap_atomic(psg->page, KM_IRQ0); | ||
2450 | memcpy(pad_buf, addr + psg->offset, qc->pad_len); | ||
2451 | kunmap_atomic(psg->page, KM_IRQ0); | ||
2452 | } | ||
2453 | |||
2454 | sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); | ||
2455 | sg_dma_len(psg) = ATA_DMA_PAD_SZ; | ||
2456 | /* trim last sg */ | ||
2457 | lsg->length -= qc->pad_len; | ||
2458 | |||
2459 | DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n", | ||
2460 | qc->n_elem - 1, lsg->length, qc->pad_len); | ||
2461 | } | ||
2462 | |||
2381 | dir = qc->dma_dir; | 2463 | dir = qc->dma_dir; |
2382 | n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir); | 2464 | n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir); |
2383 | if (n_elem < 1) | 2465 | if (n_elem < 1) |
@@ -2655,7 +2737,7 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, | |||
2655 | static void ata_pio_sector(struct ata_queued_cmd *qc) | 2737 | static void ata_pio_sector(struct ata_queued_cmd *qc) |
2656 | { | 2738 | { |
2657 | int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); | 2739 | int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); |
2658 | struct scatterlist *sg = qc->sg; | 2740 | struct scatterlist *sg = qc->__sg; |
2659 | struct ata_port *ap = qc->ap; | 2741 | struct ata_port *ap = qc->ap; |
2660 | struct page *page; | 2742 | struct page *page; |
2661 | unsigned int offset; | 2743 | unsigned int offset; |
@@ -2705,7 +2787,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) | |||
2705 | static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) | 2787 | static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) |
2706 | { | 2788 | { |
2707 | int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); | 2789 | int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); |
2708 | struct scatterlist *sg = qc->sg; | 2790 | struct scatterlist *sg = qc->__sg; |
2709 | struct ata_port *ap = qc->ap; | 2791 | struct ata_port *ap = qc->ap; |
2710 | struct page *page; | 2792 | struct page *page; |
2711 | unsigned char *buf; | 2793 | unsigned char *buf; |
@@ -2738,7 +2820,7 @@ next_sg: | |||
2738 | return; | 2820 | return; |
2739 | } | 2821 | } |
2740 | 2822 | ||
2741 | sg = &qc->sg[qc->cursg]; | 2823 | sg = &qc->__sg[qc->cursg]; |
2742 | 2824 | ||
2743 | page = sg->page; | 2825 | page = sg->page; |
2744 | offset = sg->offset + qc->cursg_ofs; | 2826 | offset = sg->offset + qc->cursg_ofs; |
@@ -3145,7 +3227,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, | |||
3145 | 3227 | ||
3146 | qc = ata_qc_new(ap); | 3228 | qc = ata_qc_new(ap); |
3147 | if (qc) { | 3229 | if (qc) { |
3148 | qc->sg = NULL; | 3230 | qc->__sg = NULL; |
3149 | qc->flags = 0; | 3231 | qc->flags = 0; |
3150 | qc->scsicmd = NULL; | 3232 | qc->scsicmd = NULL; |
3151 | qc->ap = ap; | 3233 | qc->ap = ap; |
@@ -3837,6 +3919,12 @@ int ata_port_start (struct ata_port *ap) | |||
3837 | if (!ap->prd) | 3919 | if (!ap->prd) |
3838 | return -ENOMEM; | 3920 | return -ENOMEM; |
3839 | 3921 | ||
3922 | ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ, &ap->pad_dma, GFP_KERNEL); | ||
3923 | if (!ap->pad) { | ||
3924 | dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); | ||
3925 | return -ENOMEM; | ||
3926 | } | ||
3927 | |||
3840 | DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma); | 3928 | DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma); |
3841 | 3929 | ||
3842 | return 0; | 3930 | return 0; |
@@ -3859,6 +3947,7 @@ void ata_port_stop (struct ata_port *ap) | |||
3859 | struct device *dev = ap->host_set->dev; | 3947 | struct device *dev = ap->host_set->dev; |
3860 | 3948 | ||
3861 | dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); | 3949 | dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); |
3950 | dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma); | ||
3862 | } | 3951 | } |
3863 | 3952 | ||
3864 | void ata_host_stop (struct ata_host_set *host_set) | 3953 | void ata_host_stop (struct ata_host_set *host_set) |