 drivers/scsi/ahci.c        |  30
 drivers/scsi/libata-core.c | 123
 drivers/scsi/libata-scsi.c |  14
 drivers/scsi/sata_qstor.c  |   8
 drivers/scsi/sata_sx4.c    |  13
 include/linux/libata.h     |  27
 6 files changed, 174 insertions(+), 41 deletions(-)
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 5ec866b00479..f0d8f89b5d40 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -314,8 +314,15 @@ static int ahci_port_start(struct ata_port *ap)
 		return -ENOMEM;
 	memset(pp, 0, sizeof(*pp));
 
+	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ, &ap->pad_dma, GFP_KERNEL);
+	if (!ap->pad) {
+		kfree(pp);
+		return -ENOMEM;
+	}
+
 	mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
 	if (!mem) {
+		dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
 		kfree(pp);
 		return -ENOMEM;
 	}
@@ -391,6 +398,7 @@ static void ahci_port_stop(struct ata_port *ap)
 	ap->private_data = NULL;
 	dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
 			  pp->cmd_slot, pp->cmd_slot_dma);
+	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
 	kfree(pp);
 }
 
@@ -476,23 +484,23 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 static void ahci_fill_sg(struct ata_queued_cmd *qc)
 {
 	struct ahci_port_priv *pp = qc->ap->private_data;
-	unsigned int i;
+	struct scatterlist *sg;
+	struct ahci_sg *ahci_sg;
 
 	VPRINTK("ENTER\n");
 
 	/*
 	 * Next, the S/G list.
 	 */
-	for (i = 0; i < qc->n_elem; i++) {
-		u32 sg_len;
-		dma_addr_t addr;
-
-		addr = sg_dma_address(&qc->sg[i]);
-		sg_len = sg_dma_len(&qc->sg[i]);
-
-		pp->cmd_tbl_sg[i].addr = cpu_to_le32(addr & 0xffffffff);
-		pp->cmd_tbl_sg[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
-		pp->cmd_tbl_sg[i].flags_size = cpu_to_le32(sg_len - 1);
+	ahci_sg = pp->cmd_tbl_sg;
+	ata_for_each_sg(sg, qc) {
+		dma_addr_t addr = sg_dma_address(sg);
+		u32 sg_len = sg_dma_len(sg);
+
+		ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
+		ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
+		ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
+		ahci_sg++;
 	}
 }
 
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 175d4646333d..c5e663fefdf7 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -2274,8 +2274,9 @@ static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
 static void ata_sg_clean(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	int dir = qc->dma_dir;
+	void *pad_buf = NULL;
 
 	assert(qc->flags & ATA_QCFLAG_DMAMAP);
 	assert(sg != NULL);
@@ -2285,14 +2286,35 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 
 	DPRINTK("unmapping %u sg elements\n", qc->n_elem);
 
-	if (qc->flags & ATA_QCFLAG_SG)
+	/* if we padded the buffer out to 32-bit bound, and data
+	 * xfer direction is from-device, we must copy from the
+	 * pad buffer back into the supplied buffer
+	 */
+	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
+		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+
+	if (qc->flags & ATA_QCFLAG_SG) {
 		dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
-	else
+		/* restore last sg */
+		sg[qc->orig_n_elem - 1].length += qc->pad_len;
+		if (pad_buf) {
+			struct scatterlist *psg = &qc->pad_sgent;
+			void *addr = kmap_atomic(psg->page, KM_IRQ0);
+			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
+			kunmap_atomic(psg->page, KM_IRQ0);
+		}
+	} else {
 		dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
 				 sg_dma_len(&sg[0]), dir);
+		/* restore sg */
+		sg->length += qc->pad_len;
+		if (pad_buf)
+			memcpy(qc->buf_virt + sg->length - qc->pad_len,
+			       pad_buf, qc->pad_len);
+	}
 
 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
-	qc->sg = NULL;
+	qc->__sg = NULL;
 }
 
 /**
@@ -2308,15 +2330,15 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
  */
 static void ata_fill_sg(struct ata_queued_cmd *qc)
 {
-	struct scatterlist *sg = qc->sg;
 	struct ata_port *ap = qc->ap;
-	unsigned int idx, nelem;
+	struct scatterlist *sg;
+	unsigned int idx;
 
-	assert(sg != NULL);
+	assert(qc->__sg != NULL);
 	assert(qc->n_elem > 0);
 
 	idx = 0;
-	for (nelem = qc->n_elem; nelem; nelem--,sg++) {
+	ata_for_each_sg(sg, qc) {
 		u32 addr, offset;
 		u32 sg_len, len;
 
@@ -2407,11 +2429,12 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
 	qc->flags |= ATA_QCFLAG_SINGLE;
 
 	memset(&qc->sgent, 0, sizeof(qc->sgent));
-	qc->sg = &qc->sgent;
+	qc->__sg = &qc->sgent;
 	qc->n_elem = 1;
+	qc->orig_n_elem = 1;
 	qc->buf_virt = buf;
 
-	sg = qc->sg;
+	sg = qc->__sg;
 	sg->page = virt_to_page(buf);
 	sg->offset = (unsigned long) buf & ~PAGE_MASK;
 	sg->length = buflen;
@@ -2435,8 +2458,9 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
 		 unsigned int n_elem)
 {
 	qc->flags |= ATA_QCFLAG_SG;
-	qc->sg = sg;
+	qc->__sg = sg;
 	qc->n_elem = n_elem;
+	qc->orig_n_elem = n_elem;
 }
 
 /**
@@ -2456,9 +2480,32 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	int dir = qc->dma_dir;
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	dma_addr_t dma_address;
 
+	/* we must lengthen transfers to end on a 32-bit boundary */
+	qc->pad_len = sg->length & 3;
+	if (qc->pad_len) {
+		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+		struct scatterlist *psg = &qc->pad_sgent;
+
+		assert(qc->dev->class == ATA_DEV_ATAPI);
+
+		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE)
+			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
+			       qc->pad_len);
+
+		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+		/* trim sg */
+		sg->length -= qc->pad_len;
+
+		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
+			sg->length, qc->pad_len);
+	}
+
 	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
 				     sg->length, dir);
 	if (dma_mapping_error(dma_address))
@@ -2490,12 +2537,47 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 static int ata_sg_setup(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
+	struct scatterlist *lsg = &sg[qc->n_elem - 1];
 	int n_elem, dir;
 
 	VPRINTK("ENTER, ata%u\n", ap->id);
 	assert(qc->flags & ATA_QCFLAG_SG);
 
+	/* we must lengthen transfers to end on a 32-bit boundary */
+	qc->pad_len = lsg->length & 3;
+	if (qc->pad_len) {
+		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+		struct scatterlist *psg = &qc->pad_sgent;
+		unsigned int offset;
+
+		assert(qc->dev->class == ATA_DEV_ATAPI);
+
+		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+
+		/*
+		 * psg->page/offset are used to copy to-be-written
+		 * data in this function or read data in ata_sg_clean.
+		 */
+		offset = lsg->offset + lsg->length - qc->pad_len;
+		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
+		psg->offset = offset_in_page(offset);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			void *addr = kmap_atomic(psg->page, KM_IRQ0);
+			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
+			kunmap_atomic(psg->page, KM_IRQ0);
+		}
+
+		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+		/* trim last sg */
+		lsg->length -= qc->pad_len;
+
+		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
+			qc->n_elem - 1, lsg->length, qc->pad_len);
+	}
+
 	dir = qc->dma_dir;
 	n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
 	if (n_elem < 1)
@@ -2773,7 +2855,7 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
 static void ata_pio_sector(struct ata_queued_cmd *qc)
 {
 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	struct ata_port *ap = qc->ap;
 	struct page *page;
 	unsigned int offset;
@@ -2823,7 +2905,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 {
 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	struct ata_port *ap = qc->ap;
 	struct page *page;
 	unsigned char *buf;
@@ -2856,7 +2938,7 @@ next_sg:
 		return;
 	}
 
-	sg = &qc->sg[qc->cursg];
+	sg = &qc->__sg[qc->cursg];
 
 	page = sg->page;
 	offset = sg->offset + qc->cursg_ofs;
@@ -3217,7 +3299,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
 
 	qc = ata_qc_new(ap);
 	if (qc) {
-		qc->sg = NULL;
+		qc->__sg = NULL;
 		qc->flags = 0;
 		qc->scsicmd = NULL;
 		qc->ap = ap;
@@ -3906,6 +3988,12 @@ int ata_port_start (struct ata_port *ap)
 	if (!ap->prd)
 		return -ENOMEM;
 
+	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ, &ap->pad_dma, GFP_KERNEL);
+	if (!ap->pad) {
+		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+		return -ENOMEM;
+	}
+
 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
 
 	return 0;
@@ -3928,6 +4016,7 @@ void ata_port_stop (struct ata_port *ap)
 	struct device *dev = ap->host_set->dev;
 
 	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
 }
 
 void ata_host_stop (struct ata_host_set *host_set)
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 90bf22204668..9944adbe5a1d 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -158,10 +158,10 @@ struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap,
 		qc->scsidone = done;
 
 		if (cmd->use_sg) {
-			qc->sg = (struct scatterlist *) cmd->request_buffer;
+			qc->__sg = (struct scatterlist *) cmd->request_buffer;
 			qc->n_elem = cmd->use_sg;
 		} else {
-			qc->sg = &qc->sgent;
+			qc->__sg = &qc->sgent;
 			qc->n_elem = 1;
 		}
 	} else {
@@ -362,6 +362,16 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
 			 */
 			blk_queue_max_sectors(sdev->request_queue, 2048);
 		}
+
+		/*
+		 * SATA DMA transfers must be multiples of 4 byte, so
+		 * we need to pad ATAPI transfers using an extra sg.
+		 * Decrement max hw segments accordingly.
+		 */
+		if (dev->class == ATA_DEV_ATAPI) {
+			request_queue_t *q = sdev->request_queue;
+			blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
+		}
 	}
 
 	return 0;	/* scsi layer doesn't check return value, sigh */
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index ffcdeb68641c..69a9b1cf6f9c 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -268,16 +268,17 @@ static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
 
 static void qs_fill_sg(struct ata_queued_cmd *qc)
 {
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg;
 	struct ata_port *ap = qc->ap;
 	struct qs_port_priv *pp = ap->private_data;
 	unsigned int nelem;
 	u8 *prd = pp->pkt + QS_CPB_BYTES;
 
-	assert(sg != NULL);
+	assert(qc->__sg != NULL);
 	assert(qc->n_elem > 0);
 
-	for (nelem = 0; nelem < qc->n_elem; nelem++,sg++) {
+	nelem = 0;
+	ata_for_each_sg(sg, qc) {
 		u64 addr;
 		u32 len;
 
@@ -291,6 +292,7 @@ static void qs_fill_sg(struct ata_queued_cmd *qc)
 
 		VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem,
 			(unsigned long long)addr, len);
+		nelem++;
 	}
 }
 
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 540a85191172..79fdbbab513e 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -449,14 +449,14 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
 
 static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
 {
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg;
 	struct ata_port *ap = qc->ap;
 	struct pdc_port_priv *pp = ap->private_data;
 	void __iomem *mmio = ap->host_set->mmio_base;
 	struct pdc_host_priv *hpriv = ap->host_set->private_data;
 	void __iomem *dimm_mmio = hpriv->dimm_mmio;
 	unsigned int portno = ap->port_no;
-	unsigned int i, last, idx, total_len = 0, sgt_len;
+	unsigned int i, idx, total_len = 0, sgt_len;
 	u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
 
 	assert(qc->flags & ATA_QCFLAG_DMAMAP);
@@ -469,12 +469,11 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
 	/*
 	 * Build S/G table
 	 */
-	last = qc->n_elem;
 	idx = 0;
-	for (i = 0; i < last; i++) {
-		buf[idx++] = cpu_to_le32(sg_dma_address(&sg[i]));
-		buf[idx++] = cpu_to_le32(sg_dma_len(&sg[i]));
-		total_len += sg_dma_len(&sg[i]);
+	ata_for_each_sg(sg, qc) {
+		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
+		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
+		total_len += sg_dma_len(sg);
 	}
 	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
 	sgt_len = idx * 4;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0261c55f3483..d3f58a796c3a 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -155,6 +155,10 @@ enum {
 	ATA_SHIFT_UDMA		= 0,
 	ATA_SHIFT_MWDMA		= 8,
 	ATA_SHIFT_PIO		= 11,
+
+	/* size of buffer to pad xfers ending on unaligned boundaries */
+	ATA_DMA_PAD_SZ		= 4,
+	ATA_DMA_PAD_BUF_SZ	= ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
 
 	/* Masks for port functions */
 	ATA_PORT_PRIMARY	= (1 << 0),
@@ -242,9 +246,12 @@ struct ata_queued_cmd {
 	unsigned long		flags;		/* ATA_QCFLAG_xxx */
 	unsigned int		tag;
 	unsigned int		n_elem;
+	unsigned int		orig_n_elem;
 
 	int			dma_dir;
 
+	unsigned int		pad_len;
+
 	unsigned int		nsect;
 	unsigned int		cursect;
 
@@ -255,9 +262,11 @@ struct ata_queued_cmd {
 	unsigned int		cursg_ofs;
 
 	struct scatterlist	sgent;
+	struct scatterlist	pad_sgent;
 	void			*buf_virt;
 
-	struct scatterlist	*sg;
+	/* DO NOT iterate over __sg manually, use ata_for_each_sg() */
+	struct scatterlist	*__sg;
 
 	ata_qc_cb_t		complete_fn;
 
@@ -303,6 +312,9 @@ struct ata_port {
 	struct ata_prd		*prd;	 /* our SG list */
 	dma_addr_t		prd_dma; /* and its DMA mapping */
 
+	void			*pad;	/* array of DMA pad buffers */
+	dma_addr_t		pad_dma;
+
 	struct ata_ioports	ioaddr;	/* ATA cmd/ctl/dma register blocks */
 
 	u8			ctl;	/* cache of ATA control register */
@@ -468,6 +480,19 @@ extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits);
 #endif /* CONFIG_PCI */
 
 
+static inline struct scatterlist *
+ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
+{
+	if (sg == &qc->pad_sgent)
+		return NULL;
+	if (++sg - qc->__sg < qc->n_elem)
+		return sg;
+	return qc->pad_len ? &qc->pad_sgent : NULL;
+}
+
+#define ata_for_each_sg(sg, qc) \
+	for (sg = qc->__sg; sg; sg = ata_qc_next_sg(sg, qc))
+
 static inline unsigned int ata_tag_valid(unsigned int tag)
 {
 	return (tag < ATA_MAX_QUEUE) ? 1 : 0;
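
Note on usage (not part of the patch): after this change a queued command's scatterlist may carry one extra element, qc->pad_sgent, that is not counted in qc->n_elem, so low-level drivers must walk the list with ata_for_each_sg() instead of indexing qc->__sg from 0 to n_elem - 1. A minimal sketch of the expected pattern, using hypothetical names (example_fill_sg, struct example_prd) that do not exist in libata:

	/* hypothetical PRD-style descriptor, for illustration only */
	struct example_prd {
		__le32 addr;
		__le32 len;
	};

	static void example_fill_sg(struct ata_queued_cmd *qc, struct example_prd *prd)
	{
		struct scatterlist *sg;
		unsigned int idx = 0;

		/* visits qc->__sg[0 .. n_elem-1], then qc->pad_sgent iff qc->pad_len != 0 */
		ata_for_each_sg(sg, qc) {
			prd[idx].addr = cpu_to_le32(sg_dma_address(sg));
			prd[idx].len = cpu_to_le32(sg_dma_len(sg));
			idx++;
		}
	}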