 drivers/scsi/ahci.c        |  30
 drivers/scsi/libata-core.c | 128
 drivers/scsi/libata-scsi.c |  14
 drivers/scsi/pdc_adma.c    |   8
 drivers/scsi/sata_mv.c     |  17
 drivers/scsi/sata_qstor.c  |   8
 drivers/scsi/sata_sil24.c  |  15
 drivers/scsi/sata_sx4.c    |  13
 include/linux/libata.h     |  39
 9 files changed, 214 insertions(+), 58 deletions(-)
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 03829aedfd39..3df74a08fe22 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -312,8 +312,15 @@ static int ahci_port_start(struct ata_port *ap)
 		return -ENOMEM;
 	memset(pp, 0, sizeof(*pp));
 
+	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ, &ap->pad_dma, GFP_KERNEL);
+	if (!ap->pad) {
+		kfree(pp);
+		return -ENOMEM;
+	}
+
 	mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
 	if (!mem) {
+		dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
 		kfree(pp);
 		return -ENOMEM;
 	}
@@ -389,6 +396,7 @@ static void ahci_port_stop(struct ata_port *ap)
 	ap->private_data = NULL;
 	dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
 			  pp->cmd_slot, pp->cmd_slot_dma);
+	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
 	kfree(pp);
 }
 
@@ -467,23 +475,23 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 static void ahci_fill_sg(struct ata_queued_cmd *qc)
 {
 	struct ahci_port_priv *pp = qc->ap->private_data;
-	unsigned int i;
+	struct scatterlist *sg;
+	struct ahci_sg *ahci_sg;
 
 	VPRINTK("ENTER\n");
 
 	/*
 	 * Next, the S/G list.
 	 */
-	for (i = 0; i < qc->n_elem; i++) {
-		u32 sg_len;
-		dma_addr_t addr;
-
-		addr = sg_dma_address(&qc->sg[i]);
-		sg_len = sg_dma_len(&qc->sg[i]);
-
-		pp->cmd_tbl_sg[i].addr = cpu_to_le32(addr & 0xffffffff);
-		pp->cmd_tbl_sg[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
-		pp->cmd_tbl_sg[i].flags_size = cpu_to_le32(sg_len - 1);
+	ahci_sg = pp->cmd_tbl_sg;
+	ata_for_each_sg(sg, qc) {
+		dma_addr_t addr = sg_dma_address(sg);
+		u32 sg_len = sg_dma_len(sg);
+
+		ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
+		ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
+		ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
+		ahci_sg++;
 	}
 }
 
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 771bc7d376bc..0d58f4d3e5ce 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -2427,8 +2427,9 @@ static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
 static void ata_sg_clean(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	int dir = qc->dma_dir;
+	void *pad_buf = NULL;
 
 	assert(qc->flags & ATA_QCFLAG_DMAMAP);
 	assert(sg != NULL);
@@ -2438,14 +2439,35 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 
 	DPRINTK("unmapping %u sg elements\n", qc->n_elem);
 
-	if (qc->flags & ATA_QCFLAG_SG)
+	/* if we padded the buffer out to 32-bit bound, and data
+	 * xfer direction is from-device, we must copy from the
+	 * pad buffer back into the supplied buffer
+	 */
+	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
+		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+
+	if (qc->flags & ATA_QCFLAG_SG) {
 		dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
-	else
+		/* restore last sg */
+		sg[qc->orig_n_elem - 1].length += qc->pad_len;
+		if (pad_buf) {
+			struct scatterlist *psg = &qc->pad_sgent;
+			void *addr = kmap_atomic(psg->page, KM_IRQ0);
+			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
+			kunmap_atomic(psg->page, KM_IRQ0);
+		}
+	} else {
 		dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
 				 sg_dma_len(&sg[0]), dir);
+		/* restore sg */
+		sg->length += qc->pad_len;
+		if (pad_buf)
+			memcpy(qc->buf_virt + sg->length - qc->pad_len,
+			       pad_buf, qc->pad_len);
+	}
 
 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
-	qc->sg = NULL;
+	qc->__sg = NULL;
 }
 
 /**
@@ -2461,15 +2483,15 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
  */
 static void ata_fill_sg(struct ata_queued_cmd *qc)
 {
-	struct scatterlist *sg = qc->sg;
 	struct ata_port *ap = qc->ap;
-	unsigned int idx, nelem;
+	struct scatterlist *sg;
+	unsigned int idx;
 
-	assert(sg != NULL);
+	assert(qc->__sg != NULL);
 	assert(qc->n_elem > 0);
 
 	idx = 0;
-	for (nelem = qc->n_elem; nelem; nelem--,sg++) {
+	ata_for_each_sg(sg, qc) {
 		u32 addr, offset;
 		u32 sg_len, len;
 
@@ -2555,12 +2577,18 @@ void ata_qc_prep(struct ata_queued_cmd *qc)
 
 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
 {
+	struct scatterlist *sg;
+
 	qc->flags |= ATA_QCFLAG_SINGLE;
 
-	qc->sg = &qc->sgent;
+	memset(&qc->sgent, 0, sizeof(qc->sgent));
+	qc->__sg = &qc->sgent;
 	qc->n_elem = 1;
+	qc->orig_n_elem = 1;
 	qc->buf_virt = buf;
-	sg_init_one(qc->sg, buf, buflen);
+
+	sg = qc->__sg;
+	sg_init_one(sg, buf, buflen);
 }
 
 /**
@@ -2581,8 +2609,9 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
 		 unsigned int n_elem)
 {
 	qc->flags |= ATA_QCFLAG_SG;
-	qc->sg = sg;
+	qc->__sg = sg;
 	qc->n_elem = n_elem;
+	qc->orig_n_elem = n_elem;
 }
 
 /**
@@ -2602,9 +2631,32 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	int dir = qc->dma_dir;
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	dma_addr_t dma_address;
 
+	/* we must lengthen transfers to end on a 32-bit boundary */
+	qc->pad_len = sg->length & 3;
+	if (qc->pad_len) {
+		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+		struct scatterlist *psg = &qc->pad_sgent;
+
+		assert(qc->dev->class == ATA_DEV_ATAPI);
+
+		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE)
+			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
+			       qc->pad_len);
+
+		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+		/* trim sg */
+		sg->length -= qc->pad_len;
+
+		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
+			sg->length, qc->pad_len);
+	}
+
 	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
 				     sg->length, dir);
 	if (dma_mapping_error(dma_address))
@@ -2636,12 +2688,47 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 static int ata_sg_setup(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
+	struct scatterlist *lsg = &sg[qc->n_elem - 1];
 	int n_elem, dir;
 
 	VPRINTK("ENTER, ata%u\n", ap->id);
 	assert(qc->flags & ATA_QCFLAG_SG);
 
+	/* we must lengthen transfers to end on a 32-bit boundary */
+	qc->pad_len = lsg->length & 3;
+	if (qc->pad_len) {
+		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+		struct scatterlist *psg = &qc->pad_sgent;
+		unsigned int offset;
+
+		assert(qc->dev->class == ATA_DEV_ATAPI);
+
+		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+
+		/*
+		 * psg->page/offset are used to copy to-be-written
+		 * data in this function or read data in ata_sg_clean.
+		 */
+		offset = lsg->offset + lsg->length - qc->pad_len;
+		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
+		psg->offset = offset_in_page(offset);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			void *addr = kmap_atomic(psg->page, KM_IRQ0);
+			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
+			kunmap_atomic(psg->page, KM_IRQ0);
+		}
+
+		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+		/* trim last sg */
+		lsg->length -= qc->pad_len;
+
+		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
+			qc->n_elem - 1, lsg->length, qc->pad_len);
+	}
+
 	dir = qc->dma_dir;
 	n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
 	if (n_elem < 1)
@@ -2917,7 +3004,7 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
 static void ata_pio_sector(struct ata_queued_cmd *qc)
 {
 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	struct ata_port *ap = qc->ap;
 	struct page *page;
 	unsigned int offset;
@@ -2967,7 +3054,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 {
 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	struct ata_port *ap = qc->ap;
 	struct page *page;
 	unsigned char *buf;
@@ -3000,7 +3087,7 @@ next_sg:
 		return;
 	}
 
-	sg = &qc->sg[qc->cursg];
+	sg = &qc->__sg[qc->cursg];
 
 	page = sg->page;
 	offset = sg->offset + qc->cursg_ofs;
@@ -3360,7 +3447,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
 
 	qc = ata_qc_new(ap);
 	if (qc) {
-		qc->sg = NULL;
+		qc->__sg = NULL;
 		qc->flags = 0;
 		qc->scsicmd = NULL;
 		qc->ap = ap;
@@ -4047,6 +4134,12 @@ int ata_port_start (struct ata_port *ap)
 	if (!ap->prd)
 		return -ENOMEM;
 
+	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ, &ap->pad_dma, GFP_KERNEL);
+	if (!ap->pad) {
+		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+		return -ENOMEM;
+	}
+
 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
 
 	return 0;
@@ -4070,6 +4163,7 @@ void ata_port_stop (struct ata_port *ap)
 	struct device *dev = ap->host_set->dev;
 
 	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
 }
 
 void ata_host_stop (struct ata_host_set *host_set)
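
A quick worked example of the padding arithmetic added to ata_sg_setup() / ata_sg_setup_one() above: pad_len = length & 3 is the number of bytes hanging past the last 32-bit boundary, so a 510-byte final sg entry gives pad_len = 2; the entry is trimmed to 508 bytes, the 4-byte per-tag pad buffer (zero-filled, with the trailing 2 data bytes copied in for writes) is attached as qc->pad_sgent, and the controller always DMAs a multiple of 4 bytes (512 here). The snippet below is a standalone userspace sketch of just that arithmetic, not part of the patch, and the 510-byte length is an illustrative value:

#include <stdio.h>

/* Toy model of the pad-length calculation from ata_sg_setup():
 * trim the last sg element back to a 32-bit boundary and carry the
 * remainder in one fixed 4-byte (ATA_DMA_PAD_SZ) pad element.
 */
int main(void)
{
	unsigned int length = 510;		/* hypothetical last-sg length */
	unsigned int pad_sz = 4;		/* ATA_DMA_PAD_SZ */
	unsigned int pad_len = length & 3;	/* bytes past the 32-bit boundary: 2 */

	if (pad_len) {
		unsigned int trimmed = length - pad_len;	/* 508, DMA'd from the original sg */
		unsigned int total = trimmed + pad_sz;		/* 512, always a multiple of 4 */

		printf("length=%u pad_len=%u trimmed=%u total=%u\n",
		       length, pad_len, trimmed, total);
	}
	return 0;
}
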
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 89a04b1a5a0e..69058510f43a 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -354,10 +354,10 @@ struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap,
 		qc->scsidone = done;
 
 		if (cmd->use_sg) {
-			qc->sg = (struct scatterlist *) cmd->request_buffer;
+			qc->__sg = (struct scatterlist *) cmd->request_buffer;
 			qc->n_elem = cmd->use_sg;
 		} else {
-			qc->sg = &qc->sgent;
+			qc->__sg = &qc->sgent;
 			qc->n_elem = 1;
 		}
 	} else {
@@ -693,6 +693,16 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
 			 */
 			blk_queue_max_sectors(sdev->request_queue, 2048);
 		}
+
+		/*
+		 * SATA DMA transfers must be multiples of 4 byte, so
+		 * we need to pad ATAPI transfers using an extra sg.
+		 * Decrement max hw segments accordingly.
+		 */
+		if (dev->class == ATA_DEV_ATAPI) {
+			request_queue_t *q = sdev->request_queue;
+			blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
+		}
 	}
 
 	return 0;	/* scsi layer doesn't check return value, sigh */
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index af99feb9d237..5c0f90677d00 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -292,14 +292,14 @@ static void adma_eng_timeout(struct ata_port *ap)
 
 static int adma_fill_sg(struct ata_queued_cmd *qc)
 {
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg;
 	struct ata_port *ap = qc->ap;
 	struct adma_port_priv *pp = ap->private_data;
 	u8 *buf = pp->pkt;
-	int nelem, i = (2 + buf[3]) * 8;
+	int i = (2 + buf[3]) * 8;
 	u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
 
-	for (nelem = 0; nelem < qc->n_elem; nelem++,sg++) {
+	ata_for_each_sg(sg, qc) {
 		u32 addr;
 		u32 len;
 
@@ -311,7 +311,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
 		*(__le32 *)(buf + i) = cpu_to_le32(len);
 		i += 4;
 
-		if ((nelem + 1) == qc->n_elem)
+		if (ata_sg_is_last(sg, qc))
 			pFLAGS |= pEND;
 		buf[i++] = pFLAGS;
 		buf[i++] = qc->dev->dma_mode & 0xf;
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index dcef5fe8600b..ad4808ef71d4 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -783,23 +783,24 @@ static void mv_port_stop(struct ata_port *ap)
 static void mv_fill_sg(struct ata_queued_cmd *qc)
 {
 	struct mv_port_priv *pp = qc->ap->private_data;
-	unsigned int i;
+	unsigned int i = 0;
+	struct scatterlist *sg;
 
-	for (i = 0; i < qc->n_elem; i++) {
+	ata_for_each_sg(sg, qc) {
 		u32 sg_len;
 		dma_addr_t addr;
 
-		addr = sg_dma_address(&qc->sg[i]);
-		sg_len = sg_dma_len(&qc->sg[i]);
+		addr = sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
 
 		pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
 		pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
 		assert(0 == (sg_len & ~MV_DMA_BOUNDARY));
 		pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len);
-	}
-	if (0 < qc->n_elem) {
-		pp->sg_tbl[qc->n_elem - 1].flags_size |=
-			cpu_to_le32(EPRD_FLAG_END_OF_TBL);
+		if (ata_sg_is_last(sg, qc))
+			pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
+
+		i++;
 	}
 }
 
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 1aaf3304d397..f2c599f08fa2 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -270,16 +270,17 @@ static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
 
 static void qs_fill_sg(struct ata_queued_cmd *qc)
 {
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg;
 	struct ata_port *ap = qc->ap;
 	struct qs_port_priv *pp = ap->private_data;
 	unsigned int nelem;
 	u8 *prd = pp->pkt + QS_CPB_BYTES;
 
-	assert(sg != NULL);
+	assert(qc->__sg != NULL);
 	assert(qc->n_elem > 0);
 
-	for (nelem = 0; nelem < qc->n_elem; nelem++,sg++) {
+	nelem = 0;
+	ata_for_each_sg(sg, qc) {
 		u64 addr;
 		u32 len;
 
@@ -293,6 +294,7 @@ static void qs_fill_sg(struct ata_queued_cmd *qc)
 
 		VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", nelem,
 			(unsigned long long)addr, len);
+		nelem++;
 	}
 }
 
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index e18a1e2bb65e..7e6e5c049b7d 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -410,15 +410,20 @@ static void sil24_phy_reset(struct ata_port *ap)
 static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
 				 struct sil24_cmd_block *cb)
 {
-	struct scatterlist *sg = qc->sg;
 	struct sil24_sge *sge = cb->sge;
-	unsigned i;
+	struct scatterlist *sg;
+	unsigned int idx = 0;
 
-	for (i = 0; i < qc->n_elem; i++, sg++, sge++) {
+	ata_for_each_sg(sg, qc) {
 		sge->addr = cpu_to_le64(sg_dma_address(sg));
 		sge->cnt = cpu_to_le32(sg_dma_len(sg));
-		sge->flags = 0;
-		sge->flags = i < qc->n_elem - 1 ? 0 : cpu_to_le32(SGE_TRM);
+		if (ata_sg_is_last(sg, qc))
+			sge->flags = cpu_to_le32(SGE_TRM);
+		else
+			sge->flags = 0;
+
+		sge++;
+		idx++;
 	}
 }
 
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index af08f4f650c1..8710d0f14f93 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -449,14 +449,14 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
 
 static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
 {
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg;
 	struct ata_port *ap = qc->ap;
 	struct pdc_port_priv *pp = ap->private_data;
 	void __iomem *mmio = ap->host_set->mmio_base;
 	struct pdc_host_priv *hpriv = ap->host_set->private_data;
 	void __iomem *dimm_mmio = hpriv->dimm_mmio;
 	unsigned int portno = ap->port_no;
-	unsigned int i, last, idx, total_len = 0, sgt_len;
+	unsigned int i, idx, total_len = 0, sgt_len;
 	u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
 
 	assert(qc->flags & ATA_QCFLAG_DMAMAP);
@@ -469,12 +469,11 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
 	/*
 	 * Build S/G table
 	 */
-	last = qc->n_elem;
 	idx = 0;
-	for (i = 0; i < last; i++) {
-		buf[idx++] = cpu_to_le32(sg_dma_address(&sg[i]));
-		buf[idx++] = cpu_to_le32(sg_dma_len(&sg[i]));
-		total_len += sg_dma_len(&sg[i]);
+	ata_for_each_sg(sg, qc) {
+		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
+		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
+		total_len += sg_dma_len(sg);
 	}
 	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
 	sgt_len = idx * 4;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index a4cce9936a80..364cd11456f6 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -155,6 +155,10 @@ enum {
 	ATA_SHIFT_UDMA		= 0,
 	ATA_SHIFT_MWDMA		= 8,
 	ATA_SHIFT_PIO		= 11,
+
+	/* size of buffer to pad xfers ending on unaligned boundaries */
+	ATA_DMA_PAD_SZ		= 4,
+	ATA_DMA_PAD_BUF_SZ	= ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
 
 	/* Masks for port functions */
 	ATA_PORT_PRIMARY	= (1 << 0),
@@ -242,9 +246,12 @@ struct ata_queued_cmd {
 	unsigned long		flags;		/* ATA_QCFLAG_xxx */
 	unsigned int		tag;
 	unsigned int		n_elem;
+	unsigned int		orig_n_elem;
 
 	int			dma_dir;
 
+	unsigned int		pad_len;
+
 	unsigned int		nsect;
 	unsigned int		cursect;
 
@@ -255,9 +262,11 @@ struct ata_queued_cmd {
 	unsigned int		cursg_ofs;
 
 	struct scatterlist	sgent;
+	struct scatterlist	pad_sgent;
 	void			*buf_virt;
 
-	struct scatterlist	*sg;
+	/* DO NOT iterate over __sg manually, use ata_for_each_sg() */
+	struct scatterlist	*__sg;
 
 	ata_qc_cb_t		complete_fn;
 
@@ -303,6 +312,9 @@ struct ata_port {
 	struct ata_prd		*prd;	 /* our SG list */
 	dma_addr_t		prd_dma; /* and its DMA mapping */
 
+	void			*pad;	/* array of DMA pad buffers */
+	dma_addr_t		pad_dma;
+
 	struct ata_ioports	ioaddr;	/* ATA cmd/ctl/dma register blocks */
 
 	u8			ctl;	/* cache of ATA control register */
@@ -505,6 +517,31 @@ extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
 #endif /* CONFIG_PCI */
 
 
+static inline int
+ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
+{
+	if (sg == &qc->pad_sgent)
+		return 1;
+	if (qc->pad_len)
+		return 0;
+	if (((sg - qc->__sg) + 1) == qc->n_elem)
+		return 1;
+	return 0;
+}
+
+static inline struct scatterlist *
+ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
+{
+	if (sg == &qc->pad_sgent)
+		return NULL;
+	if (++sg - qc->__sg < qc->n_elem)
+		return sg;
+	return qc->pad_len ? &qc->pad_sgent : NULL;
+}
+
+#define ata_for_each_sg(sg, qc) \
+	for (sg = qc->__sg; sg; sg = ata_qc_next_sg(sg, qc))
+
 static inline unsigned int ata_tag_valid(unsigned int tag)
 {
 	return (tag < ATA_MAX_QUEUE) ? 1 : 0;
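
The two inline helpers and the ata_for_each_sg() macro above are what every driver conversion in this patch reduces to. Below is a minimal sketch of the resulting fill_sg pattern, modeled on the ahci and sata_mv hunks; the my_fill_sg/my_port_priv names, the prd table layout, and the MY_PRD_EOT end-of-table flag are hypothetical, not from the patch:

static void my_fill_sg(struct ata_queued_cmd *qc)
{
	struct my_port_priv *pp = qc->ap->private_data;	/* hypothetical driver private data */
	struct scatterlist *sg;
	unsigned int idx = 0;

	/* Walks the command's sg list *including* the trailing pad entry
	 * (qc->pad_sgent) when ata_sg_setup() had to lengthen the transfer.
	 */
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		pp->prd[idx].addr = cpu_to_le32(addr & 0xffffffff);
		pp->prd[idx].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		pp->prd[idx].flags_len = cpu_to_le32(len);

		/* The true last element may be the pad entry rather than
		 * sg[qc->n_elem - 1], so use the helper instead of comparing
		 * a counter against n_elem.
		 */
		if (ata_sg_is_last(sg, qc))
			pp->prd[idx].flags_len |= cpu_to_le32(MY_PRD_EOT);	/* hypothetical flag */

		idx++;
	}
}
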
