Diffstat (limited to 'drivers/scsi/libata-core.c')
 drivers/scsi/libata-core.c | 123 +++++++++++++++++++++++++++++++++++++--------
 1 file changed, 106 insertions(+), 17 deletions(-)
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index f53d7b8ac33f..64f30bf59315 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -2444,8 +2444,9 @@ static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
 static void ata_sg_clean(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	int dir = qc->dma_dir;
+	void *pad_buf = NULL;
 
 	assert(qc->flags & ATA_QCFLAG_DMAMAP);
 	assert(sg != NULL);
@@ -2455,14 +2456,35 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 
 	DPRINTK("unmapping %u sg elements\n", qc->n_elem);
 
-	if (qc->flags & ATA_QCFLAG_SG)
+	/* if we padded the buffer out to 32-bit bound, and data
+	 * xfer direction is from-device, we must copy from the
+	 * pad buffer back into the supplied buffer
+	 */
+	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
+		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+
+	if (qc->flags & ATA_QCFLAG_SG) {
 		dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
-	else
+		/* restore last sg */
+		sg[qc->orig_n_elem - 1].length += qc->pad_len;
+		if (pad_buf) {
+			struct scatterlist *psg = &qc->pad_sgent;
+			void *addr = kmap_atomic(psg->page, KM_IRQ0);
+			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
+			kunmap_atomic(psg->page, KM_IRQ0);
+		}
+	} else {
 		dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
 				 sg_dma_len(&sg[0]), dir);
+		/* restore sg */
+		sg->length += qc->pad_len;
+		if (pad_buf)
+			memcpy(qc->buf_virt + sg->length - qc->pad_len,
+			       pad_buf, qc->pad_len);
+	}
 
 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
-	qc->sg = NULL;
+	qc->__sg = NULL;
 }
 
 /**
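In the ata_sg_clean() hunk above, pad_buf is only set up for reads: that is the case where the device has deposited the final 1-3 bytes of the transfer into the command's slot in ap->pad rather than into the caller's memory, so they must be copied back to the tail of the original buffer (via kmap_atomic() in the scatter/gather case, plain memcpy() in the single-buffer case). A minimal sketch of the single-buffer copy-back, with a hypothetical helper name:

	/* Sketch only: single-buffer read copy-back, assuming the pad slot is
	 * ATA_DMA_PAD_SZ (4) bytes.  copy_back_pad() is not a libata function.
	 */
	static void copy_back_pad(void *buf, unsigned int buflen,
				  const void *pad_buf, unsigned int pad_len)
	{
		/* the trailing 1-3 bytes were DMA'd into the pad slot, not into buf */
		memcpy((char *)buf + buflen - pad_len, pad_buf, pad_len);
	}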
@@ -2478,15 +2500,15 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
  */
 static void ata_fill_sg(struct ata_queued_cmd *qc)
 {
-	struct scatterlist *sg = qc->sg;
 	struct ata_port *ap = qc->ap;
-	unsigned int idx, nelem;
+	struct scatterlist *sg;
+	unsigned int idx;
 
-	assert(sg != NULL);
+	assert(qc->__sg != NULL);
 	assert(qc->n_elem > 0);
 
 	idx = 0;
-	for (nelem = qc->n_elem; nelem; nelem--,sg++) {
+	ata_for_each_sg(sg, qc) {
 		u32 addr, offset;
 		u32 sg_len, len;
 
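ata_fill_sg() switches from a plain n_elem loop to ata_for_each_sg() so that the PRD table also picks up the extra pad_sgent entry when padding is in effect. The iterator itself is defined in the libata headers, not in this file; a plausible shape, shown purely as an illustration with a hypothetical helper name, is:

	/* Illustration only -- not the actual header definition.  The point is
	 * that the walk covers the regular entries and then the pad entry
	 * whenever qc->pad_len is non-zero.
	 */
	static inline struct scatterlist *
	next_sg_or_pad(struct scatterlist *sg, struct ata_queued_cmd *qc)
	{
		if (sg == &qc->pad_sgent)
			return NULL;				/* pad entry is always last */
		if (sg == &qc->__sg[qc->n_elem - 1])
			return qc->pad_len ? &qc->pad_sgent : NULL;
		return sg + 1;
	}

	#define ata_for_each_sg(sg, qc) \
		for ((sg) = (qc)->__sg; (sg); (sg) = next_sg_or_pad((sg), (qc)))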
@@ -2577,11 +2599,12 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
 	qc->flags |= ATA_QCFLAG_SINGLE;
 
 	memset(&qc->sgent, 0, sizeof(qc->sgent));
-	qc->sg = &qc->sgent;
+	qc->__sg = &qc->sgent;
 	qc->n_elem = 1;
+	qc->orig_n_elem = 1;
 	qc->buf_virt = buf;
 
-	sg = qc->sg;
+	sg = qc->__sg;
 	sg->page = virt_to_page(buf);
 	sg->offset = (unsigned long) buf & ~PAGE_MASK;
 	sg->length = buflen;
@@ -2605,8 +2628,9 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
 		 unsigned int n_elem)
 {
 	qc->flags |= ATA_QCFLAG_SG;
-	qc->sg = sg;
+	qc->__sg = sg;
 	qc->n_elem = n_elem;
+	qc->orig_n_elem = n_elem;
 }
 
 /**
@@ -2626,9 +2650,32 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	int dir = qc->dma_dir;
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	dma_addr_t dma_address;
 
+	/* we must lengthen transfers to end on a 32-bit boundary */
+	qc->pad_len = sg->length & 3;
+	if (qc->pad_len) {
+		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+		struct scatterlist *psg = &qc->pad_sgent;
+
+		assert(qc->dev->class == ATA_DEV_ATAPI);
+
+		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE)
+			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
+			       qc->pad_len);
+
+		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+		/* trim sg */
+		sg->length -= qc->pad_len;
+
+		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
+			sg->length, qc->pad_len);
+	}
+
 	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
 				     sg->length, dir);
 	if (dma_mapping_error(dma_address))
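The single-buffer setup above splits the transfer at the last 32-bit boundary: the mapping of the caller's buffer shrinks to a multiple of four bytes and the 1-3 leftover bytes travel through the zero-filled, ATA_DMA_PAD_SZ-sized pad slot instead. A standalone sketch of the arithmetic, with hypothetical names:

	/* Sketch of the 32-bit padding split (hypothetical names). */
	struct dma_split {
		unsigned int main_len;	/* 4-byte multiple mapped from the caller's buffer */
		unsigned int pad_len;	/* 0..3 trailing bytes routed through the pad slot */
	};

	static struct dma_split split_for_padding(unsigned int buflen)
	{
		struct dma_split s;

		s.pad_len  = buflen & 3;		/* bytes past the last 32-bit boundary */
		s.main_len = buflen - s.pad_len;	/* trimmed, aligned main transfer */
		return s;
	}

For example, a 510-byte ATAPI transfer would map 508 bytes from the caller's buffer plus the 4-byte pad slot (2 data bytes, 2 zero bytes), 512 bytes on the wire in total.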
@@ -2660,12 +2707,47 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 static int ata_sg_setup(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
+	struct scatterlist *lsg = &sg[qc->n_elem - 1];
 	int n_elem, dir;
 
 	VPRINTK("ENTER, ata%u\n", ap->id);
 	assert(qc->flags & ATA_QCFLAG_SG);
 
+	/* we must lengthen transfers to end on a 32-bit boundary */
+	qc->pad_len = lsg->length & 3;
+	if (qc->pad_len) {
+		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+		struct scatterlist *psg = &qc->pad_sgent;
+		unsigned int offset;
+
+		assert(qc->dev->class == ATA_DEV_ATAPI);
+
+		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+
+		/*
+		 * psg->page/offset are used to copy to-be-written
+		 * data in this function or read data in ata_sg_clean.
+		 */
+		offset = lsg->offset + lsg->length - qc->pad_len;
+		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
+		psg->offset = offset_in_page(offset);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			void *addr = kmap_atomic(psg->page, KM_IRQ0);
+			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
+			kunmap_atomic(psg->page, KM_IRQ0);
+		}
+
+		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+		/* trim last sg */
+		lsg->length -= qc->pad_len;
+
+		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
+			qc->n_elem - 1, lsg->length, qc->pad_len);
+	}
+
 	dir = qc->dma_dir;
 	n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
 	if (n_elem < 1)
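In the scatter/gather variant the leftover bytes sit somewhere inside the last sg element, which may cross a page boundary, so the hunk records their location as a (page, offset) pair via nth_page() and offset_in_page() before kmap_atomic() copies them. A small sketch of that conversion, mirroring the lines above rather than adding a new libata helper:

	/* Sketch: byte offset relative to lsg->page -> (page, offset) usable
	 * with kmap_atomic().  locate_tail() is a hypothetical name.
	 */
	static void locate_tail(struct scatterlist *lsg, unsigned int pad_len,
				struct page **page, unsigned int *off)
	{
		unsigned int offset = lsg->offset + lsg->length - pad_len;

		*page = nth_page(lsg->page, offset >> PAGE_SHIFT);	/* skip whole pages */
		*off  = offset_in_page(offset);				/* remainder in that page */
	}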
@@ -2941,7 +3023,7 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
 static void ata_pio_sector(struct ata_queued_cmd *qc)
 {
 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	struct ata_port *ap = qc->ap;
 	struct page *page;
 	unsigned int offset;
@@ -2991,7 +3073,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 {
 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
-	struct scatterlist *sg = qc->sg;
+	struct scatterlist *sg = qc->__sg;
 	struct ata_port *ap = qc->ap;
 	struct page *page;
 	unsigned char *buf;
@@ -3024,7 +3106,7 @@ next_sg:
 		return;
 	}
 
-	sg = &qc->sg[qc->cursg];
+	sg = &qc->__sg[qc->cursg];
 
 	page = sg->page;
 	offset = sg->offset + qc->cursg_ofs;
@@ -3384,7 +3466,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
 
 	qc = ata_qc_new(ap);
 	if (qc) {
-		qc->sg = NULL;
+		qc->__sg = NULL;
 		qc->flags = 0;
 		qc->scsicmd = NULL;
 		qc->ap = ap;
@@ -4071,6 +4153,12 @@ int ata_port_start (struct ata_port *ap)
 	if (!ap->prd)
 		return -ENOMEM;
 
+	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ, &ap->pad_dma, GFP_KERNEL);
+	if (!ap->pad) {
+		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+		return -ENOMEM;
+	}
+
 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
 
 	return 0;
@@ -4094,6 +4182,7 @@ void ata_port_stop (struct ata_port *ap)
 	struct device *dev = ap->host_set->dev;
 
 	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
 }
 
 void ata_host_stop (struct ata_host_set *host_set)
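The ata_port_start()/ata_port_stop() hunks give each port a single coherent pad buffer for the lifetime of the port, so the per-command code above never has to allocate or map anything extra; it only indexes its tag's slot. The sizing constant is defined in the libata headers, not in this file; the sketch below assumes one ATA_DMA_PAD_SZ slot per possible tag, which is an assumption here rather than something this diff shows:

	/* Sizing sketch (assumption): one 4-byte slot per command tag. */
	#define ATA_DMA_PAD_SZ		4	/* one 32-bit word per command */
	#define ATA_MAX_QUEUE		32	/* illustrative queue depth */
	#define ATA_DMA_PAD_BUF_SZ	(ATA_DMA_PAD_SZ * ATA_MAX_QUEUE)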