author	Jeff Garzik <jgarzik@pobox.com>	2005-11-14 14:06:26 -0500
committer	Jeff Garzik <jgarzik@pobox.com>	2005-11-14 14:06:26 -0500
commit	e1410f2d951d45aee3bdbcc05ecedaaa9db276e5 (patch)
tree	16d7ab46582219e349df6bfd48c6e20a5ba8e8fc /drivers/scsi
parent	ad36d1a533da91d3448029b4da1113c5b880f25d (diff)
[libata] fix bugs in ATAPI padding DMA mapping code
The ATAPI pad-to-next-32-bit-boundary code modifies the scatterlist's length field, sometimes to zero. The x86-64 platform would oops if asked to DMA-map a zero-length scatterlist entry. Work around this by ensuring that we never DMA-map a zero-length buffer or SG entry.
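
For illustration, the guard this patch applies boils down to checking the length before calling into the DMA API. Below is a minimal sketch of that pattern using a hypothetical helper (map_one_buf is not part of libata) and the 2005-era dma_mapping_error() signature that takes only the address, matching the hunks further down:

#include <linux/dma-mapping.h>

/*
 * Hypothetical helper, not libata code: never hand a zero-length
 * buffer to dma_map_single(), which can oops on some platforms
 * (x86-64 among them).
 */
static int map_one_buf(struct device *dev, void *buf, size_t len,
		       enum dma_data_direction dir, dma_addr_t *dma)
{
	if (!len) {
		/* Nothing to map; record a null address and succeed. */
		*dma = 0;
		return 0;
	}

	*dma = dma_map_single(dev, buf, len, dir);
	if (dma_mapping_error(*dma))
		return -1;

	return 0;
}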
Diffstat (limited to 'drivers/scsi')
-rw-r--r--	drivers/scsi/libata-core.c	31
1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index d81db3a3d4b9..1ccaf467d516 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -2409,7 +2409,8 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 	pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
 
 	if (qc->flags & ATA_QCFLAG_SG) {
-		dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
+		if (qc->n_elem)
+			dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
 		/* restore last sg */
 		sg[qc->orig_n_elem - 1].length += qc->pad_len;
 		if (pad_buf) {
@@ -2419,8 +2420,10 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 			kunmap_atomic(psg->page, KM_IRQ0);
 		}
 	} else {
-		dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
-				 sg_dma_len(&sg[0]), dir);
+		if (sg_dma_len(&sg[0]) > 0)
+			dma_unmap_single(ap->host_set->dev,
+				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
+				dir);
 		/* restore sg */
 		sg->length += qc->pad_len;
 		if (pad_buf)
@@ -2619,6 +2622,11 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 			sg->length, qc->pad_len);
 	}
 
+	if (!sg->length) {
+		sg_dma_address(sg) = 0;
+		goto skip_map;
+	}
+
 	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
 				     sg->length, dir);
 	if (dma_mapping_error(dma_address)) {
@@ -2628,6 +2636,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 	}
 
 	sg_dma_address(sg) = dma_address;
+skip_map:
 	sg_dma_len(sg) = sg->length;
 
 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
@@ -2655,7 +2664,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct scatterlist *sg = qc->__sg;
 	struct scatterlist *lsg = &sg[qc->n_elem - 1];
-	int n_elem, dir;
+	int n_elem, pre_n_elem, dir, trim_sg = 0;
 
 	VPRINTK("ENTER, ata%u\n", ap->id);
 	assert(qc->flags & ATA_QCFLAG_SG);
@@ -2689,13 +2698,24 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
 		/* trim last sg */
 		lsg->length -= qc->pad_len;
+		if (lsg->length == 0)
+			trim_sg = 1;
 
 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
 			qc->n_elem - 1, lsg->length, qc->pad_len);
 	}
 
+	pre_n_elem = qc->n_elem;
+	if (trim_sg && pre_n_elem)
+		pre_n_elem--;
+
+	if (!pre_n_elem) {
+		n_elem = 0;
+		goto skip_map;
+	}
+
 	dir = qc->dma_dir;
-	n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
+	n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
 	if (n_elem < 1) {
 		/* restore last sg */
 		lsg->length += qc->pad_len;
@@ -2704,6 +2724,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 
 	DPRINTK("%d sg elements mapped\n", n_elem);
 
+skip_map:
 	qc->n_elem = n_elem;
 
 	return 0;