Diffstat (limited to 'drivers/scsi/libata-core.c')
-rw-r--r--	drivers/scsi/libata-core.c	70
1 file changed, 33 insertions, 37 deletions
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index d81db3a3d4b9..bb604dfbdef6 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -1263,7 +1263,7 @@ retry:
 	}
 
 	/* ATAPI-specific feature tests */
-	else {
+	else if (dev->class == ATA_DEV_ATAPI) {
 		if (ata_id_is_ata(dev->id))		/* sanity check */
 			goto err_out_nosup;
 
@@ -1570,11 +1570,13 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
 
 	/*
 	 * Find the mode.
 	 */
 
 	if (!(s = ata_timing_find_mode(speed)))
 		return -EINVAL;
 
+	memcpy(t, s, sizeof(*s));
+
 	/*
 	 * If the drive is an EIDE drive, it can tell us it needs extended
 	 * PIO/MW_DMA cycle timing.
@@ -1595,7 +1597,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
 	 * Convert the timing to bus clock counts.
 	 */
 
-	ata_timing_quantize(s, t, T, UT);
+	ata_timing_quantize(t, t, T, UT);
 
 	/*
 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T
@@ -2399,7 +2401,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 	if (qc->flags & ATA_QCFLAG_SINGLE)
 		assert(qc->n_elem == 1);
 
-	DPRINTK("unmapping %u sg elements\n", qc->n_elem);
+	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
 
 	/* if we padded the buffer out to 32-bit bound, and data
 	 * xfer direction is from-device, we must copy from the
@@ -2409,7 +2411,8 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 	pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
 
 	if (qc->flags & ATA_QCFLAG_SG) {
-		dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
+		if (qc->n_elem)
+			dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
 		/* restore last sg */
 		sg[qc->orig_n_elem - 1].length += qc->pad_len;
 		if (pad_buf) {
@@ -2419,8 +2422,10 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 			kunmap_atomic(psg->page, KM_IRQ0);
 		}
 	} else {
-		dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
-				 sg_dma_len(&sg[0]), dir);
+		if (sg_dma_len(&sg[0]) > 0)
+			dma_unmap_single(ap->host_set->dev,
+				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
+				dir);
 		/* restore sg */
 		sg->length += qc->pad_len;
 		if (pad_buf)
@@ -2619,6 +2624,11 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 			sg->length, qc->pad_len);
 	}
 
+	if (!sg->length) {
+		sg_dma_address(sg) = 0;
+		goto skip_map;
+	}
+
 	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
 				     sg->length, dir);
 	if (dma_mapping_error(dma_address)) {
@@ -2628,6 +2638,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 	}
 
 	sg_dma_address(sg) = dma_address;
+skip_map:
 	sg_dma_len(sg) = sg->length;
 
 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
@@ -2655,7 +2666,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct scatterlist *sg = qc->__sg;
 	struct scatterlist *lsg = &sg[qc->n_elem - 1];
-	int n_elem, dir;
+	int n_elem, pre_n_elem, dir, trim_sg = 0;
 
 	VPRINTK("ENTER, ata%u\n", ap->id);
 	assert(qc->flags & ATA_QCFLAG_SG);
@@ -2689,13 +2700,24 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
 		/* trim last sg */
 		lsg->length -= qc->pad_len;
+		if (lsg->length == 0)
+			trim_sg = 1;
 
 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
 			qc->n_elem - 1, lsg->length, qc->pad_len);
 	}
 
+	pre_n_elem = qc->n_elem;
+	if (trim_sg && pre_n_elem)
+		pre_n_elem--;
+
+	if (!pre_n_elem) {
+		n_elem = 0;
+		goto skip_map;
+	}
+
 	dir = qc->dma_dir;
-	n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
+	n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
 	if (n_elem < 1) {
 		/* restore last sg */
 		lsg->length += qc->pad_len;
@@ -2704,6 +2726,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 
 	DPRINTK("%d sg elements mapped\n", n_elem);
 
+skip_map:
 	qc->n_elem = n_elem;
 
 	return 0;
@@ -3263,32 +3286,11 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_host_set *host_set = ap->host_set;
-	struct ata_device *dev = qc->dev;
 	u8 host_stat = 0, drv_stat;
 	unsigned long flags;
 
 	DPRINTK("ENTER\n");
 
-	/* FIXME: doesn't this conflict with timeout handling? */
-	if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
-		struct scsi_cmnd *cmd = qc->scsicmd;
-
-		if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) {
-
-			/* finish completing original command */
-			spin_lock_irqsave(&host_set->lock, flags);
-			__ata_qc_complete(qc);
-			spin_unlock_irqrestore(&host_set->lock, flags);
-
-			atapi_request_sense(ap, dev, cmd);
-
-			cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
-			scsi_finish_command(cmd);
-
-			goto out;
-		}
-	}
-
 	spin_lock_irqsave(&host_set->lock, flags);
 
 	/* hack alert! We cannot use the supplied completion
@@ -3327,7 +3329,6 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 
 	spin_unlock_irqrestore(&host_set->lock, flags);
 
-out:
 	DPRINTK("EXIT\n");
 }
 
@@ -3411,16 +3412,11 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
 
 	qc = ata_qc_new(ap);
 	if (qc) {
-		qc->__sg = NULL;
-		qc->flags = 0;
 		qc->scsicmd = NULL;
 		qc->ap = ap;
 		qc->dev = dev;
-		qc->cursect = qc->cursg = qc->cursg_ofs = 0;
-		qc->nsect = 0;
-		qc->nbytes = qc->curbytes = 0;
 
-		ata_tf_init(ap, &qc->tf, dev->devno);
+		ata_qc_reinit(qc);
 	}
 
 	return qc;