Diffstat (limited to 'drivers/scsi/libata-core.c')
 drivers/scsi/libata-core.c | 64 +++++++++++++++++++++++++++++-----------------------------------
 1 file changed, 29 insertions(+), 35 deletions(-)
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index d81db3a3d4b9..ba1eb8b38e00 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -1263,7 +1263,7 @@ retry:
 	}
 
 	/* ATAPI-specific feature tests */
-	else {
+	else if (dev->class == ATA_DEV_ATAPI) {
 		if (ata_id_is_ata(dev->id))	/* sanity check */
 			goto err_out_nosup;
 
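
This hunk tightens device-class dispatch in the identify path: the ATAPI feature tests were previously reached through a bare "else", so a device whose class was neither ATA nor ATAPI would still be probed as ATAPI. A minimal standalone sketch of the resulting shape (the enum and function are illustrative stand-ins, not libata definitions):

	enum dev_class { DEV_ATA, DEV_ATAPI, DEV_UNKNOWN };

	/* With a bare "else", DEV_UNKNOWN would fall into the ATAPI
	 * branch; the explicit class test sends it to the error path,
	 * matching the hunk's err_out_nosup behaviour. */
	static int identify(enum dev_class class)
	{
		if (class == DEV_ATA)
			return 0;	/* ATA-specific feature tests */
		else if (class == DEV_ATAPI)
			return 0;	/* ATAPI-specific feature tests */
		return -1;		/* unknown class: refuse to configure */
	}
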
@@ -2399,7 +2399,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 	if (qc->flags & ATA_QCFLAG_SINGLE)
 		assert(qc->n_elem == 1);
 
-	DPRINTK("unmapping %u sg elements\n", qc->n_elem);
+	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
 
 	/* if we padded the buffer out to 32-bit bound, and data
 	 * xfer direction is from-device, we must copy from the
@@ -2409,7 +2409,8 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 	pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
 
 	if (qc->flags & ATA_QCFLAG_SG) {
-		dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
+		if (qc->n_elem)
+			dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
 		/* restore last sg */
 		sg[qc->orig_n_elem - 1].length += qc->pad_len;
 		if (pad_buf) {
@@ -2419,8 +2420,10 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
 			kunmap_atomic(psg->page, KM_IRQ0);
 		}
 	} else {
-		dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
-				 sg_dma_len(&sg[0]), dir);
+		if (sg_dma_len(&sg[0]) > 0)
+			dma_unmap_single(ap->host_set->dev,
+				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
+				dir);
 		/* restore sg */
 		sg->length += qc->pad_len;
 		if (pad_buf)
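
Both ata_sg_clean() hunks follow the same rule: only undo a DMA mapping that was actually established. With the zero-length handling added further down in the setup paths, qc->n_elem can now be 0 and sg_dma_len(&sg[0]) can be 0, and calling dma_unmap_sg()/dma_unmap_single() for a mapping that was never made is a DMA-API violation. A compact sketch of the guard pattern (types and the unmap stub are illustrative):

	struct sg_ctx {
		unsigned int n_elem;	/* 0 when setup skipped dma_map_sg() */
		unsigned int dma_len0;	/* 0 when setup skipped dma_map_single() */
	};

	static void unmap_stub(void) { /* stands in for dma_unmap_*() */ }

	/* Teardown re-tests the exact conditions under which setup
	 * created a mapping, so zero-length commands never reach it. */
	static void sg_clean(const struct sg_ctx *c, int is_sg)
	{
		if (is_sg) {
			if (c->n_elem)
				unmap_stub();
		} else if (c->dma_len0 > 0) {
			unmap_stub();
		}
	}
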
@@ -2619,6 +2622,11 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 			       sg->length, qc->pad_len);
 	}
 
+	if (!sg->length) {
+		sg_dma_address(sg) = 0;
+		goto skip_map;
+	}
+
 	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
 				     sg->length, dir);
 	if (dma_mapping_error(dma_address)) {
@@ -2628,6 +2636,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
 	}
 
 	sg_dma_address(sg) = dma_address;
+skip_map:
 	sg_dma_len(sg) = sg->length;
 
 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
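
In ata_sg_setup_one(), a zero-length buffer now bypasses dma_map_single() entirely: the DMA address is forced to 0 and control jumps to the new skip_map: label, where sg_dma_len() is set from sg->length and therefore stays 0. A standalone sketch of that control flow (the struct and the constant address are stand-ins):

	struct one_sg { unsigned int length; unsigned long dma_addr; unsigned int dma_len; };

	static int setup_one(struct one_sg *sg)
	{
		if (!sg->length) {
			sg->dma_addr = 0;	/* nothing to map */
			goto skip_map;
		}
		sg->dma_addr = 0x1000;		/* stand-in for dma_map_single() */
		if (!sg->dma_addr)
			return -1;		/* stand-in for dma_mapping_error() */
	skip_map:
		sg->dma_len = sg->length;	/* zero-length stays zero */
		return 0;
	}
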
@@ -2655,7 +2664,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct scatterlist *sg = qc->__sg;
 	struct scatterlist *lsg = &sg[qc->n_elem - 1];
-	int n_elem, dir;
+	int n_elem, pre_n_elem, dir, trim_sg = 0;
 
 	VPRINTK("ENTER, ata%u\n", ap->id);
 	assert(qc->flags & ATA_QCFLAG_SG);
@@ -2689,13 +2698,24 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
 		/* trim last sg */
 		lsg->length -= qc->pad_len;
+		if (lsg->length == 0)
+			trim_sg = 1;
 
 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
 			qc->n_elem - 1, lsg->length, qc->pad_len);
 	}
 
+	pre_n_elem = qc->n_elem;
+	if (trim_sg && pre_n_elem)
+		pre_n_elem--;
+
+	if (!pre_n_elem) {
+		n_elem = 0;
+		goto skip_map;
+	}
+
 	dir = qc->dma_dir;
-	n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
+	n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
 	if (n_elem < 1) {
 		/* restore last sg */
 		lsg->length += qc->pad_len;
@@ -2704,6 +2724,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 
 	DPRINTK("%d sg elements mapped\n", n_elem);
 
+skip_map:
 	qc->n_elem = n_elem;
 
 	return 0;
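
The ata_sg_setup() changes handle the case where padding empties the final scatterlist entry: if lsg->length drops to 0 once the pad bytes are moved aside, trim_sg marks the entry for exclusion, pre_n_elem drops it from the dma_map_sg() call, and if nothing at all remains the code jumps to skip_map with n_elem = 0 (which the unmap guard above then respects). The counting logic, as a plain-C sketch:

	/* If padding emptied the last entry, drop it from the map call;
	 * a return of 0 means dma_map_sg() is skipped entirely. */
	static unsigned int elems_to_map(unsigned int n_elem, unsigned int last_len)
	{
		unsigned int pre_n_elem = n_elem;

		if (last_len == 0 && pre_n_elem)	/* trim_sg case */
			pre_n_elem--;
		return pre_n_elem;
	}

For example, if the last element holds nothing but the tail bytes that get diverted into the pad buffer, its length reaches 0 and it must not be handed to dma_map_sg().
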
@@ -3263,32 +3284,11 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_host_set *host_set = ap->host_set;
-	struct ata_device *dev = qc->dev;
 	u8 host_stat = 0, drv_stat;
 	unsigned long flags;
 
 	DPRINTK("ENTER\n");
 
-	/* FIXME: doesn't this conflict with timeout handling? */
-	if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
-		struct scsi_cmnd *cmd = qc->scsicmd;
-
-		if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) {
-
-			/* finish completing original command */
-			spin_lock_irqsave(&host_set->lock, flags);
-			__ata_qc_complete(qc);
-			spin_unlock_irqrestore(&host_set->lock, flags);
-
-			atapi_request_sense(ap, dev, cmd);
-
-			cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
-			scsi_finish_command(cmd);
-
-			goto out;
-		}
-	}
-
 	spin_lock_irqsave(&host_set->lock, flags);
 
 	/* hack alert!  We cannot use the supplied completion
@@ -3327,7 +3327,6 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 
 	spin_unlock_irqrestore(&host_set->lock, flags);
 
-out:
 	DPRINTK("EXIT\n");
 }
 
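
The two ata_qc_timeout() hunks delete the ATAPI request-sense special case (flagged by its own FIXME as conflicting with timeout handling) together with the out: label that only it used, leaving a single straight-line locked path. Sketched in standalone form (pthread mutexes standing in for the host_set spinlock):

	#include <pthread.h>

	static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

	/* One locked section; no early completion, no "goto out". */
	static void qc_timeout(void)
	{
		pthread_mutex_lock(&host_lock);		/* spin_lock_irqsave() */
		/* read host/drive status and complete the command here */
		pthread_mutex_unlock(&host_lock);	/* spin_unlock_irqrestore() */
	}
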
@@ -3411,16 +3410,11 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
 
 	qc = ata_qc_new(ap);
 	if (qc) {
-		qc->__sg = NULL;
-		qc->flags = 0;
 		qc->scsicmd = NULL;
 		qc->ap = ap;
 		qc->dev = dev;
-		qc->cursect = qc->cursg = qc->cursg_ofs = 0;
-		qc->nsect = 0;
-		qc->nbytes = qc->curbytes = 0;
 
-		ata_tf_init(ap, &qc->tf, dev->devno);
+		ata_qc_reinit(qc);
 	}
 
 	return qc;
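
The final hunk folds the inline field resets in ata_qc_new_init() into ata_qc_reinit(). Judging purely from the lines the hunk deletes, the helper presumably looks something like the sketch below (a reconstruction from this diff, not quoted from the tree; note qc->ap and qc->dev are assigned before the call, so the helper can rely on them):

	static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
	{
		qc->__sg = NULL;
		qc->flags = 0;
		qc->cursect = qc->cursg = qc->cursg_ofs = 0;
		qc->nsect = 0;
		qc->nbytes = qc->curbytes = 0;

		ata_tf_init(qc->ap, &qc->tf, qc->dev->devno);
	}

Centralizing the reset means any future caller that recycles a queued command gets the same initialization, instead of each call site maintaining its own copy of the field list.
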