diff options
author | Jeff Garzik <jgarzik@pobox.com> | 2005-11-18 11:44:17 -0500 |
---|---|---|
committer | Jeff Garzik <jgarzik@pobox.com> | 2005-11-18 11:44:17 -0500 |
commit | f333b3f111e9db76109e304df8ee777ace7fbf86 (patch) | |
tree | ce9a74a7327020c48c80d278e1db5f12552f0fb0 /drivers/scsi/libata-core.c | |
parent | f4256e301d9800b1e0276404cb01b3ac85b51067 (diff) | |
parent | 79bfb0a98fdc73ed6a18469cef245cbf50a1d8bb (diff) |
Merge branch 'upstream'
Diffstat (limited to 'drivers/scsi/libata-core.c')
-rw-r--r-- | drivers/scsi/libata-core.c | 105 |
1 file changed, 62 insertions, 43 deletions
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index ebd0de2d1098..aae3a331d753 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c | |||
@@ -533,8 +533,7 @@ void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp) | |||
533 | * @fis: Buffer from which data will be input | 533 | * @fis: Buffer from which data will be input |
534 | * @tf: Taskfile to output | 534 | * @tf: Taskfile to output |
535 | * | 535 | * |
536 | * Converts a standard ATA taskfile to a Serial ATA | 536 | * Converts a serial ATA FIS structure to a standard ATA taskfile. |
537 | * FIS structure (Register - Host to Device). | ||
538 | * | 537 | * |
539 | * LOCKING: | 538 | * LOCKING: |
540 | * Inherited from caller. | 539 | * Inherited from caller. |
@@ -1048,6 +1047,30 @@ static unsigned int ata_pio_modes(const struct ata_device *adev) | |||
1048 | return modes; | 1047 | return modes; |
1049 | } | 1048 | } |
1050 | 1049 | ||
1050 | static int ata_qc_wait_err(struct ata_queued_cmd *qc, | ||
1051 | struct completion *wait) | ||
1052 | { | ||
1053 | int rc = 0; | ||
1054 | |||
1055 | if (wait_for_completion_timeout(wait, 30 * HZ) < 1) { | ||
1056 | /* timeout handling */ | ||
1057 | unsigned int err_mask = ac_err_mask(ata_chk_status(qc->ap)); | ||
1058 | |||
1059 | if (!err_mask) { | ||
1060 | printk(KERN_WARNING "ata%u: slow completion (cmd %x)\n", | ||
1061 | qc->ap->id, qc->tf.command); | ||
1062 | } else { | ||
1063 | printk(KERN_WARNING "ata%u: qc timeout (cmd %x)\n", | ||
1064 | qc->ap->id, qc->tf.command); | ||
1065 | rc = -EIO; | ||
1066 | } | ||
1067 | |||
1068 | ata_qc_complete(qc, err_mask); | ||
1069 | } | ||
1070 | |||
1071 | return rc; | ||
1072 | } | ||
1073 | |||
1051 | /** | 1074 | /** |
1052 | * ata_dev_identify - obtain IDENTIFY x DEVICE page | 1075 | * ata_dev_identify - obtain IDENTIFY x DEVICE page |
1053 | * @ap: port on which device we wish to probe resides | 1076 | * @ap: port on which device we wish to probe resides |
@@ -1127,7 +1150,7 @@ retry: | |||
1127 | if (rc) | 1150 | if (rc) |
1128 | goto err_out; | 1151 | goto err_out; |
1129 | else | 1152 | else |
1130 | wait_for_completion(&wait); | 1153 | ata_qc_wait_err(qc, &wait); |
1131 | 1154 | ||
1132 | spin_lock_irqsave(&ap->host_set->lock, flags); | 1155 | spin_lock_irqsave(&ap->host_set->lock, flags); |
1133 | ap->ops->tf_read(ap, &qc->tf); | 1156 | ap->ops->tf_read(ap, &qc->tf); |
@@ -1271,7 +1294,7 @@ retry: | |||
1271 | } | 1294 | } |
1272 | 1295 | ||
1273 | /* ATAPI-specific feature tests */ | 1296 | /* ATAPI-specific feature tests */ |
1274 | else { | 1297 | else if (dev->class == ATA_DEV_ATAPI) { |
1275 | if (ata_id_is_ata(dev->id)) /* sanity check */ | 1298 | if (ata_id_is_ata(dev->id)) /* sanity check */ |
1276 | goto err_out_nosup; | 1299 | goto err_out_nosup; |
1277 | 1300 | ||
@@ -1581,11 +1604,13 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed, | |||
1581 | 1604 | ||
1582 | /* | 1605 | /* |
1583 | * Find the mode. | 1606 | * Find the mode. |
1584 | */ | 1607 | */ |
1585 | 1608 | ||
1586 | if (!(s = ata_timing_find_mode(speed))) | 1609 | if (!(s = ata_timing_find_mode(speed))) |
1587 | return -EINVAL; | 1610 | return -EINVAL; |
1588 | 1611 | ||
1612 | memcpy(t, s, sizeof(*s)); | ||
1613 | |||
1589 | /* | 1614 | /* |
1590 | * If the drive is an EIDE drive, it can tell us it needs extended | 1615 | * If the drive is an EIDE drive, it can tell us it needs extended |
1591 | * PIO/MW_DMA cycle timing. | 1616 | * PIO/MW_DMA cycle timing. |
@@ -1606,7 +1631,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed, | |||
1606 | * Convert the timing to bus clock counts. | 1631 | * Convert the timing to bus clock counts. |
1607 | */ | 1632 | */ |
1608 | 1633 | ||
1609 | ata_timing_quantize(s, t, T, UT); | 1634 | ata_timing_quantize(t, t, T, UT); |
1610 | 1635 | ||
1611 | /* | 1636 | /* |
1612 | * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T | 1637 | * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T |
@@ -2278,7 +2303,7 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) | |||
2278 | if (rc) | 2303 | if (rc) |
2279 | ata_port_disable(ap); | 2304 | ata_port_disable(ap); |
2280 | else | 2305 | else |
2281 | wait_for_completion(&wait); | 2306 | ata_qc_wait_err(qc, &wait); |
2282 | 2307 | ||
2283 | DPRINTK("EXIT\n"); | 2308 | DPRINTK("EXIT\n"); |
2284 | } | 2309 | } |
@@ -2326,7 +2351,7 @@ static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev) | |||
2326 | if (rc) | 2351 | if (rc) |
2327 | goto err_out; | 2352 | goto err_out; |
2328 | 2353 | ||
2329 | wait_for_completion(&wait); | 2354 | ata_qc_wait_err(qc, &wait); |
2330 | 2355 | ||
2331 | swap_buf_le16(dev->id, ATA_ID_WORDS); | 2356 | swap_buf_le16(dev->id, ATA_ID_WORDS); |
2332 | 2357 | ||
@@ -2382,7 +2407,7 @@ static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev) | |||
2382 | if (rc) | 2407 | if (rc) |
2383 | ata_port_disable(ap); | 2408 | ata_port_disable(ap); |
2384 | else | 2409 | else |
2385 | wait_for_completion(&wait); | 2410 | ata_qc_wait_err(qc, &wait); |
2386 | 2411 | ||
2387 | DPRINTK("EXIT\n"); | 2412 | DPRINTK("EXIT\n"); |
2388 | } | 2413 | } |
@@ -2410,7 +2435,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) | |||
2410 | if (qc->flags & ATA_QCFLAG_SINGLE) | 2435 | if (qc->flags & ATA_QCFLAG_SINGLE) |
2411 | assert(qc->n_elem == 1); | 2436 | assert(qc->n_elem == 1); |
2412 | 2437 | ||
2413 | DPRINTK("unmapping %u sg elements\n", qc->n_elem); | 2438 | VPRINTK("unmapping %u sg elements\n", qc->n_elem); |
2414 | 2439 | ||
2415 | /* if we padded the buffer out to 32-bit bound, and data | 2440 | /* if we padded the buffer out to 32-bit bound, and data |
2416 | * xfer direction is from-device, we must copy from the | 2441 | * xfer direction is from-device, we must copy from the |
@@ -2420,7 +2445,8 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) | |||
2420 | pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); | 2445 | pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); |
2421 | 2446 | ||
2422 | if (qc->flags & ATA_QCFLAG_SG) { | 2447 | if (qc->flags & ATA_QCFLAG_SG) { |
2423 | dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir); | 2448 | if (qc->n_elem) |
2449 | dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir); | ||
2424 | /* restore last sg */ | 2450 | /* restore last sg */ |
2425 | sg[qc->orig_n_elem - 1].length += qc->pad_len; | 2451 | sg[qc->orig_n_elem - 1].length += qc->pad_len; |
2426 | if (pad_buf) { | 2452 | if (pad_buf) { |
@@ -2430,8 +2456,10 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) | |||
2430 | kunmap_atomic(psg->page, KM_IRQ0); | 2456 | kunmap_atomic(psg->page, KM_IRQ0); |
2431 | } | 2457 | } |
2432 | } else { | 2458 | } else { |
2433 | dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]), | 2459 | if (sg_dma_len(&sg[0]) > 0) |
2434 | sg_dma_len(&sg[0]), dir); | 2460 | dma_unmap_single(ap->host_set->dev, |
2461 | sg_dma_address(&sg[0]), sg_dma_len(&sg[0]), | ||
2462 | dir); | ||
2435 | /* restore sg */ | 2463 | /* restore sg */ |
2436 | sg->length += qc->pad_len; | 2464 | sg->length += qc->pad_len; |
2437 | if (pad_buf) | 2465 | if (pad_buf) |
@@ -2630,6 +2658,11 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc) | |||
2630 | sg->length, qc->pad_len); | 2658 | sg->length, qc->pad_len); |
2631 | } | 2659 | } |
2632 | 2660 | ||
2661 | if (!sg->length) { | ||
2662 | sg_dma_address(sg) = 0; | ||
2663 | goto skip_map; | ||
2664 | } | ||
2665 | |||
2633 | dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt, | 2666 | dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt, |
2634 | sg->length, dir); | 2667 | sg->length, dir); |
2635 | if (dma_mapping_error(dma_address)) { | 2668 | if (dma_mapping_error(dma_address)) { |
@@ -2639,6 +2672,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc) | |||
2639 | } | 2672 | } |
2640 | 2673 | ||
2641 | sg_dma_address(sg) = dma_address; | 2674 | sg_dma_address(sg) = dma_address; |
2675 | skip_map: | ||
2642 | sg_dma_len(sg) = sg->length; | 2676 | sg_dma_len(sg) = sg->length; |
2643 | 2677 | ||
2644 | DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg), | 2678 | DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg), |
@@ -2666,7 +2700,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc) | |||
2666 | struct ata_port *ap = qc->ap; | 2700 | struct ata_port *ap = qc->ap; |
2667 | struct scatterlist *sg = qc->__sg; | 2701 | struct scatterlist *sg = qc->__sg; |
2668 | struct scatterlist *lsg = &sg[qc->n_elem - 1]; | 2702 | struct scatterlist *lsg = &sg[qc->n_elem - 1]; |
2669 | int n_elem, dir; | 2703 | int n_elem, pre_n_elem, dir, trim_sg = 0; |
2670 | 2704 | ||
2671 | VPRINTK("ENTER, ata%u\n", ap->id); | 2705 | VPRINTK("ENTER, ata%u\n", ap->id); |
2672 | assert(qc->flags & ATA_QCFLAG_SG); | 2706 | assert(qc->flags & ATA_QCFLAG_SG); |
@@ -2700,13 +2734,24 @@ static int ata_sg_setup(struct ata_queued_cmd *qc) | |||
2700 | sg_dma_len(psg) = ATA_DMA_PAD_SZ; | 2734 | sg_dma_len(psg) = ATA_DMA_PAD_SZ; |
2701 | /* trim last sg */ | 2735 | /* trim last sg */ |
2702 | lsg->length -= qc->pad_len; | 2736 | lsg->length -= qc->pad_len; |
2737 | if (lsg->length == 0) | ||
2738 | trim_sg = 1; | ||
2703 | 2739 | ||
2704 | DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n", | 2740 | DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n", |
2705 | qc->n_elem - 1, lsg->length, qc->pad_len); | 2741 | qc->n_elem - 1, lsg->length, qc->pad_len); |
2706 | } | 2742 | } |
2707 | 2743 | ||
2744 | pre_n_elem = qc->n_elem; | ||
2745 | if (trim_sg && pre_n_elem) | ||
2746 | pre_n_elem--; | ||
2747 | |||
2748 | if (!pre_n_elem) { | ||
2749 | n_elem = 0; | ||
2750 | goto skip_map; | ||
2751 | } | ||
2752 | |||
2708 | dir = qc->dma_dir; | 2753 | dir = qc->dma_dir; |
2709 | n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir); | 2754 | n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir); |
2710 | if (n_elem < 1) { | 2755 | if (n_elem < 1) { |
2711 | /* restore last sg */ | 2756 | /* restore last sg */ |
2712 | lsg->length += qc->pad_len; | 2757 | lsg->length += qc->pad_len; |
@@ -2715,6 +2760,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc) | |||
2715 | 2760 | ||
2716 | DPRINTK("%d sg elements mapped\n", n_elem); | 2761 | DPRINTK("%d sg elements mapped\n", n_elem); |
2717 | 2762 | ||
2763 | skip_map: | ||
2718 | qc->n_elem = n_elem; | 2764 | qc->n_elem = n_elem; |
2719 | 2765 | ||
2720 | return 0; | 2766 | return 0; |
@@ -3445,32 +3491,11 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc) | |||
3445 | { | 3491 | { |
3446 | struct ata_port *ap = qc->ap; | 3492 | struct ata_port *ap = qc->ap; |
3447 | struct ata_host_set *host_set = ap->host_set; | 3493 | struct ata_host_set *host_set = ap->host_set; |
3448 | struct ata_device *dev = qc->dev; | ||
3449 | u8 host_stat = 0, drv_stat; | 3494 | u8 host_stat = 0, drv_stat; |
3450 | unsigned long flags; | 3495 | unsigned long flags; |
3451 | 3496 | ||
3452 | DPRINTK("ENTER\n"); | 3497 | DPRINTK("ENTER\n"); |
3453 | 3498 | ||
3454 | /* FIXME: doesn't this conflict with timeout handling? */ | ||
3455 | if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) { | ||
3456 | struct scsi_cmnd *cmd = qc->scsicmd; | ||
3457 | |||
3458 | if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) { | ||
3459 | |||
3460 | /* finish completing original command */ | ||
3461 | spin_lock_irqsave(&host_set->lock, flags); | ||
3462 | __ata_qc_complete(qc); | ||
3463 | spin_unlock_irqrestore(&host_set->lock, flags); | ||
3464 | |||
3465 | atapi_request_sense(ap, dev, cmd); | ||
3466 | |||
3467 | cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16); | ||
3468 | scsi_finish_command(cmd); | ||
3469 | |||
3470 | goto out; | ||
3471 | } | ||
3472 | } | ||
3473 | |||
3474 | spin_lock_irqsave(&host_set->lock, flags); | 3499 | spin_lock_irqsave(&host_set->lock, flags); |
3475 | 3500 | ||
3476 | /* hack alert! We cannot use the supplied completion | 3501 | /* hack alert! We cannot use the supplied completion |
@@ -3511,7 +3536,6 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc) | |||
3511 | 3536 | ||
3512 | spin_unlock_irqrestore(&host_set->lock, flags); | 3537 | spin_unlock_irqrestore(&host_set->lock, flags); |
3513 | 3538 | ||
3514 | out: | ||
3515 | DPRINTK("EXIT\n"); | 3539 | DPRINTK("EXIT\n"); |
3516 | } | 3540 | } |
3517 | 3541 | ||
@@ -3595,16 +3619,11 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, | |||
3595 | 3619 | ||
3596 | qc = ata_qc_new(ap); | 3620 | qc = ata_qc_new(ap); |
3597 | if (qc) { | 3621 | if (qc) { |
3598 | qc->__sg = NULL; | ||
3599 | qc->flags = 0; | ||
3600 | qc->scsicmd = NULL; | 3622 | qc->scsicmd = NULL; |
3601 | qc->ap = ap; | 3623 | qc->ap = ap; |
3602 | qc->dev = dev; | 3624 | qc->dev = dev; |
3603 | qc->cursect = qc->cursg = qc->cursg_ofs = 0; | ||
3604 | qc->nsect = 0; | ||
3605 | qc->nbytes = qc->curbytes = 0; | ||
3606 | 3625 | ||
3607 | ata_tf_init(ap, &qc->tf, dev->devno); | 3626 | ata_qc_reinit(qc); |
3608 | } | 3627 | } |
3609 | 3628 | ||
3610 | return qc; | 3629 | return qc; |