Diffstat (limited to 'drivers/scsi/libata-core.c')
-rw-r--r-- | drivers/scsi/libata-core.c | 247
1 file changed, 207 insertions, 40 deletions
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index f4e7dcb6492b..f15a07f9f471 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -1304,12 +1304,12 @@ static inline u8 ata_dev_knobble(struct ata_port *ap) | |||
1304 | /** | 1304 | /** |
1305 | * ata_dev_config - Run device specific handlers and check for | 1305 | * ata_dev_config - Run device specific handlers and check for |
1306 | * SATA->PATA bridges | 1306 | * SATA->PATA bridges |
1307 | * @ap: Bus | 1307 | * @ap: Bus |
1308 | * @i: Device | 1308 | * @i: Device |
1309 | * | 1309 | * |
1310 | * LOCKING: | 1310 | * LOCKING: |
1311 | */ | 1311 | */ |
1312 | 1312 | ||
1313 | void ata_dev_config(struct ata_port *ap, unsigned int i) | 1313 | void ata_dev_config(struct ata_port *ap, unsigned int i) |
1314 | { | 1314 | { |
1315 | /* limit bridge transfers to udma5, 200 sectors */ | 1315 | /* limit bridge transfers to udma5, 200 sectors */ |
@@ -2377,6 +2377,27 @@ static int ata_sg_setup(struct ata_queued_cmd *qc) | |||
2377 | } | 2377 | } |
2378 | 2378 | ||
2379 | /** | 2379 | /** |
2380 | * ata_poll_qc_complete - turn irq back on and finish qc | ||
2381 | * @qc: Command to complete | ||
2382 | * @drv_stat: ATA status register content | ||
2383 | * | ||
2384 | * LOCKING: | ||
2385 | * None. (grabs host lock) | ||
2386 | */ | ||
2387 | |||
2388 | void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) | ||
2389 | { | ||
2390 | struct ata_port *ap = qc->ap; | ||
2391 | unsigned long flags; | ||
2392 | |||
2393 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
2394 | ap->flags &= ~ATA_FLAG_NOINTR; | ||
2395 | ata_irq_on(ap); | ||
2396 | ata_qc_complete(qc, drv_stat); | ||
2397 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
2398 | } | ||
2399 | |||
2400 | /** | ||
2380 | * ata_pio_poll - | 2401 | * ata_pio_poll - |
2381 | * @ap: | 2402 | * @ap: |
2382 | * | 2403 | * |
@@ -2438,11 +2459,10 @@ static void ata_pio_complete (struct ata_port *ap) | |||
2438 | u8 drv_stat; | 2459 | u8 drv_stat; |
2439 | 2460 | ||
2440 | /* | 2461 | /* |
2441 | * This is purely hueristic. This is a fast path. | 2462 | * This is purely heuristic. This is a fast path. Sometimes when |
2442 | * Sometimes when we enter, BSY will be cleared in | 2463 | * we enter, BSY will be cleared in a chk-status or two. If not, |
2443 | * a chk-status or two. If not, the drive is probably seeking | 2464 | * the drive is probably seeking or something. Snooze for a couple |
2444 | * or something. Snooze for a couple msecs, then | 2465 | * msecs, then chk-status again. If still busy, fall back to |
2445 | * chk-status again. If still busy, fall back to | ||
2446 | * PIO_ST_POLL state. | 2466 | * PIO_ST_POLL state. |
2447 | */ | 2467 | */ |
2448 | drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); | 2468 | drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); |
@@ -2467,9 +2487,7 @@ static void ata_pio_complete (struct ata_port *ap) | |||
2467 | 2487 | ||
2468 | ap->pio_task_state = PIO_ST_IDLE; | 2488 | ap->pio_task_state = PIO_ST_IDLE; |
2469 | 2489 | ||
2470 | ata_irq_on(ap); | 2490 | ata_poll_qc_complete(qc, drv_stat); |
2471 | |||
2472 | ata_qc_complete(qc, drv_stat); | ||
2473 | } | 2491 | } |
2474 | 2492 | ||
2475 | 2493 | ||
@@ -2494,6 +2512,20 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) | |||
2494 | #endif /* __BIG_ENDIAN */ | 2512 | #endif /* __BIG_ENDIAN */ |
2495 | } | 2513 | } |
2496 | 2514 | ||
2515 | /** | ||
2516 | * ata_mmio_data_xfer - Transfer data by MMIO | ||
2517 | * @ap: port to read/write | ||
2518 | * @buf: data buffer | ||
2519 | * @buflen: buffer length | ||
2520 | * @do_write: read/write | ||
2521 | * | ||
2522 | * Transfer data from/to the device data register by MMIO. | ||
2523 | * | ||
2524 | * LOCKING: | ||
2525 | * Inherited from caller. | ||
2526 | * | ||
2527 | */ | ||
2528 | |||
2497 | static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, | 2529 | static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, |
2498 | unsigned int buflen, int write_data) | 2530 | unsigned int buflen, int write_data) |
2499 | { | 2531 | { |
@@ -2502,6 +2534,7 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, | |||
2502 | u16 *buf16 = (u16 *) buf; | 2534 | u16 *buf16 = (u16 *) buf; |
2503 | void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr; | 2535 | void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr; |
2504 | 2536 | ||
2537 | /* Transfer multiple of 2 bytes */ | ||
2505 | if (write_data) { | 2538 | if (write_data) { |
2506 | for (i = 0; i < words; i++) | 2539 | for (i = 0; i < words; i++) |
2507 | writew(le16_to_cpu(buf16[i]), mmio); | 2540 | writew(le16_to_cpu(buf16[i]), mmio); |
@@ -2509,19 +2542,76 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, | |||
2509 | for (i = 0; i < words; i++) | 2542 | for (i = 0; i < words; i++) |
2510 | buf16[i] = cpu_to_le16(readw(mmio)); | 2543 | buf16[i] = cpu_to_le16(readw(mmio)); |
2511 | } | 2544 | } |
2545 | |||
2546 | /* Transfer trailing 1 byte, if any. */ | ||
2547 | if (unlikely(buflen & 0x01)) { | ||
2548 | u16 align_buf[1] = { 0 }; | ||
2549 | unsigned char *trailing_buf = buf + buflen - 1; | ||
2550 | |||
2551 | if (write_data) { | ||
2552 | memcpy(align_buf, trailing_buf, 1); | ||
2553 | writew(le16_to_cpu(align_buf[0]), mmio); | ||
2554 | } else { | ||
2555 | align_buf[0] = cpu_to_le16(readw(mmio)); | ||
2556 | memcpy(trailing_buf, align_buf, 1); | ||
2557 | } | ||
2558 | } | ||
2512 | } | 2559 | } |
2513 | 2560 | ||
2561 | /** | ||
2562 | * ata_pio_data_xfer - Transfer data by PIO | ||
2563 | * @ap: port to read/write | ||
2564 | * @buf: data buffer | ||
2565 | * @buflen: buffer length | ||
2566 | * @do_write: read/write | ||
2567 | * | ||
2568 | * Transfer data from/to the device data register by PIO. | ||
2569 | * | ||
2570 | * LOCKING: | ||
2571 | * Inherited from caller. | ||
2572 | * | ||
2573 | */ | ||
2574 | |||
2514 | static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, | 2575 | static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, |
2515 | unsigned int buflen, int write_data) | 2576 | unsigned int buflen, int write_data) |
2516 | { | 2577 | { |
2517 | unsigned int dwords = buflen >> 1; | 2578 | unsigned int words = buflen >> 1; |
2518 | 2579 | ||
2580 | /* Transfer multiple of 2 bytes */ | ||
2519 | if (write_data) | 2581 | if (write_data) |
2520 | outsw(ap->ioaddr.data_addr, buf, dwords); | 2582 | outsw(ap->ioaddr.data_addr, buf, words); |
2521 | else | 2583 | else |
2522 | insw(ap->ioaddr.data_addr, buf, dwords); | 2584 | insw(ap->ioaddr.data_addr, buf, words); |
2585 | |||
2586 | /* Transfer trailing 1 byte, if any. */ | ||
2587 | if (unlikely(buflen & 0x01)) { | ||
2588 | u16 align_buf[1] = { 0 }; | ||
2589 | unsigned char *trailing_buf = buf + buflen - 1; | ||
2590 | |||
2591 | if (write_data) { | ||
2592 | memcpy(align_buf, trailing_buf, 1); | ||
2593 | outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr); | ||
2594 | } else { | ||
2595 | align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr)); | ||
2596 | memcpy(trailing_buf, align_buf, 1); | ||
2597 | } | ||
2598 | } | ||
2523 | } | 2599 | } |
2524 | 2600 | ||
2601 | /** | ||
2602 | * ata_data_xfer - Transfer data from/to the data register. | ||
2603 | * @ap: port to read/write | ||
2604 | * @buf: data buffer | ||
2605 | * @buflen: buffer length | ||
2606 | * @do_write: read/write | ||
2607 | * | ||
2608 | * Transfer data from/to the device data register. | ||
2609 | * | ||
2610 | * LOCKING: | ||
2611 | * Inherited from caller. | ||
2612 | * | ||
2613 | */ | ||
2614 | |||
2525 | static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, | 2615 | static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, |
2526 | unsigned int buflen, int do_write) | 2616 | unsigned int buflen, int do_write) |
2527 | { | 2617 | { |
@@ -2531,6 +2621,16 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, | |||
2531 | ata_pio_data_xfer(ap, buf, buflen, do_write); | 2621 | ata_pio_data_xfer(ap, buf, buflen, do_write); |
2532 | } | 2622 | } |
2533 | 2623 | ||
2624 | /** | ||
2625 | * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data. | ||
2626 | * @qc: Command on going | ||
2627 | * | ||
2628 | * Transfer ATA_SECT_SIZE of data from/to the ATA device. | ||
2629 | * | ||
2630 | * LOCKING: | ||
2631 | * Inherited from caller. | ||
2632 | */ | ||
2633 | |||
2534 | static void ata_pio_sector(struct ata_queued_cmd *qc) | 2634 | static void ata_pio_sector(struct ata_queued_cmd *qc) |
2535 | { | 2635 | { |
2536 | int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); | 2636 | int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); |
@@ -2569,6 +2669,18 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) | |||
2569 | kunmap(page); | 2669 | kunmap(page); |
2570 | } | 2670 | } |
2571 | 2671 | ||
2672 | /** | ||
2673 | * __atapi_pio_bytes - Transfer data from/to the ATAPI device. | ||
2674 | * @qc: Command on going | ||
2675 | * @bytes: number of bytes | ||
2676 | * | ||
2677 | * Transfer data from/to the ATAPI device. | ||
2678 | * | ||
2679 | * LOCKING: | ||
2680 | * Inherited from caller. | ||
2681 | * | ||
2682 | */ | ||
2683 | |||
2572 | static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) | 2684 | static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) |
2573 | { | 2685 | { |
2574 | int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); | 2686 | int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); |
@@ -2578,10 +2690,33 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) | |||
2578 | unsigned char *buf; | 2690 | unsigned char *buf; |
2579 | unsigned int offset, count; | 2691 | unsigned int offset, count; |
2580 | 2692 | ||
2581 | if (qc->curbytes == qc->nbytes - bytes) | 2693 | if (qc->curbytes + bytes >= qc->nbytes) |
2582 | ap->pio_task_state = PIO_ST_LAST; | 2694 | ap->pio_task_state = PIO_ST_LAST; |
2583 | 2695 | ||
2584 | next_sg: | 2696 | next_sg: |
2697 | if (unlikely(qc->cursg >= qc->n_elem)) { | ||
2698 | /* | ||
2699 | * The end of qc->sg is reached and the device expects | ||
2700 | * more data to transfer. In order not to overrun qc->sg | ||
2701 | * and fulfill length specified in the byte count register, | ||
2702 | * - for read case, discard trailing data from the device | ||
2703 | * - for write case, padding zero data to the device | ||
2704 | */ | ||
2705 | u16 pad_buf[1] = { 0 }; | ||
2706 | unsigned int words = bytes >> 1; | ||
2707 | unsigned int i; | ||
2708 | |||
2709 | if (words) /* warning if bytes > 1 */ | ||
2710 | printk(KERN_WARNING "ata%u: %u bytes trailing data\n", | ||
2711 | ap->id, bytes); | ||
2712 | |||
2713 | for (i = 0; i < words; i++) | ||
2714 | ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); | ||
2715 | |||
2716 | ap->pio_task_state = PIO_ST_LAST; | ||
2717 | return; | ||
2718 | } | ||
2719 | |||
2585 | sg = &qc->sg[qc->cursg]; | 2720 | sg = &qc->sg[qc->cursg]; |
2586 | 2721 | ||
2587 | page = sg->page; | 2722 | page = sg->page; |
@@ -2615,11 +2750,21 @@ next_sg: | |||
2615 | 2750 | ||
2616 | kunmap(page); | 2751 | kunmap(page); |
2617 | 2752 | ||
2618 | if (bytes) { | 2753 | if (bytes) |
2619 | goto next_sg; | 2754 | goto next_sg; |
2620 | } | ||
2621 | } | 2755 | } |
2622 | 2756 | ||
2757 | /** | ||
2758 | * atapi_pio_bytes - Transfer data from/to the ATAPI device. | ||
2759 | * @qc: Command on going | ||
2760 | * | ||
2761 | * Transfer data from/to the ATAPI device. | ||
2762 | * | ||
2763 | * LOCKING: | ||
2764 | * Inherited from caller. | ||
2765 | * | ||
2766 | */ | ||
2767 | |||
2623 | static void atapi_pio_bytes(struct ata_queued_cmd *qc) | 2768 | static void atapi_pio_bytes(struct ata_queued_cmd *qc) |
2624 | { | 2769 | { |
2625 | struct ata_port *ap = qc->ap; | 2770 | struct ata_port *ap = qc->ap; |
@@ -2692,9 +2837,7 @@ static void ata_pio_block(struct ata_port *ap) | |||
2692 | if ((status & ATA_DRQ) == 0) { | 2837 | if ((status & ATA_DRQ) == 0) { |
2693 | ap->pio_task_state = PIO_ST_IDLE; | 2838 | ap->pio_task_state = PIO_ST_IDLE; |
2694 | 2839 | ||
2695 | ata_irq_on(ap); | 2840 | ata_poll_qc_complete(qc, status); |
2696 | |||
2697 | ata_qc_complete(qc, status); | ||
2698 | return; | 2841 | return; |
2699 | } | 2842 | } |
2700 | 2843 | ||
@@ -2724,9 +2867,7 @@ static void ata_pio_error(struct ata_port *ap) | |||
2724 | 2867 | ||
2725 | ap->pio_task_state = PIO_ST_IDLE; | 2868 | ap->pio_task_state = PIO_ST_IDLE; |
2726 | 2869 | ||
2727 | ata_irq_on(ap); | 2870 | ata_poll_qc_complete(qc, drv_stat | ATA_ERR); |
2728 | |||
2729 | ata_qc_complete(qc, drv_stat | ATA_ERR); | ||
2730 | } | 2871 | } |
2731 | 2872 | ||
2732 | static void ata_pio_task(void *_data) | 2873 | static void ata_pio_task(void *_data) |
@@ -2832,8 +2973,10 @@ static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev, | |||
2832 | static void ata_qc_timeout(struct ata_queued_cmd *qc) | 2973 | static void ata_qc_timeout(struct ata_queued_cmd *qc) |
2833 | { | 2974 | { |
2834 | struct ata_port *ap = qc->ap; | 2975 | struct ata_port *ap = qc->ap; |
2976 | struct ata_host_set *host_set = ap->host_set; | ||
2835 | struct ata_device *dev = qc->dev; | 2977 | struct ata_device *dev = qc->dev; |
2836 | u8 host_stat = 0, drv_stat; | 2978 | u8 host_stat = 0, drv_stat; |
2979 | unsigned long flags; | ||
2837 | 2980 | ||
2838 | DPRINTK("ENTER\n"); | 2981 | DPRINTK("ENTER\n"); |
2839 | 2982 | ||
@@ -2844,7 +2987,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc) | |||
2844 | if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) { | 2987 | if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) { |
2845 | 2988 | ||
2846 | /* finish completing original command */ | 2989 | /* finish completing original command */ |
2990 | spin_lock_irqsave(&host_set->lock, flags); | ||
2847 | __ata_qc_complete(qc); | 2991 | __ata_qc_complete(qc); |
2992 | spin_unlock_irqrestore(&host_set->lock, flags); | ||
2848 | 2993 | ||
2849 | atapi_request_sense(ap, dev, cmd); | 2994 | atapi_request_sense(ap, dev, cmd); |
2850 | 2995 | ||
@@ -2855,6 +3000,8 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc) | |||
2855 | } | 3000 | } |
2856 | } | 3001 | } |
2857 | 3002 | ||
3003 | spin_lock_irqsave(&host_set->lock, flags); | ||
3004 | |||
2858 | /* hack alert! We cannot use the supplied completion | 3005 | /* hack alert! We cannot use the supplied completion |
2859 | * function from inside the ->eh_strategy_handler() thread. | 3006 | * function from inside the ->eh_strategy_handler() thread. |
2860 | * libata is the only user of ->eh_strategy_handler() in | 3007 | * libata is the only user of ->eh_strategy_handler() in |
@@ -2870,7 +3017,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc) | |||
2870 | host_stat = ap->ops->bmdma_status(ap); | 3017 | host_stat = ap->ops->bmdma_status(ap); |
2871 | 3018 | ||
2872 | /* before we do anything else, clear DMA-Start bit */ | 3019 | /* before we do anything else, clear DMA-Start bit */ |
2873 | ap->ops->bmdma_stop(ap); | 3020 | ap->ops->bmdma_stop(qc); |
2874 | 3021 | ||
2875 | /* fall through */ | 3022 | /* fall through */ |
2876 | 3023 | ||
@@ -2888,6 +3035,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc) | |||
2888 | ata_qc_complete(qc, drv_stat); | 3035 | ata_qc_complete(qc, drv_stat); |
2889 | break; | 3036 | break; |
2890 | } | 3037 | } |
3038 | |||
3039 | spin_unlock_irqrestore(&host_set->lock, flags); | ||
3040 | |||
2891 | out: | 3041 | out: |
2892 | DPRINTK("EXIT\n"); | 3042 | DPRINTK("EXIT\n"); |
2893 | } | 3043 | } |
@@ -3061,9 +3211,14 @@ void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) | |||
3061 | if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) | 3211 | if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) |
3062 | ata_sg_clean(qc); | 3212 | ata_sg_clean(qc); |
3063 | 3213 | ||
3214 | /* atapi: mark qc as inactive to prevent the interrupt handler | ||
3215 | * from completing the command twice later, before the error handler | ||
3216 | * is called. (when rc != 0 and atapi request sense is needed) | ||
3217 | */ | ||
3218 | qc->flags &= ~ATA_QCFLAG_ACTIVE; | ||
3219 | |||
3064 | /* call completion callback */ | 3220 | /* call completion callback */ |
3065 | rc = qc->complete_fn(qc, drv_stat); | 3221 | rc = qc->complete_fn(qc, drv_stat); |
3066 | qc->flags &= ~ATA_QCFLAG_ACTIVE; | ||
3067 | 3222 | ||
3068 | /* if callback indicates not to complete command (non-zero), | 3223 | /* if callback indicates not to complete command (non-zero), |
3069 | * return immediately | 3224 | * return immediately |
@@ -3193,11 +3348,13 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc) | |||
3193 | break; | 3348 | break; |
3194 | 3349 | ||
3195 | case ATA_PROT_ATAPI_NODATA: | 3350 | case ATA_PROT_ATAPI_NODATA: |
3351 | ap->flags |= ATA_FLAG_NOINTR; | ||
3196 | ata_tf_to_host_nolock(ap, &qc->tf); | 3352 | ata_tf_to_host_nolock(ap, &qc->tf); |
3197 | queue_work(ata_wq, &ap->packet_task); | 3353 | queue_work(ata_wq, &ap->packet_task); |
3198 | break; | 3354 | break; |
3199 | 3355 | ||
3200 | case ATA_PROT_ATAPI_DMA: | 3356 | case ATA_PROT_ATAPI_DMA: |
3357 | ap->flags |= ATA_FLAG_NOINTR; | ||
3201 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ | 3358 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ |
3202 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | 3359 | ap->ops->bmdma_setup(qc); /* set up bmdma */ |
3203 | queue_work(ata_wq, &ap->packet_task); | 3360 | queue_work(ata_wq, &ap->packet_task); |
@@ -3242,7 +3399,7 @@ static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc) | |||
3242 | } | 3399 | } |
3243 | 3400 | ||
3244 | /** | 3401 | /** |
3245 | * ata_bmdma_start - Start a PCI IDE BMDMA transaction | 3402 | * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction |
3246 | * @qc: Info associated with this ATA transaction. | 3403 | * @qc: Info associated with this ATA transaction. |
3247 | * | 3404 | * |
3248 | * LOCKING: | 3405 | * LOCKING: |
@@ -3413,7 +3570,7 @@ u8 ata_bmdma_status(struct ata_port *ap) | |||
3413 | 3570 | ||
3414 | /** | 3571 | /** |
3415 | * ata_bmdma_stop - Stop PCI IDE BMDMA transfer | 3572 | * ata_bmdma_stop - Stop PCI IDE BMDMA transfer |
3416 | * @ap: Port associated with this ATA transaction. | 3573 | * @qc: Command we are ending DMA for |
3417 | * | 3574 | * |
3418 | * Clears the ATA_DMA_START flag in the dma control register | 3575 | * Clears the ATA_DMA_START flag in the dma control register |
3419 | * | 3576 | * |
@@ -3423,8 +3580,9 @@ u8 ata_bmdma_status(struct ata_port *ap) | |||
3423 | * spin_lock_irqsave(host_set lock) | 3580 | * spin_lock_irqsave(host_set lock) |
3424 | */ | 3581 | */ |
3425 | 3582 | ||
3426 | void ata_bmdma_stop(struct ata_port *ap) | 3583 | void ata_bmdma_stop(struct ata_queued_cmd *qc) |
3427 | { | 3584 | { |
3585 | struct ata_port *ap = qc->ap; | ||
3428 | if (ap->flags & ATA_FLAG_MMIO) { | 3586 | if (ap->flags & ATA_FLAG_MMIO) { |
3429 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; | 3587 | void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; |
3430 | 3588 | ||
@@ -3476,7 +3634,7 @@ inline unsigned int ata_host_intr (struct ata_port *ap, | |||
3476 | goto idle_irq; | 3634 | goto idle_irq; |
3477 | 3635 | ||
3478 | /* before we do anything else, clear DMA-Start bit */ | 3636 | /* before we do anything else, clear DMA-Start bit */ |
3479 | ap->ops->bmdma_stop(ap); | 3637 | ap->ops->bmdma_stop(qc); |
3480 | 3638 | ||
3481 | /* fall through */ | 3639 | /* fall through */ |
3482 | 3640 | ||
@@ -3551,7 +3709,8 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) | |||
3551 | struct ata_port *ap; | 3709 | struct ata_port *ap; |
3552 | 3710 | ||
3553 | ap = host_set->ports[i]; | 3711 | ap = host_set->ports[i]; |
3554 | if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { | 3712 | if (ap && |
3713 | !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { | ||
3555 | struct ata_queued_cmd *qc; | 3714 | struct ata_queued_cmd *qc; |
3556 | 3715 | ||
3557 | qc = ata_qc_from_tag(ap, ap->active_tag); | 3716 | qc = ata_qc_from_tag(ap, ap->active_tag); |
@@ -3603,19 +3762,27 @@ static void atapi_packet_task(void *_data) | |||
3603 | /* send SCSI cdb */ | 3762 | /* send SCSI cdb */ |
3604 | DPRINTK("send cdb\n"); | 3763 | DPRINTK("send cdb\n"); |
3605 | assert(ap->cdb_len >= 12); | 3764 | assert(ap->cdb_len >= 12); |
3606 | ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); | ||
3607 | 3765 | ||
3608 | /* if we are DMA'ing, irq handler takes over from here */ | 3766 | if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || |
3609 | if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) | 3767 | qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { |
3610 | ap->ops->bmdma_start(qc); /* initiate bmdma */ | 3768 | unsigned long flags; |
3611 | 3769 | ||
3612 | /* non-data commands are also handled via irq */ | 3770 | /* Once we're done issuing command and kicking bmdma, |
3613 | else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { | 3771 | * irq handler takes over. To not lose irq, we need |
3614 | /* do nothing */ | 3772 | * to clear NOINTR flag before sending cdb, but |
3615 | } | 3773 | * interrupt handler shouldn't be invoked before we're |
3774 | * finished. Hence, the following locking. | ||
3775 | */ | ||
3776 | spin_lock_irqsave(&ap->host_set->lock, flags); | ||
3777 | ap->flags &= ~ATA_FLAG_NOINTR; | ||
3778 | ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); | ||
3779 | if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) | ||
3780 | ap->ops->bmdma_start(qc); /* initiate bmdma */ | ||
3781 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | ||
3782 | } else { | ||
3783 | ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); | ||
3616 | 3784 | ||
3617 | /* PIO commands are handled by polling */ | 3785 | /* PIO commands are handled by polling */ |
3618 | else { | ||
3619 | ap->pio_task_state = PIO_ST; | 3786 | ap->pio_task_state = PIO_ST; |
3620 | queue_work(ata_wq, &ap->pio_task); | 3787 | queue_work(ata_wq, &ap->pio_task); |
3621 | } | 3788 | } |
@@ -3623,7 +3790,7 @@ static void atapi_packet_task(void *_data) | |||
3623 | return; | 3790 | return; |
3624 | 3791 | ||
3625 | err_out: | 3792 | err_out: |
3626 | ata_qc_complete(qc, ATA_ERR); | 3793 | ata_poll_qc_complete(qc, ATA_ERR); |
3627 | } | 3794 | } |
3628 | 3795 | ||
3629 | 3796 | ||