Diffstat (limited to 'drivers/scsi/libata-core.c')
-rw-r--r--  drivers/scsi/libata-core.c | 247 +++++++++++++++++++++++++++++-----
1 file changed, 207 insertions(+), 40 deletions(-)
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 4154e5b6bad8..dee4b12b0342 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -1314,12 +1314,12 @@ static inline u8 ata_dev_knobble(struct ata_port *ap)
 /**
  *	ata_dev_config - Run device specific handlers and check for
  *			 SATA->PATA bridges
  *	@ap: Bus
  *	@i: Device
  *
  *	LOCKING:
  */
 
 void ata_dev_config(struct ata_port *ap, unsigned int i)
 {
 	/* limit bridge transfers to udma5, 200 sectors */
@@ -2387,6 +2387,27 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 }
 
 /**
+ *	ata_poll_qc_complete - turn irq back on and finish qc
+ *	@qc: Command to complete
+ *	@drv_stat: ATA status register content
+ *
+ *	LOCKING:
+ *	None. (grabs host lock)
+ */
+
+void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	ap->flags &= ~ATA_FLAG_NOINTR;
+	ata_irq_on(ap);
+	ata_qc_complete(qc, drv_stat);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+}
+
+/**
  *	ata_pio_poll -
  *	@ap:
  *
@@ -2448,11 +2469,10 @@ static void ata_pio_complete (struct ata_port *ap)
 	u8 drv_stat;
 
 	/*
-	 * This is purely hueristic. This is a fast path.
-	 * Sometimes when we enter, BSY will be cleared in
-	 * a chk-status or two. If not, the drive is probably seeking
-	 * or something. Snooze for a couple msecs, then
-	 * chk-status again. If still busy, fall back to
+	 * This is purely heuristic. This is a fast path. Sometimes when
+	 * we enter, BSY will be cleared in a chk-status or two. If not,
+	 * the drive is probably seeking or something. Snooze for a couple
+	 * msecs, then chk-status again. If still busy, fall back to
 	 * PIO_ST_POLL state.
 	 */
 	drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
@@ -2477,9 +2497,7 @@ static void ata_pio_complete (struct ata_port *ap)
 
 	ap->pio_task_state = PIO_ST_IDLE;
 
-	ata_irq_on(ap);
-
-	ata_qc_complete(qc, drv_stat);
+	ata_poll_qc_complete(qc, drv_stat);
 }
 
 
@@ -2504,6 +2522,20 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
 #endif /* __BIG_ENDIAN */
 }
 
+/**
+ *	ata_mmio_data_xfer - Transfer data by MMIO
+ *	@ap: port to read/write
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@do_write: read/write
+ *
+ *	Transfer data from/to the device data register by MMIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ */
+
 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
 			       unsigned int buflen, int write_data)
 {
@@ -2512,6 +2544,7 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
 	u16 *buf16 = (u16 *) buf;
 	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
 
+	/* Transfer multiple of 2 bytes */
 	if (write_data) {
 		for (i = 0; i < words; i++)
 			writew(le16_to_cpu(buf16[i]), mmio);
@@ -2519,19 +2552,76 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
 		for (i = 0; i < words; i++)
 			buf16[i] = cpu_to_le16(readw(mmio));
 	}
+
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		u16 align_buf[1] = { 0 };
+		unsigned char *trailing_buf = buf + buflen - 1;
+
+		if (write_data) {
+			memcpy(align_buf, trailing_buf, 1);
+			writew(le16_to_cpu(align_buf[0]), mmio);
+		} else {
+			align_buf[0] = cpu_to_le16(readw(mmio));
+			memcpy(trailing_buf, align_buf, 1);
+		}
+	}
 }
 
+/**
+ *	ata_pio_data_xfer - Transfer data by PIO
+ *	@ap: port to read/write
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@do_write: read/write
+ *
+ *	Transfer data from/to the device data register by PIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ */
+
 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
 			      unsigned int buflen, int write_data)
 {
-	unsigned int dwords = buflen >> 1;
+	unsigned int words = buflen >> 1;
 
+	/* Transfer multiple of 2 bytes */
 	if (write_data)
-		outsw(ap->ioaddr.data_addr, buf, dwords);
+		outsw(ap->ioaddr.data_addr, buf, words);
 	else
-		insw(ap->ioaddr.data_addr, buf, dwords);
+		insw(ap->ioaddr.data_addr, buf, words);
+
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		u16 align_buf[1] = { 0 };
+		unsigned char *trailing_buf = buf + buflen - 1;
+
+		if (write_data) {
+			memcpy(align_buf, trailing_buf, 1);
+			outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
+		} else {
+			align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
+			memcpy(trailing_buf, align_buf, 1);
+		}
+	}
 }
 
+/**
+ *	ata_data_xfer - Transfer data from/to the data register.
+ *	@ap: port to read/write
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@do_write: read/write
+ *
+ *	Transfer data from/to the device data register.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ */
+
 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
 			  unsigned int buflen, int do_write)
 {
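
The two hunks above give ata_mmio_data_xfer() and ata_pio_data_xfer() the same shape: the even part of the buffer moves as 16-bit words, and a trailing odd byte is staged through a zero-initialized two-byte bounce buffer so the device only ever sees whole words. A small userspace model of that pattern, with a word FIFO standing in for the ATA data register (the devio_* helpers are invented for this sketch, not libata calls, and endianness handling is omitted):

/* Userspace model of the odd-length transfer pattern added above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t fifo[64];
static size_t fifo_w, fifo_r;

static void devio_write16(uint16_t v) { fifo[fifo_w++] = v; }
static uint16_t devio_read16(void)    { return fifo[fifo_r++]; }

/* Mirrors the ata_pio_data_xfer() logic: whole words first,
 * then one padded word for a trailing odd byte. */
static void xfer(unsigned char *buf, size_t buflen, int write_data)
{
	size_t words = buflen >> 1;
	size_t i;
	uint16_t w;

	for (i = 0; i < words; i++) {
		if (write_data) {
			memcpy(&w, buf + 2 * i, 2);
			devio_write16(w);
		} else {
			w = devio_read16();
			memcpy(buf + 2 * i, &w, 2);
		}
	}

	if (buflen & 0x01) {		/* trailing byte, if any */
		uint16_t align_buf = 0;
		unsigned char *trailing = buf + buflen - 1;

		if (write_data) {
			memcpy(&align_buf, trailing, 1);
			devio_write16(align_buf);	/* other byte is zero padding */
		} else {
			align_buf = devio_read16();
			memcpy(trailing, &align_buf, 1);  /* extra byte is discarded */
		}
	}
}

int main(void)
{
	unsigned char out[5] = { 1, 2, 3, 4, 5 }, in[5] = { 0 };

	xfer(out, sizeof(out), 1);	/* "write" 5 bytes: 2 words + padded word */
	xfer(in, sizeof(in), 0);	/* "read" them back */
	printf("%d %d %d %d %d\n", in[0], in[1], in[2], in[3], in[4]);
	return 0;
}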
@@ -2541,6 +2631,16 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
 		ata_pio_data_xfer(ap, buf, buflen, do_write);
 }
 
+/**
+ *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
+ *	@qc: Command on going
+ *
+ *	Transfer ATA_SECT_SIZE of data from/to the ATA device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
 static void ata_pio_sector(struct ata_queued_cmd *qc)
 {
 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
@@ -2579,6 +2679,18 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	kunmap(page);
 }
 
+/**
+ *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ *	@qc: Command on going
+ *	@bytes: number of bytes
+ *
+ *	Transfer Transfer data from/to the ATAPI device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ */
+
 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 {
 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
@@ -2588,10 +2700,33 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 	unsigned char *buf;
 	unsigned int offset, count;
 
-	if (qc->curbytes == qc->nbytes - bytes)
+	if (qc->curbytes + bytes >= qc->nbytes)
 		ap->pio_task_state = PIO_ST_LAST;
 
 next_sg:
+	if (unlikely(qc->cursg >= qc->n_elem)) {
+		/*
+		 * The end of qc->sg is reached and the device expects
+		 * more data to transfer. In order not to overrun qc->sg
+		 * and fulfill length specified in the byte count register,
+		 *    - for read case, discard trailing data from the device
+		 *    - for write case, padding zero data to the device
+		 */
+		u16 pad_buf[1] = { 0 };
+		unsigned int words = bytes >> 1;
+		unsigned int i;
+
+		if (words) /* warning if bytes > 1 */
+			printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
+			       ap->id, bytes);
+
+		for (i = 0; i < words; i++)
+			ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
+
+		ap->pio_task_state = PIO_ST_LAST;
+		return;
+	}
+
 	sg = &qc->sg[qc->cursg];
 
 	page = sg->page;
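
The guard added to __atapi_pio_bytes() above covers an ATAPI device that requests more data than the scatter/gather list holds: rather than running past qc->sg, the remainder is drained on reads or zero-padded on writes, two bytes at a time through pad_buf, and the state machine jumps to PIO_ST_LAST. A rough userspace sketch of the same idea follows; the buffer, byte-count and xfer_word() names are made up for illustration and only model ata_data_xfer(..., 2, do_write):

/* Userspace sketch of the "device wants more than the buffer holds" case. */
#include <stdio.h>
#include <string.h>

static void xfer_word(const unsigned char *src, unsigned char *dst)
{
	memcpy(dst, src, 2);	/* move exactly one 16-bit word */
}

static void pio_bytes(unsigned char *buf, size_t buf_len, size_t requested,
		      unsigned char *wire, int write_data)
{
	size_t done = 0;

	/* normal case: satisfy the request from the buffer */
	while (done < requested && done < buf_len) {
		if (write_data)
			xfer_word(buf + done, wire + done);
		else
			xfer_word(wire + done, buf + done);
		done += 2;
	}

	/* overrun case: buffer exhausted but the device still wants data.
	 * Drain or pad the rest word by word from a zeroed bounce buffer,
	 * mirroring the pad_buf loop in the patch. */
	if (done < requested) {
		unsigned char pad_buf[2] = { 0, 0 };
		size_t words = (requested - done) >> 1;

		fprintf(stderr, "warning: %zu bytes trailing data\n",
			requested - done);
		while (words--) {
			if (write_data)
				xfer_word(pad_buf, wire + done);
			else
				xfer_word(wire + done, pad_buf);
			done += 2;
		}
	}
}

int main(void)
{
	unsigned char sg_data[4] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char wire[8] = { 0 };
	int i;

	/* device asks for 8 bytes, buffer only has 4: last 4 are zero padding */
	pio_bytes(sg_data, sizeof(sg_data), sizeof(wire), wire, 1);
	for (i = 0; i < 8; i++)
		printf("%02x ", wire[i]);
	printf("\n");
	return 0;
}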
@@ -2625,11 +2760,21 @@ next_sg:
 
 	kunmap(page);
 
-	if (bytes) {
+	if (bytes)
 		goto next_sg;
-	}
 }
 
+/**
+ *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ *	@qc: Command on going
+ *
+ *	Transfer Transfer data from/to the ATAPI device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ */
+
 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
@@ -2702,9 +2847,7 @@ static void ata_pio_block(struct ata_port *ap)
 	if ((status & ATA_DRQ) == 0) {
 		ap->pio_task_state = PIO_ST_IDLE;
 
-		ata_irq_on(ap);
-
-		ata_qc_complete(qc, status);
+		ata_poll_qc_complete(qc, status);
 		return;
 	}
 
@@ -2734,9 +2877,7 @@ static void ata_pio_error(struct ata_port *ap)
 
 	ap->pio_task_state = PIO_ST_IDLE;
 
-	ata_irq_on(ap);
-
-	ata_qc_complete(qc, drv_stat | ATA_ERR);
+	ata_poll_qc_complete(qc, drv_stat | ATA_ERR);
 }
 
 static void ata_pio_task(void *_data)
@@ -2842,8 +2983,10 @@ static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
 static void ata_qc_timeout(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
+	struct ata_host_set *host_set = ap->host_set;
 	struct ata_device *dev = qc->dev;
 	u8 host_stat = 0, drv_stat;
+	unsigned long flags;
 
 	DPRINTK("ENTER\n");
 
@@ -2854,7 +2997,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 		if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) {
 
 			/* finish completing original command */
+			spin_lock_irqsave(&host_set->lock, flags);
 			__ata_qc_complete(qc);
+			spin_unlock_irqrestore(&host_set->lock, flags);
 
 			atapi_request_sense(ap, dev, cmd);
 
@@ -2865,6 +3010,8 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 		}
 	}
 
+	spin_lock_irqsave(&host_set->lock, flags);
+
 	/* hack alert!  We cannot use the supplied completion
 	 * function from inside the ->eh_strategy_handler() thread.
 	 * libata is the only user of ->eh_strategy_handler() in
@@ -2880,7 +3027,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 		host_stat = ap->ops->bmdma_status(ap);
 
 		/* before we do anything else, clear DMA-Start bit */
-		ap->ops->bmdma_stop(ap);
+		ap->ops->bmdma_stop(qc);
 
 		/* fall through */
 
@@ -2898,6 +3045,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
 		ata_qc_complete(qc, drv_stat);
 		break;
 	}
+
+	spin_unlock_irqrestore(&host_set->lock, flags);
+
 out:
 	DPRINTK("EXIT\n");
 }
@@ -3071,9 +3221,14 @@ void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
 		ata_sg_clean(qc);
 
+	/* atapi: mark qc as inactive to prevent the interrupt handler
+	 * from completing the command twice later, before the error handler
+	 * is called. (when rc != 0 and atapi request sense is needed)
+	 */
+	qc->flags &= ~ATA_QCFLAG_ACTIVE;
+
 	/* call completion callback */
 	rc = qc->complete_fn(qc, drv_stat);
-	qc->flags &= ~ATA_QCFLAG_ACTIVE;
 
 	/* if callback indicates not to complete command (non-zero),
 	 * return immediately
@@ -3203,11 +3358,13 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 		break;
 
 	case ATA_PROT_ATAPI_NODATA:
+		ap->flags |= ATA_FLAG_NOINTR;
 		ata_tf_to_host_nolock(ap, &qc->tf);
 		queue_work(ata_wq, &ap->packet_task);
 		break;
 
 	case ATA_PROT_ATAPI_DMA:
+		ap->flags |= ATA_FLAG_NOINTR;
 		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
 		ap->ops->bmdma_setup(qc);	/* set up bmdma */
 		queue_work(ata_wq, &ap->packet_task);
@@ -3252,7 +3409,7 @@ static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
 }
 
 /**
- *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
+ *	ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
  *	@qc: Info associated with this ATA transaction.
  *
  *	LOCKING:
@@ -3423,7 +3580,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
 
 /**
  *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
- *	@ap: Port associated with this ATA transaction.
+ *	@qc: Command we are ending DMA for
  *
  *	Clears the ATA_DMA_START flag in the dma control register
 *
@@ -3433,8 +3590,9 @@ u8 ata_bmdma_status(struct ata_port *ap)
  *	spin_lock_irqsave(host_set lock)
  */
 
-void ata_bmdma_stop(struct ata_port *ap)
+void ata_bmdma_stop(struct ata_queued_cmd *qc)
 {
+	struct ata_port *ap = qc->ap;
 	if (ap->flags & ATA_FLAG_MMIO) {
 		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
 
@@ -3486,7 +3644,7 @@ inline unsigned int ata_host_intr (struct ata_port *ap,
 			goto idle_irq;
 
 		/* before we do anything else, clear DMA-Start bit */
-		ap->ops->bmdma_stop(ap);
+		ap->ops->bmdma_stop(qc);
 
 		/* fall through */
 
@@ -3561,7 +3719,8 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 		struct ata_port *ap;
 
 		ap = host_set->ports[i];
-		if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+		if (ap &&
+		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -3613,19 +3772,27 @@ static void atapi_packet_task(void *_data)
 	/* send SCSI cdb */
 	DPRINTK("send cdb\n");
 	assert(ap->cdb_len >= 12);
-	ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
 
-	/* if we are DMA'ing, irq handler takes over from here */
-	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
-		ap->ops->bmdma_start(qc);	/* initiate bmdma */
+	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
+	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
+		unsigned long flags;
 
-	/* non-data commands are also handled via irq */
-	else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
-		/* do nothing */
-	}
+		/* Once we're done issuing command and kicking bmdma,
+		 * irq handler takes over. To not lose irq, we need
+		 * to clear NOINTR flag before sending cdb, but
+		 * interrupt handler shouldn't be invoked before we're
+		 * finished. Hence, the following locking.
+		 */
+		spin_lock_irqsave(&ap->host_set->lock, flags);
+		ap->flags &= ~ATA_FLAG_NOINTR;
+		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
+		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
+			ap->ops->bmdma_start(qc);	/* initiate bmdma */
+		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	} else {
+		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
 
-	/* PIO commands are handled by polling */
-	else {
+		/* PIO commands are handled by polling */
 		ap->pio_task_state = PIO_ST;
 		queue_work(ata_wq, &ap->pio_task);
 	}
@@ -3633,7 +3800,7 @@ static void atapi_packet_task(void *_data)
 	return;
 
 err_out:
-	ata_qc_complete(qc, ATA_ERR);
+	ata_poll_qc_complete(qc, ATA_ERR);
 }
 
 
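
Taken together, the ATA_FLAG_NOINTR changes form a small handshake: ata_qc_issue_prot() sets the flag before deferring ATAPI commands to atapi_packet_task(), ata_interrupt() skips the port while the flag is set, and atapi_packet_task() (or ata_poll_qc_complete() on the polling paths) clears it under host_set->lock just before the interrupt handler is allowed to take over. A minimal two-thread pthread model of that handshake, with a mutex standing in for host_set->lock (all names are invented for the sketch; this is not libata code):

/* Two-thread model of the ATA_FLAG_NOINTR handshake: the "issue" path
 * sets NOINTR, does its setup outside the lock, then clears NOINTR and
 * "starts DMA" under the same lock the "irq" handler takes, so an early
 * interrupt is ignored rather than completing a half-issued command.
 */
#include <pthread.h>
#include <stdio.h>

#define FLAG_NOINTR 0x1

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int port_flags;
static int dma_started;
static int completions;

static void *irq_handler(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&host_lock);
		/* mirrors the ata_interrupt() check: skip the port while NOINTR is set */
		if (!(port_flags & FLAG_NOINTR) && dma_started) {
			completions++;		/* "complete" the command exactly once */
			dma_started = 0;
			pthread_mutex_unlock(&host_lock);
			return NULL;
		}
		pthread_mutex_unlock(&host_lock);
	}
}

int main(void)
{
	pthread_t irq;

	/* qc_issue path: claim the port before any setup */
	port_flags |= FLAG_NOINTR;
	pthread_create(&irq, NULL, irq_handler, NULL);

	/* ...taskfile/bmdma setup would happen here, outside the lock... */

	/* packet-task path: clear NOINTR and kick DMA atomically w.r.t. the irq */
	pthread_mutex_lock(&host_lock);
	port_flags &= ~FLAG_NOINTR;
	dma_started = 1;
	pthread_mutex_unlock(&host_lock);

	pthread_join(irq, NULL);
	printf("completions = %d\n", completions);	/* always 1, never early */
	return 0;
}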