author     Tejun Heo <tj@kernel.org>           2010-05-10 15:41:39 -0400
committer  Jeff Garzik <jgarzik@redhat.com>    2010-05-19 13:36:46 -0400
commit     fe06e5f9b7c61dc567edace3f4909672067f7d7e
tree       b2242169e8e3b32c63925ed9901fff9d49c26192 /drivers/ata/libata-sff.c
parent     c429137a67b82788d24682153bb9c96501a9ef34
libata-sff: separate out BMDMA EH
Some of the error handling logic in ata_sff_error_handler() and all of
ata_sff_post_internal_cmd() are for BMDMA.  Create
ata_bmdma_error_handler() and ata_bmdma_post_internal_cmd() and move
the BMDMA parts into them.
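
In outline, the new BMDMA handler does the DMA-engine cleanup first and
then falls through to the generic SFF handler.  A condensed sketch of
that control flow, with the BMDMA-specific body reduced to a comment
(the full version is in the diff below):

void ata_bmdma_error_handler(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, ap->link.active_tag);

	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	if (qc && ata_is_dma(qc->tf.protocol)) {
		/* BMDMA-specific (done under ap->lock in the real code):
		 * sample bmdma_status, stop the engine, and promote a
		 * DMA_ERR "timeout" to AC_ERR_HOST_BUS, thawing the port */
	}

	ata_sff_error_handler(ap);	/* common SFF recovery */
}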
While at it, change the DMA protocol check to ata_is_dma(), fix
post_internal_cmd to call ap->ops->bmdma_stop instead of calling
ata_bmdma_stop() directly, and open-code hardreset selection so that
ata_std_error_handler() doesn't have to know about the SFF hardreset.
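
For illustration, the protocol check shrinks from a bmdma_addr guard
plus explicit protocol comparisons to one helper call.  A before/after
sketch taken from the hunks below; note that ata_is_dma() is true for
any DMA-class protocol, not only the two spelled out here:

/* before: guard on bmdma_addr and spell out the DMA protocols */
if (ap->ioaddr.bmdma_addr &&
    qc && (qc->tf.protocol == ATA_PROT_DMA ||
	   qc->tf.protocol == ATAPI_PROT_DMA)) {
	/* stop the BMDMA engine, adjust err_mask, ... */
}

/* after: one helper; no bmdma_addr check in the BMDMA-only path */
if (qc && ata_is_dma(qc->tf.protocol)) {
	/* same BMDMA cleanup */
}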
As these two functions are BMDMA specific, there's no reason to check
for bmdma_addr before calling the bmdma methods when the protocol of
the failed command is DMA.  sata_mv and pata_mpc52xx no longer need to
set .post_internal_cmd to ATA_OP_NULL, and pata_icside and sata_qstor
no longer need to set it to their bmdma_stop routines (see the sketch
after this paragraph).
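
For example, a driver that previously had to neuter the inherited
callback can now simply inherit it.  A minimal sketch with a
hypothetical example_port_ops (not a real driver; the four drivers
named above each differ in detail):

static struct ata_port_operations example_port_ops = {
	.inherits		= &ata_bmdma_port_ops,
	/* .post_internal_cmd	= ATA_OP_NULL,	-- override no longer
	 * needed: ata_bmdma_post_internal_cmd() only calls
	 * ->bmdma_stop() when the failed command used a DMA protocol */
};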
ata_sff_post_internal_cmd() becomes a noop and is removed.
This fixes p3 described in the clean-up-BMDMA-initialization patch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/ata/libata-sff.c')
 drivers/ata/libata-sff.c | 159 +++++++++++++++++++++++++----------------------
 1 file changed, 93 insertions(+), 66 deletions(-)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index e78ad76861f4..aa378c04ed87 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -56,7 +56,6 @@ const struct ata_port_operations ata_sff_port_ops = {
 	.hardreset		= sata_sff_hardreset,
 	.postreset		= ata_sff_postreset,
 	.error_handler		= ata_sff_error_handler,
-	.post_internal_cmd	= ata_sff_post_internal_cmd,
 
 	.sff_dev_select		= ata_sff_dev_select,
 	.sff_check_status	= ata_sff_check_status,
@@ -2361,7 +2360,7 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
 EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
 
 /**
- *	ata_sff_error_handler - Stock error handler for BMDMA controller
+ *	ata_sff_error_handler - Stock error handler for SFF controller
  *	@ap: port to handle error for
  *
  *	Stock error handler for SFF controller. It can handle both
@@ -2378,64 +2377,32 @@ void ata_sff_error_handler(struct ata_port *ap)
 	ata_reset_fn_t hardreset = ap->ops->hardreset;
 	struct ata_queued_cmd *qc;
 	unsigned long flags;
-	bool thaw = false;
 
 	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
 	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
 		qc = NULL;
 
-	/* reset PIO HSM and stop DMA engine */
 	spin_lock_irqsave(ap->lock, flags);
 
-	if (ap->ioaddr.bmdma_addr &&
-	    qc && (qc->tf.protocol == ATA_PROT_DMA ||
-		   qc->tf.protocol == ATAPI_PROT_DMA)) {
-		u8 host_stat;
-
-		host_stat = ap->ops->bmdma_status(ap);
-
-		/* BMDMA controllers indicate host bus error by
-		 * setting DMA_ERR bit and timing out. As it wasn't
-		 * really a timeout event, adjust error mask and
-		 * cancel frozen state.
-		 */
-		if (qc->err_mask == AC_ERR_TIMEOUT
-		    && (host_stat & ATA_DMA_ERR)) {
-			qc->err_mask = AC_ERR_HOST_BUS;
-			thaw = true;
-		}
-
-		ap->ops->bmdma_stop(qc);
-
-		/* if we're gonna thaw, make sure IRQ is clear */
-		if (thaw) {
-			ap->ops->sff_check_status(ap);
-			ap->ops->sff_irq_clear(ap);
-
-			spin_unlock_irqrestore(ap->lock, flags);
-			ata_eh_thaw_port(ap);
-			spin_lock_irqsave(ap->lock, flags);
-		}
-	}
-
-	/* We *MUST* do FIFO draining before we issue a reset as several
-	 * devices helpfully clear their internal state and will lock solid
-	 * if we touch the data port post reset. Pass qc in case anyone wants
-	 * to do different PIO/DMA recovery or has per command fixups
+	/*
+	 * We *MUST* do FIFO draining before we issue a reset as
+	 * several devices helpfully clear their internal state and
+	 * will lock solid if we touch the data port post reset. Pass
+	 * qc in case anyone wants to do different PIO/DMA recovery or
+	 * has per command fixups
 	 */
 	if (ap->ops->sff_drain_fifo)
 		ap->ops->sff_drain_fifo(qc);
 
 	spin_unlock_irqrestore(ap->lock, flags);
 
-	/* PIO and DMA engines have been stopped, perform recovery */
-
-	/* Ignore ata_sff_softreset if ctl isn't accessible and
-	 * built-in hardresets if SCR access isn't available.
-	 */
+	/* ignore ata_sff_softreset if ctl isn't accessible */
 	if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
 		softreset = NULL;
-	if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
+
+	/* ignore built-in hardresets if SCR access is not available */
+	if ((hardreset == sata_std_hardreset ||
+	     hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
 		hardreset = NULL;
 
 	ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
@@ -2444,27 +2411,6 @@ void ata_sff_error_handler(struct ata_port *ap)
 EXPORT_SYMBOL_GPL(ata_sff_error_handler);
 
 /**
- *	ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
- *	@qc: internal command to clean up
- *
- *	LOCKING:
- *	Kernel thread context (may sleep)
- */
-void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	unsigned long flags;
-
-	spin_lock_irqsave(ap->lock, flags);
-
-	if (ap->ioaddr.bmdma_addr)
-		ap->ops->bmdma_stop(qc);
-
-	spin_unlock_irqrestore(ap->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
-
-/**
  *	ata_sff_std_ports - initialize ioaddr with standard port offsets.
  *	@ioaddr: IO address structure to be initialized
  *
@@ -2811,6 +2757,9 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
 const struct ata_port_operations ata_bmdma_port_ops = {
 	.inherits		= &ata_sff_port_ops,
 
+	.error_handler		= ata_bmdma_error_handler,
+	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
+
 	.bmdma_setup		= ata_bmdma_setup,
 	.bmdma_start		= ata_bmdma_start,
 	.bmdma_stop		= ata_bmdma_stop,
@@ -2829,6 +2778,84 @@ const struct ata_port_operations ata_bmdma32_port_ops = {
 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
 
 /**
+ *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
+ *	@ap: port to handle error for
+ *
+ *	Stock error handler for BMDMA controller. It can handle both
+ *	PATA and SATA controllers. Most BMDMA controllers should be
+ *	able to use this EH as-is or with some added handling before
+ *	and after.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_bmdma_error_handler(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	bool thaw = false;
+
+	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
+	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+		qc = NULL;
+
+	/* reset PIO HSM and stop DMA engine */
+	spin_lock_irqsave(ap->lock, flags);
+
+	if (qc && ata_is_dma(qc->tf.protocol)) {
+		u8 host_stat;
+
+		host_stat = ap->ops->bmdma_status(ap);
+
+		/* BMDMA controllers indicate host bus error by
+		 * setting DMA_ERR bit and timing out. As it wasn't
+		 * really a timeout event, adjust error mask and
+		 * cancel frozen state.
+		 */
+		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
+			qc->err_mask = AC_ERR_HOST_BUS;
+			thaw = true;
+		}
+
+		ap->ops->bmdma_stop(qc);
+
+		/* if we're gonna thaw, make sure IRQ is clear */
+		if (thaw) {
+			ap->ops->sff_check_status(ap);
+			ap->ops->sff_irq_clear(ap);
+		}
+	}
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
+	if (thaw)
+		ata_eh_thaw_port(ap);
+
+	ata_sff_error_handler(ap);
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
+
+/**
+ *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
+ *	@qc: internal command to clean up
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned long flags;
+
+	if (ata_is_dma(qc->tf.protocol)) {
+		spin_lock_irqsave(ap->lock, flags);
+		ap->ops->bmdma_stop(qc);
+		spin_unlock_irqrestore(ap->lock, flags);
+	}
+}
+EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
+
+/**
  *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
  *	@qc: Info associated with this ATA transaction.
  *