author     Arik Nemtsov <arik@wizery.com>    2012-06-21 11:10:51 -0400
committer  Luciano Coelho <coelho@ti.com>    2012-06-23 02:32:32 -0400
commit     b5b45b3cbd56162d9612dd76529d7ad9f6be9a56
tree       5ba9aefbd6450d2de9019724a4dbc92abc10bb32 /drivers/net/wireless/ti
parent     c439a1ca3bdc58febf51a388a9930eeba361b410
wlcore: refactor threaded IRQ routine
Separate the threaded IRQ handling routine into two functions.

The outer function takes the mutex and queues recovery work on errors.
It also performs a Tx-path optimization to avoid redundant work.

The inner function is simplified: all direct calls to recovery are
removed, and it assumes the lock is already held. The locked variant
will be reused elsewhere.
Signed-off-by: Arik Nemtsov <arik@wizery.com>
Signed-off-by: Luciano Coelho <coelho@ti.com>
Diffstat (limited to 'drivers/net/wireless/ti')
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 62
1 file changed, 31 insertions(+), 31 deletions(-)
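In outline, the patch splits the old wl1271_irq() into wlcore_irq_locked(), which does the actual interrupt work and reports errors through its return value, and a thin wlcore_irq() wrapper that owns the mutex and the recovery decision. A condensed sketch, abbreviated from the diff below (the interrupt loop and the TX re-queue tail are elided):

```c
/* Condensed sketch; see the full diff below for the real bodies. */
static int wlcore_irq_locked(struct wl1271 *wl)
{
        int ret = 0;

        /* Caller holds wl->mutex. Negative errnos from the helpers
         * (and -EIO on watchdog interrupts) propagate out instead of
         * queueing recovery from inside the loop. */
        /* ... interrupt handling loop ... */

        return ret;
}

static irqreturn_t wlcore_irq(int irq, void *cookie)
{
        struct wl1271 *wl = cookie;
        int ret;

        /* TX might be handled here, avoid redundant work */
        set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
        cancel_work_sync(&wl->tx_work);

        mutex_lock(&wl->mutex);

        ret = wlcore_irq_locked(wl);
        if (ret)
                wl12xx_queue_recovery_work(wl); /* single recovery point */

        /* ... re-queue TX work if frames are still pending, unlock ... */

        return IRQ_HANDLED;
}
```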
```diff
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index e6e665440f81..0c1e0751ecaa 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -494,20 +494,15 @@ static void wl1271_netstack_work(struct work_struct *work)
 
 #define WL1271_IRQ_MAX_LOOPS 256
 
-static irqreturn_t wl1271_irq(int irq, void *cookie)
+static int wlcore_irq_locked(struct wl1271 *wl)
 {
-        int ret;
+        int ret = 0;
         u32 intr;
         int loopcount = WL1271_IRQ_MAX_LOOPS;
-        struct wl1271 *wl = (struct wl1271 *)cookie;
         bool done = false;
         unsigned int defer_count;
         unsigned long flags;
 
-        /* TX might be handled here, avoid redundant work */
-        set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
-        cancel_work_sync(&wl->tx_work);
-
         /*
          * In case edge triggered interrupt must be used, we cannot iterate
          * more than once without introducing race conditions with the hardirq.
@@ -515,8 +510,6 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
         if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
                 loopcount = 1;
 
-        mutex_lock(&wl->mutex);
-
         wl1271_debug(DEBUG_IRQ, "IRQ work");
 
         if (unlikely(wl->state == WL1271_STATE_OFF))
@@ -536,10 +529,8 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
                 smp_mb__after_clear_bit();
 
                 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
-                if (ret < 0) {
-                        wl12xx_queue_recovery_work(wl);
+                if (ret < 0)
                         goto out;
-                }
 
                 wlcore_hw_tx_immediate_compl(wl);
 
@@ -553,7 +544,7 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
                 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
                         wl1271_error("HW watchdog interrupt received! starting recovery.");
                         wl->watchdog_recovery = true;
-                        wl12xx_queue_recovery_work(wl);
+                        ret = -EIO;
 
                         /* restarting the chip. ignore any other interrupt. */
                         goto out;
@@ -563,7 +554,7 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
                         wl1271_error("SW watchdog interrupt received! "
                                      "starting recovery.");
                         wl->watchdog_recovery = true;
-                        wl12xx_queue_recovery_work(wl);
+                        ret = -EIO;
 
                         /* restarting the chip. ignore any other interrupt. */
                         goto out;
@@ -573,10 +564,8 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
                         wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
 
                         ret = wlcore_rx(wl, wl->fw_status_1);
-                        if (ret < 0) {
-                                wl12xx_queue_recovery_work(wl);
+                        if (ret < 0)
                                 goto out;
-                        }
 
                         /* Check if any tx blocks were freed */
                         spin_lock_irqsave(&wl->wl_lock, flags);
@@ -588,20 +577,16 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
                                  * call the work function directly.
                                  */
                                 ret = wlcore_tx_work_locked(wl);
-                                if (ret < 0) {
-                                        wl12xx_queue_recovery_work(wl);
+                                if (ret < 0)
                                         goto out;
-                                }
                         } else {
                                 spin_unlock_irqrestore(&wl->wl_lock, flags);
                         }
 
                         /* check for tx results */
                         ret = wlcore_hw_tx_delayed_compl(wl);
-                        if (ret < 0) {
-                                wl12xx_queue_recovery_work(wl);
+                        if (ret < 0)
                                 goto out;
-                        }
 
                         /* Make sure the deferred queues don't get too long */
                         defer_count = skb_queue_len(&wl->deferred_tx_queue) +
@@ -613,19 +598,15 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
                 if (intr & WL1271_ACX_INTR_EVENT_A) {
                         wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
                         ret = wl1271_event_handle(wl, 0);
-                        if (ret < 0) {
-                                wl12xx_queue_recovery_work(wl);
+                        if (ret < 0)
                                 goto out;
-                        }
                 }
 
                 if (intr & WL1271_ACX_INTR_EVENT_B) {
                         wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
                         ret = wl1271_event_handle(wl, 1);
-                        if (ret < 0) {
-                                wl12xx_queue_recovery_work(wl);
+                        if (ret < 0)
                                 goto out;
-                        }
                 }
 
                 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
@@ -639,6 +620,25 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
         wl1271_ps_elp_sleep(wl);
 
 out:
+        return ret;
+}
+
+static irqreturn_t wlcore_irq(int irq, void *cookie)
+{
+        int ret;
+        unsigned long flags;
+        struct wl1271 *wl = cookie;
+
+        /* TX might be handled here, avoid redundant work */
+        set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
+        cancel_work_sync(&wl->tx_work);
+
+        mutex_lock(&wl->mutex);
+
+        ret = wlcore_irq_locked(wl);
+        if (ret)
+                wl12xx_queue_recovery_work(wl);
+
         spin_lock_irqsave(&wl->wl_lock, flags);
         /* In case TX was not handled here, queue TX work */
         clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
@@ -1748,7 +1748,7 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
 
                 /* don't talk to the HW if recovery is pending */
                 if (!pending_recovery)
-                        wl1271_irq(0, wl);
+                        wlcore_irq(0, wl);
 
                 wlcore_enable_interrupts(wl);
         }
@@ -5489,7 +5489,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
         else
                 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
 
-        ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
+        ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
                                    irqflags,
                                    pdev->name, wl);
         if (ret < 0) {
```
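The commit message notes that the locked variant will be reused elsewhere; the resume path above already calls the new wrapper directly via wlcore_irq(0, wl) instead of waiting for a hardware interrupt. As an illustration of the intended reuse, a hypothetical caller (wlcore_flush_irq_locked() is invented here, not part of this patch) that already holds wl->mutex could drive the handler and keep the recovery decision at the call site:

```c
/* Hypothetical helper, not in this patch: drain pending interrupt
 * work from a context that already holds wl->mutex. */
static int wlcore_flush_irq_locked(struct wl1271 *wl)
{
        int ret;

        lockdep_assert_held(&wl->mutex);

        ret = wlcore_irq_locked(wl);
        if (ret)
                wl12xx_queue_recovery_work(wl);

        return ret;
}
```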