diff options
author | Maciej W. Rozycki <macro@linux-mips.org> | 2007-09-29 01:42:14 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2007-10-10 19:53:55 -0400 |
commit | 0ac49527318bc388a881152d60f49d7951606024 (patch) | |
tree | 64b99a7543c913ff17344259b3938d6a5702ef69 /drivers | |
parent | f7ab697d328b0a417d9e3cb891d45693ea89e83d (diff) |
PHYLIB: IRQ event workqueue handling fixes
Keep track of disable_irq_nosync() invocations and call enable_irq() the
right number of times if work has been cancelled that would include them.
Now that the call to flush_work_keventd() (problematic because of
rtnl_mutex being held) has been replaced by cancel_work_sync() another
issue has arisen and been left unresolved. As the MDIO bus cannot be
accessed from the interrupt context the PHY interrupt handler uses
disable_irq_nosync() to prevent from looping and schedules some work to be
done as a softirq, which, apart from handling the state change of the
originating PHY, is responsible for reenabling the interrupt. Now if the
interrupt line is shared by another device and a call to the softirq
handler has been cancelled, that call to enable_irq() never happens and the
other device cannot use its interrupt anymore as it's stuck disabled.
I decided to use a counter rather than a flag because there may be more
than one call to phy_change() cancelled in the queue -- a real one and a
fake one triggered by free_irq() if DEBUG_SHIRQ is used, if nothing else.
Therefore, because of its nesting property, enable_irq() has to be called the
right number of times to match the number of times disable_irq_nosync() was
called and restore the original state. This DEBUG_SHIRQ feature is also the
reason why free_irq() has to be called before cancel_work_sync().
While at it I updated the comment about phy_stop_interrupts() being called
from `keventd' -- this is no longer relevant as the use of
cancel_work_sync() makes such an approach unnecessary. OTOH a similar
comment referring to flush_scheduled_work() in phy_stop() still applies as
using cancel_work_sync() there would be dangerous.
Checked with checkpatch.pl and at the run time (with and without
DEBUG_SHIRQ).
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Cc: Andy Fleming <afleming@freescale.com>
Cc: Jeff Garzik <jgarzik@pobox.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/phy/phy.c | 24 |
1 file changed, 19 insertions, 5 deletions
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 4da993dfcfd8..5a314edc2744 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * Author: Andy Fleming | 7 | * Author: Andy Fleming |
8 | * | 8 | * |
9 | * Copyright (c) 2004 Freescale Semiconductor, Inc. | 9 | * Copyright (c) 2004 Freescale Semiconductor, Inc. |
10 | * Copyright (c) 2006 Maciej W. Rozycki | 10 | * Copyright (c) 2006, 2007 Maciej W. Rozycki |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify it | 12 | * This program is free software; you can redistribute it and/or modify it |
13 | * under the terms of the GNU General Public License as published by the | 13 | * under the terms of the GNU General Public License as published by the |
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/timer.h> | 35 | #include <linux/timer.h> |
36 | #include <linux/workqueue.h> | 36 | #include <linux/workqueue.h> |
37 | 37 | ||
38 | #include <asm/atomic.h> | ||
38 | #include <asm/io.h> | 39 | #include <asm/io.h> |
39 | #include <asm/irq.h> | 40 | #include <asm/irq.h> |
40 | #include <asm/uaccess.h> | 41 | #include <asm/uaccess.h> |
@@ -562,6 +563,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) | |||
562 | * queue will write the PHY to disable and clear the | 563 | * queue will write the PHY to disable and clear the |
563 | * interrupt, and then reenable the irq line. */ | 564 | * interrupt, and then reenable the irq line. */ |
564 | disable_irq_nosync(irq); | 565 | disable_irq_nosync(irq); |
566 | atomic_inc(&phydev->irq_disable); | ||
565 | 567 | ||
566 | schedule_work(&phydev->phy_queue); | 568 | schedule_work(&phydev->phy_queue); |
567 | 569 | ||
@@ -632,6 +634,7 @@ int phy_start_interrupts(struct phy_device *phydev) | |||
632 | 634 | ||
633 | INIT_WORK(&phydev->phy_queue, phy_change); | 635 | INIT_WORK(&phydev->phy_queue, phy_change); |
634 | 636 | ||
637 | atomic_set(&phydev->irq_disable, 0); | ||
635 | if (request_irq(phydev->irq, phy_interrupt, | 638 | if (request_irq(phydev->irq, phy_interrupt, |
636 | IRQF_SHARED, | 639 | IRQF_SHARED, |
637 | "phy_interrupt", | 640 | "phy_interrupt", |
@@ -662,13 +665,22 @@ int phy_stop_interrupts(struct phy_device *phydev) | |||
662 | if (err) | 665 | if (err) |
663 | phy_error(phydev); | 666 | phy_error(phydev); |
664 | 667 | ||
668 | free_irq(phydev->irq, phydev); | ||
669 | |||
665 | /* | 670 | /* |
666 | * Finish any pending work; we might have been scheduled to be called | 671 | * Cannot call flush_scheduled_work() here as desired because |
667 | * from keventd ourselves, but cancel_work_sync() handles that. | 672 | * of rtnl_lock(), but we do not really care about what would |
673 | * be done, except from enable_irq(), so cancel any work | ||
674 | * possibly pending and take care of the matter below. | ||
668 | */ | 675 | */ |
669 | cancel_work_sync(&phydev->phy_queue); | 676 | cancel_work_sync(&phydev->phy_queue); |
670 | 677 | /* | |
671 | free_irq(phydev->irq, phydev); | 678 | * If work indeed has been cancelled, disable_irq() will have |
679 | * been left unbalanced from phy_interrupt() and enable_irq() | ||
680 | * has to be called so that other devices on the line work. | ||
681 | */ | ||
682 | while (atomic_dec_return(&phydev->irq_disable) >= 0) | ||
683 | enable_irq(phydev->irq); | ||
672 | 684 | ||
673 | return err; | 685 | return err; |
674 | } | 686 | } |
@@ -695,6 +707,7 @@ static void phy_change(struct work_struct *work) | |||
695 | phydev->state = PHY_CHANGELINK; | 707 | phydev->state = PHY_CHANGELINK; |
696 | spin_unlock_bh(&phydev->lock); | 708 | spin_unlock_bh(&phydev->lock); |
697 | 709 | ||
710 | atomic_dec(&phydev->irq_disable); | ||
698 | enable_irq(phydev->irq); | 711 | enable_irq(phydev->irq); |
699 | 712 | ||
700 | /* Reenable interrupts */ | 713 | /* Reenable interrupts */ |
@@ -708,6 +721,7 @@ static void phy_change(struct work_struct *work) | |||
708 | 721 | ||
709 | irq_enable_err: | 722 | irq_enable_err: |
710 | disable_irq(phydev->irq); | 723 | disable_irq(phydev->irq); |
724 | atomic_inc(&phydev->irq_disable); | ||
711 | phy_err: | 725 | phy_err: |
712 | phy_error(phydev); | 726 | phy_error(phydev); |
713 | } | 727 | } |