author     Marcin Slusarz <marcin.slusarz@gmail.com>    2009-03-13 18:41:19 -0400
committer  David S. Miller <davem@davemloft.net>        2009-03-13 18:41:19 -0400
commit     a390d1f379cf821248b735f43d2e1147ebb8241d (patch)
tree       8011cb8a5056055bedb4a9f4948929e2543a727d /drivers
parent     34cd347cec6dba8075ceca06efd4fb0c6574cb75 (diff)
phylib: convert state_queue work to delayed_work
This closes a race in phy_stop_machine: phy_timer can be reprogrammed (from
phy_state_machine) between the del_timer_sync and cancel_work_sync calls.
Without this change the race could lead to a crash if the phy_device were
freed right after phy_stop_machine, because the re-armed timer would fire and
schedule the already-freed work.
Signed-off-by: Marcin Slusarz <marcin.slusarz@gmail.com>
Acked-by: Jean Delvare <khali@linux-fr.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
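To make the window concrete, here is the pre-patch phy_stop_machine() shutdown sequence, taken from the removed lines in the diff below and annotated with where the race sits; the annotations are illustrative and not part of the patch:

void phy_stop_machine(struct phy_device *phydev)
{
	del_timer_sync(&phydev->phy_timer);
	/*
	 * Window: phy_state_machine() may still be running on the
	 * workqueue at this point, and it finishes with mod_timer(),
	 * re-arming the phy_timer that was just deleted.
	 */
	cancel_work_sync(&phydev->state_queue);
	/*
	 * The work is now cancelled, but a re-armed phy_timer can still
	 * fire later and schedule_work() on a phy_device that the caller
	 * may already have freed.
	 */

	/* remainder of the function (lock, state update) is unchanged */
}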
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/phy/phy.c | 41
1 file changed, 11 insertions(+), 30 deletions(-)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e4ede6080c9d..58b73b08dde0 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -414,7 +414,6 @@ EXPORT_SYMBOL(phy_start_aneg);
 
 static void phy_change(struct work_struct *work);
 static void phy_state_machine(struct work_struct *work);
-static void phy_timer(unsigned long data);
 
 /**
  * phy_start_machine - start PHY state machine tracking
@@ -434,11 +433,8 @@ void phy_start_machine(struct phy_device *phydev,
 {
 	phydev->adjust_state = handler;
 
-	INIT_WORK(&phydev->state_queue, phy_state_machine);
-	init_timer(&phydev->phy_timer);
-	phydev->phy_timer.function = &phy_timer;
-	phydev->phy_timer.data = (unsigned long) phydev;
-	mod_timer(&phydev->phy_timer, jiffies + HZ);
+	INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine);
+	schedule_delayed_work(&phydev->state_queue, jiffies + HZ);
 }
 
 /**
@@ -451,8 +447,7 @@ void phy_start_machine(struct phy_device *phydev,
  */
 void phy_stop_machine(struct phy_device *phydev)
 {
-	del_timer_sync(&phydev->phy_timer);
-	cancel_work_sync(&phydev->state_queue);
+	cancel_delayed_work_sync(&phydev->state_queue);
 
 	mutex_lock(&phydev->lock);
 	if (phydev->state > PHY_UP)
@@ -680,11 +675,9 @@ static void phy_change(struct work_struct *work)
 	if (err)
 		goto irq_enable_err;
 
-	/* Stop timer and run the state queue now. The work function for
-	 * state_queue will start the timer up again.
-	 */
-	del_timer(&phydev->phy_timer);
-	schedule_work(&phydev->state_queue);
+	/* reschedule state queue work to run as soon as possible */
+	cancel_delayed_work_sync(&phydev->state_queue);
+	schedule_delayed_work(&phydev->state_queue, 0);
 
 	return;
 
@@ -761,14 +754,13 @@ EXPORT_SYMBOL(phy_start);
 /**
  * phy_state_machine - Handle the state machine
  * @work: work_struct that describes the work to be done
- *
- * Description: Scheduled by the state_queue workqueue each time
- * phy_timer is triggered.
  */
 static void phy_state_machine(struct work_struct *work)
 {
+	struct delayed_work *dwork =
+			container_of(work, struct delayed_work, work);
 	struct phy_device *phydev =
-			container_of(work, struct phy_device, state_queue);
+			container_of(dwork, struct phy_device, state_queue);
 	int needs_aneg = 0;
 	int err = 0;
 
@@ -946,17 +938,6 @@ static void phy_state_machine(struct work_struct *work)
 	if (err < 0)
 		phy_error(phydev);
 
-	mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
-}
-
-/* PHY timer which schedules the state machine work */
-static void phy_timer(unsigned long data)
-{
-	struct phy_device *phydev = (struct phy_device *)data;
-
-	/*
-	 * PHY I/O operations can potentially sleep so we ensure that
-	 * it's done from a process context
-	 */
-	schedule_work(&phydev->state_queue);
-}
+	schedule_delayed_work(&phydev->state_queue,
+			jiffies + PHY_STATE_TIME * HZ);
 }
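For orientation only, a minimal, self-contained sketch of the delayed_work pattern the driver is converted to. The struct and function names here (poller, poller_fn, poller_start, poller_stop) are made up for illustration and are not phylib code; the point is that a single delayed_work carries both the timer and the work item, so one cancel_delayed_work_sync() call in the stop path removes the pending timer and waits for a running handler, leaving nothing that can re-arm afterwards.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct poller {
	struct delayed_work state_queue;	/* timer + work item in one object */
};

static void poller_fn(struct work_struct *work)
{
	struct delayed_work *dwork =
			container_of(work, struct delayed_work, work);
	struct poller *p = container_of(dwork, struct poller, state_queue);

	pr_debug("polling %p\n", p);		/* stand-in for the real state machine */

	/* re-arm ourselves; the second argument is a relative delay in jiffies */
	schedule_delayed_work(&p->state_queue, HZ);
}

static void poller_start(struct poller *p)
{
	INIT_DELAYED_WORK(&p->state_queue, poller_fn);
	schedule_delayed_work(&p->state_queue, HZ);
}

static void poller_stop(struct poller *p)
{
	/* one call: deletes the pending timer and waits for a running poller_fn() */
	cancel_delayed_work_sync(&p->state_queue);
}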