Diffstat (limited to 'drivers/net/phy/phy.c')
-rw-r--r--  drivers/net/phy/phy.c | 440
1 file changed, 188 insertions(+), 252 deletions(-)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 98434b84f041..76d96b9ebcdb 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1,7 +1,4 @@
-/*
- * drivers/net/phy/phy.c
- *
- * Framework for configuring and reading PHY devices
+/* Framework for configuring and reading PHY devices
  * Based on code in sungem_phy.c and gianfar_phy.c
  *
  * Author: Andy Fleming
@@ -23,7 +20,6 @@
 #include <linux/errno.h>
 #include <linux/unistd.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -36,11 +32,11 @@
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/mdio.h>
-
+#include <linux/io.h>
+#include <linux/uaccess.h>
 #include <linux/atomic.h>
-#include <asm/io.h>
+
 #include <asm/irq.h>
-#include <asm/uaccess.h>
 
 /**
  * phy_print_status - Convenience function to print out the current phy status
@@ -48,13 +44,14 @@
  */
 void phy_print_status(struct phy_device *phydev)
 {
-	if (phydev->link)
+	if (phydev->link) {
 		pr_info("%s - Link is Up - %d/%s\n",
 			dev_name(&phydev->dev),
 			phydev->speed,
 			DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
-	else
+	} else {
 		pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
+	}
 }
 EXPORT_SYMBOL(phy_print_status);
 
@@ -69,12 +66,10 @@ EXPORT_SYMBOL(phy_print_status);
  */
 static int phy_clear_interrupt(struct phy_device *phydev)
 {
-	int err = 0;
-
 	if (phydev->drv->ack_interrupt)
-		err = phydev->drv->ack_interrupt(phydev);
+		return phydev->drv->ack_interrupt(phydev);
 
-	return err;
+	return 0;
 }
 
 /**
@@ -86,13 +81,11 @@ static int phy_clear_interrupt(struct phy_device *phydev)
  */
 static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
 {
-	int err = 0;
-
 	phydev->interrupts = interrupts;
 	if (phydev->drv->config_intr)
-		err = phydev->drv->config_intr(phydev);
+		return phydev->drv->config_intr(phydev);
 
-	return err;
+	return 0;
 }
 
 
@@ -106,15 +99,14 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
  */
 static inline int phy_aneg_done(struct phy_device *phydev)
 {
-	int retval;
-
-	retval = phy_read(phydev, MII_BMSR);
+	int retval = phy_read(phydev, MII_BMSR);
 
 	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
 }
 
 /* A structure for mapping a particular speed and duplex
- * combination to a particular SUPPORTED and ADVERTISED value */
+ * combination to a particular SUPPORTED and ADVERTISED value
+ */
 struct phy_setting {
 	int speed;
 	int duplex;
@@ -172,13 +164,12 @@ static const struct phy_setting settings[] = {
  * of that setting. Returns the index of the last setting if
  * none of the others match.
  */
-static inline int phy_find_setting(int speed, int duplex)
+static inline unsigned int phy_find_setting(int speed, int duplex)
 {
-	int idx = 0;
+	unsigned int idx = 0;
 
 	while (idx < ARRAY_SIZE(settings) &&
-	       (settings[idx].speed != speed ||
-	       settings[idx].duplex != duplex))
+	       (settings[idx].speed != speed || settings[idx].duplex != duplex))
 		idx++;
 
 	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
@@ -194,7 +185,7 @@ static inline int phy_find_setting(int speed, int duplex)
  * the mask in features. Returns the index of the last setting
 * if nothing else matches.
 */
-static inline int phy_find_valid(int idx, u32 features)
+static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
 {
 	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
 		idx++;
@@ -213,7 +204,7 @@ static inline int phy_find_valid(int idx, u32 features)
 static void phy_sanitize_settings(struct phy_device *phydev)
 {
 	u32 features = phydev->supported;
-	int idx;
+	unsigned int idx;
 
 	/* Sanitize settings based on PHY capabilities */
 	if ((features & SUPPORTED_Autoneg) == 0)
@@ -245,8 +236,7 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 	if (cmd->phy_address != phydev->addr)
 		return -EINVAL;
 
-	/* We make sure that we don't pass unsupported
-	 * values in to the PHY */
+	/* We make sure that we don't pass unsupported values in to the PHY */
 	cmd->advertising &= phydev->supported;
 
 	/* Verify the settings we care about. */
@@ -289,6 +279,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 	cmd->supported = phydev->supported;
 
 	cmd->advertising = phydev->advertising;
+	cmd->lp_advertising = phydev->lp_advertising;
 
 	ethtool_cmd_speed_set(cmd, phydev->speed);
 	cmd->duplex = phydev->duplex;
@@ -312,8 +303,7 @@ EXPORT_SYMBOL(phy_ethtool_gset);
  * PHYCONTROL layer. It changes registers without regard to
  * current state. Use at own risk.
  */
-int phy_mii_ioctl(struct phy_device *phydev,
-		  struct ifreq *ifr, int cmd)
+int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
 {
 	struct mii_ioctl_data *mii_data = if_mii(ifr);
 	u16 val = mii_data->val_in;
@@ -326,25 +316,24 @@ int phy_mii_ioctl(struct phy_device *phydev,
 	case SIOCGMIIREG:
 		mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
 						 mii_data->reg_num);
-		break;
+		return 0;
 
 	case SIOCSMIIREG:
 		if (mii_data->phy_id == phydev->addr) {
-			switch(mii_data->reg_num) {
+			switch (mii_data->reg_num) {
 			case MII_BMCR:
-				if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
+				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
					phydev->autoneg = AUTONEG_DISABLE;
 				else
 					phydev->autoneg = AUTONEG_ENABLE;
-				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
+				if (!phydev->autoneg && (val & BMCR_FULLDPLX))
 					phydev->duplex = DUPLEX_FULL;
 				else
 					phydev->duplex = DUPLEX_HALF;
-				if ((!phydev->autoneg) &&
-				    (val & BMCR_SPEED1000))
+				if (!phydev->autoneg && (val & BMCR_SPEED1000))
 					phydev->speed = SPEED_1000;
-				else if ((!phydev->autoneg) &&
+				else if (!phydev->autoneg &&
 					 (val & BMCR_SPEED100))
 					phydev->speed = SPEED_100;
 				break;
 			case MII_ADVERTISE:
@@ -360,12 +349,9 @@ int phy_mii_ioctl(struct phy_device *phydev,
 				   mii_data->reg_num, val);
 
 		if (mii_data->reg_num == MII_BMCR &&
-		    val & BMCR_RESET &&
-		    phydev->drv->config_init) {
-			phy_scan_fixups(phydev);
-			phydev->drv->config_init(phydev);
-		}
-		break;
+		    val & BMCR_RESET)
+			return phy_init_hw(phydev);
+		return 0;
 
 	case SIOCSHWTSTAMP:
 		if (phydev->drv->hwtstamp)
@@ -375,8 +361,6 @@ int phy_mii_ioctl(struct phy_device *phydev,
 	default:
 		return -EOPNOTSUPP;
 	}
-
-	return 0;
 }
 EXPORT_SYMBOL(phy_mii_ioctl);
 
@@ -399,7 +383,6 @@ int phy_start_aneg(struct phy_device *phydev)
 		phy_sanitize_settings(phydev);
 
 	err = phydev->drv->config_aneg(phydev);
-
 	if (err < 0)
 		goto out_unlock;
 
@@ -419,25 +402,18 @@ out_unlock:
 }
 EXPORT_SYMBOL(phy_start_aneg);
 
-
 /**
  * phy_start_machine - start PHY state machine tracking
  * @phydev: the phy_device struct
- * @handler: callback function for state change notifications
  *
  * Description: The PHY infrastructure can run a state machine
  * which tracks whether the PHY is starting up, negotiating,
  * etc. This function starts the timer which tracks the state
- * of the PHY. If you want to be notified when the state changes,
- * pass in the callback @handler, otherwise, pass NULL. If you
- * want to maintain your own state machine, do not call this
- * function.
+ * of the PHY. If you want to maintain your own state machine,
+ * do not call this function.
  */
-void phy_start_machine(struct phy_device *phydev,
-		       void (*handler)(struct net_device *))
+void phy_start_machine(struct phy_device *phydev)
 {
-	phydev->adjust_state = handler;
-
 	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
 }
 
@@ -457,8 +433,6 @@ void phy_stop_machine(struct phy_device *phydev)
 	if (phydev->state > PHY_UP)
 		phydev->state = PHY_UP;
 	mutex_unlock(&phydev->lock);
-
-	phydev->adjust_state = NULL;
 }
 
 /**
@@ -495,7 +469,8 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
 	/* The MDIO bus is not allowed to be written in interrupt
 	 * context, so we need to disable the irq here. A work
 	 * queue will write the PHY to disable and clear the
-	 * interrupt, and then reenable the irq line. */
+	 * interrupt, and then reenable the irq line.
+	 */
 	disable_irq_nosync(irq);
 	atomic_inc(&phydev->irq_disable);
 
@@ -510,16 +485,12 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
  */
 static int phy_enable_interrupts(struct phy_device *phydev)
 {
-	int err;
-
-	err = phy_clear_interrupt(phydev);
+	int err = phy_clear_interrupt(phydev);
 
 	if (err < 0)
 		return err;
 
-	err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
-
-	return err;
+	return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
 }
 
 /**
@@ -532,13 +503,11 @@ static int phy_disable_interrupts(struct phy_device *phydev)
 
 	/* Disable PHY interrupts */
 	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
-
 	if (err)
 		goto phy_err;
 
 	/* Clear the interrupt */
 	err = phy_clear_interrupt(phydev);
-
 	if (err)
 		goto phy_err;
 
@@ -562,8 +531,6 @@ phy_err:
  */
 int phy_start_interrupts(struct phy_device *phydev)
 {
-	int err = 0;
-
 	atomic_set(&phydev->irq_disable, 0);
 	if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
 			phydev) < 0) {
@@ -573,9 +540,7 @@ int phy_start_interrupts(struct phy_device *phydev)
 		return 0;
 	}
 
-	err = phy_enable_interrupts(phydev);
-
-	return err;
+	return phy_enable_interrupts(phydev);
 }
 EXPORT_SYMBOL(phy_start_interrupts);
 
@@ -585,24 +550,20 @@ EXPORT_SYMBOL(phy_start_interrupts);
  */
 int phy_stop_interrupts(struct phy_device *phydev)
 {
-	int err;
-
-	err = phy_disable_interrupts(phydev);
+	int err = phy_disable_interrupts(phydev);
 
 	if (err)
 		phy_error(phydev);
 
 	free_irq(phydev->irq, phydev);
 
-	/*
-	 * Cannot call flush_scheduled_work() here as desired because
+	/* Cannot call flush_scheduled_work() here as desired because
 	 * of rtnl_lock(), but we do not really care about what would
 	 * be done, except from enable_irq(), so cancel any work
 	 * possibly pending and take care of the matter below.
 	 */
 	cancel_work_sync(&phydev->phy_queue);
-	/*
-	 * If work indeed has been cancelled, disable_irq() will have
+	/* If work indeed has been cancelled, disable_irq() will have
 	 * been left unbalanced from phy_interrupt() and enable_irq()
 	 * has to be called so that other devices on the line work.
 	 */
@@ -613,14 +574,12 @@ int phy_stop_interrupts(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(phy_stop_interrupts);
 
-
 /**
  * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
 void phy_change(struct work_struct *work)
 {
-	int err;
 	struct phy_device *phydev =
 		container_of(work, struct phy_device, phy_queue);
 
@@ -628,9 +587,7 @@ void phy_change(struct work_struct *work)
 	    !phydev->drv->did_interrupt(phydev))
 		goto ignore;
 
-	err = phy_disable_interrupts(phydev);
-
-	if (err)
+	if (phy_disable_interrupts(phydev))
 		goto phy_err;
 
 	mutex_lock(&phydev->lock);
@@ -642,16 +599,13 @@ void phy_change(struct work_struct *work)
 	enable_irq(phydev->irq);
 
 	/* Reenable interrupts */
-	if (PHY_HALTED != phydev->state)
-		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
-
-	if (err)
+	if (PHY_HALTED != phydev->state &&
+	    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
 		goto irq_enable_err;
 
 	/* reschedule state queue work to run as soon as possible */
 	cancel_delayed_work_sync(&phydev->state_queue);
 	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
-
 	return;
 
 ignore:
@@ -690,13 +644,12 @@ void phy_stop(struct phy_device *phydev)
 out_unlock:
 	mutex_unlock(&phydev->lock);
 
-	/*
-	 * Cannot call flush_scheduled_work() here as desired because
+	/* Cannot call flush_scheduled_work() here as desired because
 	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
 	 * will not reenable interrupts.
 	 */
 }
-
+EXPORT_SYMBOL(phy_stop);
 
 /**
  * phy_start - start or restart a PHY device
@@ -713,20 +666,19 @@ void phy_start(struct phy_device *phydev)
 	mutex_lock(&phydev->lock);
 
 	switch (phydev->state) {
-		case PHY_STARTING:
-			phydev->state = PHY_PENDING;
-			break;
-		case PHY_READY:
-			phydev->state = PHY_UP;
-			break;
-		case PHY_HALTED:
-			phydev->state = PHY_RESUMING;
-		default:
-			break;
+	case PHY_STARTING:
+		phydev->state = PHY_PENDING;
+		break;
+	case PHY_READY:
+		phydev->state = PHY_UP;
+		break;
+	case PHY_HALTED:
+		phydev->state = PHY_RESUMING;
+	default:
+		break;
 	}
 	mutex_unlock(&phydev->lock);
 }
-EXPORT_SYMBOL(phy_stop);
 EXPORT_SYMBOL(phy_start);
 
 /**
@@ -738,160 +690,132 @@ void phy_state_machine(struct work_struct *work)
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct phy_device *phydev =
 		container_of(dwork, struct phy_device, state_queue);
-	int needs_aneg = 0;
+	int needs_aneg = 0, do_suspend = 0;
 	int err = 0;
 
 	mutex_lock(&phydev->lock);
 
-	if (phydev->adjust_state)
-		phydev->adjust_state(phydev->attached_dev);
+	switch (phydev->state) {
+	case PHY_DOWN:
+	case PHY_STARTING:
+	case PHY_READY:
+	case PHY_PENDING:
+		break;
+	case PHY_UP:
+		needs_aneg = 1;
 
-	switch(phydev->state) {
-		case PHY_DOWN:
-		case PHY_STARTING:
-		case PHY_READY:
-		case PHY_PENDING:
-			break;
-		case PHY_UP:
-			needs_aneg = 1;
+		phydev->link_timeout = PHY_AN_TIMEOUT;
 
-			phydev->link_timeout = PHY_AN_TIMEOUT;
+		break;
+	case PHY_AN:
+		err = phy_read_status(phydev);
+		if (err < 0)
+			break;
 
+		/* If the link is down, give up on negotiation for now */
+		if (!phydev->link) {
+			phydev->state = PHY_NOLINK;
+			netif_carrier_off(phydev->attached_dev);
+			phydev->adjust_link(phydev->attached_dev);
 			break;
-		case PHY_AN:
-			err = phy_read_status(phydev);
+		}
 
-			if (err < 0)
-				break;
+		/* Check if negotiation is done. Break if there's an error */
+		err = phy_aneg_done(phydev);
+		if (err < 0)
+			break;
 
-			/* If the link is down, give up on
-			 * negotiation for now */
-			if (!phydev->link) {
-				phydev->state = PHY_NOLINK;
-				netif_carrier_off(phydev->attached_dev);
-				phydev->adjust_link(phydev->attached_dev);
-				break;
-			}
+		/* If AN is done, we're running */
+		if (err > 0) {
+			phydev->state = PHY_RUNNING;
+			netif_carrier_on(phydev->attached_dev);
+			phydev->adjust_link(phydev->attached_dev);

-			/* Check if negotiation is done. Break
-			 * if there's an error */
-			err = phy_aneg_done(phydev);
-			if (err < 0)
+		} else if (0 == phydev->link_timeout--) {
+			needs_aneg = 1;
+			/* If we have the magic_aneg bit, we try again */
+			if (phydev->drv->flags & PHY_HAS_MAGICANEG)
 				break;
-
-			/* If AN is done, we're running */
-			if (err > 0) {
-				phydev->state = PHY_RUNNING;
-				netif_carrier_on(phydev->attached_dev);
-				phydev->adjust_link(phydev->attached_dev);
-
-			} else if (0 == phydev->link_timeout--) {
-				needs_aneg = 1;
-				/* If we have the magic_aneg bit,
-				 * we try again */
-				if (phydev->drv->flags & PHY_HAS_MAGICANEG)
-					break;
-			}
+		}
+		break;
+	case PHY_NOLINK:
+		err = phy_read_status(phydev);
+		if (err)
 			break;
-		case PHY_NOLINK:
-			err = phy_read_status(phydev);
-
-			if (err)
-				break;
 
-			if (phydev->link) {
-				phydev->state = PHY_RUNNING;
-				netif_carrier_on(phydev->attached_dev);
-				phydev->adjust_link(phydev->attached_dev);
-			}
+		if (phydev->link) {
+			phydev->state = PHY_RUNNING;
+			netif_carrier_on(phydev->attached_dev);
+			phydev->adjust_link(phydev->attached_dev);
+		}
+		break;
+	case PHY_FORCING:
+		err = genphy_update_link(phydev);
+		if (err)
 			break;
-		case PHY_FORCING:
-			err = genphy_update_link(phydev);
-
-			if (err)
-				break;
 
-			if (phydev->link) {
-				phydev->state = PHY_RUNNING;
-				netif_carrier_on(phydev->attached_dev);
-			} else {
-				if (0 == phydev->link_timeout--)
-					needs_aneg = 1;
-			}
+		if (phydev->link) {
+			phydev->state = PHY_RUNNING;
+			netif_carrier_on(phydev->attached_dev);
+		} else {
+			if (0 == phydev->link_timeout--)
+				needs_aneg = 1;
+		}
 
-			phydev->adjust_link(phydev->attached_dev);
-			break;
-		case PHY_RUNNING:
-			/* Only register a CHANGE if we are
-			 * polling or ignoring interrupts
-			 */
-			if (!phy_interrupt_is_valid(phydev))
-				phydev->state = PHY_CHANGELINK;
+		phydev->adjust_link(phydev->attached_dev);
+		break;
+	case PHY_RUNNING:
+		/* Only register a CHANGE if we are
+		 * polling or ignoring interrupts
+		 */
+		if (!phy_interrupt_is_valid(phydev))
+			phydev->state = PHY_CHANGELINK;
+		break;
+	case PHY_CHANGELINK:
+		err = phy_read_status(phydev);
+		if (err)
 			break;
-		case PHY_CHANGELINK:
-			err = phy_read_status(phydev);
 
-			if (err)
-				break;
+		if (phydev->link) {
+			phydev->state = PHY_RUNNING;
+			netif_carrier_on(phydev->attached_dev);
+		} else {
+			phydev->state = PHY_NOLINK;
+			netif_carrier_off(phydev->attached_dev);
+		}
 
-			if (phydev->link) {
-				phydev->state = PHY_RUNNING;
-				netif_carrier_on(phydev->attached_dev);
-			} else {
-				phydev->state = PHY_NOLINK;
-				netif_carrier_off(phydev->attached_dev);
-			}
+		phydev->adjust_link(phydev->attached_dev);
 
+		if (phy_interrupt_is_valid(phydev))
+			err = phy_config_interrupt(phydev,
+						   PHY_INTERRUPT_ENABLED);
+		break;
+	case PHY_HALTED:
+		if (phydev->link) {
+			phydev->link = 0;
+			netif_carrier_off(phydev->attached_dev);
 			phydev->adjust_link(phydev->attached_dev);
-
-			if (phy_interrupt_is_valid(phydev))
-				err = phy_config_interrupt(phydev,
-						PHY_INTERRUPT_ENABLED);
-			break;
-		case PHY_HALTED:
-			if (phydev->link) {
-				phydev->link = 0;
-				netif_carrier_off(phydev->attached_dev);
-				phydev->adjust_link(phydev->attached_dev);
-			}
+			do_suspend = 1;
+		}
+		break;
+	case PHY_RESUMING:
+		err = phy_clear_interrupt(phydev);
+		if (err)
 			break;
-		case PHY_RESUMING:
-
-			err = phy_clear_interrupt(phydev);
 
-			if (err)
-				break;
-
-			err = phy_config_interrupt(phydev,
-					PHY_INTERRUPT_ENABLED);
+		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
+		if (err)
+			break;
 
-			if (err)
+		if (AUTONEG_ENABLE == phydev->autoneg) {
+			err = phy_aneg_done(phydev);
+			if (err < 0)
 				break;
 
-			if (AUTONEG_ENABLE == phydev->autoneg) {
-				err = phy_aneg_done(phydev);
-				if (err < 0)
-					break;
-
-				/* err > 0 if AN is done.
-				 * Otherwise, it's 0, and we're
-				 * still waiting for AN */
-				if (err > 0) {
-					err = phy_read_status(phydev);
-					if (err)
-						break;
-
-					if (phydev->link) {
-						phydev->state = PHY_RUNNING;
-						netif_carrier_on(phydev->attached_dev);
-					} else
-						phydev->state = PHY_NOLINK;
-					phydev->adjust_link(phydev->attached_dev);
-				} else {
-					phydev->state = PHY_AN;
-					phydev->link_timeout = PHY_AN_TIMEOUT;
-				}
-			} else {
+			/* err > 0 if AN is done.
+			 * Otherwise, it's 0, and we're still waiting for AN
+			 */
+			if (err > 0) {
 				err = phy_read_status(phydev);
 				if (err)
 					break;
@@ -899,11 +823,28 @@ void phy_state_machine(struct work_struct *work)
 				if (phydev->link) {
 					phydev->state = PHY_RUNNING;
 					netif_carrier_on(phydev->attached_dev);
-				} else
+				} else {
 					phydev->state = PHY_NOLINK;
+				}
 				phydev->adjust_link(phydev->attached_dev);
+			} else {
+				phydev->state = PHY_AN;
+				phydev->link_timeout = PHY_AN_TIMEOUT;
 			}
-			break;
+		} else {
+			err = phy_read_status(phydev);
+			if (err)
+				break;
+
+			if (phydev->link) {
+				phydev->state = PHY_RUNNING;
+				netif_carrier_on(phydev->attached_dev);
+			} else {
+				phydev->state = PHY_NOLINK;
+			}
+			phydev->adjust_link(phydev->attached_dev);
+		}
+		break;
 	}
 
 	mutex_unlock(&phydev->lock);
@@ -911,11 +852,14 @@ void phy_state_machine(struct work_struct *work)
 	if (needs_aneg)
 		err = phy_start_aneg(phydev);
 
+	if (do_suspend)
+		phy_suspend(phydev);
+
 	if (err < 0)
 		phy_error(phydev);
 
 	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
 			   PHY_STATE_TIME * HZ);
 }
 
 void phy_mac_interrupt(struct phy_device *phydev, int new_link)
@@ -957,14 +901,10 @@ static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
 static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
 				 int addr)
 {
-	u32 ret;
-
 	mmd_phy_indirect(bus, prtad, devad, addr);
 
 	/* Read the content of the MMD's selected register */
-	ret = bus->read(bus, addr, MII_MMD_DATA);
-
-	return ret;
+	return bus->read(bus, addr, MII_MMD_DATA);
 }
 
 /**
@@ -1004,8 +944,6 @@ static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
  */
 int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 {
-	int ret = -EPROTONOSUPPORT;
-
 	/* According to 802.3az,the EEE is supported only in full duplex-mode.
 	 * Also EEE feature is active when core is operating with MII, GMII
 	 * or RGMII.
@@ -1016,7 +954,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 	    (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
 		int eee_lp, eee_cap, eee_adv;
 		u32 lp, cap, adv;
-		int idx, status;
+		int status;
+		unsigned int idx;
 
 		/* Read phy status to properly get the right settings */
 		status = phy_read_status(phydev);
@@ -1031,7 +970,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 
 		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
 		if (!cap)
-			goto eee_exit;
+			return -EPROTONOSUPPORT;
 
 		/* Check which link settings negotiated and verify it in
 		 * the EEE advertising registers.
@@ -1050,7 +989,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
 		idx = phy_find_setting(phydev->speed, phydev->duplex);
 		if (!(lp & adv & settings[idx].setting))
-			goto eee_exit;
+			return -EPROTONOSUPPORT;
 
 		if (clk_stop_enable) {
 			/* Configure the PHY to stop receiving xMII
@@ -1067,11 +1006,10 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 					       MDIO_MMD_PCS, phydev->addr, val);
 		}
 
-		ret = 0; /* EEE supported */
+		return 0; /* EEE supported */
 	}
 
-eee_exit:
-	return ret;
+	return -EPROTONOSUPPORT;
 }
 EXPORT_SYMBOL(phy_init_eee);
 
@@ -1086,7 +1024,6 @@ int phy_get_eee_err(struct phy_device *phydev)
 {
 	return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
 				     MDIO_MMD_PCS, phydev->addr);
-
 }
 EXPORT_SYMBOL(phy_get_eee_err);
 
@@ -1136,9 +1073,8 @@ EXPORT_SYMBOL(phy_ethtool_get_eee);
  */
 int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
 {
-	int val;
+	int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
 
-	val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
 	phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
 			       phydev->addr, val);
 