author     Dave Lim <dlim@nvidia.com>        2016-07-25 21:22:11 -0400
committer  Ashutosh Jha <ajha@nvidia.com>    2016-08-01 14:54:06 -0400
commit     19031972b862aeec92d29c208c6249c3034466dd (patch)
tree       4cee6df9c296f8e1000e50514cc3c220bc80719d /drivers/net
parent     e54d8a2336730dd234c814432fd5d1323568e037 (diff)
net: eqos: Improve sc7 resume time
Modify the PHY read/write routines to minimize delays.
Modify the wait loops for CAR reset and MTL reset to
minimize delays.
Instead of hardcoding the MDC clock range, derive it
from the axi_cbb clock rate.
Bug 200180215
Change-Id: I4d1f5bf95948bd012de11b04acae70c94c8adc2a
Signed-off-by: Dave Lim <dlim@nvidia.com>
Reviewed-on: http://git-master/r/1190510
Reviewed-by: Anirban Ray <aray@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Narayan Reddy <narayanr@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
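
At its core, the timing change is a restructuring of the driver's busy-wait
loops. The sketch below is illustrative only and is not code from the patch:
the old loops slept with mdelay(1) before each status check, so even an idle
MDIO bus cost at least a millisecond per access, while the new loops check
first and back off in 10 us steps under a comparable overall timeout budget.
read_status_reg() and BUSY_BIT are hypothetical stand-ins for the driver's
register accessors.

static int poll_until_idle_sketch(void)
{
        unsigned long vy_count = 0;
        /* ~0.5 s budget: 50000 iterations x 10 us */
        unsigned long retry_cnt = 500 * 100;
        unsigned long status;

        while (vy_count < retry_cnt) {
                status = read_status_reg();     /* hypothetical accessor */
                if (!(status & BUSY_BIT))       /* hypothetical bit mask */
                        return 0;       /* idle: return with no forced sleep */
                vy_count++;
                udelay(10);
        }
        return -1;                      /* timed out */
}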
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/nvidia/eqos/dev.c     | 179
-rw-r--r--  drivers/net/ethernet/nvidia/eqos/init.c    |  56
-rw-r--r--  drivers/net/ethernet/nvidia/eqos/mdio.c    |  21
-rw-r--r--  drivers/net/ethernet/nvidia/eqos/yheader.h |   5
4 files changed, 127 insertions, 134 deletions
diff --git a/drivers/net/ethernet/nvidia/eqos/dev.c b/drivers/net/ethernet/nvidia/eqos/dev.c
index b2118d06a..2e62ca8b6 100644
--- a/drivers/net/ethernet/nvidia/eqos/dev.c
+++ b/drivers/net/ethernet/nvidia/eqos/dev.c
@@ -2465,63 +2465,54 @@ static INT set_promiscuous_mode(void)
  * \retval -1 Failure
  */
 
-static INT write_phy_regs(INT phy_id, INT phy_reg, INT phy_reg_data)
+static INT mdio_poll(void)
 {
-        ULONG retry_cnt = 1000;
-        ULONG vy_count;
-        volatile ULONG mac_gmiiar;
+        /* half sec timeout */
+        ULONG retry_cnt = (500 * 100);
+        ULONG vy_count = 0;
 
-        /* wait for any previous MII read/write operation to complete */
+        ULONG mac_gmiiar;
 
-        /*Poll Until Poll Condition */
-        vy_count = 0;
-        while (1) {
-                if (vy_count > retry_cnt) {
-                        return -Y_FAILURE;
-                } else {
-                        vy_count++;
-                        mdelay(1);
-                }
+        while (vy_count < retry_cnt) {
                 MAC_GMIIAR_RD(mac_gmiiar);
-                if (GET_VALUE
-                    (mac_gmiiar, MAC_GMIIAR_GB_LPOS, MAC_GMIIAR_GB_HPOS) == 0) {
-                        break;
-                }
+                if (GET_VALUE(mac_gmiiar, MAC_GMIIAR_GB_LPOS,
+                              MAC_GMIIAR_GB_HPOS) == 0)
+                        return Y_SUCCESS;
+                vy_count++;
+                udelay(10);
         }
+        return -Y_FAILURE;
+}
+
+static INT write_phy_regs(INT phy_id, INT phy_reg, INT phy_reg_data,
+                          INT mdc_cr)
+{
+        ULONG mac_gmiiar;
+
+        /* wait for any previous MII read/write operation to complete */
+        if (mdio_poll() == -Y_FAILURE)
+                return -Y_FAILURE;
+
         /* write the data */
         MAC_GMIIDR_GD_WR(phy_reg_data);
+
         /* initiate the MII write operation by updating desired */
         /* phy address/id (0 - 31) */
         /* phy register offset */
         /* CSR Clock Range (20 - 35MHz) */
         /* Select write operation */
         /* set busy bit */
-        MAC_GMIIAR_RD(mac_gmiiar);
-        mac_gmiiar = mac_gmiiar & (ULONG) (0x12);
-        mac_gmiiar =
-            mac_gmiiar | ((phy_id) << 21) | ((phy_reg) << 16) | ((0x2) << 8)
-            | ((0x1) << 2) | ((0x1) << 0);
+        mdc_cr <<= 8;
+        mac_gmiiar = ((phy_id) << 21) | ((phy_reg) << 16) | mdc_cr |
+                     ((0x1) << 2) | ((0x1) << 0);
         MAC_GMIIAR_WR(mac_gmiiar);
 
-        /*DELAY IMPLEMENTATION USING udelay() */
-        udelay(10);
-        /* wait for MII write operation to complete */
+        /* delay some to allow mac to set busy bit */
+        udelay(2);
 
-        /*Poll Until Poll Condition */
-        vy_count = 0;
-        while (1) {
-                if (vy_count > retry_cnt) {
-                        return -Y_FAILURE;
-                } else {
-                        vy_count++;
-                        mdelay(1);
-                }
-                MAC_GMIIAR_RD(mac_gmiiar);
-                if (GET_VALUE
-                    (mac_gmiiar, MAC_GMIIAR_GB_LPOS, MAC_GMIIAR_GB_HPOS) == 0) {
-                        break;
-                }
-        }
+        /* wait for MII write operation to complete */
+        if (mdio_poll() == -Y_FAILURE)
+                return -Y_FAILURE;
 
         return Y_SUCCESS;
 }
@@ -2536,62 +2527,36 @@ static INT write_phy_regs(INT phy_id, INT phy_reg, INT phy_reg_data)
  * \retval -1 Failure
  */
 
-static INT read_phy_regs(INT phy_id, INT phy_reg, INT *phy_reg_data)
+static INT read_phy_regs(INT phy_id, INT phy_reg, INT *phy_reg_data,
+                         INT mdc_cr)
 {
         ULONG retry_cnt = 1000;
         ULONG vy_count;
-        volatile ULONG mac_gmiiar;
+        ULONG mac_gmiiar;
         ULONG mac_gmiidr;
 
         /* wait for any previous MII read/write operation to complete */
+        if (mdio_poll() == -Y_FAILURE)
+                return -Y_FAILURE;
 
-        /*Poll Until Poll Condition */
-        vy_count = 0;
-        while (1) {
-                if (vy_count > retry_cnt) {
-                        return -Y_FAILURE;
-                } else {
-                        vy_count++;
-                        mdelay(1);
-                }
-                MAC_GMIIAR_RD(mac_gmiiar);
-                if (GET_VALUE
-                    (mac_gmiiar, MAC_GMIIAR_GB_LPOS, MAC_GMIIAR_GB_HPOS) == 0) {
-                        break;
-                }
-        }
         /* initiate the MII read operation by updating desired */
         /* phy address/id (0 - 31) */
         /* phy register offset */
         /* CSR Clock Range (20 - 35MHz) */
         /* Select read operation */
         /* set busy bit */
-        MAC_GMIIAR_RD(mac_gmiiar);
-        mac_gmiiar = mac_gmiiar & (ULONG) (0x12);
-        mac_gmiiar =
-            mac_gmiiar | ((phy_id) << 21) | ((phy_reg) << 16) | ((0x2) << 8)
-            | ((0x3) << 2) | ((0x1) << 0);
+        mdc_cr <<= 8;
+        mac_gmiiar = ((phy_id) << 21) | ((phy_reg) << 16) | mdc_cr |
+                     ((0x3) << 2) | ((0x1) << 0);
         MAC_GMIIAR_WR(mac_gmiiar);
 
-        /*DELAY IMPLEMENTATION USING udelay() */
-        udelay(10);
-        /* wait for MII write operation to complete */
+        /* delay some to allow mac to set busy bit */
+        udelay(2);
+
+        /* wait for MII read operation to complete */
+        if (mdio_poll() == -Y_FAILURE)
+                return -Y_FAILURE;
 
-        /*Poll Until Poll Condition */
-        vy_count = 0;
-        while (1) {
-                if (vy_count > retry_cnt) {
-                        return -Y_FAILURE;
-                } else {
-                        vy_count++;
-                        mdelay(1);
-                }
-                MAC_GMIIAR_RD(mac_gmiiar);
-                if (GET_VALUE
-                    (mac_gmiiar, MAC_GMIIAR_GB_LPOS, MAC_GMIIAR_GB_HPOS) == 0) {
-                        break;
-                }
-        }
         /* read the data */
         MAC_GMIIDR_RD(mac_gmiidr);
         *phy_reg_data =
@@ -3322,11 +3287,11 @@ static INT eqos_pad_calibrate(struct eqos_prv_data *pdata)
 
 static INT eqos_car_reset(struct eqos_prv_data *pdata)
 {
-        ULONG retry_cnt = 1000;
-        ULONG vy_count;
-        volatile ULONG dma_bmr;
+        /* one sec timeout */
+        ULONG retry_cnt = (500 * 1000);
+        ULONG vy_count = 0;
 
-        DBGPR("-->eqos_car_reset\n");
+        ULONG dma_bmr;
 
         /* Issue a CAR reset */
         if (!IS_ERR_OR_NULL(pdata->eqos_rst))
@@ -3335,27 +3300,16 @@ static INT eqos_car_reset(struct eqos_prv_data *pdata)
         /* add delay of 10 usec */
         udelay(10);
 
-        /* Poll Until Poll Condition */
-        vy_count = 0;
-        while (1) {
-                if (vy_count > retry_cnt) {
-                        dev_err(&pdata->pdev->dev,
-                                "%s():%d: Timed out polling on DMA_BMR_SWR\n",
-                                __func__, __LINE__);
-                        return -Y_FAILURE;
-                } else {
-                        vy_count++;
-                        mdelay(1);
-                }
+        while (vy_count < retry_cnt) {
                 DMA_BMR_RD(dma_bmr);
-                if (GET_VALUE(dma_bmr, DMA_BMR_SWR_LPOS, DMA_BMR_SWR_HPOS) == 0) {
-                        break;
+                if (GET_VALUE(dma_bmr,
+                              DMA_BMR_SWR_LPOS, DMA_BMR_SWR_HPOS) == 0) {
+                        return Y_SUCCESS;
                 }
+                vy_count++;
+                udelay(10);
         }
-
-        DBGPR("<--eqos_car_reset\n");
-
-        return Y_SUCCESS;
+        return -Y_FAILURE;
 }
 
 /*!
@@ -3540,7 +3494,7 @@ static UINT calculate_per_queue_fifo(ULONG fifo_size, UCHAR queue_count)
 static INT configure_mtl_queue(UINT qinx, struct eqos_prv_data *pdata)
 {
         struct eqos_tx_queue *queue_data = GET_TX_QUEUE_PTR(qinx);
-        ULONG retry_cnt = 1000;
+        ULONG retry_cnt;
         ULONG vy_count;
         volatile ULONG mtl_qtomr;
         UINT p_rx_fifo = EQOS_256, p_tx_fifo = EQOS_256;
@@ -3551,20 +3505,21 @@ static INT configure_mtl_queue(UINT qinx, struct eqos_prv_data *pdata)
         /*Flush Tx Queue */
         MTL_QTOMR_FTQ_WR(qinx, 0x1);
 
-        /*Poll Until Poll Condition */
+        /* half sec timeout */
+        retry_cnt = (500 * 1000);
         vy_count = 0;
-        while (1) {
-                if (vy_count > retry_cnt) {
-                        return -Y_FAILURE;
-                } else {
-                        vy_count++;
-                        mdelay(1);
-                }
+
+        /* add delay of 10 usec */
+        udelay(10);
+
+        while (vy_count < retry_cnt) {
                 MTL_QTOMR_RD(qinx, mtl_qtomr);
                 if (GET_VALUE(mtl_qtomr, MTL_QTOMR_FTQ_LPOS, MTL_QTOMR_FTQ_HPOS)
                     == 0) {
                         break;
                 }
+                vy_count++;
+                udelay(10);
         }
 
         /*Enable Store and Forward mode for TX */
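
A note on the register write in write_phy_regs()/read_phy_regs() above: the
old code read MAC_GMIIAR back and masked it with 0x12 before OR-ing in new
fields, while the new code composes the whole word from scratch, with the CR
field now supplied by the caller as mdc_cr. The helper below restates that
composition for reference only; the shift values are the ones visible in the
diff, but the macro and function names are descriptive placeholders, not the
driver's own.

#define GMIIAR_PA_SHIFT 21              /* PHY address (0 - 31) */
#define GMIIAR_GR_SHIFT 16              /* PHY register offset */
#define GMIIAR_CR_SHIFT 8               /* CSR clock range (mdc_cr) */
#define GMIIAR_OP_SHIFT 2               /* 0x1 = write, 0x3 = read */
#define GMIIAR_GB       (0x1UL << 0)    /* busy bit */

static unsigned long gmiiar_word(int phy_id, int phy_reg, int mdc_cr, int op)
{
        return ((unsigned long)phy_id << GMIIAR_PA_SHIFT) |
               ((unsigned long)phy_reg << GMIIAR_GR_SHIFT) |
               ((unsigned long)mdc_cr << GMIIAR_CR_SHIFT) |
               ((unsigned long)op << GMIIAR_OP_SHIFT) | GMIIAR_GB;
}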
diff --git a/drivers/net/ethernet/nvidia/eqos/init.c b/drivers/net/ethernet/nvidia/eqos/init.c
index 8a312a88d..d95198676 100644
--- a/drivers/net/ethernet/nvidia/eqos/init.c
+++ b/drivers/net/ethernet/nvidia/eqos/init.c
@@ -661,6 +661,35 @@ static int eqos_therm_init(struct eqos_prv_data *pdata)
         return 0;
 }
 
+/* Converts csr clock to MDC clock.
+ * Values come from CR field in MAC_MDIO_Address register
+ */
+static void save_mdc(struct eqos_prv_data *pdata)
+{
+        if (pdata->csr_clock_speed > 250) {
+                pdata->mdc_cr = 5;
+                return;
+        }
+        if (pdata->csr_clock_speed > 150) {
+                pdata->mdc_cr = 4;
+                return;
+        }
+        if (pdata->csr_clock_speed > 100) {
+                pdata->mdc_cr = 1;
+                return;
+        }
+        if (pdata->csr_clock_speed > 60) {
+                pdata->mdc_cr = 0;
+                return;
+        }
+        if (pdata->csr_clock_speed > 35) {
+                pdata->mdc_cr = 3;
+                return;
+        }
+        /* for CSR < 35mhz */
+        pdata->mdc_cr = 2;
+}
+
 /*!
  * \brief API to initialize the device.
  *
@@ -934,19 +963,6 @@ int eqos_probe(struct platform_device *pdev)
                 pchinfo->int_mask |= VIRT_INTR_CH_CRTL_TX_WR_MASK;
         }
 
-        pdata->interface = eqos_get_phy_interface(pdata);
-        /* Bypass PHYLIB for TBI, RTBI and SGMII interface */
-        if (1 == pdata->hw_feat.sma_sel) {
-                ret = eqos_mdio_register(ndev);
-                if (ret < 0) {
-                        pr_err("MDIO bus (id %d) registration failed\n",
-                               pdata->bus_id);
-                        goto err_out_mdio_reg;
-                }
-        } else {
-                pr_err("%s: MDIO is not present\n\n", DEV_NAME);
-        }
-
         /* csr_clock_speed is axi_cbb_clk rate */
         pdata->csr_clock_speed = clk_get_rate(pdata->axi_cbb_clk) / 1000000;
         if (pdata->csr_clock_speed <= 0) {
@@ -956,6 +972,7 @@ int eqos_probe(struct platform_device *pdev)
                          pdata->csr_clock_speed);
                 MAC_1US_TIC_WR(pdata->csr_clock_speed - 1);
         }
+        save_mdc(pdata);
 
         ret = eqos_get_mac_address_dtb("/chosen", "nvidia,ether-mac", mac_addr);
         if (ret < 0) {
@@ -972,6 +989,19 @@ int eqos_probe(struct platform_device *pdev)
                 ndev->dev_addr[4] = mac_addr[4];
                 ndev->dev_addr[5] = mac_addr[5];
         }
+        pdata->interface = eqos_get_phy_interface(pdata);
+        /* Bypass PHYLIB for TBI, RTBI and SGMII interface */
+        if (1 == pdata->hw_feat.sma_sel) {
+                ret = eqos_mdio_register(ndev);
+                if (ret < 0) {
+                        pr_err("MDIO bus (id %d) registration failed\n",
+                               pdata->bus_id);
+                        goto err_out_mdio_reg;
+                }
+        } else {
+                pr_err("%s: MDIO is not present\n\n", DEV_NAME);
+        }
+
         /* enabling and registration of irq with magic wakeup */
         if (1 == pdata->hw_feat.mgk_sel) {
                 device_set_wakeup_capable(&pdev->dev, 1);
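
The save_mdc() function added above encodes a fixed mapping from the
axi_cbb-derived CSR clock (in MHz) to the CR field of the MAC_MDIO_Address
register. The standalone restatement below, with a tiny self-check, is a
sketch for orientation only: the threshold values come straight from the
patch, while the parenthesized range comments (including the 300 MHz upper
bound) are the usual Synopsys EQOS databook ranges, stated here as an
assumption rather than taken from this commit.

#include <assert.h>

/* Restatement of save_mdc()'s mapping for a quick out-of-tree check.
 * csr_mhz is the axi_cbb rate in MHz (clk_get_rate() / 1000000,
 * as computed in eqos_probe()).
 */
static unsigned int csr_to_mdc_cr(unsigned int csr_mhz)
{
        if (csr_mhz > 250)
                return 5;       /* 250-300 MHz */
        if (csr_mhz > 150)
                return 4;       /* 150-250 MHz */
        if (csr_mhz > 100)
                return 1;       /* 100-150 MHz */
        if (csr_mhz > 60)
                return 0;       /*  60-100 MHz */
        if (csr_mhz > 35)
                return 3;       /*  35-60 MHz */
        return 2;               /*  20-35 MHz */
}

int main(void)
{
        /* e.g. a 204 MHz axi_cbb clock selects CR = 4 */
        assert(csr_to_mdc_cr(204) == 4);
        return 0;
}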
diff --git a/drivers/net/ethernet/nvidia/eqos/mdio.c b/drivers/net/ethernet/nvidia/eqos/mdio.c
index cf0e75c3e..3fe78ab40 100644
--- a/drivers/net/ethernet/nvidia/eqos/mdio.c
+++ b/drivers/net/ethernet/nvidia/eqos/mdio.c
@@ -77,7 +77,8 @@ INT eqos_mdio_read_direct(struct eqos_prv_data *pdata,
 
         if (hw_if->read_phy_regs) {
                 phy_reg_read_status =
-                    hw_if->read_phy_regs(phyaddr, phyreg, phydata);
+                    hw_if->read_phy_regs(phyaddr, phyreg, phydata,
+                                         pdata->mdc_cr);
         } else {
                 phy_reg_read_status = 1;
                 pr_err("%s: hw_if->read_phy_regs not defined", DEV_NAME);
@@ -119,7 +120,8 @@ INT eqos_mdio_write_direct(struct eqos_prv_data *pdata,
 
         if (hw_if->write_phy_regs) {
                 phy_reg_write_status =
-                    hw_if->write_phy_regs(phyaddr, phyreg, phydata);
+                    hw_if->write_phy_regs(phyaddr, phyreg,
+                                          phydata, pdata->mdc_cr);
         } else {
                 phy_reg_write_status = 1;
                 pr_err("%s: hw_if->write_phy_regs not defined", DEV_NAME);
@@ -156,7 +158,8 @@ static INT eqos_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
                    phyaddr, phyreg);
 
         if (hw_if->read_phy_regs)
-                hw_if->read_phy_regs(phyaddr, phyreg, &phydata);
+                hw_if->read_phy_regs(phyaddr, phyreg, &phydata,
+                                     pdata->mdc_cr);
         else
                 pr_err("%s: hw_if->read_phy_regs not defined", DEV_NAME);
 
@@ -191,7 +194,8 @@ static INT eqos_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
         DBGPR_MDIO("--> eqos_mdio_write\n");
 
         if (hw_if->write_phy_regs) {
-                hw_if->write_phy_regs(phyaddr, phyreg, phydata);
+                hw_if->write_phy_regs(phyaddr, phyreg, phydata,
+                                      pdata->mdc_cr);
         } else {
                 ret = -1;
                 pr_err("%s: hw_if->write_phy_regs not defined", DEV_NAME);
@@ -222,18 +226,21 @@ static INT eqos_mdio_reset(struct mii_bus *bus)
 
         DBGPR_MDIO("-->eqos_mdio_reset: phyaddr : %d\n", pdata->phyaddr);
 
-        hw_if->read_phy_regs(pdata->phyaddr, MII_BMCR, &phydata);
+        hw_if->read_phy_regs(pdata->phyaddr, MII_BMCR, &phydata,
+                             pdata->mdc_cr);
 
         if (phydata < 0)
                 return 0;
 
         /* issue soft reset to PHY */
         phydata |= BMCR_RESET;
-        hw_if->write_phy_regs(pdata->phyaddr, MII_BMCR, phydata);
+        hw_if->write_phy_regs(pdata->phyaddr, MII_BMCR, phydata,
+                              pdata->mdc_cr);
 
         /* wait until software reset completes */
         do {
-                hw_if->read_phy_regs(pdata->phyaddr, MII_BMCR, &phydata);
+                hw_if->read_phy_regs(pdata->phyaddr, MII_BMCR, &phydata,
+                                     pdata->mdc_cr);
         } while ((phydata >= 0) && (phydata & BMCR_RESET));
 
         DBGPR_MDIO("<--eqos_mdio_reset\n");
diff --git a/drivers/net/ethernet/nvidia/eqos/yheader.h b/drivers/net/ethernet/nvidia/eqos/yheader.h
index bc1cb7823..821e8bfdc 100644
--- a/drivers/net/ethernet/nvidia/eqos/yheader.h
+++ b/drivers/net/ethernet/nvidia/eqos/yheader.h
@@ -724,8 +724,8 @@ struct hw_if_struct {
         INT(*disable_rx_csum) (void);
         INT(*get_rx_csum_status) (void);
 
-        INT(*read_phy_regs) (INT, INT, INT*);
-        INT(*write_phy_regs) (INT, INT, INT);
+        INT(*read_phy_regs) (INT, INT, INT*, INT);
+        INT(*write_phy_regs) (INT, INT, INT, INT);
         INT(*set_full_duplex) (VOID);
         INT(*set_half_duplex) (VOID);
         INT(*set_mii_speed_100) (struct eqos_prv_data *);
@@ -1538,6 +1538,7 @@ struct eqos_prv_data {
         int tcp_pkt;
 
         u32 csr_clock_speed;
+        u32 mdc_cr;
 
         struct workqueue_struct *fbe_wq;
         struct work_struct fbe_work;