Diffstat (limited to 'drivers/net')
 22 files changed, 336 insertions(+), 205 deletions(-)
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index a5f91e1e8fe3..becef25fa194 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -148,7 +148,7 @@ config PCMCIA_PCNET
 
 config NE_H8300
 	tristate "NE2000 compatible support for H8/300"
-	depends on H8300
+	depends on H8300H_AKI3068NET || H8300H_H8MAX
 	---help---
 	  Say Y here if you want to use the NE2000 compatible
 	  controller on the Renesas H8/300 processor.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8d726f6e1c52..2361bf236ce3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -53,6 +53,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
 	int old_max_eth_txqs, new_max_eth_txqs;
 	int old_txdata_index = 0, new_txdata_index = 0;
+	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
 
 	/* Copy the NAPI object as it has been already initialized */
 	from_fp->napi = to_fp->napi;
@@ -61,6 +62,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 	memcpy(to_fp, from_fp, sizeof(*to_fp));
 	to_fp->index = to;
 
+	/* Retain the tpa_info of the original `to' version as we don't want
+	 * 2 FPs to contain the same tpa_info pointer.
+	 */
+	to_fp->tpa_info = old_tpa_info;
+
 	/* move sp_objs contents as well, as their indices match fp ones */
 	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
 
@@ -2959,8 +2965,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	if (IS_PF(bp)) {
 		if (CNIC_LOADED(bp))
 			bnx2x_free_mem_cnic(bp);
-		bnx2x_free_mem(bp);
 	}
+	bnx2x_free_mem(bp);
+
 	bp->state = BNX2X_STATE_CLOSED;
 	bp->cnic_loaded = false;
 
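[Note] The bnx2x_move_fp() hunk above exists because the wholesale memcpy() of one fastpath struct onto another also copies its tpa_info pointer, leaving two fastpaths aliasing (and eventually double-freeing) one aggregation buffer. A minimal sketch of the same save/restore pattern, in generic C with hypothetical types:

	#include <string.h>

	struct slot {
		void *own_buf;	/* owned by this slot, must never be shared */
		int index;	/* payload that is meant to move */
	};

	static void move_slot(struct slot *dst, const struct slot *src)
	{
		void *keep = dst->own_buf;	/* save dst's own buffer... */

		memcpy(dst, src, sizeof(*dst));	/* ...memcpy clobbers it... */
		dst->own_buf = keep;		/* ...restore so ownership stays unique */
	}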
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9d64b988ab34..664568420c9b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6501,12 +6501,13 @@ static int bnx2x_link_initialize(struct link_params *params,
 		struct bnx2x_phy *phy = &params->phy[INT_PHY];
 		if (vars->line_speed == SPEED_AUTO_NEG &&
 		    (CHIP_IS_E1x(bp) ||
-		     CHIP_IS_E2(bp)))
+		     CHIP_IS_E2(bp))) {
 			bnx2x_set_parallel_detection(phy, params);
 		if (params->phy[INT_PHY].config_init)
 			params->phy[INT_PHY].config_init(phy,
							 params,
							 vars);
+		}
 	}
 
 	/* Init external phy*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c69990d2170e..285f2a59a3a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -7855,12 +7855,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
 {
 	int i;
 
-	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
-		       sizeof(struct host_sp_status_block));
-
 	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
 		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
 
+	if (IS_VF(bp))
+		return;
+
+	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+		       sizeof(struct host_sp_status_block));
+
 	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
 		       sizeof(struct bnx2x_slowpath));
 
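[Note] The reordering above frees the one mapping that VFs also allocate (fw_stats) first and then bails out, so a VF never touches the PF-only mappings. Reduced to a sketch with hypothetical names (dma_free() is a stand-in, not a kernel API):

	struct dev { void *fw_stats, *def_status_blk; bool is_vf; };

	static void free_mem(struct dev *d)
	{
		dma_free(d->fw_stats);		/* allocated by PF and VF alike */

		if (d->is_vf)
			return;			/* everything below is PF-only */

		dma_free(d->def_status_blk);	/* first of the PF-only mappings */
	}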
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 73731eb68f2a..b26eb83069b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -545,23 +545,6 @@ static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
 	return 0;
 }
 
-static int
-bnx2x_vfop_config_vlan0(struct bnx2x *bp,
-			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
-			bool add)
-{
-	int rc;
-
-	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
-				       BNX2X_VLAN_MAC_DEL;
-	vlan_mac->user_req.u.vlan.vlan = 0;
-
-	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
-	if (rc == -EEXIST)
-		rc = 0;
-	return rc;
-}
-
 static int bnx2x_vfop_config_list(struct bnx2x *bp,
 				  struct bnx2x_vfop_filters *filters,
 				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
@@ -666,30 +649,14 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
 	case BNX2X_VFOP_VLAN_CONFIG_LIST:
 		/* next state */
-		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
-
-		/* remove vlan0 - could be no-op */
-		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
-		if (vfop->rc)
-			goto op_err;
+		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
 
-		/* Do vlan list config. if this operation fails we try to
-		 * restore vlan0 to keep the queue is working order
-		 */
+		/* do list config */
 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
 		if (!vfop->rc) {
 			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
 			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
 		}
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
-
-	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
-		/* next state */
-		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
-		if (list_empty(&obj->head))
-			/* add vlan0 */
-			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 
 	default:
@@ -2833,6 +2800,18 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
 	return 0;
 }
 
+struct set_vf_state_cookie {
+	struct bnx2x_virtf *vf;
+	u8 state;
+};
+
+void bnx2x_set_vf_state(void *cookie)
+{
+	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
+
+	p->vf->state = p->state;
+}
+
 /* VFOP close (teardown the queues, delete mcasts and close HW) */
 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
@@ -2883,7 +2862,19 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 op_err:
 	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
 op_done:
-	vf->state = VF_ACQUIRED;
+
+	/* need to make sure there are no outstanding stats ramrods which may
+	 * cause the device to access the VF's stats buffer which it will free
+	 * as soon as we return from the close flow.
+	 */
+	{
+		struct set_vf_state_cookie cookie;
+
+		cookie.vf = vf;
+		cookie.state = VF_ACQUIRED;
+		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+	}
+
 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
 	bnx2x_vfop_end(bp, vf, vfop);
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index d63d1327b051..86436c77af03 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -522,20 +522,16 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
 /* should be called under stats_sema */
 static void __bnx2x_stats_start(struct bnx2x *bp)
 {
-	/* vfs travel through here as part of the statistics FSM, but no action
-	 * is required
-	 */
-	if (IS_VF(bp))
-		return;
-
-	if (bp->port.pmf)
-		bnx2x_port_stats_init(bp);
+	if (IS_PF(bp)) {
+		if (bp->port.pmf)
+			bnx2x_port_stats_init(bp);
 
-	else if (bp->func_stx)
-		bnx2x_func_stats_init(bp);
+		else if (bp->func_stx)
+			bnx2x_func_stats_init(bp);
 
-	bnx2x_hw_stats_post(bp);
-	bnx2x_storm_stats_post(bp);
+		bnx2x_hw_stats_post(bp);
+		bnx2x_storm_stats_post(bp);
+	}
 
 	bp->stats_started = true;
 }
@@ -1997,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
 				estats->mac_discard);
 	}
 }
+
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+			   void (func_to_exec)(void *cookie),
+			   void *cookie){
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
+	bnx2x_stats_comp(bp);
+	func_to_exec(cookie);
+	__bnx2x_stats_start(bp);
+	up(&bp->stats_sema);
+}
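[Note] bnx2x_stats_safe_exec() is the other half of the sriov change above: it quiesces the statistics machinery (semaphore plus bnx2x_stats_comp() to drain in-flight stats ramrods), runs an arbitrary callback, then restarts stats. The callback takes a void * cookie so any typed state can be passed through; a minimal sketch of that idiom in generic C (hypothetical names):

	struct cookie { int *target; int value; };

	static void apply(void *arg)
	{
		struct cookie *c = arg;	/* recover the typed state */
		*c->target = c->value;
	}

	static void run_safely(void (*fn)(void *), void *arg)
	{
		/* quiesce here: lock, drain in-flight work */
		fn(arg);		/* mutate while nothing can race */
		/* resume here: restart, unlock */
	}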
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 853824d258e8..f35845006cdd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -539,6 +539,9 @@ struct bnx2x;
 void bnx2x_memset_stats(struct bnx2x *bp);
 void bnx2x_stats_init(struct bnx2x *bp);
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+			   void (func_to_exec)(void *cookie),
+			   void *cookie);
 
 /**
  * bnx2x_save_statistics - save statistics when unloading.
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 2e55ee29cf13..5701f3d1a169 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3030,6 +3030,19 @@ static bool tg3_phy_power_bug(struct tg3 *tp)
 	return false;
 }
 
+static bool tg3_phy_led_bug(struct tg3 *tp)
+{
+	switch (tg3_asic_rev(tp)) {
+	case ASIC_REV_5719:
+		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+		    !tp->pci_fn)
+			return true;
+		return false;
+	}
+
+	return false;
+}
+
 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 {
 	u32 val;
@@ -3077,8 +3090,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 		}
 		return;
 	} else if (do_low_power) {
-		tg3_writephy(tp, MII_TG3_EXT_CTRL,
-			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+		if (!tg3_phy_led_bug(tp))
+			tg3_writephy(tp, MII_TG3_EXT_CTRL,
+				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
 
 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 7cb148c495c9..78d6d6b970e1 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -353,11 +353,9 @@ struct xgmac_extra_stats {
 	/* Receive errors */
 	unsigned long rx_watchdog;
 	unsigned long rx_da_filter_fail;
-	unsigned long rx_sa_filter_fail;
 	unsigned long rx_payload_error;
 	unsigned long rx_ip_header_error;
 	/* Tx/Rx IRQ errors */
-	unsigned long tx_undeflow;
 	unsigned long tx_process_stopped;
 	unsigned long rx_buf_unav;
 	unsigned long rx_process_stopped;
@@ -393,6 +391,7 @@ struct xgmac_priv {
 	char rx_pause;
 	char tx_pause;
 	int wolopts;
+	struct work_struct tx_timeout_work;
 };
 
 /* XGMAC Configuration Settings */
@@ -409,6 +408,9 @@ struct xgmac_priv {
 #define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
 #define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)
 
+#define tx_dma_ring_space(p) \
+	dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
+
 /* XGMAC Descriptor Access Helpers */
 static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
 {
@@ -421,7 +423,7 @@ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
 
 static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
 {
-	u32 len = cpu_to_le32(p->flags);
+	u32 len = le32_to_cpu(p->buf_size);
 	return (len & DESC_BUFFER1_SZ_MASK) +
 		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
 }
@@ -464,11 +466,23 @@ static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
 	p->flags = cpu_to_le32(tmpflags);
 }
 
+static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
+{
+	u32 tmpflags = le32_to_cpu(p->flags);
+	tmpflags &= TXDESC_END_RING;
+	p->flags = cpu_to_le32(tmpflags);
+}
+
 static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
 {
 	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
 }
 
+static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
+{
+	return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
+}
+
 static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
 {
 	return le32_to_cpu(p->buf1_addr);
@@ -609,10 +623,15 @@ static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
 {
 	u32 data;
 
-	data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
-	writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
-	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
-	writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+	if (addr) {
+		data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+		writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+		data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+		writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+	} else {
+		writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
+		writel(0, ioaddr + XGMAC_ADDR_LOW(num));
+	}
 }
 
 static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
@@ -683,9 +702,14 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
 		if (unlikely(skb == NULL))
 			break;
 
-		priv->rx_skbuff[entry] = skb;
 		paddr = dma_map_single(priv->device, skb->data,
-				       bufsz, DMA_FROM_DEVICE);
+				       priv->dma_buf_sz - NET_IP_ALIGN,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(priv->device, paddr)) {
+			dev_kfree_skb_any(skb);
+			break;
+		}
+		priv->rx_skbuff[entry] = skb;
 		desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
 	}
 
@@ -782,20 +806,21 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
 		return;
 
 	for (i = 0; i < DMA_RX_RING_SZ; i++) {
-		if (priv->rx_skbuff[i] == NULL)
+		struct sk_buff *skb = priv->rx_skbuff[i];
+		if (skb == NULL)
 			continue;
 
 		p = priv->dma_rx + i;
 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 priv->dma_buf_sz, DMA_FROM_DEVICE);
-		dev_kfree_skb_any(priv->rx_skbuff[i]);
+				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
 		priv->rx_skbuff[i] = NULL;
 	}
 }
 
 static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
 {
-	int i, f;
+	int i;
 	struct xgmac_dma_desc *p;
 
 	if (!priv->tx_skbuff)
@@ -806,16 +831,15 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
 			continue;
 
 		p = priv->dma_tx + i;
-		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 desc_get_buf_len(p), DMA_TO_DEVICE);
-
-		for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
-			p = priv->dma_tx + i++;
+		if (desc_get_tx_fs(p))
+			dma_unmap_single(priv->device, desc_get_buf_addr(p),
+					 desc_get_buf_len(p), DMA_TO_DEVICE);
+		else
 			dma_unmap_page(priv->device, desc_get_buf_addr(p),
 				       desc_get_buf_len(p), DMA_TO_DEVICE);
-		}
 
-		dev_kfree_skb_any(priv->tx_skbuff[i]);
+		if (desc_get_tx_ls(p))
+			dev_kfree_skb_any(priv->tx_skbuff[i]);
 		priv->tx_skbuff[i] = NULL;
 	}
 }
@@ -852,8 +876,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
  */
 static void xgmac_tx_complete(struct xgmac_priv *priv)
 {
-	int i;
-
 	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
 		unsigned int entry = priv->tx_tail;
 		struct sk_buff *skb = priv->tx_skbuff[entry];
@@ -863,55 +885,45 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
 		if (desc_get_owner(p))
 			break;
 
-		/* Verify tx error by looking at the last segment */
-		if (desc_get_tx_ls(p))
-			desc_get_tx_status(priv, p);
-
 		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
 			   priv->tx_head, priv->tx_tail);
 
-		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 desc_get_buf_len(p), DMA_TO_DEVICE);
-
-		priv->tx_skbuff[entry] = NULL;
-		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
-
-		if (!skb) {
-			continue;
-		}
-
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
-							      DMA_TX_RING_SZ);
-			p = priv->dma_tx + priv->tx_tail;
-
+		if (desc_get_tx_fs(p))
+			dma_unmap_single(priv->device, desc_get_buf_addr(p),
+					 desc_get_buf_len(p), DMA_TO_DEVICE);
+		else
 			dma_unmap_page(priv->device, desc_get_buf_addr(p),
 				       desc_get_buf_len(p), DMA_TO_DEVICE);
+
+		/* Check tx error on the last segment */
+		if (desc_get_tx_ls(p)) {
+			desc_get_tx_status(priv, p);
+			dev_kfree_skb(skb);
 		}
 
-		dev_kfree_skb(skb);
+		priv->tx_skbuff[entry] = NULL;
+		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
 	}
 
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
-	    MAX_SKB_FRAGS)
+	/* Ensure tx_tail is visible to xgmac_xmit */
+	smp_mb();
+	if (unlikely(netif_queue_stopped(priv->dev) &&
+	    (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
 		netif_wake_queue(priv->dev);
 }
 
-/**
- * xgmac_tx_err:
- * @priv: pointer to the private device structure
- * Description: it cleans the descriptors and restarts the transmission
- * in case of errors.
- */
-static void xgmac_tx_err(struct xgmac_priv *priv)
+static void xgmac_tx_timeout_work(struct work_struct *work)
 {
-	u32 reg, value, inten;
+	u32 reg, value;
+	struct xgmac_priv *priv =
+		container_of(work, struct xgmac_priv, tx_timeout_work);
 
-	netif_stop_queue(priv->dev);
+	napi_disable(&priv->napi);
 
-	inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
 
+	netif_tx_lock(priv->dev);
+
 	reg = readl(priv->base + XGMAC_DMA_CONTROL);
 	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
 	do {
@@ -927,9 +939,15 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
 
 	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
 	       priv->base + XGMAC_DMA_STATUS);
-	writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
 
+	netif_tx_unlock(priv->dev);
 	netif_wake_queue(priv->dev);
+
+	napi_enable(&priv->napi);
+
+	/* Enable interrupts */
+	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
+	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
 }
 
 static int xgmac_hw_init(struct net_device *dev)
@@ -957,9 +975,7 @@ static int xgmac_hw_init(struct net_device *dev)
 		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
 	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
 
-	/* Enable interrupts */
-	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
-	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+	writel(0, ioaddr + XGMAC_DMA_INTR_ENA);
 
 	/* Mask power mgt interrupt */
 	writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
@@ -1027,6 +1043,10 @@ static int xgmac_open(struct net_device *dev)
 	napi_enable(&priv->napi);
 	netif_start_queue(dev);
 
+	/* Enable interrupts */
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
 	return 0;
 }
 
@@ -1087,7 +1107,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(priv->device, paddr)) {
 		dev_kfree_skb(skb);
-		return -EIO;
+		return NETDEV_TX_OK;
 	}
 	priv->tx_skbuff[entry] = skb;
 	desc_set_buf_addr_and_size(desc, paddr, len);
@@ -1099,14 +1119,12 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
 					 DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->device, paddr)) {
-			dev_kfree_skb(skb);
-			return -EIO;
-		}
+		if (dma_mapping_error(priv->device, paddr))
+			goto dma_err;
 
 		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
 		desc = priv->dma_tx + entry;
-		priv->tx_skbuff[entry] = NULL;
+		priv->tx_skbuff[entry] = skb;
 
 		desc_set_buf_addr_and_size(desc, paddr, len);
 		if (i < (nfrags - 1))
@@ -1124,13 +1142,35 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	wmb();
 	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
 
+	writel(1, priv->base + XGMAC_DMA_TX_POLL);
+
 	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
 
-	writel(1, priv->base + XGMAC_DMA_TX_POLL);
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
-	    MAX_SKB_FRAGS)
+	/* Ensure tx_head update is visible to tx completion */
+	smp_mb();
+	if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
 		netif_stop_queue(dev);
+		/* Ensure netif_stop_queue is visible to tx completion */
+		smp_mb();
+		if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
+			netif_start_queue(dev);
+	}
+	return NETDEV_TX_OK;
 
+dma_err:
+	entry = priv->tx_head;
+	for ( ; i > 0; i--) {
+		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+		desc = priv->dma_tx + entry;
+		priv->tx_skbuff[entry] = NULL;
+		dma_unmap_page(priv->device, desc_get_buf_addr(desc),
+			       desc_get_buf_len(desc), DMA_TO_DEVICE);
+		desc_clear_tx_owner(desc);
+	}
+	desc = first;
+	dma_unmap_single(priv->device, desc_get_buf_addr(desc),
+			 desc_get_buf_len(desc), DMA_TO_DEVICE);
+	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
 
@@ -1174,7 +1214,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
 
 		skb_put(skb, frame_len);
 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 frame_len, DMA_FROM_DEVICE);
+				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
 
 		skb->protocol = eth_type_trans(skb, priv->dev);
 		skb->ip_summed = ip_checksum;
@@ -1225,9 +1265,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
 static void xgmac_tx_timeout(struct net_device *dev)
 {
 	struct xgmac_priv *priv = netdev_priv(dev);
-
-	/* Clear Tx resources and restart transmitting again */
-	xgmac_tx_err(priv);
+	schedule_work(&priv->tx_timeout_work);
 }
 
 /**
@@ -1286,6 +1324,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
 	if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
 		use_hash = true;
 		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+	} else {
+		use_hash = false;
 	}
 	netdev_for_each_mc_addr(ha, dev) {
 		if (use_hash) {
@@ -1302,6 +1342,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
 	}
 
 out:
+	for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
+		xgmac_set_mac_addr(ioaddr, NULL, reg);
 	for (i = 0; i < XGMAC_NUM_HASH; i++)
 		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
 
@@ -1366,7 +1408,6 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
 static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
 {
 	u32 intr_status;
-	bool tx_err = false;
 	struct net_device *dev = (struct net_device *)dev_id;
 	struct xgmac_priv *priv = netdev_priv(dev);
 	struct xgmac_extra_stats *x = &priv->xstats;
@@ -1396,16 +1437,12 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
 		if (intr_status & DMA_STATUS_TPS) {
 			netdev_err(priv->dev, "transmit process stopped\n");
 			x->tx_process_stopped++;
-			tx_err = true;
+			schedule_work(&priv->tx_timeout_work);
 		}
 		if (intr_status & DMA_STATUS_FBI) {
 			netdev_err(priv->dev, "fatal bus error\n");
 			x->fatal_bus_error++;
-			tx_err = true;
 		}
-
-		if (tx_err)
-			xgmac_tx_err(priv);
 	}
 
 	/* TX/RX NORMAL interrupts */
@@ -1569,7 +1606,6 @@ static const struct xgmac_stats xgmac_gstrings_stats[] = {
 	XGMAC_STAT(rx_payload_error),
 	XGMAC_STAT(rx_ip_header_error),
 	XGMAC_STAT(rx_da_filter_fail),
-	XGMAC_STAT(rx_sa_filter_fail),
 	XGMAC_STAT(fatal_bus_error),
 	XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
 	XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
@@ -1708,6 +1744,7 @@ static int xgmac_probe(struct platform_device *pdev)
 	ndev->netdev_ops = &xgmac_netdev_ops;
 	SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
 	spin_lock_init(&priv->stats_lock);
+	INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
 
 	priv->device = &pdev->dev;
 	priv->dev = ndev;
@@ -1759,7 +1796,7 @@ static int xgmac_probe(struct platform_device *pdev)
 	if (device_can_wakeup(priv->device))
 		priv->wolopts = WAKE_MAGIC;	/* Magic Frame as default */
 
-	ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+	ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
 	if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
 		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 				     NETIF_F_RXCSUM;
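[Note] Several xgmac hunks above replace the old "restart the DMA on any tx error" logic with the standard lockless stop/wake protocol between xmit and tx completion: the producer stops the queue when ring space runs low, issues a barrier, and re-checks; the consumer advances tx_tail, issues a barrier, and wakes only a stopped queue. A condensed sketch of the protocol (illustrative; ring_space() is a hypothetical stand-in for tx_dma_ring_space()):

	/* producer side (ndo_start_xmit) */
	if (ring_space() <= MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		smp_mb();	/* make the stop visible before re-checking space */
		if (ring_space() > MAX_SKB_FRAGS)	/* completion raced with us */
			netif_start_queue(dev);
	}

	/* consumer side (tx completion) */
	priv->tx_tail = new_tail;
	smp_mb();	/* make the new tail visible before testing the queue */
	if (netif_queue_stopped(dev) && ring_space() > MAX_SKB_FRAGS)
		netif_wake_queue(dev);

Without the barriers, the producer's stop and the consumer's wake can interleave such that the queue stays stopped even though the ring has drained.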
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index e104db7fcf27..3224d28cdad4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4476,6 +4476,10 @@ static int be_resume(struct pci_dev *pdev)
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
+	status = be_fw_wait_ready(adapter);
+	if (status)
+		return status;
+
 	/* tell fw we're ready to fire cmds */
 	status = be_cmd_fw_init(adapter);
 	if (status)
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ae236009f1a8..0120217a16dd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -296,6 +296,9 @@ struct fec_enet_private {
 	/* The ring entries to be free()ed */
 	struct bufdesc	*dirty_tx;
 
+	unsigned short tx_ring_size;
+	unsigned short rx_ring_size;
+
 	struct	platform_device *pdev;
 
 	int	opened;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 0cd5e4b8b545..f9aacf5d8523 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -238,22 +238,57 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 
 static int mii_cnt;
 
-static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
 {
-	struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
-	if (is_ex)
-		return (struct bufdesc *)(ex + 1);
+	struct bufdesc *new_bd = bdp + 1;
+	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+	struct bufdesc_ex *ex_base;
+	struct bufdesc *base;
+	int ring_size;
+
+	if (bdp >= fep->tx_bd_base) {
+		base = fep->tx_bd_base;
+		ring_size = fep->tx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+	} else {
+		base = fep->rx_bd_base;
+		ring_size = fep->rx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+	}
+
+	if (fep->bufdesc_ex)
+		return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
+			ex_base : ex_new_bd);
 	else
-		return bdp + 1;
+		return (new_bd >= (base + ring_size)) ?
+			base : new_bd;
 }
 
-static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
 {
-	struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
-	if (is_ex)
-		return (struct bufdesc *)(ex - 1);
+	struct bufdesc *new_bd = bdp - 1;
+	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+	struct bufdesc_ex *ex_base;
+	struct bufdesc *base;
+	int ring_size;
+
+	if (bdp >= fep->tx_bd_base) {
+		base = fep->tx_bd_base;
+		ring_size = fep->tx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+	} else {
+		base = fep->rx_bd_base;
+		ring_size = fep->rx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+	}
+
+	if (fep->bufdesc_ex)
+		return (struct bufdesc *)((ex_new_bd < ex_base) ?
+			(ex_new_bd + ring_size) : ex_new_bd);
 	else
-		return bdp - 1;
+		return (new_bd < base) ? (new_bd + ring_size) : new_bd;
 }
 
 static void *swap_buffer(void *bufaddr, int len)
@@ -379,7 +414,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		}
 	}
 
-	bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp_pre = fec_enet_get_prevdesc(bdp, fep);
 	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
 	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
 		fep->delay_work.trig_tx = true;
@@ -388,10 +423,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 
 	/* If this was the last BD in the ring, start at the beginning again. */
-	if (status & BD_ENET_TX_WRAP)
-		bdp = fep->tx_bd_base;
-	else
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_nextdesc(bdp, fep);
 
 	fep->cur_tx = bdp;
 
@@ -416,18 +448,18 @@ static void fec_enet_bd_init(struct net_device *dev)
 
 	/* Initialize the receive buffer descriptors. */
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 
 		/* Initialize the BD for every fragment in the page. */
 		if (bdp->cbd_bufaddr)
 			bdp->cbd_sc = BD_ENET_RX_EMPTY;
 		else
 			bdp->cbd_sc = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 
 	fep->cur_rx = fep->rx_bd_base;
@@ -435,7 +467,7 @@ static void fec_enet_bd_init(struct net_device *dev)
 	/* ...and the same for transmit */
 	bdp = fep->tx_bd_base;
 	fep->cur_tx = bdp;
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < fep->tx_ring_size; i++) {
 
 		/* Initialize the BD for every fragment in the page. */
 		bdp->cbd_sc = 0;
@@ -444,11 +476,11 @@ static void fec_enet_bd_init(struct net_device *dev)
 			fep->tx_skbuff[i] = NULL;
 		}
 		bdp->cbd_bufaddr = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 	fep->dirty_tx = bdp;
 }
@@ -509,10 +541,10 @@ fec_restart(struct net_device *ndev, int duplex)
 	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
 	if (fep->bufdesc_ex)
 		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
-			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
 	else
 		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
-			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
 
 
 	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
@@ -726,10 +758,7 @@ fec_enet_tx(struct net_device *ndev)
 	bdp = fep->dirty_tx;
 
 	/* get next bdp of dirty_tx */
-	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
-		bdp = fep->tx_bd_base;
-	else
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_nextdesc(bdp, fep);
 
 	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
 
@@ -799,10 +828,7 @@ fec_enet_tx(struct net_device *ndev)
 		fep->dirty_tx = bdp;
 
 		/* Update pointer to next buffer descriptor to be transmitted */
-		if (status & BD_ENET_TX_WRAP)
-			bdp = fep->tx_bd_base;
-		else
-			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 
 		/* Since we have freed up a buffer, the ring is no longer full
 		 */
@@ -970,8 +996,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
 					       htons(ETH_P_8021Q),
 					       vlan_tag);
 
-			if (!skb_defer_rx_timestamp(skb))
-				napi_gro_receive(&fep->napi, skb);
+			napi_gro_receive(&fep->napi, skb);
 		}
 
 		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
@@ -993,10 +1018,8 @@ rx_processing_done:
 		}
 
 		/* Update BD pointer to next entry */
-		if (status & BD_ENET_RX_WRAP)
-			bdp = fep->rx_bd_base;
-		else
-			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
+
 		/* Doing this here will keep the FEC running while we process
 		 * incoming frames.  On a heavily loaded network, we should be
 		 * able to keep up at the expense of system resources.
@@ -1662,7 +1685,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 	struct bufdesc	*bdp;
 
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 		skb = fep->rx_skbuff[i];
 
 		if (bdp->cbd_bufaddr)
@@ -1670,11 +1693,11 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
 		if (skb)
 			dev_kfree_skb(skb);
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	bdp = fep->tx_bd_base;
-	for (i = 0; i < TX_RING_SIZE; i++)
+	for (i = 0; i < fep->tx_ring_size; i++)
 		kfree(fep->tx_bounce[i]);
 }
 
@@ -1686,7 +1709,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 	struct bufdesc	*bdp;
 
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
 		if (!skb) {
 			fec_enet_free_buffers(ndev);
@@ -1703,15 +1726,15 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_RX_INT;
 		}
 
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 
 	bdp = fep->tx_bd_base;
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < fep->tx_ring_size; i++) {
 		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
 
 		bdp->cbd_sc = 0;
@@ -1722,11 +1745,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_TX_INT;
 		}
 
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}
 
 	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 
 	return 0;
@@ -1966,13 +1989,17 @@ static int fec_enet_init(struct net_device *ndev)
 	/* Get the Ethernet address */
 	fec_get_mac(ndev);
 
+	/* init the tx & rx ring size */
+	fep->tx_ring_size = TX_RING_SIZE;
+	fep->rx_ring_size = RX_RING_SIZE;
+
 	/* Set receive and transmit descriptor base. */
 	fep->rx_bd_base = cbd_base;
 	if (fep->bufdesc_ex)
 		fep->tx_bd_base = (struct bufdesc *)
-			(((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+			(((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
 	else
-		fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+		fep->tx_bd_base = cbd_base + fep->rx_ring_size;
 
 	/* The FEC Ethernet specific entries in the device structure */
 	ndev->watchdog_timeo = TX_TIMEOUT;
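[Note] The reworked FEC helpers above drop the per-descriptor BD_ENET_*_WRAP test on the fast path and wrap purely by pointer arithmetic, picking the tx or rx ring from the descriptor's address and using the new runtime ring sizes. The core wrap rule, reduced to a plain C sketch with hypothetical names:

	struct desc { unsigned int flags; };	/* stand-in for struct bufdesc */

	static struct desc *ring_next(struct desc *d, struct desc *base, int ring_size)
	{
		return (d + 1 >= base + ring_size) ? base : d + 1;	/* wrap to start */
	}

	static struct desc *ring_prev(struct desc *d, struct desc *base, int ring_size)
	{
		return (d - 1 < base) ? d - 1 + ring_size : d - 1;	/* wrap to end */
	}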
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 7fbe6abf6054..23de82a9da82 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -3069,7 +3069,7 @@ jme_init_one(struct pci_dev *pdev,
 		jwrite32(jme, JME_APMC, apmc);
 	}
 
-	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
+	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT)
 
 	spin_lock_init(&jme->phy_lock);
 	spin_lock_init(&jme->macaddr_lock);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 2777c70c603b..e35bac7cfdf1 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -138,7 +138,9 @@
 #define MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
 #define MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 #define MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
+#define MVNETA_GMAC_AN_SPEED_EN             BIT(7)
 #define MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
+#define MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
 #define MVNETA_MIB_COUNTERS_BASE            0x3080
 #define MVNETA_MIB_LATE_COLLISION           0x7c
 #define MVNETA_DA_FILT_SPEC_MCAST           0x3400
@@ -948,6 +950,13 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
 	/* Assign port SDMA configuration */
 	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
 
+	/* Disable PHY polling in hardware, since we're using the
+	 * kernel phylib to do this.
+	 */
+	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
+	val &= ~MVNETA_PHY_POLLING_ENABLE;
+	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
+
 	mvneta_set_ucast_table(pp, -1);
 	mvneta_set_special_mcast_table(pp, -1);
 	mvneta_set_other_mcast_table(pp, -1);
@@ -2340,7 +2349,9 @@ static void mvneta_adjust_link(struct net_device *ndev) | |||
2340 | val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); | 2349 | val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); |
2341 | val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | | 2350 | val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | |
2342 | MVNETA_GMAC_CONFIG_GMII_SPEED | | 2351 | MVNETA_GMAC_CONFIG_GMII_SPEED | |
2343 | MVNETA_GMAC_CONFIG_FULL_DUPLEX); | 2352 | MVNETA_GMAC_CONFIG_FULL_DUPLEX | |
2353 | MVNETA_GMAC_AN_SPEED_EN | | ||
2354 | MVNETA_GMAC_AN_DUPLEX_EN); | ||
2344 | 2355 | ||
2345 | if (phydev->duplex) | 2356 | if (phydev->duplex) |
2346 | val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; | 2357 | val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; |
@@ -2473,6 +2484,21 @@ static int mvneta_stop(struct net_device *dev) | |||
2473 | return 0; | 2484 | return 0; |
2474 | } | 2485 | } |
2475 | 2486 | ||
2487 | static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
2488 | { | ||
2489 | struct mvneta_port *pp = netdev_priv(dev); | ||
2490 | int ret; | ||
2491 | |||
2492 | if (!pp->phy_dev) | ||
2493 | return -ENOTSUPP; | ||
2494 | |||
2495 | ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd); | ||
2496 | if (!ret) | ||
2497 | mvneta_adjust_link(dev); | ||
2498 | |||
2499 | return ret; | ||
2500 | } | ||
2501 | |||
2476 | /* Ethtool methods */ | 2502 | /* Ethtool methods */ |
2477 | 2503 | ||
2478 | /* Get settings (phy address, speed) for ethtools */ | 2504 | /* Get settings (phy address, speed) for ethtools */ |
@@ -2591,6 +2617,7 @@ static const struct net_device_ops mvneta_netdev_ops = { | |||
2591 | .ndo_change_mtu = mvneta_change_mtu, | 2617 | .ndo_change_mtu = mvneta_change_mtu, |
2592 | .ndo_tx_timeout = mvneta_tx_timeout, | 2618 | .ndo_tx_timeout = mvneta_tx_timeout, |
2593 | .ndo_get_stats64 = mvneta_get_stats64, | 2619 | .ndo_get_stats64 = mvneta_get_stats64, |
2620 | .ndo_do_ioctl = mvneta_ioctl, | ||
2594 | }; | 2621 | }; |
2595 | 2622 | ||
2596 | const struct ethtool_ops mvneta_eth_tool_ops = { | 2623 | const struct ethtool_ops mvneta_eth_tool_ops = { |
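
Three related mvneta changes above: the in-band autoneg bits (MVNETA_GMAC_AN_SPEED_EN, MVNETA_GMAC_AN_DUPLEX_EN) are now cleared in mvneta_adjust_link() so the forced speed/duplex that phylib programs actually takes effect, hardware PHY polling is switched off because phylib owns the PHY, and MII ioctls are forwarded to phylib. A sketch of the forwarding helper; the shape is illustrative, and mvneta additionally re-runs its adjust_link handler on success so the MAC tracks whatever the ioctl changed:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

/* Hand MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) to phylib;
 * without an attached PHY there is nothing to talk to. */
static int example_mii_ioctl(struct phy_device *phydev,
			     struct ifreq *ifr, int cmd)
{
	if (!phydev)
		return -ENOTSUPP;

	return phy_mii_ioctl(phydev, ifr, cmd);
}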
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index 3fe09ab2d7c9..32675e16021e 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h | |||
@@ -1171,7 +1171,6 @@ typedef struct { | |||
1171 | 1171 | ||
1172 | #define NETXEN_DB_MAPSIZE_BYTES 0x1000 | 1172 | #define NETXEN_DB_MAPSIZE_BYTES 0x1000 |
1173 | 1173 | ||
1174 | #define NETXEN_NETDEV_WEIGHT 128 | ||
1175 | #define NETXEN_ADAPTER_UP_MAGIC 777 | 1174 | #define NETXEN_ADAPTER_UP_MAGIC 777 |
1176 | #define NETXEN_NIC_PEG_TUNE 0 | 1175 | #define NETXEN_NIC_PEG_TUNE 0 |
1177 | 1176 | ||
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 1046e9461509..cbd75f97ffb3 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | |||
@@ -197,7 +197,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) | |||
197 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { | 197 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { |
198 | sds_ring = &recv_ctx->sds_rings[ring]; | 198 | sds_ring = &recv_ctx->sds_rings[ring]; |
199 | netif_napi_add(netdev, &sds_ring->napi, | 199 | netif_napi_add(netdev, &sds_ring->napi, |
200 | netxen_nic_poll, NETXEN_NETDEV_WEIGHT); | 200 | netxen_nic_poll, NAPI_POLL_WEIGHT); |
201 | } | 201 | } |
202 | 202 | ||
203 | return 0; | 203 | return 0; |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 474c8a86a2af..5cd831ebfa83 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -1348,7 +1348,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1348 | DMA_FROM_DEVICE); | 1348 | DMA_FROM_DEVICE); |
1349 | skb_put(skb, pkt_len); | 1349 | skb_put(skb, pkt_len); |
1350 | skb->protocol = eth_type_trans(skb, ndev); | 1350 | skb->protocol = eth_type_trans(skb, ndev); |
1351 | netif_rx(skb); | 1351 | netif_receive_skb(skb); |
1352 | ndev->stats.rx_packets++; | 1352 | ndev->stats.rx_packets++; |
1353 | ndev->stats.rx_bytes += pkt_len; | 1353 | ndev->stats.rx_bytes += pkt_len; |
1354 | } | 1354 | } |
@@ -1906,11 +1906,13 @@ static int sh_eth_open(struct net_device *ndev) | |||
1906 | 1906 | ||
1907 | pm_runtime_get_sync(&mdp->pdev->dev); | 1907 | pm_runtime_get_sync(&mdp->pdev->dev); |
1908 | 1908 | ||
1909 | napi_enable(&mdp->napi); | ||
1910 | |||
1909 | ret = request_irq(ndev->irq, sh_eth_interrupt, | 1911 | ret = request_irq(ndev->irq, sh_eth_interrupt, |
1910 | mdp->cd->irq_flags, ndev->name, ndev); | 1912 | mdp->cd->irq_flags, ndev->name, ndev); |
1911 | if (ret) { | 1913 | if (ret) { |
1912 | dev_err(&ndev->dev, "Can not assign IRQ number\n"); | 1914 | dev_err(&ndev->dev, "Can not assign IRQ number\n"); |
1913 | return ret; | 1915 | goto out_napi_off; |
1914 | } | 1916 | } |
1915 | 1917 | ||
1916 | /* Descriptor set */ | 1918 | /* Descriptor set */ |
@@ -1928,12 +1930,12 @@ static int sh_eth_open(struct net_device *ndev) | |||
1928 | if (ret) | 1930 | if (ret) |
1929 | goto out_free_irq; | 1931 | goto out_free_irq; |
1930 | 1932 | ||
1931 | napi_enable(&mdp->napi); | ||
1932 | |||
1933 | return ret; | 1933 | return ret; |
1934 | 1934 | ||
1935 | out_free_irq: | 1935 | out_free_irq: |
1936 | free_irq(ndev->irq, ndev); | 1936 | free_irq(ndev->irq, ndev); |
1937 | out_napi_off: | ||
1938 | napi_disable(&mdp->napi); | ||
1937 | pm_runtime_put_sync(&mdp->pdev->dev); | 1939 | pm_runtime_put_sync(&mdp->pdev->dev); |
1938 | return ret; | 1940 | return ret; |
1939 | } | 1941 | } |
@@ -2025,8 +2027,6 @@ static int sh_eth_close(struct net_device *ndev) | |||
2025 | { | 2027 | { |
2026 | struct sh_eth_private *mdp = netdev_priv(ndev); | 2028 | struct sh_eth_private *mdp = netdev_priv(ndev); |
2027 | 2029 | ||
2028 | napi_disable(&mdp->napi); | ||
2029 | |||
2030 | netif_stop_queue(ndev); | 2030 | netif_stop_queue(ndev); |
2031 | 2031 | ||
2032 | /* Disable interrupts by clearing the interrupt mask. */ | 2032 | /* Disable interrupts by clearing the interrupt mask. */ |
@@ -2044,6 +2044,8 @@ static int sh_eth_close(struct net_device *ndev) | |||
2044 | 2044 | ||
2045 | free_irq(ndev->irq, ndev); | 2045 | free_irq(ndev->irq, ndev); |
2046 | 2046 | ||
2047 | napi_disable(&mdp->napi); | ||
2048 | |||
2047 | /* Free all the skbuffs in the Rx queue. */ | 2049 | /* Free all the skbuffs in the Rx queue. */ |
2048 | sh_eth_ring_free(ndev); | 2050 | sh_eth_ring_free(ndev); |
2049 | 2051 | ||
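
The sh_eth reordering guarantees NAPI is enabled before the IRQ line can fire (the interrupt handler may call napi_schedule() immediately) and is disabled only after the IRQ has been freed, closing the window where an interrupt could race a disabled NAPI context. A condensed sketch of the ordering, with hypothetical helper names:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* open: enable NAPI first, since the handler may schedule NAPI as
 * soon as the IRQ is requested. */
static int example_open(struct net_device *ndev, struct napi_struct *napi,
			irq_handler_t handler)
{
	int ret;

	napi_enable(napi);
	ret = request_irq(ndev->irq, handler, 0, ndev->name, ndev);
	if (ret)
		goto out_napi_off;
	return 0;

out_napi_off:
	napi_disable(napi);
	return ret;
}

/* close: tear down in reverse order, freeing the IRQ before NAPI is
 * disabled so a late interrupt never schedules a dead context. */
static void example_close(struct net_device *ndev, struct napi_struct *napi)
{
	free_irq(ndev->irq, ndev);
	napi_disable(napi);
}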
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 623ebc50fe6b..7a0072003f34 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -71,19 +71,23 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, | |||
71 | plat->force_sf_dma_mode = 1; | 71 | plat->force_sf_dma_mode = 1; |
72 | } | 72 | } |
73 | 73 | ||
74 | dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); | 74 | if (of_find_property(np, "snps,pbl", NULL)) { |
75 | if (!dma_cfg) | 75 | dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), |
76 | return -ENOMEM; | 76 | GFP_KERNEL); |
77 | 77 | if (!dma_cfg) | |
78 | plat->dma_cfg = dma_cfg; | 78 | return -ENOMEM; |
79 | of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); | 79 | plat->dma_cfg = dma_cfg; |
80 | dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); | 80 | of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); |
81 | dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); | 81 | dma_cfg->fixed_burst = |
82 | of_property_read_bool(np, "snps,fixed-burst"); | ||
83 | dma_cfg->mixed_burst = | ||
84 | of_property_read_bool(np, "snps,mixed-burst"); | ||
85 | } | ||
82 | plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); | 86 | plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); |
83 | if (plat->force_thresh_dma_mode) { | 87 | if (plat->force_thresh_dma_mode) { |
84 | plat->force_sf_dma_mode = 0; | 88 | plat->force_sf_dma_mode = 0; |
85 | pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); | 89 | pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); |
86 | } | 90 | } |
87 | 91 | ||
88 | return 0; | 92 | return 0; |
89 | } | 93 | } |
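
After the stmmac change, dma_cfg is allocated and populated only when the "snps,pbl" property is actually present, leaving plat->dma_cfg NULL otherwise so the driver falls back to its built-in defaults instead of a zeroed configuration. A sketch of the gating pattern, assuming the stmmac_dma_cfg layout from <linux/stmmac.h>:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stmmac.h>

/* Only allocate optional configuration when the DT property that
 * anchors it exists; otherwise leave *out untouched (NULL) so the
 * caller can apply built-in defaults. */
static int example_parse_pbl(struct device *dev, struct device_node *np,
			     struct stmmac_dma_cfg **out)
{
	struct stmmac_dma_cfg *dma_cfg;

	if (!of_find_property(np, "snps,pbl", NULL))
		return 0;	/* property absent: keep defaults */

	dma_cfg = devm_kzalloc(dev, sizeof(*dma_cfg), GFP_KERNEL);
	if (!dma_cfg)
		return -ENOMEM;

	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
	*out = dma_cfg;
	return 0;
}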
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index ad32af67e618..9c805e0c0cae 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c | |||
@@ -1466,8 +1466,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev, | |||
1466 | { | 1466 | { |
1467 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; | 1467 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; |
1468 | /* NAPI */ | 1468 | /* NAPI */ |
1469 | netif_napi_add(netdev, napi, | 1469 | netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT); |
1470 | gelic_net_poll, GELIC_NET_NAPI_WEIGHT); | ||
1471 | netdev->ethtool_ops = &gelic_ether_ethtool_ops; | 1470 | netdev->ethtool_ops = &gelic_ether_ethtool_ops; |
1472 | netdev->netdev_ops = &gelic_netdevice_ops; | 1471 | netdev->netdev_ops = &gelic_netdevice_ops; |
1473 | } | 1472 | } |
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index a93df6ac1909..309abb472aa2 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h | |||
@@ -37,7 +37,6 @@ | |||
37 | #define GELIC_NET_RXBUF_ALIGN 128 | 37 | #define GELIC_NET_RXBUF_ALIGN 128 |
38 | #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ | 38 | #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ |
39 | #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ | 39 | #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ |
40 | #define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS) | ||
41 | #define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL | 40 | #define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL |
42 | 41 | ||
43 | #define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ | 42 | #define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ |
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index e90e1f46121e..64b4639f43b6 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | |||
@@ -175,6 +175,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np) | |||
175 | printk(KERN_WARNING "Setting MDIO clock divisor to " | 175 | printk(KERN_WARNING "Setting MDIO clock divisor to " |
176 | "default %d\n", DEFAULT_CLOCK_DIVISOR); | 176 | "default %d\n", DEFAULT_CLOCK_DIVISOR); |
177 | clk_div = DEFAULT_CLOCK_DIVISOR; | 177 | clk_div = DEFAULT_CLOCK_DIVISOR; |
178 | of_node_put(np1); | ||
178 | goto issue; | 179 | goto issue; |
179 | } | 180 | } |
180 | 181 | ||
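
of_parse_phandle() and the other of_* lookups return a device_node with its refcount raised, so the reference must be dropped on every exit path; the axienet hunk adds the of_node_put() that the fall-back-to-default path was missing. A sketch of the pattern with a hypothetical property name and default value:

#include <linux/of.h>

/* Drop the node reference on all exits, including the early one
 * that falls back to a default -- the leak the hunk above plugs. */
static u32 example_read_u32_or_default(struct device_node *np)
{
	struct device_node *np1;
	u32 val;

	np1 = of_parse_phandle(np, "example-target", 0);	/* +1 ref */
	if (!np1)
		return 29;	/* hypothetical default, no ref held */

	if (of_property_read_u32(np1, "clock-frequency", &val)) {
		of_node_put(np1);	/* balance before early exit */
		return 29;
	}

	of_node_put(np1);
	return val;
}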
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 872819851aef..25ba7eca9a13 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c | |||
@@ -400,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = { | |||
400 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | 400 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), |
401 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, | 401 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, |
402 | }, | 402 | }, |
403 | /* HP hs2434 Mobile Broadband Module needs ZLPs */ | ||
404 | { USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | ||
405 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, | ||
406 | }, | ||
403 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | 407 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), |
404 | .driver_info = (unsigned long)&cdc_mbim_info, | 408 | .driver_info = (unsigned long)&cdc_mbim_info, |
405 | }, | 409 | }, |
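
Device-specific quirk entries such as the HP one above must sit ahead of the generic class match: usb_match_id() scans the table in order and returns the first hit, so a VID/PID entry can attach alternate driver_info (here, the ZLP-enabled variant) while the class entry still catches everything else. A sketch of the table shape with placeholder IDs and driver_info values:

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>

/* Specific VID/PID quirks first, catch-all class match last;
 * the IDs and driver_info values below are placeholders. */
static const struct usb_device_id example_ids[] = {
	/* hypothetical device needing the ZLP-enabled info struct */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x1234, 0x5678, USB_CLASS_COMM,
		USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
	  .driver_info = 0,	/* would point at a ZLP-enabled info */
	},
	/* generic MBIM class match -- must stay last */
	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM,
			     USB_CDC_PROTO_NONE),
	  .driver_info = 0,	/* would point at the default info */
	},
	{ /* terminator */ },
};
MODULE_DEVICE_TABLE(usb, example_ids);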