diff options
author | Milosz Tanski <milosz@adfin.com> | 2013-09-06 12:41:20 -0400 |
---|---|---|
committer | Milosz Tanski <milosz@adfin.com> | 2013-09-06 12:41:20 -0400 |
commit | cd0a2df681ec2af45f50c555c2a39dc92a4dff71 (patch) | |
tree | 35d2278a9494582025f3dac08feb2266adef6a4d /drivers/net | |
parent | c35455791c1131e7ccbf56ea6fbdd562401c2ce2 (diff) | |
parent | 5a6f282a2052bb13171b53f03b34501cf72c33f1 (diff) |
Merge tag 'fscache-fixes-for-ceph' into wip-fscache
Patches for Ceph FS-Cache support
Diffstat (limited to 'drivers/net')
64 files changed, 657 insertions, 417 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 07f257d44a1e..e48cb339c0c6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -3714,11 +3714,17 @@ static int bond_neigh_init(struct neighbour *n) | |||
3714 | * The bonding ndo_neigh_setup is called at init time before any | 3714 | * The bonding ndo_neigh_setup is called at init time before any |
3715 | * slave exists. So we must declare proxy setup function which will | 3715 | * slave exists. So we must declare proxy setup function which will |
3716 | * be used at run time to resolve the actual slave neigh param setup. | 3716 | * be used at run time to resolve the actual slave neigh param setup. |
3717 | * | ||
3718 | * It's also called by master devices (such as vlans) to setup their | ||
3719 | * underlying devices. In that case - do nothing, we're already set up from | ||
3720 | * our init. | ||
3717 | */ | 3721 | */ |
3718 | static int bond_neigh_setup(struct net_device *dev, | 3722 | static int bond_neigh_setup(struct net_device *dev, |
3719 | struct neigh_parms *parms) | 3723 | struct neigh_parms *parms) |
3720 | { | 3724 | { |
3721 | parms->neigh_setup = bond_neigh_init; | 3725 | /* modify only our neigh_parms */ |
3726 | if (parms->dev == dev) | ||
3727 | parms->neigh_setup = bond_neigh_init; | ||
3722 | 3728 | ||
3723 | return 0; | 3729 | return 0; |
3724 | } | 3730 | } |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 25723d8ee201..925ab8ec9329 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c | |||
@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) | |||
649 | if ((mc->ptr + rec_len) > mc->end) | 649 | if ((mc->ptr + rec_len) > mc->end) |
650 | goto decode_failed; | 650 | goto decode_failed; |
651 | 651 | ||
652 | memcpy(cf->data, mc->ptr, rec_len); | 652 | memcpy(cf->data, mc->ptr, cf->can_dlc); |
653 | mc->ptr += rec_len; | 653 | mc->ptr += rec_len; |
654 | } | 654 | } |
655 | 655 | ||
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index f1b121ee5525..55d79cb53a79 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c | |||
@@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget) | |||
199 | struct arc_emac_priv *priv = netdev_priv(ndev); | 199 | struct arc_emac_priv *priv = netdev_priv(ndev); |
200 | unsigned int work_done; | 200 | unsigned int work_done; |
201 | 201 | ||
202 | for (work_done = 0; work_done <= budget; work_done++) { | 202 | for (work_done = 0; work_done < budget; work_done++) { |
203 | unsigned int *last_rx_bd = &priv->last_rx_bd; | 203 | unsigned int *last_rx_bd = &priv->last_rx_bd; |
204 | struct net_device_stats *stats = &priv->stats; | 204 | struct net_device_stats *stats = &priv->stats; |
205 | struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; | 205 | struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d80e34b8285f..00b88cbfde25 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
@@ -1333,6 +1333,8 @@ enum { | |||
1333 | BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, | 1333 | BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, |
1334 | BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, | 1334 | BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, |
1335 | BNX2X_SP_RTNL_HYPERVISOR_VLAN, | 1335 | BNX2X_SP_RTNL_HYPERVISOR_VLAN, |
1336 | BNX2X_SP_RTNL_TX_STOP, | ||
1337 | BNX2X_SP_RTNL_TX_RESUME, | ||
1336 | }; | 1338 | }; |
1337 | 1339 | ||
1338 | struct bnx2x_prev_path_list { | 1340 | struct bnx2x_prev_path_list { |
@@ -1502,6 +1504,7 @@ struct bnx2x { | |||
1502 | #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) | 1504 | #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) |
1503 | #define IS_VF_FLAG (1 << 22) | 1505 | #define IS_VF_FLAG (1 << 22) |
1504 | #define INTERRUPTS_ENABLED_FLAG (1 << 23) | 1506 | #define INTERRUPTS_ENABLED_FLAG (1 << 23) |
1507 | #define BC_SUPPORTS_RMMOD_CMD (1 << 24) | ||
1505 | 1508 | ||
1506 | #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) | 1509 | #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) |
1507 | 1510 | ||
@@ -1830,6 +1833,8 @@ struct bnx2x { | |||
1830 | 1833 | ||
1831 | int fp_array_size; | 1834 | int fp_array_size; |
1832 | u32 dump_preset_idx; | 1835 | u32 dump_preset_idx; |
1836 | bool stats_started; | ||
1837 | struct semaphore stats_sema; | ||
1833 | }; | 1838 | }; |
1834 | 1839 | ||
1835 | /* Tx queues may be less or equal to Rx queues */ | 1840 | /* Tx queues may be less or equal to Rx queues */ |
@@ -2451,4 +2456,6 @@ enum bnx2x_pci_bus_speed { | |||
2451 | BNX2X_PCI_LINK_SPEED_5000 = 5000, | 2456 | BNX2X_PCI_LINK_SPEED_5000 = 5000, |
2452 | BNX2X_PCI_LINK_SPEED_8000 = 8000 | 2457 | BNX2X_PCI_LINK_SPEED_8000 = 8000 |
2453 | }; | 2458 | }; |
2459 | |||
2460 | void bnx2x_set_local_cmng(struct bnx2x *bp); | ||
2454 | #endif /* bnx2x.h */ | 2461 | #endif /* bnx2x.h */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index f2d1ff10054b..0cc26110868d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -53,6 +53,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) | |||
53 | struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; | 53 | struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; |
54 | int old_max_eth_txqs, new_max_eth_txqs; | 54 | int old_max_eth_txqs, new_max_eth_txqs; |
55 | int old_txdata_index = 0, new_txdata_index = 0; | 55 | int old_txdata_index = 0, new_txdata_index = 0; |
56 | struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info; | ||
56 | 57 | ||
57 | /* Copy the NAPI object as it has been already initialized */ | 58 | /* Copy the NAPI object as it has been already initialized */ |
58 | from_fp->napi = to_fp->napi; | 59 | from_fp->napi = to_fp->napi; |
@@ -61,6 +62,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) | |||
61 | memcpy(to_fp, from_fp, sizeof(*to_fp)); | 62 | memcpy(to_fp, from_fp, sizeof(*to_fp)); |
62 | to_fp->index = to; | 63 | to_fp->index = to; |
63 | 64 | ||
65 | /* Retain the tpa_info of the original `to' version as we don't want | ||
66 | * 2 FPs to contain the same tpa_info pointer. | ||
67 | */ | ||
68 | to_fp->tpa_info = old_tpa_info; | ||
69 | |||
64 | /* move sp_objs contents as well, as their indices match fp ones */ | 70 | /* move sp_objs contents as well, as their indices match fp ones */ |
65 | memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); | 71 | memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); |
66 | 72 | ||
@@ -2956,8 +2962,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) | |||
2956 | if (IS_PF(bp)) { | 2962 | if (IS_PF(bp)) { |
2957 | if (CNIC_LOADED(bp)) | 2963 | if (CNIC_LOADED(bp)) |
2958 | bnx2x_free_mem_cnic(bp); | 2964 | bnx2x_free_mem_cnic(bp); |
2959 | bnx2x_free_mem(bp); | ||
2960 | } | 2965 | } |
2966 | bnx2x_free_mem(bp); | ||
2967 | |||
2961 | bp->state = BNX2X_STATE_CLOSED; | 2968 | bp->state = BNX2X_STATE_CLOSED; |
2962 | bp->cnic_loaded = false; | 2969 | bp->cnic_loaded = false; |
2963 | 2970 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 0c94df47e0e8..fcf2761d8828 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | |||
@@ -30,10 +30,8 @@ | |||
30 | #include "bnx2x_dcb.h" | 30 | #include "bnx2x_dcb.h" |
31 | 31 | ||
32 | /* forward declarations of dcbx related functions */ | 32 | /* forward declarations of dcbx related functions */ |
33 | static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); | ||
34 | static void bnx2x_pfc_set_pfc(struct bnx2x *bp); | 33 | static void bnx2x_pfc_set_pfc(struct bnx2x *bp); |
35 | static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); | 34 | static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); |
36 | static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); | ||
37 | static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, | 35 | static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, |
38 | u32 *set_configuration_ets_pg, | 36 | u32 *set_configuration_ets_pg, |
39 | u32 *pri_pg_tbl); | 37 | u32 *pri_pg_tbl); |
@@ -425,30 +423,52 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp) | |||
425 | bnx2x_pfc_clear(bp); | 423 | bnx2x_pfc_clear(bp); |
426 | } | 424 | } |
427 | 425 | ||
428 | static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) | 426 | int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) |
429 | { | 427 | { |
430 | struct bnx2x_func_state_params func_params = {NULL}; | 428 | struct bnx2x_func_state_params func_params = {NULL}; |
429 | int rc; | ||
431 | 430 | ||
432 | func_params.f_obj = &bp->func_obj; | 431 | func_params.f_obj = &bp->func_obj; |
433 | func_params.cmd = BNX2X_F_CMD_TX_STOP; | 432 | func_params.cmd = BNX2X_F_CMD_TX_STOP; |
434 | 433 | ||
434 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); | ||
435 | __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); | ||
436 | |||
435 | DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n"); | 437 | DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n"); |
436 | return bnx2x_func_state_change(bp, &func_params); | 438 | |
439 | rc = bnx2x_func_state_change(bp, &func_params); | ||
440 | if (rc) { | ||
441 | BNX2X_ERR("Unable to hold traffic for HW configuration\n"); | ||
442 | bnx2x_panic(); | ||
443 | } | ||
444 | |||
445 | return rc; | ||
437 | } | 446 | } |
438 | 447 | ||
439 | static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) | 448 | int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) |
440 | { | 449 | { |
441 | struct bnx2x_func_state_params func_params = {NULL}; | 450 | struct bnx2x_func_state_params func_params = {NULL}; |
442 | struct bnx2x_func_tx_start_params *tx_params = | 451 | struct bnx2x_func_tx_start_params *tx_params = |
443 | &func_params.params.tx_start; | 452 | &func_params.params.tx_start; |
453 | int rc; | ||
444 | 454 | ||
445 | func_params.f_obj = &bp->func_obj; | 455 | func_params.f_obj = &bp->func_obj; |
446 | func_params.cmd = BNX2X_F_CMD_TX_START; | 456 | func_params.cmd = BNX2X_F_CMD_TX_START; |
447 | 457 | ||
458 | __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); | ||
459 | __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); | ||
460 | |||
448 | bnx2x_dcbx_fw_struct(bp, tx_params); | 461 | bnx2x_dcbx_fw_struct(bp, tx_params); |
449 | 462 | ||
450 | DP(BNX2X_MSG_DCB, "START TRAFFIC\n"); | 463 | DP(BNX2X_MSG_DCB, "START TRAFFIC\n"); |
451 | return bnx2x_func_state_change(bp, &func_params); | 464 | |
465 | rc = bnx2x_func_state_change(bp, &func_params); | ||
466 | if (rc) { | ||
467 | BNX2X_ERR("Unable to resume traffic after HW configuration\n"); | ||
468 | bnx2x_panic(); | ||
469 | } | ||
470 | |||
471 | return rc; | ||
452 | } | 472 | } |
453 | 473 | ||
454 | static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) | 474 | static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) |
@@ -744,7 +764,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | |||
744 | if (IS_MF(bp)) | 764 | if (IS_MF(bp)) |
745 | bnx2x_link_sync_notify(bp); | 765 | bnx2x_link_sync_notify(bp); |
746 | 766 | ||
747 | bnx2x_dcbx_stop_hw_tx(bp); | 767 | set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state); |
768 | |||
769 | schedule_delayed_work(&bp->sp_rtnl_task, 0); | ||
748 | 770 | ||
749 | return; | 771 | return; |
750 | } | 772 | } |
@@ -753,7 +775,13 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | |||
753 | bnx2x_pfc_set_pfc(bp); | 775 | bnx2x_pfc_set_pfc(bp); |
754 | 776 | ||
755 | bnx2x_dcbx_update_ets_params(bp); | 777 | bnx2x_dcbx_update_ets_params(bp); |
756 | bnx2x_dcbx_resume_hw_tx(bp); | 778 | |
779 | /* ets may affect cmng configuration: reinit it in hw */ | ||
780 | bnx2x_set_local_cmng(bp); | ||
781 | |||
782 | set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state); | ||
783 | |||
784 | schedule_delayed_work(&bp->sp_rtnl_task, 0); | ||
757 | 785 | ||
758 | return; | 786 | return; |
759 | case BNX2X_DCBX_STATE_TX_RELEASED: | 787 | case BNX2X_DCBX_STATE_TX_RELEASED: |
@@ -2363,21 +2391,24 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, | |||
2363 | case DCB_FEATCFG_ATTR_PG: | 2391 | case DCB_FEATCFG_ATTR_PG: |
2364 | if (bp->dcbx_local_feat.ets.enabled) | 2392 | if (bp->dcbx_local_feat.ets.enabled) |
2365 | *flags |= DCB_FEATCFG_ENABLE; | 2393 | *flags |= DCB_FEATCFG_ENABLE; |
2366 | if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR) | 2394 | if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR | |
2395 | DCBX_REMOTE_MIB_ERROR)) | ||
2367 | *flags |= DCB_FEATCFG_ERROR; | 2396 | *flags |= DCB_FEATCFG_ERROR; |
2368 | break; | 2397 | break; |
2369 | case DCB_FEATCFG_ATTR_PFC: | 2398 | case DCB_FEATCFG_ATTR_PFC: |
2370 | if (bp->dcbx_local_feat.pfc.enabled) | 2399 | if (bp->dcbx_local_feat.pfc.enabled) |
2371 | *flags |= DCB_FEATCFG_ENABLE; | 2400 | *flags |= DCB_FEATCFG_ENABLE; |
2372 | if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | | 2401 | if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | |
2373 | DCBX_LOCAL_PFC_MISMATCH)) | 2402 | DCBX_LOCAL_PFC_MISMATCH | |
2403 | DCBX_REMOTE_MIB_ERROR)) | ||
2374 | *flags |= DCB_FEATCFG_ERROR; | 2404 | *flags |= DCB_FEATCFG_ERROR; |
2375 | break; | 2405 | break; |
2376 | case DCB_FEATCFG_ATTR_APP: | 2406 | case DCB_FEATCFG_ATTR_APP: |
2377 | if (bp->dcbx_local_feat.app.enabled) | 2407 | if (bp->dcbx_local_feat.app.enabled) |
2378 | *flags |= DCB_FEATCFG_ENABLE; | 2408 | *flags |= DCB_FEATCFG_ENABLE; |
2379 | if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | | 2409 | if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | |
2380 | DCBX_LOCAL_APP_MISMATCH)) | 2410 | DCBX_LOCAL_APP_MISMATCH | |
2411 | DCBX_REMOTE_MIB_ERROR)) | ||
2381 | *flags |= DCB_FEATCFG_ERROR; | 2412 | *flags |= DCB_FEATCFG_ERROR; |
2382 | break; | 2413 | break; |
2383 | default: | 2414 | default: |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index 125bd1b6586f..804b8f64463e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | |||
@@ -199,4 +199,7 @@ extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops; | |||
199 | int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); | 199 | int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); |
200 | #endif /* BCM_DCBNL */ | 200 | #endif /* BCM_DCBNL */ |
201 | 201 | ||
202 | int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); | ||
203 | int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); | ||
204 | |||
202 | #endif /* BNX2X_DCB_H */ | 205 | #endif /* BNX2X_DCB_H */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 5018e52ae2ad..32767f6aa33f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | |||
@@ -1300,6 +1300,9 @@ struct drv_func_mb { | |||
1300 | 1300 | ||
1301 | #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 | 1301 | #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 |
1302 | 1302 | ||
1303 | #define DRV_MSG_CODE_RMMOD 0xdb000000 | ||
1304 | #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f | ||
1305 | |||
1303 | #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 | 1306 | #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 |
1304 | #define REQ_BC_VER_4_SET_MF_BW 0x00060202 | 1307 | #define REQ_BC_VER_4_SET_MF_BW 0x00060202 |
1305 | #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 | 1308 | #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 |
@@ -1372,6 +1375,8 @@ struct drv_func_mb { | |||
1372 | 1375 | ||
1373 | #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 | 1376 | #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 |
1374 | 1377 | ||
1378 | #define FW_MSG_CODE_RMMOD_ACK 0xdb100000 | ||
1379 | |||
1375 | #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 | 1380 | #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 |
1376 | #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 | 1381 | #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 |
1377 | 1382 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e06186c305d8..1627a4e09c32 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -2261,6 +2261,23 @@ static void bnx2x_set_requested_fc(struct bnx2x *bp) | |||
2261 | bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; | 2261 | bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; |
2262 | } | 2262 | } |
2263 | 2263 | ||
2264 | static void bnx2x_init_dropless_fc(struct bnx2x *bp) | ||
2265 | { | ||
2266 | u32 pause_enabled = 0; | ||
2267 | |||
2268 | if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { | ||
2269 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) | ||
2270 | pause_enabled = 1; | ||
2271 | |||
2272 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
2273 | USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)), | ||
2274 | pause_enabled); | ||
2275 | } | ||
2276 | |||
2277 | DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n", | ||
2278 | pause_enabled ? "enabled" : "disabled"); | ||
2279 | } | ||
2280 | |||
2264 | int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) | 2281 | int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) |
2265 | { | 2282 | { |
2266 | int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); | 2283 | int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); |
@@ -2294,6 +2311,8 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) | |||
2294 | 2311 | ||
2295 | bnx2x_release_phy_lock(bp); | 2312 | bnx2x_release_phy_lock(bp); |
2296 | 2313 | ||
2314 | bnx2x_init_dropless_fc(bp); | ||
2315 | |||
2297 | bnx2x_calc_fc_adv(bp); | 2316 | bnx2x_calc_fc_adv(bp); |
2298 | 2317 | ||
2299 | if (bp->link_vars.link_up) { | 2318 | if (bp->link_vars.link_up) { |
@@ -2315,6 +2334,8 @@ void bnx2x_link_set(struct bnx2x *bp) | |||
2315 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); | 2334 | bnx2x_phy_init(&bp->link_params, &bp->link_vars); |
2316 | bnx2x_release_phy_lock(bp); | 2335 | bnx2x_release_phy_lock(bp); |
2317 | 2336 | ||
2337 | bnx2x_init_dropless_fc(bp); | ||
2338 | |||
2318 | bnx2x_calc_fc_adv(bp); | 2339 | bnx2x_calc_fc_adv(bp); |
2319 | } else | 2340 | } else |
2320 | BNX2X_ERR("Bootcode is missing - can not set link\n"); | 2341 | BNX2X_ERR("Bootcode is missing - can not set link\n"); |
@@ -2476,7 +2497,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) | |||
2476 | 2497 | ||
2477 | input.port_rate = bp->link_vars.line_speed; | 2498 | input.port_rate = bp->link_vars.line_speed; |
2478 | 2499 | ||
2479 | if (cmng_type == CMNG_FNS_MINMAX) { | 2500 | if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { |
2480 | int vn; | 2501 | int vn; |
2481 | 2502 | ||
2482 | /* read mf conf from shmem */ | 2503 | /* read mf conf from shmem */ |
@@ -2533,6 +2554,21 @@ static void storm_memset_cmng(struct bnx2x *bp, | |||
2533 | } | 2554 | } |
2534 | } | 2555 | } |
2535 | 2556 | ||
2557 | /* init cmng mode in HW according to local configuration */ | ||
2558 | void bnx2x_set_local_cmng(struct bnx2x *bp) | ||
2559 | { | ||
2560 | int cmng_fns = bnx2x_get_cmng_fns_mode(bp); | ||
2561 | |||
2562 | if (cmng_fns != CMNG_FNS_NONE) { | ||
2563 | bnx2x_cmng_fns_init(bp, false, cmng_fns); | ||
2564 | storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); | ||
2565 | } else { | ||
2566 | /* rate shaping and fairness are disabled */ | ||
2567 | DP(NETIF_MSG_IFUP, | ||
2568 | "single function mode without fairness\n"); | ||
2569 | } | ||
2570 | } | ||
2571 | |||
2536 | /* This function is called upon link interrupt */ | 2572 | /* This function is called upon link interrupt */ |
2537 | static void bnx2x_link_attn(struct bnx2x *bp) | 2573 | static void bnx2x_link_attn(struct bnx2x *bp) |
2538 | { | 2574 | { |
@@ -2541,20 +2577,9 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2541 | 2577 | ||
2542 | bnx2x_link_update(&bp->link_params, &bp->link_vars); | 2578 | bnx2x_link_update(&bp->link_params, &bp->link_vars); |
2543 | 2579 | ||
2544 | if (bp->link_vars.link_up) { | 2580 | bnx2x_init_dropless_fc(bp); |
2545 | |||
2546 | /* dropless flow control */ | ||
2547 | if (!CHIP_IS_E1(bp) && bp->dropless_fc) { | ||
2548 | int port = BP_PORT(bp); | ||
2549 | u32 pause_enabled = 0; | ||
2550 | 2581 | ||
2551 | if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) | 2582 | if (bp->link_vars.link_up) { |
2552 | pause_enabled = 1; | ||
2553 | |||
2554 | REG_WR(bp, BAR_USTRORM_INTMEM + | ||
2555 | USTORM_ETH_PAUSE_ENABLED_OFFSET(port), | ||
2556 | pause_enabled); | ||
2557 | } | ||
2558 | 2583 | ||
2559 | if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { | 2584 | if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { |
2560 | struct host_port_stats *pstats; | 2585 | struct host_port_stats *pstats; |
@@ -2568,17 +2593,8 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2568 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); | 2593 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); |
2569 | } | 2594 | } |
2570 | 2595 | ||
2571 | if (bp->link_vars.link_up && bp->link_vars.line_speed) { | 2596 | if (bp->link_vars.link_up && bp->link_vars.line_speed) |
2572 | int cmng_fns = bnx2x_get_cmng_fns_mode(bp); | 2597 | bnx2x_set_local_cmng(bp); |
2573 | |||
2574 | if (cmng_fns != CMNG_FNS_NONE) { | ||
2575 | bnx2x_cmng_fns_init(bp, false, cmng_fns); | ||
2576 | storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); | ||
2577 | } else | ||
2578 | /* rate shaping and fairness are disabled */ | ||
2579 | DP(NETIF_MSG_IFUP, | ||
2580 | "single function mode without fairness\n"); | ||
2581 | } | ||
2582 | 2598 | ||
2583 | __bnx2x_link_report(bp); | 2599 | __bnx2x_link_report(bp); |
2584 | 2600 | ||
@@ -7839,12 +7855,15 @@ void bnx2x_free_mem(struct bnx2x *bp) | |||
7839 | { | 7855 | { |
7840 | int i; | 7856 | int i; |
7841 | 7857 | ||
7842 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, | ||
7843 | sizeof(struct host_sp_status_block)); | ||
7844 | |||
7845 | BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, | 7858 | BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, |
7846 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); | 7859 | bp->fw_stats_data_sz + bp->fw_stats_req_sz); |
7847 | 7860 | ||
7861 | if (IS_VF(bp)) | ||
7862 | return; | ||
7863 | |||
7864 | BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, | ||
7865 | sizeof(struct host_sp_status_block)); | ||
7866 | |||
7848 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, | 7867 | BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, |
7849 | sizeof(struct bnx2x_slowpath)); | 7868 | sizeof(struct bnx2x_slowpath)); |
7850 | 7869 | ||
@@ -9639,6 +9658,12 @@ sp_rtnl_not_reset: | |||
9639 | &bp->sp_rtnl_state)) | 9658 | &bp->sp_rtnl_state)) |
9640 | bnx2x_pf_set_vfs_vlan(bp); | 9659 | bnx2x_pf_set_vfs_vlan(bp); |
9641 | 9660 | ||
9661 | if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) | ||
9662 | bnx2x_dcbx_stop_hw_tx(bp); | ||
9663 | |||
9664 | if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state)) | ||
9665 | bnx2x_dcbx_resume_hw_tx(bp); | ||
9666 | |||
9642 | /* work which needs rtnl lock not-taken (as it takes the lock itself and | 9667 | /* work which needs rtnl lock not-taken (as it takes the lock itself and |
9643 | * can be called from other contexts as well) | 9668 | * can be called from other contexts as well) |
9644 | */ | 9669 | */ |
@@ -10362,6 +10387,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
10362 | 10387 | ||
10363 | bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? | 10388 | bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? |
10364 | BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; | 10389 | BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; |
10390 | |||
10391 | bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? | ||
10392 | BC_SUPPORTS_RMMOD_CMD : 0; | ||
10393 | |||
10365 | boot_mode = SHMEM_RD(bp, | 10394 | boot_mode = SHMEM_RD(bp, |
10366 | dev_info.port_feature_config[BP_PORT(bp)].mba_config) & | 10395 | dev_info.port_feature_config[BP_PORT(bp)].mba_config) & |
10367 | PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; | 10396 | PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; |
@@ -11137,6 +11166,9 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp) | |||
11137 | int tmp; | 11166 | int tmp; |
11138 | u32 cfg; | 11167 | u32 cfg; |
11139 | 11168 | ||
11169 | if (IS_VF(bp)) | ||
11170 | return 0; | ||
11171 | |||
11140 | if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { | 11172 | if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { |
11141 | /* Take function: tmp = func */ | 11173 | /* Take function: tmp = func */ |
11142 | tmp = BP_ABS_FUNC(bp); | 11174 | tmp = BP_ABS_FUNC(bp); |
@@ -11524,6 +11556,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) | |||
11524 | mutex_init(&bp->port.phy_mutex); | 11556 | mutex_init(&bp->port.phy_mutex); |
11525 | mutex_init(&bp->fw_mb_mutex); | 11557 | mutex_init(&bp->fw_mb_mutex); |
11526 | spin_lock_init(&bp->stats_lock); | 11558 | spin_lock_init(&bp->stats_lock); |
11559 | sema_init(&bp->stats_sema, 1); | ||
11527 | 11560 | ||
11528 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); | 11561 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); |
11529 | INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); | 11562 | INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); |
@@ -12817,13 +12850,17 @@ static void __bnx2x_remove(struct pci_dev *pdev, | |||
12817 | bnx2x_dcbnl_update_applist(bp, true); | 12850 | bnx2x_dcbnl_update_applist(bp, true); |
12818 | #endif | 12851 | #endif |
12819 | 12852 | ||
12853 | if (IS_PF(bp) && | ||
12854 | !BP_NOMCP(bp) && | ||
12855 | (bp->flags & BC_SUPPORTS_RMMOD_CMD)) | ||
12856 | bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); | ||
12857 | |||
12820 | /* Close the interface - either directly or implicitly */ | 12858 | /* Close the interface - either directly or implicitly */ |
12821 | if (remove_netdev) { | 12859 | if (remove_netdev) { |
12822 | unregister_netdev(dev); | 12860 | unregister_netdev(dev); |
12823 | } else { | 12861 | } else { |
12824 | rtnl_lock(); | 12862 | rtnl_lock(); |
12825 | if (netif_running(dev)) | 12863 | dev_close(dev); |
12826 | bnx2x_close(dev); | ||
12827 | rtnl_unlock(); | 12864 | rtnl_unlock(); |
12828 | } | 12865 | } |
12829 | 12866 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 95861efb5051..e8706e19f96f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
@@ -522,23 +522,6 @@ static int bnx2x_vfop_set_user_req(struct bnx2x *bp, | |||
522 | return 0; | 522 | return 0; |
523 | } | 523 | } |
524 | 524 | ||
525 | static int | ||
526 | bnx2x_vfop_config_vlan0(struct bnx2x *bp, | ||
527 | struct bnx2x_vlan_mac_ramrod_params *vlan_mac, | ||
528 | bool add) | ||
529 | { | ||
530 | int rc; | ||
531 | |||
532 | vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD : | ||
533 | BNX2X_VLAN_MAC_DEL; | ||
534 | vlan_mac->user_req.u.vlan.vlan = 0; | ||
535 | |||
536 | rc = bnx2x_config_vlan_mac(bp, vlan_mac); | ||
537 | if (rc == -EEXIST) | ||
538 | rc = 0; | ||
539 | return rc; | ||
540 | } | ||
541 | |||
542 | static int bnx2x_vfop_config_list(struct bnx2x *bp, | 525 | static int bnx2x_vfop_config_list(struct bnx2x *bp, |
543 | struct bnx2x_vfop_filters *filters, | 526 | struct bnx2x_vfop_filters *filters, |
544 | struct bnx2x_vlan_mac_ramrod_params *vlan_mac) | 527 | struct bnx2x_vlan_mac_ramrod_params *vlan_mac) |
@@ -643,30 +626,14 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
643 | 626 | ||
644 | case BNX2X_VFOP_VLAN_CONFIG_LIST: | 627 | case BNX2X_VFOP_VLAN_CONFIG_LIST: |
645 | /* next state */ | 628 | /* next state */ |
646 | vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0; | 629 | vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; |
647 | |||
648 | /* remove vlan0 - could be no-op */ | ||
649 | vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false); | ||
650 | if (vfop->rc) | ||
651 | goto op_err; | ||
652 | 630 | ||
653 | /* Do vlan list config. if this operation fails we try to | 631 | /* do list config */ |
654 | * restore vlan0 to keep the queue is working order | ||
655 | */ | ||
656 | vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); | 632 | vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); |
657 | if (!vfop->rc) { | 633 | if (!vfop->rc) { |
658 | set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); | 634 | set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); |
659 | vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); | 635 | vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); |
660 | } | 636 | } |
661 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */ | ||
662 | |||
663 | case BNX2X_VFOP_VLAN_CONFIG_LIST_0: | ||
664 | /* next state */ | ||
665 | vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; | ||
666 | |||
667 | if (list_empty(&obj->head)) | ||
668 | /* add vlan0 */ | ||
669 | vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true); | ||
670 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); | 637 | bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); |
671 | 638 | ||
672 | default: | 639 | default: |
@@ -1747,11 +1714,8 @@ void bnx2x_iov_init_dq(struct bnx2x *bp) | |||
1747 | 1714 | ||
1748 | void bnx2x_iov_init_dmae(struct bnx2x *bp) | 1715 | void bnx2x_iov_init_dmae(struct bnx2x *bp) |
1749 | { | 1716 | { |
1750 | DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF"); | 1717 | if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) |
1751 | if (!IS_SRIOV(bp)) | 1718 | REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); |
1752 | return; | ||
1753 | |||
1754 | REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); | ||
1755 | } | 1719 | } |
1756 | 1720 | ||
1757 | static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) | 1721 | static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) |
@@ -2822,6 +2786,18 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) | |||
2822 | return 0; | 2786 | return 0; |
2823 | } | 2787 | } |
2824 | 2788 | ||
2789 | struct set_vf_state_cookie { | ||
2790 | struct bnx2x_virtf *vf; | ||
2791 | u8 state; | ||
2792 | }; | ||
2793 | |||
2794 | void bnx2x_set_vf_state(void *cookie) | ||
2795 | { | ||
2796 | struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; | ||
2797 | |||
2798 | p->vf->state = p->state; | ||
2799 | } | ||
2800 | |||
2825 | /* VFOP close (teardown the queues, delete mcasts and close HW) */ | 2801 | /* VFOP close (teardown the queues, delete mcasts and close HW) */ |
2826 | static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) | 2802 | static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) |
2827 | { | 2803 | { |
@@ -2872,7 +2848,19 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) | |||
2872 | op_err: | 2848 | op_err: |
2873 | BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); | 2849 | BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); |
2874 | op_done: | 2850 | op_done: |
2875 | vf->state = VF_ACQUIRED; | 2851 | |
2852 | /* need to make sure there are no outstanding stats ramrods which may | ||
2853 | * cause the device to access the VF's stats buffer which it will free | ||
2854 | * as soon as we return from the close flow. | ||
2855 | */ | ||
2856 | { | ||
2857 | struct set_vf_state_cookie cookie; | ||
2858 | |||
2859 | cookie.vf = vf; | ||
2860 | cookie.state = VF_ACQUIRED; | ||
2861 | bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); | ||
2862 | } | ||
2863 | |||
2876 | DP(BNX2X_MSG_IOV, "set state to acquired\n"); | 2864 | DP(BNX2X_MSG_IOV, "set state to acquired\n"); |
2877 | bnx2x_vfop_end(bp, vf, vfop); | 2865 | bnx2x_vfop_end(bp, vf, vfop); |
2878 | } | 2866 | } |
@@ -3084,8 +3072,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp) | |||
3084 | pci_disable_sriov(bp->pdev); | 3072 | pci_disable_sriov(bp->pdev); |
3085 | } | 3073 | } |
3086 | 3074 | ||
3087 | static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, | 3075 | static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, |
3088 | struct bnx2x_virtf *vf) | 3076 | struct bnx2x_virtf **vf, |
3077 | struct pf_vf_bulletin_content **bulletin) | ||
3089 | { | 3078 | { |
3090 | if (bp->state != BNX2X_STATE_OPEN) { | 3079 | if (bp->state != BNX2X_STATE_OPEN) { |
3091 | BNX2X_ERR("vf ndo called though PF is down\n"); | 3080 | BNX2X_ERR("vf ndo called though PF is down\n"); |
@@ -3103,12 +3092,22 @@ static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, | |||
3103 | return -EINVAL; | 3092 | return -EINVAL; |
3104 | } | 3093 | } |
3105 | 3094 | ||
3106 | if (!vf) { | 3095 | /* init members */ |
3096 | *vf = BP_VF(bp, vfidx); | ||
3097 | *bulletin = BP_VF_BULLETIN(bp, vfidx); | ||
3098 | |||
3099 | if (!*vf) { | ||
3107 | BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", | 3100 | BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", |
3108 | vfidx); | 3101 | vfidx); |
3109 | return -EINVAL; | 3102 | return -EINVAL; |
3110 | } | 3103 | } |
3111 | 3104 | ||
3105 | if (!*bulletin) { | ||
3106 | BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", | ||
3107 | vfidx); | ||
3108 | return -EINVAL; | ||
3109 | } | ||
3110 | |||
3112 | return 0; | 3111 | return 0; |
3113 | } | 3112 | } |
3114 | 3113 | ||
@@ -3116,17 +3115,19 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, | |||
3116 | struct ifla_vf_info *ivi) | 3115 | struct ifla_vf_info *ivi) |
3117 | { | 3116 | { |
3118 | struct bnx2x *bp = netdev_priv(dev); | 3117 | struct bnx2x *bp = netdev_priv(dev); |
3119 | struct bnx2x_virtf *vf = BP_VF(bp, vfidx); | 3118 | struct bnx2x_virtf *vf = NULL; |
3120 | struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); | 3119 | struct pf_vf_bulletin_content *bulletin = NULL; |
3121 | struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); | 3120 | struct bnx2x_vlan_mac_obj *mac_obj; |
3122 | struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); | 3121 | struct bnx2x_vlan_mac_obj *vlan_obj; |
3123 | int rc; | 3122 | int rc; |
3124 | 3123 | ||
3125 | /* sanity */ | 3124 | /* sanity and init */ |
3126 | rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); | 3125 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); |
3127 | if (rc) | 3126 | if (rc) |
3128 | return rc; | 3127 | return rc; |
3129 | if (!mac_obj || !vlan_obj || !bulletin) { | 3128 | mac_obj = &bnx2x_vfq(vf, 0, mac_obj); |
3129 | vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); | ||
3130 | if (!mac_obj || !vlan_obj) { | ||
3130 | BNX2X_ERR("VF partially initialized\n"); | 3131 | BNX2X_ERR("VF partially initialized\n"); |
3131 | return -EINVAL; | 3132 | return -EINVAL; |
3132 | } | 3133 | } |
@@ -3183,11 +3184,11 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) | |||
3183 | { | 3184 | { |
3184 | struct bnx2x *bp = netdev_priv(dev); | 3185 | struct bnx2x *bp = netdev_priv(dev); |
3185 | int rc, q_logical_state; | 3186 | int rc, q_logical_state; |
3186 | struct bnx2x_virtf *vf = BP_VF(bp, vfidx); | 3187 | struct bnx2x_virtf *vf = NULL; |
3187 | struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); | 3188 | struct pf_vf_bulletin_content *bulletin = NULL; |
3188 | 3189 | ||
3189 | /* sanity */ | 3190 | /* sanity and init */ |
3190 | rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); | 3191 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); |
3191 | if (rc) | 3192 | if (rc) |
3192 | return rc; | 3193 | return rc; |
3193 | if (!is_valid_ether_addr(mac)) { | 3194 | if (!is_valid_ether_addr(mac)) { |
@@ -3249,11 +3250,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) | |||
3249 | { | 3250 | { |
3250 | struct bnx2x *bp = netdev_priv(dev); | 3251 | struct bnx2x *bp = netdev_priv(dev); |
3251 | int rc, q_logical_state; | 3252 | int rc, q_logical_state; |
3252 | struct bnx2x_virtf *vf = BP_VF(bp, vfidx); | 3253 | struct bnx2x_virtf *vf = NULL; |
3253 | struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); | 3254 | struct pf_vf_bulletin_content *bulletin = NULL; |
3254 | 3255 | ||
3255 | /* sanity */ | 3256 | /* sanity and init */ |
3256 | rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); | 3257 | rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); |
3257 | if (rc) | 3258 | if (rc) |
3258 | return rc; | 3259 | return rc; |
3259 | 3260 | ||
@@ -3463,7 +3464,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp) | |||
3463 | alloc_mem_err: | 3464 | alloc_mem_err: |
3464 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, | 3465 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, |
3465 | sizeof(struct bnx2x_vf_mbx_msg)); | 3466 | sizeof(struct bnx2x_vf_mbx_msg)); |
3466 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, | 3467 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, |
3467 | sizeof(union pf_vf_bulletin)); | 3468 | sizeof(union pf_vf_bulletin)); |
3468 | return -ENOMEM; | 3469 | return -ENOMEM; |
3469 | } | 3470 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 98366abd02bd..86436c77af03 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | |||
@@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp) | |||
221 | * Statistics service functions | 221 | * Statistics service functions |
222 | */ | 222 | */ |
223 | 223 | ||
224 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) | 224 | /* should be called under stats_sema */ |
225 | static void __bnx2x_stats_pmf_update(struct bnx2x *bp) | ||
225 | { | 226 | { |
226 | struct dmae_command *dmae; | 227 | struct dmae_command *dmae; |
227 | u32 opcode; | 228 | u32 opcode; |
@@ -518,29 +519,47 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) | |||
518 | *stats_comp = 0; | 519 | *stats_comp = 0; |
519 | } | 520 | } |
520 | 521 | ||
521 | static void bnx2x_stats_start(struct bnx2x *bp) | 522 | /* should be called under stats_sema */ |
523 | static void __bnx2x_stats_start(struct bnx2x *bp) | ||
522 | { | 524 | { |
523 | /* vfs travel through here as part of the statistics FSM, but no action | 525 | if (IS_PF(bp)) { |
524 | * is required | 526 | if (bp->port.pmf) |
525 | */ | 527 | bnx2x_port_stats_init(bp); |
526 | if (IS_VF(bp)) | ||
527 | return; | ||
528 | 528 | ||
529 | if (bp->port.pmf) | 529 | else if (bp->func_stx) |
530 | bnx2x_port_stats_init(bp); | 530 | bnx2x_func_stats_init(bp); |
531 | 531 | ||
532 | else if (bp->func_stx) | 532 | bnx2x_hw_stats_post(bp); |
533 | bnx2x_func_stats_init(bp); | 533 | bnx2x_storm_stats_post(bp); |
534 | } | ||
534 | 535 | ||
535 | bnx2x_hw_stats_post(bp); | 536 | bp->stats_started = true; |
536 | bnx2x_storm_stats_post(bp); | 537 | } |
538 | |||
539 | static void bnx2x_stats_start(struct bnx2x *bp) | ||
540 | { | ||
541 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
542 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
543 | __bnx2x_stats_start(bp); | ||
544 | up(&bp->stats_sema); | ||
537 | } | 545 | } |
538 | 546 | ||
539 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) | 547 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) |
540 | { | 548 | { |
549 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
550 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
541 | bnx2x_stats_comp(bp); | 551 | bnx2x_stats_comp(bp); |
542 | bnx2x_stats_pmf_update(bp); | 552 | __bnx2x_stats_pmf_update(bp); |
543 | bnx2x_stats_start(bp); | 553 | __bnx2x_stats_start(bp); |
554 | up(&bp->stats_sema); | ||
555 | } | ||
556 | |||
557 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) | ||
558 | { | ||
559 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
560 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
561 | __bnx2x_stats_pmf_update(bp); | ||
562 | up(&bp->stats_sema); | ||
544 | } | 563 | } |
545 | 564 | ||
546 | static void bnx2x_stats_restart(struct bnx2x *bp) | 565 | static void bnx2x_stats_restart(struct bnx2x *bp) |
@@ -550,8 +569,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp) | |||
550 | */ | 569 | */ |
551 | if (IS_VF(bp)) | 570 | if (IS_VF(bp)) |
552 | return; | 571 | return; |
572 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
573 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
553 | bnx2x_stats_comp(bp); | 574 | bnx2x_stats_comp(bp); |
554 | bnx2x_stats_start(bp); | 575 | __bnx2x_stats_start(bp); |
576 | up(&bp->stats_sema); | ||
555 | } | 577 | } |
556 | 578 | ||
557 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) | 579 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) |
@@ -888,9 +910,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) | |||
888 | /* Make sure we use the value of the counter | 910 | /* Make sure we use the value of the counter |
889 | * used for sending the last stats ramrod. | 911 | * used for sending the last stats ramrod. |
890 | */ | 912 | */ |
891 | spin_lock_bh(&bp->stats_lock); | ||
892 | cur_stats_counter = bp->stats_counter - 1; | 913 | cur_stats_counter = bp->stats_counter - 1; |
893 | spin_unlock_bh(&bp->stats_lock); | ||
894 | 914 | ||
895 | /* are storm stats valid? */ | 915 | /* are storm stats valid? */ |
896 | if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { | 916 | if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { |
@@ -1227,12 +1247,18 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1227 | { | 1247 | { |
1228 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | 1248 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); |
1229 | 1249 | ||
1230 | if (bnx2x_edebug_stats_stopped(bp)) | 1250 | /* we run update from timer context, so give up |
1251 | * if somebody is in the middle of transition | ||
1252 | */ | ||
1253 | if (down_trylock(&bp->stats_sema)) | ||
1231 | return; | 1254 | return; |
1232 | 1255 | ||
1256 | if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started) | ||
1257 | goto out; | ||
1258 | |||
1233 | if (IS_PF(bp)) { | 1259 | if (IS_PF(bp)) { |
1234 | if (*stats_comp != DMAE_COMP_VAL) | 1260 | if (*stats_comp != DMAE_COMP_VAL) |
1235 | return; | 1261 | goto out; |
1236 | 1262 | ||
1237 | if (bp->port.pmf) | 1263 | if (bp->port.pmf) |
1238 | bnx2x_hw_stats_update(bp); | 1264 | bnx2x_hw_stats_update(bp); |
@@ -1242,7 +1268,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1242 | BNX2X_ERR("storm stats were not updated for 3 times\n"); | 1268 | BNX2X_ERR("storm stats were not updated for 3 times\n"); |
1243 | bnx2x_panic(); | 1269 | bnx2x_panic(); |
1244 | } | 1270 | } |
1245 | return; | 1271 | goto out; |
1246 | } | 1272 | } |
1247 | } else { | 1273 | } else { |
1248 | /* vf doesn't collect HW statistics, and doesn't get completions | 1274 | /* vf doesn't collect HW statistics, and doesn't get completions |
@@ -1256,7 +1282,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1256 | 1282 | ||
1257 | /* vf is done */ | 1283 | /* vf is done */ |
1258 | if (IS_VF(bp)) | 1284 | if (IS_VF(bp)) |
1259 | return; | 1285 | goto out; |
1260 | 1286 | ||
1261 | if (netif_msg_timer(bp)) { | 1287 | if (netif_msg_timer(bp)) { |
1262 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | 1288 | struct bnx2x_eth_stats *estats = &bp->eth_stats; |
@@ -1267,6 +1293,9 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1267 | 1293 | ||
1268 | bnx2x_hw_stats_post(bp); | 1294 | bnx2x_hw_stats_post(bp); |
1269 | bnx2x_storm_stats_post(bp); | 1295 | bnx2x_storm_stats_post(bp); |
1296 | |||
1297 | out: | ||
1298 | up(&bp->stats_sema); | ||
1270 | } | 1299 | } |
1271 | 1300 | ||
1272 | static void bnx2x_port_stats_stop(struct bnx2x *bp) | 1301 | static void bnx2x_port_stats_stop(struct bnx2x *bp) |
@@ -1332,6 +1361,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp) | |||
1332 | { | 1361 | { |
1333 | int update = 0; | 1362 | int update = 0; |
1334 | 1363 | ||
1364 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
1365 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
1366 | |||
1367 | bp->stats_started = false; | ||
1368 | |||
1335 | bnx2x_stats_comp(bp); | 1369 | bnx2x_stats_comp(bp); |
1336 | 1370 | ||
1337 | if (bp->port.pmf) | 1371 | if (bp->port.pmf) |
@@ -1348,6 +1382,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp) | |||
1348 | bnx2x_hw_stats_post(bp); | 1382 | bnx2x_hw_stats_post(bp); |
1349 | bnx2x_stats_comp(bp); | 1383 | bnx2x_stats_comp(bp); |
1350 | } | 1384 | } |
1385 | |||
1386 | up(&bp->stats_sema); | ||
1351 | } | 1387 | } |
1352 | 1388 | ||
1353 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) | 1389 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) |
@@ -1376,15 +1412,17 @@ static const struct { | |||
1376 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | 1412 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) |
1377 | { | 1413 | { |
1378 | enum bnx2x_stats_state state; | 1414 | enum bnx2x_stats_state state; |
1415 | void (*action)(struct bnx2x *bp); | ||
1379 | if (unlikely(bp->panic)) | 1416 | if (unlikely(bp->panic)) |
1380 | return; | 1417 | return; |
1381 | 1418 | ||
1382 | spin_lock_bh(&bp->stats_lock); | 1419 | spin_lock_bh(&bp->stats_lock); |
1383 | state = bp->stats_state; | 1420 | state = bp->stats_state; |
1384 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 1421 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
1422 | action = bnx2x_stats_stm[state][event].action; | ||
1385 | spin_unlock_bh(&bp->stats_lock); | 1423 | spin_unlock_bh(&bp->stats_lock); |
1386 | 1424 | ||
1387 | bnx2x_stats_stm[state][event].action(bp); | 1425 | action(bp); |
1388 | 1426 | ||
1389 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | 1427 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) |
1390 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | 1428 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", |
@@ -1955,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, | |||
1955 | estats->mac_discard); | 1993 | estats->mac_discard); |
1956 | } | 1994 | } |
1957 | } | 1995 | } |
1996 | |||
1997 | void bnx2x_stats_safe_exec(struct bnx2x *bp, | ||
1998 | void (func_to_exec)(void *cookie), | ||
1999 | void *cookie){ | ||
2000 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
2001 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
2002 | bnx2x_stats_comp(bp); | ||
2003 | func_to_exec(cookie); | ||
2004 | __bnx2x_stats_start(bp); | ||
2005 | up(&bp->stats_sema); | ||
2006 | } | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 853824d258e8..f35845006cdd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | |||
@@ -539,6 +539,9 @@ struct bnx2x; | |||
539 | void bnx2x_memset_stats(struct bnx2x *bp); | 539 | void bnx2x_memset_stats(struct bnx2x *bp); |
540 | void bnx2x_stats_init(struct bnx2x *bp); | 540 | void bnx2x_stats_init(struct bnx2x *bp); |
541 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | 541 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); |
542 | void bnx2x_stats_safe_exec(struct bnx2x *bp, | ||
543 | void (func_to_exec)(void *cookie), | ||
544 | void *cookie); | ||
542 | 545 | ||
543 | /** | 546 | /** |
544 | * bnx2x_save_statistics - save statistics when unloading. | 547 | * bnx2x_save_statistics - save statistics when unloading. |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ddebc7a5dda0..0da2214ef1b9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -17796,8 +17796,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, | |||
17796 | 17796 | ||
17797 | done: | 17797 | done: |
17798 | if (state == pci_channel_io_perm_failure) { | 17798 | if (state == pci_channel_io_perm_failure) { |
17799 | tg3_napi_enable(tp); | 17799 | if (netdev) { |
17800 | dev_close(netdev); | 17800 | tg3_napi_enable(tp); |
17801 | dev_close(netdev); | ||
17802 | } | ||
17801 | err = PCI_ERS_RESULT_DISCONNECT; | 17803 | err = PCI_ERS_RESULT_DISCONNECT; |
17802 | } else { | 17804 | } else { |
17803 | pci_disable_device(pdev); | 17805 | pci_disable_device(pdev); |
@@ -17827,7 +17829,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
17827 | rtnl_lock(); | 17829 | rtnl_lock(); |
17828 | 17830 | ||
17829 | if (pci_enable_device(pdev)) { | 17831 | if (pci_enable_device(pdev)) { |
17830 | netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); | 17832 | dev_err(&pdev->dev, |
17833 | "Cannot re-enable PCI device after reset.\n"); | ||
17831 | goto done; | 17834 | goto done; |
17832 | } | 17835 | } |
17833 | 17836 | ||
@@ -17835,7 +17838,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
17835 | pci_restore_state(pdev); | 17838 | pci_restore_state(pdev); |
17836 | pci_save_state(pdev); | 17839 | pci_save_state(pdev); |
17837 | 17840 | ||
17838 | if (!netif_running(netdev)) { | 17841 | if (!netdev || !netif_running(netdev)) { |
17839 | rc = PCI_ERS_RESULT_RECOVERED; | 17842 | rc = PCI_ERS_RESULT_RECOVERED; |
17840 | goto done; | 17843 | goto done; |
17841 | } | 17844 | } |
@@ -17847,7 +17850,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
17847 | rc = PCI_ERS_RESULT_RECOVERED; | 17850 | rc = PCI_ERS_RESULT_RECOVERED; |
17848 | 17851 | ||
17849 | done: | 17852 | done: |
17850 | if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) { | 17853 | if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) { |
17851 | tg3_napi_enable(tp); | 17854 | tg3_napi_enable(tp); |
17852 | dev_close(netdev); | 17855 | dev_close(netdev); |
17853 | } | 17856 | } |
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 687ec4a8bb48..9c89dc8fe105 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c | |||
@@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, | |||
455 | q->pg_chunk.offset = 0; | 455 | q->pg_chunk.offset = 0; |
456 | mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, | 456 | mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, |
457 | 0, q->alloc_size, PCI_DMA_FROMDEVICE); | 457 | 0, q->alloc_size, PCI_DMA_FROMDEVICE); |
458 | if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { | ||
459 | __free_pages(q->pg_chunk.page, order); | ||
460 | q->pg_chunk.page = NULL; | ||
461 | return -EIO; | ||
462 | } | ||
463 | q->pg_chunk.mapping = mapping; | 458 | q->pg_chunk.mapping = mapping; |
464 | } | 459 | } |
465 | sd->pg_chunk = q->pg_chunk; | 460 | sd->pg_chunk = q->pg_chunk; |
@@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) | |||
954 | return flits_to_desc(flits); | 949 | return flits_to_desc(flits); |
955 | } | 950 | } |
956 | 951 | ||
957 | |||
958 | /* map_skb - map a packet main body and its page fragments | ||
959 | * @pdev: the PCI device | ||
960 | * @skb: the packet | ||
961 | * @addr: placeholder to save the mapped addresses | ||
962 | * | ||
963 | * map the main body of an sk_buff and its page fragments, if any. | ||
964 | */ | ||
965 | static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, | ||
966 | dma_addr_t *addr) | ||
967 | { | ||
968 | const skb_frag_t *fp, *end; | ||
969 | const struct skb_shared_info *si; | ||
970 | |||
971 | *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), | ||
972 | PCI_DMA_TODEVICE); | ||
973 | if (pci_dma_mapping_error(pdev, *addr)) | ||
974 | goto out_err; | ||
975 | |||
976 | si = skb_shinfo(skb); | ||
977 | end = &si->frags[si->nr_frags]; | ||
978 | |||
979 | for (fp = si->frags; fp < end; fp++) { | ||
980 | *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), | ||
981 | DMA_TO_DEVICE); | ||
982 | if (pci_dma_mapping_error(pdev, *addr)) | ||
983 | goto unwind; | ||
984 | } | ||
985 | return 0; | ||
986 | |||
987 | unwind: | ||
988 | while (fp-- > si->frags) | ||
989 | dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), | ||
990 | DMA_TO_DEVICE); | ||
991 | |||
992 | pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); | ||
993 | out_err: | ||
994 | return -ENOMEM; | ||
995 | } | ||
996 | |||
997 | /** | 952 | /** |
998 | * write_sgl - populate a scatter/gather list for a packet | 953 | * make_sgl - populate a scatter/gather list for a packet |
999 | * @skb: the packet | 954 | * @skb: the packet |
1000 | * @sgp: the SGL to populate | 955 | * @sgp: the SGL to populate |
1001 | * @start: start address of skb main body data to include in the SGL | 956 | * @start: start address of skb main body data to include in the SGL |
1002 | * @len: length of skb main body data to include in the SGL | 957 | * @len: length of skb main body data to include in the SGL |
1003 | * @addr: the list of the mapped addresses | 958 | * @pdev: the PCI device |
1004 | * | 959 | * |
1005 | * Copies the scatter/gather list for the buffers that make up a packet | 960 | * Generates a scatter/gather list for the buffers that make up a packet |
1006 | * and returns the SGL size in 8-byte words. The caller must size the SGL | 961 | * and returns the SGL size in 8-byte words. The caller must size the SGL |
1007 | * appropriately. | 962 | * appropriately. |
1008 | */ | 963 | */ |
1009 | static inline unsigned int write_sgl(const struct sk_buff *skb, | 964 | static inline unsigned int make_sgl(const struct sk_buff *skb, |
1010 | struct sg_ent *sgp, unsigned char *start, | 965 | struct sg_ent *sgp, unsigned char *start, |
1011 | unsigned int len, const dma_addr_t *addr) | 966 | unsigned int len, struct pci_dev *pdev) |
1012 | { | 967 | { |
1013 | unsigned int i, j = 0, k = 0, nfrags; | 968 | dma_addr_t mapping; |
969 | unsigned int i, j = 0, nfrags; | ||
1014 | 970 | ||
1015 | if (len) { | 971 | if (len) { |
972 | mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); | ||
1016 | sgp->len[0] = cpu_to_be32(len); | 973 | sgp->len[0] = cpu_to_be32(len); |
1017 | sgp->addr[j++] = cpu_to_be64(addr[k++]); | 974 | sgp->addr[0] = cpu_to_be64(mapping); |
975 | j = 1; | ||
1018 | } | 976 | } |
1019 | 977 | ||
1020 | nfrags = skb_shinfo(skb)->nr_frags; | 978 | nfrags = skb_shinfo(skb)->nr_frags; |
1021 | for (i = 0; i < nfrags; i++) { | 979 | for (i = 0; i < nfrags; i++) { |
1022 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 980 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1023 | 981 | ||
982 | mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), | ||
983 | DMA_TO_DEVICE); | ||
1024 | sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); | 984 | sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); |
1025 | sgp->addr[j] = cpu_to_be64(addr[k++]); | 985 | sgp->addr[j] = cpu_to_be64(mapping); |
1026 | j ^= 1; | 986 | j ^= 1; |
1027 | if (j == 0) | 987 | if (j == 0) |
1028 | ++sgp; | 988 | ++sgp; |
@@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, | |||
1178 | const struct port_info *pi, | 1138 | const struct port_info *pi, |
1179 | unsigned int pidx, unsigned int gen, | 1139 | unsigned int pidx, unsigned int gen, |
1180 | struct sge_txq *q, unsigned int ndesc, | 1140 | struct sge_txq *q, unsigned int ndesc, |
1181 | unsigned int compl, const dma_addr_t *addr) | 1141 | unsigned int compl) |
1182 | { | 1142 | { |
1183 | unsigned int flits, sgl_flits, cntrl, tso_info; | 1143 | unsigned int flits, sgl_flits, cntrl, tso_info; |
1184 | struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; | 1144 | struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; |
@@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, | |||
1236 | } | 1196 | } |
1237 | 1197 | ||
1238 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; | 1198 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; |
1239 | sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr); | 1199 | sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); |
1240 | 1200 | ||
1241 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, | 1201 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, |
1242 | htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), | 1202 | htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), |
@@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1267 | struct netdev_queue *txq; | 1227 | struct netdev_queue *txq; |
1268 | struct sge_qset *qs; | 1228 | struct sge_qset *qs; |
1269 | struct sge_txq *q; | 1229 | struct sge_txq *q; |
1270 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | ||
1271 | 1230 | ||
1272 | /* | 1231 | /* |
1273 | * The chip min packet length is 9 octets but play safe and reject | 1232 | * The chip min packet length is 9 octets but play safe and reject |
@@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1296 | return NETDEV_TX_BUSY; | 1255 | return NETDEV_TX_BUSY; |
1297 | } | 1256 | } |
1298 | 1257 | ||
1299 | if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) { | ||
1300 | dev_kfree_skb(skb); | ||
1301 | return NETDEV_TX_OK; | ||
1302 | } | ||
1303 | |||
1304 | q->in_use += ndesc; | 1258 | q->in_use += ndesc; |
1305 | if (unlikely(credits - ndesc < q->stop_thres)) { | 1259 | if (unlikely(credits - ndesc < q->stop_thres)) { |
1306 | t3_stop_tx_queue(txq, qs, q); | 1260 | t3_stop_tx_queue(txq, qs, q); |
@@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1358 | if (likely(!skb_shared(skb))) | 1312 | if (likely(!skb_shared(skb))) |
1359 | skb_orphan(skb); | 1313 | skb_orphan(skb); |
1360 | 1314 | ||
1361 | write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); | 1315 | write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); |
1362 | check_ring_tx_db(adap, q); | 1316 | check_ring_tx_db(adap, q); |
1363 | return NETDEV_TX_OK; | 1317 | return NETDEV_TX_OK; |
1364 | } | 1318 | } |
@@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, | |||
1623 | */ | 1577 | */ |
1624 | static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, | 1578 | static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, |
1625 | struct sge_txq *q, unsigned int pidx, | 1579 | struct sge_txq *q, unsigned int pidx, |
1626 | unsigned int gen, unsigned int ndesc, | 1580 | unsigned int gen, unsigned int ndesc) |
1627 | const dma_addr_t *addr) | ||
1628 | { | 1581 | { |
1629 | unsigned int sgl_flits, flits; | 1582 | unsigned int sgl_flits, flits; |
1630 | struct work_request_hdr *from; | 1583 | struct work_request_hdr *from; |
@@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, | |||
1645 | 1598 | ||
1646 | flits = skb_transport_offset(skb) / 8; | 1599 | flits = skb_transport_offset(skb) / 8; |
1647 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; | 1600 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; |
1648 | sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb), | 1601 | sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), |
1649 | skb_tail_pointer(skb) - | 1602 | skb->tail - skb->transport_header, |
1650 | skb_transport_header(skb), addr); | 1603 | adap->pdev); |
1651 | if (need_skb_unmap()) { | 1604 | if (need_skb_unmap()) { |
1652 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); | 1605 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); |
1653 | skb->destructor = deferred_unmap_destructor; | 1606 | skb->destructor = deferred_unmap_destructor; |
@@ -1705,11 +1658,6 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1705 | goto again; | 1658 | goto again; |
1706 | } | 1659 | } |
1707 | 1660 | ||
1708 | if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) { | ||
1709 | spin_unlock(&q->lock); | ||
1710 | return NET_XMIT_SUCCESS; | ||
1711 | } | ||
1712 | |||
1713 | gen = q->gen; | 1661 | gen = q->gen; |
1714 | q->in_use += ndesc; | 1662 | q->in_use += ndesc; |
1715 | pidx = q->pidx; | 1663 | pidx = q->pidx; |
@@ -1720,7 +1668,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1720 | } | 1668 | } |
1721 | spin_unlock(&q->lock); | 1669 | spin_unlock(&q->lock); |
1722 | 1670 | ||
1723 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); | 1671 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc); |
1724 | check_ring_tx_db(adap, q); | 1672 | check_ring_tx_db(adap, q); |
1725 | return NET_XMIT_SUCCESS; | 1673 | return NET_XMIT_SUCCESS; |
1726 | } | 1674 | } |
@@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data) | |||
1738 | struct sge_txq *q = &qs->txq[TXQ_OFLD]; | 1686 | struct sge_txq *q = &qs->txq[TXQ_OFLD]; |
1739 | const struct port_info *pi = netdev_priv(qs->netdev); | 1687 | const struct port_info *pi = netdev_priv(qs->netdev); |
1740 | struct adapter *adap = pi->adapter; | 1688 | struct adapter *adap = pi->adapter; |
1741 | unsigned int written = 0; | ||
1742 | 1689 | ||
1743 | spin_lock(&q->lock); | 1690 | spin_lock(&q->lock); |
1744 | again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | 1691 | again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); |
@@ -1758,14 +1705,10 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1758 | break; | 1705 | break; |
1759 | } | 1706 | } |
1760 | 1707 | ||
1761 | if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) | ||
1762 | break; | ||
1763 | |||
1764 | gen = q->gen; | 1708 | gen = q->gen; |
1765 | q->in_use += ndesc; | 1709 | q->in_use += ndesc; |
1766 | pidx = q->pidx; | 1710 | pidx = q->pidx; |
1767 | q->pidx += ndesc; | 1711 | q->pidx += ndesc; |
1768 | written += ndesc; | ||
1769 | if (q->pidx >= q->size) { | 1712 | if (q->pidx >= q->size) { |
1770 | q->pidx -= q->size; | 1713 | q->pidx -= q->size; |
1771 | q->gen ^= 1; | 1714 | q->gen ^= 1; |
@@ -1773,8 +1716,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1773 | __skb_unlink(skb, &q->sendq); | 1716 | __skb_unlink(skb, &q->sendq); |
1774 | spin_unlock(&q->lock); | 1717 | spin_unlock(&q->lock); |
1775 | 1718 | ||
1776 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc, | 1719 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc); |
1777 | (dma_addr_t *)skb->head); | ||
1778 | spin_lock(&q->lock); | 1720 | spin_lock(&q->lock); |
1779 | } | 1721 | } |
1780 | spin_unlock(&q->lock); | 1722 | spin_unlock(&q->lock); |
@@ -1784,9 +1726,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1784 | set_bit(TXQ_LAST_PKT_DB, &q->flags); | 1726 | set_bit(TXQ_LAST_PKT_DB, &q->flags); |
1785 | #endif | 1727 | #endif |
1786 | wmb(); | 1728 | wmb(); |
1787 | if (likely(written)) | 1729 | t3_write_reg(adap, A_SG_KDOORBELL, |
1788 | t3_write_reg(adap, A_SG_KDOORBELL, | 1730 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); |
1789 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); | ||
1790 | } | 1731 | } |
1791 | 1732 | ||
1792 | /** | 1733 | /** |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 6e6e0a117ee2..8ec5d74ad44d 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -3048,6 +3048,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter) | |||
3048 | 3048 | ||
3049 | adapter->max_event_queues = le16_to_cpu(desc->eq_count); | 3049 | adapter->max_event_queues = le16_to_cpu(desc->eq_count); |
3050 | adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); | 3050 | adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); |
3051 | |||
3052 | /* Clear flags that driver is not interested in */ | ||
3053 | adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT; | ||
3051 | } | 3054 | } |
3052 | err: | 3055 | err: |
3053 | mutex_unlock(&adapter->mbox_lock); | 3056 | mutex_unlock(&adapter->mbox_lock); |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 5228d88c5a02..1b3b9e886412 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h | |||
@@ -563,6 +563,12 @@ enum be_if_flags { | |||
563 | BE_IF_FLAGS_MULTICAST = 0x1000 | 563 | BE_IF_FLAGS_MULTICAST = 0x1000 |
564 | }; | 564 | }; |
565 | 565 | ||
566 | #define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\ | ||
567 | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\ | ||
568 | BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\ | ||
569 | BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\ | ||
570 | BE_IF_FLAGS_UNTAGGED) | ||
571 | |||
566 | /* An RX interface is an object with one or more MAC addresses and | 572 | /* An RX interface is an object with one or more MAC addresses and |
567 | * filtering capabilities. */ | 573 | * filtering capabilities. */ |
568 | struct be_cmd_req_if_create { | 574 | struct be_cmd_req_if_create { |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 181edb522450..3d91a5ec61a4 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -2563,8 +2563,8 @@ static int be_close(struct net_device *netdev) | |||
2563 | /* Wait for all pending tx completions to arrive so that | 2563 | /* Wait for all pending tx completions to arrive so that |
2564 | * all tx skbs are freed. | 2564 | * all tx skbs are freed. |
2565 | */ | 2565 | */ |
2566 | be_tx_compl_clean(adapter); | ||
2567 | netif_tx_disable(netdev); | 2566 | netif_tx_disable(netdev); |
2567 | be_tx_compl_clean(adapter); | ||
2568 | 2568 | ||
2569 | be_rx_qs_destroy(adapter); | 2569 | be_rx_qs_destroy(adapter); |
2570 | 2570 | ||
@@ -4373,6 +4373,10 @@ static int be_resume(struct pci_dev *pdev) | |||
4373 | pci_set_power_state(pdev, PCI_D0); | 4373 | pci_set_power_state(pdev, PCI_D0); |
4374 | pci_restore_state(pdev); | 4374 | pci_restore_state(pdev); |
4375 | 4375 | ||
4376 | status = be_fw_wait_ready(adapter); | ||
4377 | if (status) | ||
4378 | return status; | ||
4379 | |||
4376 | /* tell fw we're ready to fire cmds */ | 4380 | /* tell fw we're ready to fire cmds */ |
4377 | status = be_cmd_fw_init(adapter); | 4381 | status = be_cmd_fw_init(adapter); |
4378 | if (status) | 4382 | if (status) |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 77ea0db0bbfc..c610a2716be4 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -971,8 +971,7 @@ fec_enet_rx(struct net_device *ndev, int budget) | |||
971 | htons(ETH_P_8021Q), | 971 | htons(ETH_P_8021Q), |
972 | vlan_tag); | 972 | vlan_tag); |
973 | 973 | ||
974 | if (!skb_defer_rx_timestamp(skb)) | 974 | napi_gro_receive(&fep->napi, skb); |
975 | napi_gro_receive(&fep->napi, skb); | ||
976 | } | 975 | } |
977 | 976 | ||
978 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, | 977 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, |
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 7fbe6abf6054..23de82a9da82 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c | |||
@@ -3069,7 +3069,7 @@ jme_init_one(struct pci_dev *pdev, | |||
3069 | jwrite32(jme, JME_APMC, apmc); | 3069 | jwrite32(jme, JME_APMC, apmc); |
3070 | } | 3070 | } |
3071 | 3071 | ||
3072 | NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) | 3072 | NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT) |
3073 | 3073 | ||
3074 | spin_lock_init(&jme->phy_lock); | 3074 | spin_lock_init(&jme->phy_lock); |
3075 | spin_lock_init(&jme->macaddr_lock); | 3075 | spin_lock_init(&jme->macaddr_lock); |
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index c896079728e1..ef94a591f9e5 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c | |||
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) | |||
931 | } | 931 | } |
932 | 932 | ||
933 | /* Allocate and setup a new buffer for receiving */ | 933 | /* Allocate and setup a new buffer for receiving */ |
934 | static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, | 934 | static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, |
935 | struct sk_buff *skb, unsigned int bufsize) | 935 | struct sk_buff *skb, unsigned int bufsize) |
936 | { | 936 | { |
937 | struct skge_rx_desc *rd = e->desc; | 937 | struct skge_rx_desc *rd = e->desc; |
938 | u64 map; | 938 | dma_addr_t map; |
939 | 939 | ||
940 | map = pci_map_single(skge->hw->pdev, skb->data, bufsize, | 940 | map = pci_map_single(skge->hw->pdev, skb->data, bufsize, |
941 | PCI_DMA_FROMDEVICE); | 941 | PCI_DMA_FROMDEVICE); |
942 | 942 | ||
943 | rd->dma_lo = map; | 943 | if (pci_dma_mapping_error(skge->hw->pdev, map)) |
944 | rd->dma_hi = map >> 32; | 944 | return -1; |
945 | |||
946 | rd->dma_lo = lower_32_bits(map); | ||
947 | rd->dma_hi = upper_32_bits(map); | ||
945 | e->skb = skb; | 948 | e->skb = skb; |
946 | rd->csum1_start = ETH_HLEN; | 949 | rd->csum1_start = ETH_HLEN; |
947 | rd->csum2_start = ETH_HLEN; | 950 | rd->csum2_start = ETH_HLEN; |
@@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, | |||
953 | rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; | 956 | rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; |
954 | dma_unmap_addr_set(e, mapaddr, map); | 957 | dma_unmap_addr_set(e, mapaddr, map); |
955 | dma_unmap_len_set(e, maplen, bufsize); | 958 | dma_unmap_len_set(e, maplen, bufsize); |
959 | return 0; | ||
956 | } | 960 | } |
957 | 961 | ||
958 | /* Resume receiving using existing skb, | 962 | /* Resume receiving using existing skb, |
@@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev) | |||
1014 | return -ENOMEM; | 1018 | return -ENOMEM; |
1015 | 1019 | ||
1016 | skb_reserve(skb, NET_IP_ALIGN); | 1020 | skb_reserve(skb, NET_IP_ALIGN); |
1017 | skge_rx_setup(skge, e, skb, skge->rx_buf_size); | 1021 | if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) { |
1022 | dev_kfree_skb(skb); | ||
1023 | return -EIO; | ||
1024 | } | ||
1018 | } while ((e = e->next) != ring->start); | 1025 | } while ((e = e->next) != ring->start); |
1019 | 1026 | ||
1020 | ring->to_clean = ring->start; | 1027 | ring->to_clean = ring->start; |
@@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev) | |||
2544 | 2551 | ||
2545 | BUG_ON(skge->dma & 7); | 2552 | BUG_ON(skge->dma & 7); |
2546 | 2553 | ||
2547 | if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { | 2554 | if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { |
2548 | dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); | 2555 | dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); |
2549 | err = -EINVAL; | 2556 | err = -EINVAL; |
2550 | goto free_pci_mem; | 2557 | goto free_pci_mem; |
@@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2729 | struct skge_tx_desc *td; | 2736 | struct skge_tx_desc *td; |
2730 | int i; | 2737 | int i; |
2731 | u32 control, len; | 2738 | u32 control, len; |
2732 | u64 map; | 2739 | dma_addr_t map; |
2733 | 2740 | ||
2734 | if (skb_padto(skb, ETH_ZLEN)) | 2741 | if (skb_padto(skb, ETH_ZLEN)) |
2735 | return NETDEV_TX_OK; | 2742 | return NETDEV_TX_OK; |
@@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2743 | e->skb = skb; | 2750 | e->skb = skb; |
2744 | len = skb_headlen(skb); | 2751 | len = skb_headlen(skb); |
2745 | map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); | 2752 | map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); |
2753 | if (pci_dma_mapping_error(hw->pdev, map)) | ||
2754 | goto mapping_error; | ||
2755 | |||
2746 | dma_unmap_addr_set(e, mapaddr, map); | 2756 | dma_unmap_addr_set(e, mapaddr, map); |
2747 | dma_unmap_len_set(e, maplen, len); | 2757 | dma_unmap_len_set(e, maplen, len); |
2748 | 2758 | ||
2749 | td->dma_lo = map; | 2759 | td->dma_lo = lower_32_bits(map); |
2750 | td->dma_hi = map >> 32; | 2760 | td->dma_hi = upper_32_bits(map); |
2751 | 2761 | ||
2752 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2762 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2753 | const int offset = skb_checksum_start_offset(skb); | 2763 | const int offset = skb_checksum_start_offset(skb); |
@@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2778 | 2788 | ||
2779 | map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, | 2789 | map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, |
2780 | skb_frag_size(frag), DMA_TO_DEVICE); | 2790 | skb_frag_size(frag), DMA_TO_DEVICE); |
2791 | if (dma_mapping_error(&hw->pdev->dev, map)) | ||
2792 | goto mapping_unwind; | ||
2781 | 2793 | ||
2782 | e = e->next; | 2794 | e = e->next; |
2783 | e->skb = skb; | 2795 | e->skb = skb; |
2784 | tf = e->desc; | 2796 | tf = e->desc; |
2785 | BUG_ON(tf->control & BMU_OWN); | 2797 | BUG_ON(tf->control & BMU_OWN); |
2786 | 2798 | ||
2787 | tf->dma_lo = map; | 2799 | tf->dma_lo = lower_32_bits(map); |
2788 | tf->dma_hi = (u64) map >> 32; | 2800 | tf->dma_hi = upper_32_bits(map); |
2789 | dma_unmap_addr_set(e, mapaddr, map); | 2801 | dma_unmap_addr_set(e, mapaddr, map); |
2790 | dma_unmap_len_set(e, maplen, skb_frag_size(frag)); | 2802 | dma_unmap_len_set(e, maplen, skb_frag_size(frag)); |
2791 | 2803 | ||
@@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2815 | } | 2827 | } |
2816 | 2828 | ||
2817 | return NETDEV_TX_OK; | 2829 | return NETDEV_TX_OK; |
2830 | |||
2831 | mapping_unwind: | ||
2832 | e = skge->tx_ring.to_use; | ||
2833 | pci_unmap_single(hw->pdev, | ||
2834 | dma_unmap_addr(e, mapaddr), | ||
2835 | dma_unmap_len(e, maplen), | ||
2836 | PCI_DMA_TODEVICE); | ||
2837 | while (i-- > 0) { | ||
2838 | e = e->next; | ||
2839 | pci_unmap_page(hw->pdev, | ||
2840 | dma_unmap_addr(e, mapaddr), | ||
2841 | dma_unmap_len(e, maplen), | ||
2842 | PCI_DMA_TODEVICE); | ||
2843 | } | ||
2844 | |||
2845 | mapping_error: | ||
2846 | if (net_ratelimit()) | ||
2847 | dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); | ||
2848 | dev_kfree_skb(skb); | ||
2849 | return NETDEV_TX_OK; | ||
2818 | } | 2850 | } |
2819 | 2851 | ||
2820 | 2852 | ||
@@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
3045 | 3077 | ||
3046 | pci_dma_sync_single_for_cpu(skge->hw->pdev, | 3078 | pci_dma_sync_single_for_cpu(skge->hw->pdev, |
3047 | dma_unmap_addr(e, mapaddr), | 3079 | dma_unmap_addr(e, mapaddr), |
3048 | len, PCI_DMA_FROMDEVICE); | 3080 | dma_unmap_len(e, maplen), |
3081 | PCI_DMA_FROMDEVICE); | ||
3049 | skb_copy_from_linear_data(e->skb, skb->data, len); | 3082 | skb_copy_from_linear_data(e->skb, skb->data, len); |
3050 | pci_dma_sync_single_for_device(skge->hw->pdev, | 3083 | pci_dma_sync_single_for_device(skge->hw->pdev, |
3051 | dma_unmap_addr(e, mapaddr), | 3084 | dma_unmap_addr(e, mapaddr), |
3052 | len, PCI_DMA_FROMDEVICE); | 3085 | dma_unmap_len(e, maplen), |
3086 | PCI_DMA_FROMDEVICE); | ||
3053 | skge_rx_reuse(e, skge->rx_buf_size); | 3087 | skge_rx_reuse(e, skge->rx_buf_size); |
3054 | } else { | 3088 | } else { |
3055 | struct sk_buff *nskb; | 3089 | struct sk_buff *nskb; |
@@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
3058 | if (!nskb) | 3092 | if (!nskb) |
3059 | goto resubmit; | 3093 | goto resubmit; |
3060 | 3094 | ||
3095 | if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { | ||
3096 | dev_kfree_skb(nskb); | ||
3097 | goto resubmit; | ||
3098 | } | ||
3099 | |||
3061 | pci_unmap_single(skge->hw->pdev, | 3100 | pci_unmap_single(skge->hw->pdev, |
3062 | dma_unmap_addr(e, mapaddr), | 3101 | dma_unmap_addr(e, mapaddr), |
3063 | dma_unmap_len(e, maplen), | 3102 | dma_unmap_len(e, maplen), |
3064 | PCI_DMA_FROMDEVICE); | 3103 | PCI_DMA_FROMDEVICE); |
3065 | skb = e->skb; | 3104 | skb = e->skb; |
3066 | prefetch(skb->data); | 3105 | prefetch(skb->data); |
3067 | skge_rx_setup(skge, e, nskb, skge->rx_buf_size); | ||
3068 | } | 3106 | } |
3069 | 3107 | ||
3070 | skb_put(skb, len); | 3108 | skb_put(skb, len); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index c571de85d0f9..5472cbd34028 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
@@ -46,7 +46,7 @@ | |||
46 | #include "mlx5_core.h" | 46 | #include "mlx5_core.h" |
47 | 47 | ||
48 | enum { | 48 | enum { |
49 | CMD_IF_REV = 4, | 49 | CMD_IF_REV = 5, |
50 | }; | 50 | }; |
51 | 51 | ||
52 | enum { | 52 | enum { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index c02cbcfd0fb8..443cc4d7b024 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c | |||
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) | |||
268 | case MLX5_EVENT_TYPE_PAGE_REQUEST: | 268 | case MLX5_EVENT_TYPE_PAGE_REQUEST: |
269 | { | 269 | { |
270 | u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); | 270 | u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); |
271 | s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); | 271 | s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); |
272 | 272 | ||
273 | mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); | 273 | mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); |
274 | mlx5_core_req_pages_handler(dev, func_id, npages); | 274 | mlx5_core_req_pages_handler(dev, func_id, npages); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 72a5222447f5..f012658b6a92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c | |||
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, | |||
113 | caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; | 113 | caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; |
114 | caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; | 114 | caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; |
115 | caps->log_max_mcg = out->hca_cap.log_max_mcg; | 115 | caps->log_max_mcg = out->hca_cap.log_max_mcg; |
116 | caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); | 116 | caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff; |
117 | caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); | 117 | caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); |
118 | caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); | 118 | caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); |
119 | caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; | 119 | caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 748f10a155c4..3e6670c4a7cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
@@ -55,33 +55,9 @@ enum { | |||
55 | }; | 55 | }; |
56 | 56 | ||
57 | static DEFINE_SPINLOCK(health_lock); | 57 | static DEFINE_SPINLOCK(health_lock); |
58 | |||
59 | static LIST_HEAD(health_list); | 58 | static LIST_HEAD(health_list); |
60 | static struct work_struct health_work; | 59 | static struct work_struct health_work; |
61 | 60 | ||
62 | static health_handler_t reg_handler; | ||
63 | int mlx5_register_health_report_handler(health_handler_t handler) | ||
64 | { | ||
65 | spin_lock_irq(&health_lock); | ||
66 | if (reg_handler) { | ||
67 | spin_unlock_irq(&health_lock); | ||
68 | return -EEXIST; | ||
69 | } | ||
70 | reg_handler = handler; | ||
71 | spin_unlock_irq(&health_lock); | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | EXPORT_SYMBOL(mlx5_register_health_report_handler); | ||
76 | |||
77 | void mlx5_unregister_health_report_handler(void) | ||
78 | { | ||
79 | spin_lock_irq(&health_lock); | ||
80 | reg_handler = NULL; | ||
81 | spin_unlock_irq(&health_lock); | ||
82 | } | ||
83 | EXPORT_SYMBOL(mlx5_unregister_health_report_handler); | ||
84 | |||
85 | static void health_care(struct work_struct *work) | 61 | static void health_care(struct work_struct *work) |
86 | { | 62 | { |
87 | struct mlx5_core_health *health, *n; | 63 | struct mlx5_core_health *health, *n; |
@@ -98,11 +74,8 @@ static void health_care(struct work_struct *work) | |||
98 | priv = container_of(health, struct mlx5_priv, health); | 74 | priv = container_of(health, struct mlx5_priv, health); |
99 | dev = container_of(priv, struct mlx5_core_dev, priv); | 75 | dev = container_of(priv, struct mlx5_core_dev, priv); |
100 | mlx5_core_warn(dev, "handling bad device here\n"); | 76 | mlx5_core_warn(dev, "handling bad device here\n"); |
77 | /* nothing yet */ | ||
101 | spin_lock_irq(&health_lock); | 78 | spin_lock_irq(&health_lock); |
102 | if (reg_handler) | ||
103 | reg_handler(dev->pdev, health->health, | ||
104 | sizeof(health->health)); | ||
105 | |||
106 | list_del_init(&health->list); | 79 | list_del_init(&health->list); |
107 | spin_unlock_irq(&health_lock); | 80 | spin_unlock_irq(&health_lock); |
108 | } | 81 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 4a3e137931a3..3a2408d44820 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | |||
@@ -43,10 +43,16 @@ enum { | |||
43 | MLX5_PAGES_TAKE = 2 | 43 | MLX5_PAGES_TAKE = 2 |
44 | }; | 44 | }; |
45 | 45 | ||
46 | enum { | ||
47 | MLX5_BOOT_PAGES = 1, | ||
48 | MLX5_INIT_PAGES = 2, | ||
49 | MLX5_POST_INIT_PAGES = 3 | ||
50 | }; | ||
51 | |||
46 | struct mlx5_pages_req { | 52 | struct mlx5_pages_req { |
47 | struct mlx5_core_dev *dev; | 53 | struct mlx5_core_dev *dev; |
48 | u32 func_id; | 54 | u32 func_id; |
49 | s16 npages; | 55 | s32 npages; |
50 | struct work_struct work; | 56 | struct work_struct work; |
51 | }; | 57 | }; |
52 | 58 | ||
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox { | |||
64 | 70 | ||
65 | struct mlx5_query_pages_outbox { | 71 | struct mlx5_query_pages_outbox { |
66 | struct mlx5_outbox_hdr hdr; | 72 | struct mlx5_outbox_hdr hdr; |
67 | __be16 num_boot_pages; | 73 | __be16 rsvd; |
68 | __be16 func_id; | 74 | __be16 func_id; |
69 | __be16 init_pages; | 75 | __be32 num_pages; |
70 | __be16 num_pages; | ||
71 | }; | 76 | }; |
72 | 77 | ||
73 | struct mlx5_manage_pages_inbox { | 78 | struct mlx5_manage_pages_inbox { |
74 | struct mlx5_inbox_hdr hdr; | 79 | struct mlx5_inbox_hdr hdr; |
75 | __be16 rsvd0; | 80 | __be16 rsvd; |
76 | __be16 func_id; | 81 | __be16 func_id; |
77 | __be16 rsvd1; | 82 | __be32 num_entries; |
78 | __be16 num_entries; | ||
79 | u8 rsvd2[16]; | ||
80 | __be64 pas[0]; | 83 | __be64 pas[0]; |
81 | }; | 84 | }; |
82 | 85 | ||
83 | struct mlx5_manage_pages_outbox { | 86 | struct mlx5_manage_pages_outbox { |
84 | struct mlx5_outbox_hdr hdr; | 87 | struct mlx5_outbox_hdr hdr; |
85 | u8 rsvd0[2]; | 88 | __be32 num_entries; |
86 | __be16 num_entries; | 89 | u8 rsvd[4]; |
87 | u8 rsvd1[20]; | ||
88 | __be64 pas[0]; | 90 | __be64 pas[0]; |
89 | }; | 91 | }; |
90 | 92 | ||
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) | |||
146 | } | 148 | } |
147 | 149 | ||
148 | static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | 150 | static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, |
149 | s16 *pages, s16 *init_pages, u16 *boot_pages) | 151 | s32 *npages, int boot) |
150 | { | 152 | { |
151 | struct mlx5_query_pages_inbox in; | 153 | struct mlx5_query_pages_inbox in; |
152 | struct mlx5_query_pages_outbox out; | 154 | struct mlx5_query_pages_outbox out; |
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | |||
155 | memset(&in, 0, sizeof(in)); | 157 | memset(&in, 0, sizeof(in)); |
156 | memset(&out, 0, sizeof(out)); | 158 | memset(&out, 0, sizeof(out)); |
157 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); | 159 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); |
160 | in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES); | ||
161 | |||
158 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | 162 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); |
159 | if (err) | 163 | if (err) |
160 | return err; | 164 | return err; |
@@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | |||
162 | if (out.hdr.status) | 166 | if (out.hdr.status) |
163 | return mlx5_cmd_status_to_err(&out.hdr); | 167 | return mlx5_cmd_status_to_err(&out.hdr); |
164 | 168 | ||
165 | if (pages) | 169 | *npages = be32_to_cpu(out.num_pages); |
166 | *pages = be16_to_cpu(out.num_pages); | ||
167 | |||
168 | if (init_pages) | ||
169 | *init_pages = be16_to_cpu(out.init_pages); | ||
170 | |||
171 | if (boot_pages) | ||
172 | *boot_pages = be16_to_cpu(out.num_boot_pages); | ||
173 | |||
174 | *func_id = be16_to_cpu(out.func_id); | 170 | *func_id = be16_to_cpu(out.func_id); |
175 | 171 | ||
176 | return err; | 172 | return err; |
@@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, | |||
224 | in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | 220 | in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); |
225 | in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); | 221 | in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); |
226 | in->func_id = cpu_to_be16(func_id); | 222 | in->func_id = cpu_to_be16(func_id); |
227 | in->num_entries = cpu_to_be16(npages); | 223 | in->num_entries = cpu_to_be32(npages); |
228 | err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); | 224 | err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); |
229 | mlx5_core_dbg(dev, "err %d\n", err); | 225 | mlx5_core_dbg(dev, "err %d\n", err); |
230 | if (err) { | 226 | if (err) { |
@@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, | |||
292 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | 288 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); |
293 | in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); | 289 | in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); |
294 | in.func_id = cpu_to_be16(func_id); | 290 | in.func_id = cpu_to_be16(func_id); |
295 | in.num_entries = cpu_to_be16(npages); | 291 | in.num_entries = cpu_to_be32(npages); |
296 | mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); | 292 | mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); |
297 | err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); | 293 | err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); |
298 | if (err) { | 294 | if (err) { |
@@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, | |||
306 | goto out_free; | 302 | goto out_free; |
307 | } | 303 | } |
308 | 304 | ||
309 | num_claimed = be16_to_cpu(out->num_entries); | 305 | num_claimed = be32_to_cpu(out->num_entries); |
310 | if (nclaimed) | 306 | if (nclaimed) |
311 | *nclaimed = num_claimed; | 307 | *nclaimed = num_claimed; |
312 | 308 | ||
@@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work) | |||
345 | } | 341 | } |
346 | 342 | ||
347 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | 343 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, |
348 | s16 npages) | 344 | s32 npages) |
349 | { | 345 | { |
350 | struct mlx5_pages_req *req; | 346 | struct mlx5_pages_req *req; |
351 | 347 | ||
@@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | |||
364 | 360 | ||
365 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) | 361 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) |
366 | { | 362 | { |
367 | u16 uninitialized_var(boot_pages); | ||
368 | s16 uninitialized_var(init_pages); | ||
369 | u16 uninitialized_var(func_id); | 363 | u16 uninitialized_var(func_id); |
364 | s32 uninitialized_var(npages); | ||
370 | int err; | 365 | int err; |
371 | 366 | ||
372 | err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages, | 367 | err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); |
373 | &boot_pages); | ||
374 | if (err) | 368 | if (err) |
375 | return err; | 369 | return err; |
376 | 370 | ||
371 | mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", | ||
372 | npages, boot ? "boot" : "init", func_id); | ||
377 | 373 | ||
378 | mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n", | 374 | return give_pages(dev, func_id, npages, 0); |
379 | init_pages, boot_pages, func_id); | ||
380 | return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0); | ||
381 | } | 375 | } |
382 | 376 | ||
383 | static int optimal_reclaimed_pages(void) | 377 | static int optimal_reclaimed_pages(void) |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index 3fe09ab2d7c9..32675e16021e 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h | |||
@@ -1171,7 +1171,6 @@ typedef struct { | |||
1171 | 1171 | ||
1172 | #define NETXEN_DB_MAPSIZE_BYTES 0x1000 | 1172 | #define NETXEN_DB_MAPSIZE_BYTES 0x1000 |
1173 | 1173 | ||
1174 | #define NETXEN_NETDEV_WEIGHT 128 | ||
1175 | #define NETXEN_ADAPTER_UP_MAGIC 777 | 1174 | #define NETXEN_ADAPTER_UP_MAGIC 777 |
1176 | #define NETXEN_NIC_PEG_TUNE 0 | 1175 | #define NETXEN_NIC_PEG_TUNE 0 |
1177 | 1176 | ||
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index c401b0b4353d..ec4cf7fd4123 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | |||
@@ -197,7 +197,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) | |||
197 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { | 197 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { |
198 | sds_ring = &recv_ctx->sds_rings[ring]; | 198 | sds_ring = &recv_ctx->sds_rings[ring]; |
199 | netif_napi_add(netdev, &sds_ring->napi, | 199 | netif_napi_add(netdev, &sds_ring->napi, |
200 | netxen_nic_poll, NETXEN_NETDEV_WEIGHT); | 200 | netxen_nic_poll, NAPI_POLL_WEIGHT); |
201 | } | 201 | } |
202 | 202 | ||
203 | return 0; | 203 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 92da9980a0a0..9d4bb7f83904 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
@@ -3266,6 +3266,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) | |||
3266 | u8 val; | 3266 | u8 val; |
3267 | int ret, max_sds_rings = adapter->max_sds_rings; | 3267 | int ret, max_sds_rings = adapter->max_sds_rings; |
3268 | 3268 | ||
3269 | if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { | ||
3270 | netdev_info(netdev, "Device is resetting\n"); | ||
3271 | return -EBUSY; | ||
3272 | } | ||
3273 | |||
3269 | if (qlcnic_get_diag_lock(adapter)) { | 3274 | if (qlcnic_get_diag_lock(adapter)) { |
3270 | netdev_info(netdev, "Device in diagnostics mode\n"); | 3275 | netdev_info(netdev, "Device in diagnostics mode\n"); |
3271 | return -EBUSY; | 3276 | return -EBUSY; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 9f4b8d5f0865..345d987aede4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | |||
@@ -629,7 +629,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) | |||
629 | return -EIO; | 629 | return -EIO; |
630 | } | 630 | } |
631 | 631 | ||
632 | qlcnic_set_drv_version(adapter); | 632 | if (adapter->portnum == 0) |
633 | qlcnic_set_drv_version(adapter); | ||
633 | qlcnic_83xx_idc_attach_driver(adapter); | 634 | qlcnic_83xx_idc_attach_driver(adapter); |
634 | 635 | ||
635 | return 0; | 636 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index ee013fcc3322..bc05d016c859 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
@@ -2165,7 +2165,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2165 | if (err) | 2165 | if (err) |
2166 | goto err_out_disable_mbx_intr; | 2166 | goto err_out_disable_mbx_intr; |
2167 | 2167 | ||
2168 | qlcnic_set_drv_version(adapter); | 2168 | if (adapter->portnum == 0) |
2169 | qlcnic_set_drv_version(adapter); | ||
2169 | 2170 | ||
2170 | pci_set_drvdata(pdev, adapter); | 2171 | pci_set_drvdata(pdev, adapter); |
2171 | 2172 | ||
@@ -3085,7 +3086,8 @@ done: | |||
3085 | adapter->fw_fail_cnt = 0; | 3086 | adapter->fw_fail_cnt = 0; |
3086 | adapter->flags &= ~QLCNIC_FW_HANG; | 3087 | adapter->flags &= ~QLCNIC_FW_HANG; |
3087 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | 3088 | clear_bit(__QLCNIC_RESETTING, &adapter->state); |
3088 | qlcnic_set_drv_version(adapter); | 3089 | if (adapter->portnum == 0) |
3090 | qlcnic_set_drv_version(adapter); | ||
3089 | 3091 | ||
3090 | if (!qlcnic_clr_drv_state(adapter)) | 3092 | if (!qlcnic_clr_drv_state(adapter)) |
3091 | qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, | 3093 | qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 10ed82b3baca..660c3f5b2237 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | |||
@@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, | |||
170 | 170 | ||
171 | if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { | 171 | if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { |
172 | err = qlcnic_get_beacon_state(adapter, &h_beacon_state); | 172 | err = qlcnic_get_beacon_state(adapter, &h_beacon_state); |
173 | if (!err) { | 173 | if (err) { |
174 | dev_info(&adapter->pdev->dev, | 174 | netdev_err(adapter->netdev, |
175 | "Failed to get current beacon state\n"); | 175 | "Failed to get current beacon state\n"); |
176 | } else { | 176 | } else { |
177 | if (h_beacon_state == QLCNIC_BEACON_DISABLE) | 177 | if (h_beacon_state == QLCNIC_BEACON_DISABLE) |
178 | ahw->beacon_state = 0; | 178 | ahw->beacon_state = 0; |
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 6f35f8404d68..d2e591955bdd 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c | |||
@@ -524,6 +524,7 @@ rx_status_loop: | |||
524 | PCI_DMA_FROMDEVICE); | 524 | PCI_DMA_FROMDEVICE); |
525 | if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { | 525 | if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { |
526 | dev->stats.rx_dropped++; | 526 | dev->stats.rx_dropped++; |
527 | kfree_skb(new_skb); | ||
527 | goto rx_next; | 528 | goto rx_next; |
528 | } | 529 | } |
529 | 530 | ||
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index b5eb4195fc99..85e5c97191dd 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -7088,7 +7088,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
7088 | 7088 | ||
7089 | RTL_W8(Cfg9346, Cfg9346_Unlock); | 7089 | RTL_W8(Cfg9346, Cfg9346_Unlock); |
7090 | RTL_W8(Config1, RTL_R8(Config1) | PMEnable); | 7090 | RTL_W8(Config1, RTL_R8(Config1) | PMEnable); |
7091 | RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); | 7091 | RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus)); |
7092 | if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) | 7092 | if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) |
7093 | tp->features |= RTL_FEATURE_WOL; | 7093 | tp->features |= RTL_FEATURE_WOL; |
7094 | if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) | 7094 | if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) |
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index 2a469b27a506..30d744235d27 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c | |||
@@ -675,7 +675,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, | |||
675 | BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); | 675 | BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); |
676 | BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != | 676 | BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != |
677 | EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); | 677 | EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); |
678 | rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF; | 678 | rep_index = spec->type - EFX_FILTER_UC_DEF; |
679 | ins_index = rep_index; | 679 | ins_index = rep_index; |
680 | 680 | ||
681 | spin_lock_bh(&state->lock); | 681 | spin_lock_bh(&state->lock); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index c9d942a5c335..1ef9d8a555aa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c | |||
@@ -33,10 +33,15 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; | 33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; |
34 | unsigned int txsize = priv->dma_tx_size; | 34 | unsigned int txsize = priv->dma_tx_size; |
35 | unsigned int entry = priv->cur_tx % txsize; | 35 | unsigned int entry = priv->cur_tx % txsize; |
36 | struct dma_desc *desc = priv->dma_tx + entry; | 36 | struct dma_desc *desc; |
37 | unsigned int nopaged_len = skb_headlen(skb); | 37 | unsigned int nopaged_len = skb_headlen(skb); |
38 | unsigned int bmax, len; | 38 | unsigned int bmax, len; |
39 | 39 | ||
40 | if (priv->extend_desc) | ||
41 | desc = (struct dma_desc *)(priv->dma_etx + entry); | ||
42 | else | ||
43 | desc = priv->dma_tx + entry; | ||
44 | |||
40 | if (priv->plat->enh_desc) | 45 | if (priv->plat->enh_desc) |
41 | bmax = BUF_SIZE_8KiB; | 46 | bmax = BUF_SIZE_8KiB; |
42 | else | 47 | else |
@@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
54 | STMMAC_RING_MODE); | 59 | STMMAC_RING_MODE); |
55 | wmb(); | 60 | wmb(); |
56 | entry = (++priv->cur_tx) % txsize; | 61 | entry = (++priv->cur_tx) % txsize; |
57 | desc = priv->dma_tx + entry; | 62 | |
63 | if (priv->extend_desc) | ||
64 | desc = (struct dma_desc *)(priv->dma_etx + entry); | ||
65 | else | ||
66 | desc = priv->dma_tx + entry; | ||
58 | 67 | ||
59 | desc->des2 = dma_map_single(priv->device, skb->data + bmax, | 68 | desc->des2 = dma_map_single(priv->device, skb->data + bmax, |
60 | len, DMA_TO_DEVICE); | 69 | len, DMA_TO_DEVICE); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f2ccb36e8685..0a9bb9d30c3f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
939 | 939 | ||
940 | skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, | 940 | skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, |
941 | GFP_KERNEL); | 941 | GFP_KERNEL); |
942 | if (unlikely(skb == NULL)) { | 942 | if (!skb) { |
943 | pr_err("%s: Rx init fails; skb is NULL\n", __func__); | 943 | pr_err("%s: Rx init fails; skb is NULL\n", __func__); |
944 | return 1; | 944 | return -ENOMEM; |
945 | } | 945 | } |
946 | skb_reserve(skb, NET_IP_ALIGN); | 946 | skb_reserve(skb, NET_IP_ALIGN); |
947 | priv->rx_skbuff[i] = skb; | 947 | priv->rx_skbuff[i] = skb; |
948 | priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, | 948 | priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, |
949 | priv->dma_buf_sz, | 949 | priv->dma_buf_sz, |
950 | DMA_FROM_DEVICE); | 950 | DMA_FROM_DEVICE); |
951 | if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { | ||
952 | pr_err("%s: DMA mapping error\n", __func__); | ||
953 | dev_kfree_skb_any(skb); | ||
954 | return -EINVAL; | ||
955 | } | ||
951 | 956 | ||
952 | p->des2 = priv->rx_skbuff_dma[i]; | 957 | p->des2 = priv->rx_skbuff_dma[i]; |
953 | 958 | ||
@@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
958 | return 0; | 963 | return 0; |
959 | } | 964 | } |
960 | 965 | ||
966 | static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) | ||
967 | { | ||
968 | if (priv->rx_skbuff[i]) { | ||
969 | dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], | ||
970 | priv->dma_buf_sz, DMA_FROM_DEVICE); | ||
971 | dev_kfree_skb_any(priv->rx_skbuff[i]); | ||
972 | } | ||
973 | priv->rx_skbuff[i] = NULL; | ||
974 | } | ||
975 | |||
961 | /** | 976 | /** |
962 | * init_dma_desc_rings - init the RX/TX descriptor rings | 977 | * init_dma_desc_rings - init the RX/TX descriptor rings |
963 | * @dev: net device structure | 978 | * @dev: net device structure |
@@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
965 | * and allocates the socket buffers. It suppors the chained and ring | 980 | * and allocates the socket buffers. It suppors the chained and ring |
966 | * modes. | 981 | * modes. |
967 | */ | 982 | */ |
968 | static void init_dma_desc_rings(struct net_device *dev) | 983 | static int init_dma_desc_rings(struct net_device *dev) |
969 | { | 984 | { |
970 | int i; | 985 | int i; |
971 | struct stmmac_priv *priv = netdev_priv(dev); | 986 | struct stmmac_priv *priv = netdev_priv(dev); |
972 | unsigned int txsize = priv->dma_tx_size; | 987 | unsigned int txsize = priv->dma_tx_size; |
973 | unsigned int rxsize = priv->dma_rx_size; | 988 | unsigned int rxsize = priv->dma_rx_size; |
974 | unsigned int bfsize = 0; | 989 | unsigned int bfsize = 0; |
990 | int ret = -ENOMEM; | ||
975 | 991 | ||
976 | /* Set the max buffer size according to the DESC mode | 992 | /* Set the max buffer size according to the DESC mode |
977 | * and the MTU. Note that RING mode allows 16KiB bsize. | 993 | * and the MTU. Note that RING mode allows 16KiB bsize. |
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
992 | dma_extended_desc), | 1008 | dma_extended_desc), |
993 | &priv->dma_rx_phy, | 1009 | &priv->dma_rx_phy, |
994 | GFP_KERNEL); | 1010 | GFP_KERNEL); |
1011 | if (!priv->dma_erx) | ||
1012 | goto err_dma; | ||
1013 | |||
995 | priv->dma_etx = dma_alloc_coherent(priv->device, txsize * | 1014 | priv->dma_etx = dma_alloc_coherent(priv->device, txsize * |
996 | sizeof(struct | 1015 | sizeof(struct |
997 | dma_extended_desc), | 1016 | dma_extended_desc), |
998 | &priv->dma_tx_phy, | 1017 | &priv->dma_tx_phy, |
999 | GFP_KERNEL); | 1018 | GFP_KERNEL); |
1000 | if ((!priv->dma_erx) || (!priv->dma_etx)) | 1019 | if (!priv->dma_etx) { |
1001 | return; | 1020 | dma_free_coherent(priv->device, priv->dma_rx_size * |
1021 | sizeof(struct dma_extended_desc), | ||
1022 | priv->dma_erx, priv->dma_rx_phy); | ||
1023 | goto err_dma; | ||
1024 | } | ||
1002 | } else { | 1025 | } else { |
1003 | priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * | 1026 | priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * |
1004 | sizeof(struct dma_desc), | 1027 | sizeof(struct dma_desc), |
1005 | &priv->dma_rx_phy, | 1028 | &priv->dma_rx_phy, |
1006 | GFP_KERNEL); | 1029 | GFP_KERNEL); |
1030 | if (!priv->dma_rx) | ||
1031 | goto err_dma; | ||
1032 | |||
1007 | priv->dma_tx = dma_alloc_coherent(priv->device, txsize * | 1033 | priv->dma_tx = dma_alloc_coherent(priv->device, txsize * |
1008 | sizeof(struct dma_desc), | 1034 | sizeof(struct dma_desc), |
1009 | &priv->dma_tx_phy, | 1035 | &priv->dma_tx_phy, |
1010 | GFP_KERNEL); | 1036 | GFP_KERNEL); |
1011 | if ((!priv->dma_rx) || (!priv->dma_tx)) | 1037 | if (!priv->dma_tx) { |
1012 | return; | 1038 | dma_free_coherent(priv->device, priv->dma_rx_size * |
1039 | sizeof(struct dma_desc), | ||
1040 | priv->dma_rx, priv->dma_rx_phy); | ||
1041 | goto err_dma; | ||
1042 | } | ||
1013 | } | 1043 | } |
1014 | 1044 | ||
1015 | priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), | 1045 | priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), |
1016 | GFP_KERNEL); | 1046 | GFP_KERNEL); |
1047 | if (!priv->rx_skbuff_dma) | ||
1048 | goto err_rx_skbuff_dma; | ||
1049 | |||
1017 | priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), | 1050 | priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), |
1018 | GFP_KERNEL); | 1051 | GFP_KERNEL); |
1052 | if (!priv->rx_skbuff) | ||
1053 | goto err_rx_skbuff; | ||
1054 | |||
1019 | priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), | 1055 | priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), |
1020 | GFP_KERNEL); | 1056 | GFP_KERNEL); |
1057 | if (!priv->tx_skbuff_dma) | ||
1058 | goto err_tx_skbuff_dma; | ||
1059 | |||
1021 | priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), | 1060 | priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), |
1022 | GFP_KERNEL); | 1061 | GFP_KERNEL); |
1062 | if (!priv->tx_skbuff) | ||
1063 | goto err_tx_skbuff; | ||
1064 | |||
1023 | if (netif_msg_probe(priv)) { | 1065 | if (netif_msg_probe(priv)) { |
1024 | pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, | 1066 | pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, |
1025 | (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); | 1067 | (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); |
@@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
1034 | else | 1076 | else |
1035 | p = priv->dma_rx + i; | 1077 | p = priv->dma_rx + i; |
1036 | 1078 | ||
1037 | if (stmmac_init_rx_buffers(priv, p, i)) | 1079 | ret = stmmac_init_rx_buffers(priv, p, i); |
1038 | break; | 1080 | if (ret) |
1081 | goto err_init_rx_buffers; | ||
1039 | 1082 | ||
1040 | if (netif_msg_probe(priv)) | 1083 | if (netif_msg_probe(priv)) |
1041 | pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], | 1084 | pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], |
@@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
1081 | 1124 | ||
1082 | if (netif_msg_hw(priv)) | 1125 | if (netif_msg_hw(priv)) |
1083 | stmmac_display_rings(priv); | 1126 | stmmac_display_rings(priv); |
1127 | |||
1128 | return 0; | ||
1129 | err_init_rx_buffers: | ||
1130 | while (--i >= 0) | ||
1131 | stmmac_free_rx_buffers(priv, i); | ||
1132 | kfree(priv->tx_skbuff); | ||
1133 | err_tx_skbuff: | ||
1134 | kfree(priv->tx_skbuff_dma); | ||
1135 | err_tx_skbuff_dma: | ||
1136 | kfree(priv->rx_skbuff); | ||
1137 | err_rx_skbuff: | ||
1138 | kfree(priv->rx_skbuff_dma); | ||
1139 | err_rx_skbuff_dma: | ||
1140 | if (priv->extend_desc) { | ||
1141 | dma_free_coherent(priv->device, priv->dma_tx_size * | ||
1142 | sizeof(struct dma_extended_desc), | ||
1143 | priv->dma_etx, priv->dma_tx_phy); | ||
1144 | dma_free_coherent(priv->device, priv->dma_rx_size * | ||
1145 | sizeof(struct dma_extended_desc), | ||
1146 | priv->dma_erx, priv->dma_rx_phy); | ||
1147 | } else { | ||
1148 | dma_free_coherent(priv->device, | ||
1149 | priv->dma_tx_size * sizeof(struct dma_desc), | ||
1150 | priv->dma_tx, priv->dma_tx_phy); | ||
1151 | dma_free_coherent(priv->device, | ||
1152 | priv->dma_rx_size * sizeof(struct dma_desc), | ||
1153 | priv->dma_rx, priv->dma_rx_phy); | ||
1154 | } | ||
1155 | err_dma: | ||
1156 | return ret; | ||
1084 | } | 1157 | } |
1085 | 1158 | ||
1086 | static void dma_free_rx_skbufs(struct stmmac_priv *priv) | 1159 | static void dma_free_rx_skbufs(struct stmmac_priv *priv) |
1087 | { | 1160 | { |
1088 | int i; | 1161 | int i; |
1089 | 1162 | ||
1090 | for (i = 0; i < priv->dma_rx_size; i++) { | 1163 | for (i = 0; i < priv->dma_rx_size; i++) |
1091 | if (priv->rx_skbuff[i]) { | 1164 | stmmac_free_rx_buffers(priv, i); |
1092 | dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], | ||
1093 | priv->dma_buf_sz, DMA_FROM_DEVICE); | ||
1094 | dev_kfree_skb_any(priv->rx_skbuff[i]); | ||
1095 | } | ||
1096 | priv->rx_skbuff[i] = NULL; | ||
1097 | } | ||
1098 | } | 1165 | } |
1099 | 1166 | ||
1100 | static void dma_free_tx_skbufs(struct stmmac_priv *priv) | 1167 | static void dma_free_tx_skbufs(struct stmmac_priv *priv) |
@@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev) | |||
1560 | priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); | 1627 | priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); |
1561 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); | 1628 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); |
1562 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); | 1629 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); |
1563 | init_dma_desc_rings(dev); | 1630 | |
1631 | ret = init_dma_desc_rings(dev); | ||
1632 | if (ret < 0) { | ||
1633 | pr_err("%s: DMA descriptors initialization failed\n", __func__); | ||
1634 | goto dma_desc_error; | ||
1635 | } | ||
1564 | 1636 | ||
1565 | /* DMA initialization and SW reset */ | 1637 | /* DMA initialization and SW reset */ |
1566 | ret = stmmac_init_dma_engine(priv); | 1638 | ret = stmmac_init_dma_engine(priv); |
1567 | if (ret < 0) { | 1639 | if (ret < 0) { |
1568 | pr_err("%s: DMA initialization failed\n", __func__); | 1640 | pr_err("%s: DMA engine initialization failed\n", __func__); |
1569 | goto init_error; | 1641 | goto init_error; |
1570 | } | 1642 | } |
1571 | 1643 | ||
@@ -1672,6 +1744,7 @@ wolirq_error: | |||
1672 | 1744 | ||
1673 | init_error: | 1745 | init_error: |
1674 | free_dma_desc_resources(priv); | 1746 | free_dma_desc_resources(priv); |
1747 | dma_desc_error: | ||
1675 | if (priv->phydev) | 1748 | if (priv->phydev) |
1676 | phy_disconnect(priv->phydev); | 1749 | phy_disconnect(priv->phydev); |
1677 | phy_error: | 1750 | phy_error: |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 03de76c7a177..1c83a44c547b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -71,14 +71,18 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, | |||
71 | plat->force_sf_dma_mode = 1; | 71 | plat->force_sf_dma_mode = 1; |
72 | } | 72 | } |
73 | 73 | ||
74 | dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); | 74 | if (of_find_property(np, "snps,pbl", NULL)) { |
75 | if (!dma_cfg) | 75 | dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), |
76 | return -ENOMEM; | 76 | GFP_KERNEL); |
77 | 77 | if (!dma_cfg) | |
78 | plat->dma_cfg = dma_cfg; | 78 | return -ENOMEM; |
79 | of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); | 79 | plat->dma_cfg = dma_cfg; |
80 | dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); | 80 | of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); |
81 | dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); | 81 | dma_cfg->fixed_burst = |
82 | of_property_read_bool(np, "snps,fixed-burst"); | ||
83 | dma_cfg->mixed_burst = | ||
84 | of_property_read_bool(np, "snps,mixed-burst"); | ||
85 | } | ||
82 | 86 | ||
83 | return 0; | 87 | return 0; |
84 | } | 88 | } |
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index ad32af67e618..9c805e0c0cae 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c | |||
@@ -1466,8 +1466,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev, | |||
1466 | { | 1466 | { |
1467 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; | 1467 | netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; |
1468 | /* NAPI */ | 1468 | /* NAPI */ |
1469 | netif_napi_add(netdev, napi, | 1469 | netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT); |
1470 | gelic_net_poll, GELIC_NET_NAPI_WEIGHT); | ||
1471 | netdev->ethtool_ops = &gelic_ether_ethtool_ops; | 1470 | netdev->ethtool_ops = &gelic_ether_ethtool_ops; |
1472 | netdev->netdev_ops = &gelic_netdevice_ops; | 1471 | netdev->netdev_ops = &gelic_netdevice_ops; |
1473 | } | 1472 | } |
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index a93df6ac1909..309abb472aa2 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h | |||
@@ -37,7 +37,6 @@ | |||
37 | #define GELIC_NET_RXBUF_ALIGN 128 | 37 | #define GELIC_NET_RXBUF_ALIGN 128 |
38 | #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ | 38 | #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ |
39 | #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ | 39 | #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ |
40 | #define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS) | ||
41 | #define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL | 40 | #define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL |
42 | 41 | ||
43 | #define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ | 42 | #define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ |
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 1d6dc41f755d..d01cacf8a7c2 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c | |||
@@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
2100 | 2100 | ||
2101 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); | 2101 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); |
2102 | } | 2102 | } |
2103 | netif_rx(skb); | 2103 | netif_receive_skb(skb); |
2104 | 2104 | ||
2105 | stats->rx_bytes += pkt_len; | 2105 | stats->rx_bytes += pkt_len; |
2106 | stats->rx_packets++; | 2106 | stats->rx_packets++; |
@@ -2884,6 +2884,7 @@ out: | |||
2884 | return ret; | 2884 | return ret; |
2885 | 2885 | ||
2886 | err_iounmap: | 2886 | err_iounmap: |
2887 | netif_napi_del(&vptr->napi); | ||
2887 | iounmap(regs); | 2888 | iounmap(regs); |
2888 | err_free_dev: | 2889 | err_free_dev: |
2889 | free_netdev(netdev); | 2890 | free_netdev(netdev); |
@@ -2904,6 +2905,7 @@ static int velocity_remove(struct device *dev) | |||
2904 | struct velocity_info *vptr = netdev_priv(netdev); | 2905 | struct velocity_info *vptr = netdev_priv(netdev); |
2905 | 2906 | ||
2906 | unregister_netdev(netdev); | 2907 | unregister_netdev(netdev); |
2908 | netif_napi_del(&vptr->napi); | ||
2907 | iounmap(vptr->mac_regs); | 2909 | iounmap(vptr->mac_regs); |
2908 | free_netdev(netdev); | 2910 | free_netdev(netdev); |
2909 | velocity_nics--; | 2911 | velocity_nics--; |
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index e90e1f46121e..64b4639f43b6 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | |||
@@ -175,6 +175,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np) | |||
175 | printk(KERN_WARNING "Setting MDIO clock divisor to " | 175 | printk(KERN_WARNING "Setting MDIO clock divisor to " |
176 | "default %d\n", DEFAULT_CLOCK_DIVISOR); | 176 | "default %d\n", DEFAULT_CLOCK_DIVISOR); |
177 | clk_div = DEFAULT_CLOCK_DIVISOR; | 177 | clk_div = DEFAULT_CLOCK_DIVISOR; |
178 | of_node_put(np1); | ||
178 | goto issue; | 179 | goto issue; |
179 | } | 180 | } |
180 | 181 | ||
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index 51f2bc376101..2dcc60fb37f1 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c | |||
@@ -210,8 +210,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) | |||
210 | pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0)); | 210 | pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0)); |
211 | pci_write_config_byte(pcidev,0x5a,0xc0); | 211 | pci_write_config_byte(pcidev,0x5a,0xc0); |
212 | WriteLPCReg(0x28, 0x70 ); | 212 | WriteLPCReg(0x28, 0x70 ); |
213 | if (via_ircc_open(pcidev, &info, 0x3076) == 0) | 213 | rc = via_ircc_open(pcidev, &info, 0x3076); |
214 | rc=0; | ||
215 | } else | 214 | } else |
216 | rc = -ENODEV; //IR not turn on | 215 | rc = -ENODEV; //IR not turn on |
217 | } else { //Not VT1211 | 216 | } else { //Not VT1211 |
@@ -249,8 +248,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) | |||
249 | info.irq=FirIRQ; | 248 | info.irq=FirIRQ; |
250 | info.dma=FirDRQ1; | 249 | info.dma=FirDRQ1; |
251 | info.dma2=FirDRQ0; | 250 | info.dma2=FirDRQ0; |
252 | if (via_ircc_open(pcidev, &info, 0x3096) == 0) | 251 | rc = via_ircc_open(pcidev, &info, 0x3096); |
253 | rc=0; | ||
254 | } else | 252 | } else |
255 | rc = -ENODEV; //IR not turn on !!!!! | 253 | rc = -ENODEV; //IR not turn on !!!!! |
256 | }//Not VT1211 | 254 | }//Not VT1211 |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d0f9c2fd1d4f..16b43bf544b7 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -739,6 +739,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) | |||
739 | return -EADDRNOTAVAIL; | 739 | return -EADDRNOTAVAIL; |
740 | } | 740 | } |
741 | 741 | ||
742 | if (data && data[IFLA_MACVLAN_FLAGS] && | ||
743 | nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC) | ||
744 | return -EINVAL; | ||
745 | |||
742 | if (data && data[IFLA_MACVLAN_MODE]) { | 746 | if (data && data[IFLA_MACVLAN_MODE]) { |
743 | switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { | 747 | switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { |
744 | case MACVLAN_MODE_PRIVATE: | 748 | case MACVLAN_MODE_PRIVATE: |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a98fb0ed6aef..ea53abb20988 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -68,6 +68,8 @@ static const struct proto_ops macvtap_socket_ops; | |||
68 | #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ | 68 | #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ |
69 | NETIF_F_TSO6 | NETIF_F_UFO) | 69 | NETIF_F_TSO6 | NETIF_F_UFO) |
70 | #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) | 70 | #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) |
71 | #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) | ||
72 | |||
71 | /* | 73 | /* |
72 | * RCU usage: | 74 | * RCU usage: |
73 | * The macvtap_queue and the macvlan_dev are loosely coupled, the | 75 | * The macvtap_queue and the macvlan_dev are loosely coupled, the |
@@ -278,7 +280,8 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb) | |||
278 | { | 280 | { |
279 | struct macvlan_dev *vlan = netdev_priv(dev); | 281 | struct macvlan_dev *vlan = netdev_priv(dev); |
280 | struct macvtap_queue *q = macvtap_get_queue(dev, skb); | 282 | struct macvtap_queue *q = macvtap_get_queue(dev, skb); |
281 | netdev_features_t features; | 283 | netdev_features_t features = TAP_FEATURES; |
284 | |||
282 | if (!q) | 285 | if (!q) |
283 | goto drop; | 286 | goto drop; |
284 | 287 | ||
@@ -287,9 +290,11 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb) | |||
287 | 290 | ||
288 | skb->dev = dev; | 291 | skb->dev = dev; |
289 | /* Apply the forward feature mask so that we perform segmentation | 292 | /* Apply the forward feature mask so that we perform segmentation |
290 | * according to users wishes. | 293 | * according to users wishes. This only works if VNET_HDR is |
294 | * enabled. | ||
291 | */ | 295 | */ |
292 | features = netif_skb_features(skb) & vlan->tap_features; | 296 | if (q->flags & IFF_VNET_HDR) |
297 | features |= vlan->tap_features; | ||
293 | if (netif_needs_gso(skb, features)) { | 298 | if (netif_needs_gso(skb, features)) { |
294 | struct sk_buff *segs = __skb_gso_segment(skb, features, false); | 299 | struct sk_buff *segs = __skb_gso_segment(skb, features, false); |
295 | 300 | ||
@@ -818,10 +823,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, | |||
818 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; | 823 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; |
819 | skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; | 824 | skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; |
820 | } | 825 | } |
821 | if (vlan) | 826 | if (vlan) { |
827 | local_bh_disable(); | ||
822 | macvlan_start_xmit(skb, vlan->dev); | 828 | macvlan_start_xmit(skb, vlan->dev); |
823 | else | 829 | local_bh_enable(); |
830 | } else { | ||
824 | kfree_skb(skb); | 831 | kfree_skb(skb); |
832 | } | ||
825 | rcu_read_unlock(); | 833 | rcu_read_unlock(); |
826 | 834 | ||
827 | return total_len; | 835 | return total_len; |
@@ -912,8 +920,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, | |||
912 | done: | 920 | done: |
913 | rcu_read_lock(); | 921 | rcu_read_lock(); |
914 | vlan = rcu_dereference(q->vlan); | 922 | vlan = rcu_dereference(q->vlan); |
915 | if (vlan) | 923 | if (vlan) { |
924 | preempt_disable(); | ||
916 | macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); | 925 | macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); |
926 | preempt_enable(); | ||
927 | } | ||
917 | rcu_read_unlock(); | 928 | rcu_read_unlock(); |
918 | 929 | ||
919 | return ret ? ret : copied; | 930 | return ret ? ret : copied; |
@@ -1058,8 +1069,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg) | |||
1058 | /* tap_features are the same as features on tun/tap and | 1069 | /* tap_features are the same as features on tun/tap and |
1059 | * reflect user expectations. | 1070 | * reflect user expectations. |
1060 | */ | 1071 | */ |
1061 | vlan->tap_features = vlan->dev->features & | 1072 | vlan->tap_features = feature_mask; |
1062 | (feature_mask | ~TUN_OFFLOADS); | ||
1063 | vlan->set_features = features; | 1073 | vlan->set_features = features; |
1064 | netdev_update_features(vlan->dev); | 1074 | netdev_update_features(vlan->dev); |
1065 | 1075 | ||
@@ -1155,10 +1165,6 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, | |||
1155 | TUN_F_TSO_ECN | TUN_F_UFO)) | 1165 | TUN_F_TSO_ECN | TUN_F_UFO)) |
1156 | return -EINVAL; | 1166 | return -EINVAL; |
1157 | 1167 | ||
1158 | /* TODO: only accept frames with the features that | ||
1159 | got enabled for forwarded frames */ | ||
1160 | if (!(q->flags & IFF_VNET_HDR)) | ||
1161 | return -EINVAL; | ||
1162 | rtnl_lock(); | 1168 | rtnl_lock(); |
1163 | ret = set_offload(q, arg); | 1169 | ret = set_offload(q, arg); |
1164 | rtnl_unlock(); | 1170 | rtnl_unlock(); |
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 8e7af8354342..138de837977f 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #define RTL821x_INER_INIT 0x6400 | 23 | #define RTL821x_INER_INIT 0x6400 |
24 | #define RTL821x_INSR 0x13 | 24 | #define RTL821x_INSR 0x13 |
25 | 25 | ||
26 | #define RTL8211E_INER_LINK_STAT 0x10 | 26 | #define RTL8211E_INER_LINK_STATUS 0x400 |
27 | 27 | ||
28 | MODULE_DESCRIPTION("Realtek PHY driver"); | 28 | MODULE_DESCRIPTION("Realtek PHY driver"); |
29 | MODULE_AUTHOR("Johnson Leung"); | 29 | MODULE_AUTHOR("Johnson Leung"); |
@@ -57,7 +57,7 @@ static int rtl8211e_config_intr(struct phy_device *phydev) | |||
57 | 57 | ||
58 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) | 58 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) |
59 | err = phy_write(phydev, RTL821x_INER, | 59 | err = phy_write(phydev, RTL821x_INER, |
60 | RTL8211E_INER_LINK_STAT); | 60 | RTL8211E_INER_LINK_STATUS); |
61 | else | 61 | else |
62 | err = phy_write(phydev, RTL821x_INER, 0); | 62 | err = phy_write(phydev, RTL821x_INER, 0); |
63 | 63 | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index db690a372260..71af122edf2d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1074,8 +1074,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1074 | u32 rxhash; | 1074 | u32 rxhash; |
1075 | 1075 | ||
1076 | if (!(tun->flags & TUN_NO_PI)) { | 1076 | if (!(tun->flags & TUN_NO_PI)) { |
1077 | if ((len -= sizeof(pi)) > total_len) | 1077 | if (len < sizeof(pi)) |
1078 | return -EINVAL; | 1078 | return -EINVAL; |
1079 | len -= sizeof(pi); | ||
1079 | 1080 | ||
1080 | if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) | 1081 | if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) |
1081 | return -EFAULT; | 1082 | return -EFAULT; |
@@ -1083,8 +1084,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1083 | } | 1084 | } |
1084 | 1085 | ||
1085 | if (tun->flags & TUN_VNET_HDR) { | 1086 | if (tun->flags & TUN_VNET_HDR) { |
1086 | if ((len -= tun->vnet_hdr_sz) > total_len) | 1087 | if (len < tun->vnet_hdr_sz) |
1087 | return -EINVAL; | 1088 | return -EINVAL; |
1089 | len -= tun->vnet_hdr_sz; | ||
1088 | 1090 | ||
1089 | if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) | 1091 | if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) |
1090 | return -EFAULT; | 1092 | return -EFAULT; |
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 872819851aef..25ba7eca9a13 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c | |||
@@ -400,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = { | |||
400 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | 400 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), |
401 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, | 401 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, |
402 | }, | 402 | }, |
403 | /* HP hs2434 Mobile Broadband Module needs ZLPs */ | ||
404 | { USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | ||
405 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, | ||
406 | }, | ||
403 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | 407 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), |
404 | .driver_info = (unsigned long)&cdc_mbim_info, | 408 | .driver_info = (unsigned long)&cdc_mbim_info, |
405 | }, | 409 | }, |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index cba1d46e672e..86292e6aaf49 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -2816,13 +2816,16 @@ exit: | |||
2816 | static int hso_get_config_data(struct usb_interface *interface) | 2816 | static int hso_get_config_data(struct usb_interface *interface) |
2817 | { | 2817 | { |
2818 | struct usb_device *usbdev = interface_to_usbdev(interface); | 2818 | struct usb_device *usbdev = interface_to_usbdev(interface); |
2819 | u8 config_data[17]; | 2819 | u8 *config_data = kmalloc(17, GFP_KERNEL); |
2820 | u32 if_num = interface->altsetting->desc.bInterfaceNumber; | 2820 | u32 if_num = interface->altsetting->desc.bInterfaceNumber; |
2821 | s32 result; | 2821 | s32 result; |
2822 | 2822 | ||
2823 | if (!config_data) | ||
2824 | return -ENOMEM; | ||
2823 | if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), | 2825 | if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), |
2824 | 0x86, 0xC0, 0, 0, config_data, 17, | 2826 | 0x86, 0xC0, 0, 0, config_data, 17, |
2825 | USB_CTRL_SET_TIMEOUT) != 0x11) { | 2827 | USB_CTRL_SET_TIMEOUT) != 0x11) { |
2828 | kfree(config_data); | ||
2826 | return -EIO; | 2829 | return -EIO; |
2827 | } | 2830 | } |
2828 | 2831 | ||
@@ -2873,6 +2876,7 @@ static int hso_get_config_data(struct usb_interface *interface) | |||
2873 | if (config_data[16] & 0x1) | 2876 | if (config_data[16] & 0x1) |
2874 | result |= HSO_INFO_CRC_BUG; | 2877 | result |= HSO_INFO_CRC_BUG; |
2875 | 2878 | ||
2879 | kfree(config_data); | ||
2876 | return result; | 2880 | return result; |
2877 | } | 2881 | } |
2878 | 2882 | ||
@@ -2886,6 +2890,11 @@ static int hso_probe(struct usb_interface *interface, | |||
2886 | struct hso_shared_int *shared_int; | 2890 | struct hso_shared_int *shared_int; |
2887 | struct hso_device *tmp_dev = NULL; | 2891 | struct hso_device *tmp_dev = NULL; |
2888 | 2892 | ||
2893 | if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) { | ||
2894 | dev_err(&interface->dev, "Not our interface\n"); | ||
2895 | return -ENODEV; | ||
2896 | } | ||
2897 | |||
2889 | if_num = interface->altsetting->desc.bInterfaceNumber; | 2898 | if_num = interface->altsetting->desc.bInterfaceNumber; |
2890 | 2899 | ||
2891 | /* Get the interface/port specification from either driver_info or from | 2900 | /* Get the interface/port specification from either driver_info or from |
@@ -2895,10 +2904,6 @@ static int hso_probe(struct usb_interface *interface, | |||
2895 | else | 2904 | else |
2896 | port_spec = hso_get_config_data(interface); | 2905 | port_spec = hso_get_config_data(interface); |
2897 | 2906 | ||
2898 | if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) { | ||
2899 | dev_err(&interface->dev, "Not our interface\n"); | ||
2900 | return -ENODEV; | ||
2901 | } | ||
2902 | /* Check if we need to switch to alt interfaces prior to port | 2907 | /* Check if we need to switch to alt interfaces prior to port |
2903 | * configuration */ | 2908 | * configuration */ |
2904 | if (interface->num_altsetting > 1) | 2909 | if (interface->num_altsetting > 1) |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f4c6db419ddb..767f7af3bd40 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -1386,7 +1386,7 @@ static int vxlan_open(struct net_device *dev) | |||
1386 | return -ENOTCONN; | 1386 | return -ENOTCONN; |
1387 | 1387 | ||
1388 | if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && | 1388 | if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && |
1389 | ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { | 1389 | vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { |
1390 | vxlan_sock_hold(vs); | 1390 | vxlan_sock_hold(vs); |
1391 | dev_hold(dev); | 1391 | dev_hold(dev); |
1392 | queue_work(vxlan_wq, &vxlan->igmp_join); | 1392 | queue_work(vxlan_wq, &vxlan->igmp_join); |
@@ -1793,8 +1793,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) | |||
1793 | struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); | 1793 | struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); |
1794 | struct vxlan_dev *vxlan = netdev_priv(dev); | 1794 | struct vxlan_dev *vxlan = netdev_priv(dev); |
1795 | 1795 | ||
1796 | flush_workqueue(vxlan_wq); | ||
1797 | |||
1798 | spin_lock(&vn->sock_lock); | 1796 | spin_lock(&vn->sock_lock); |
1799 | hlist_del_rcu(&vxlan->hlist); | 1797 | hlist_del_rcu(&vxlan->hlist); |
1800 | spin_unlock(&vn->sock_lock); | 1798 | spin_unlock(&vn->sock_lock); |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index e602c9519709..c028df76b564 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | |||
@@ -448,6 +448,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv, | |||
448 | struct ieee80211_conf *cur_conf = &priv->hw->conf; | 448 | struct ieee80211_conf *cur_conf = &priv->hw->conf; |
449 | bool txok; | 449 | bool txok; |
450 | int slot; | 450 | int slot; |
451 | int hdrlen, padsize; | ||
451 | 452 | ||
452 | slot = strip_drv_header(priv, skb); | 453 | slot = strip_drv_header(priv, skb); |
453 | if (slot < 0) { | 454 | if (slot < 0) { |
@@ -504,6 +505,15 @@ send_mac80211: | |||
504 | 505 | ||
505 | ath9k_htc_tx_clear_slot(priv, slot); | 506 | ath9k_htc_tx_clear_slot(priv, slot); |
506 | 507 | ||
508 | /* Remove padding before handing frame back to mac80211 */ | ||
509 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | ||
510 | |||
511 | padsize = hdrlen & 3; | ||
512 | if (padsize && skb->len > hdrlen + padsize) { | ||
513 | memmove(skb->data + padsize, skb->data, hdrlen); | ||
514 | skb_pull(skb, padsize); | ||
515 | } | ||
516 | |||
507 | /* Send status to mac80211 */ | 517 | /* Send status to mac80211 */ |
508 | ieee80211_tx_status(priv->hw, skb); | 518 | ieee80211_tx_status(priv->hw, skb); |
509 | } | 519 | } |
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 16f8b201642b..026a2a067b46 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c | |||
@@ -802,7 +802,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) | |||
802 | IEEE80211_HW_PS_NULLFUNC_STACK | | 802 | IEEE80211_HW_PS_NULLFUNC_STACK | |
803 | IEEE80211_HW_SPECTRUM_MGMT | | 803 | IEEE80211_HW_SPECTRUM_MGMT | |
804 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | | 804 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | |
805 | IEEE80211_HW_SUPPORTS_RC_TABLE; | 805 | IEEE80211_HW_SUPPORTS_RC_TABLE | |
806 | IEEE80211_HW_SUPPORTS_HT_CCK_RATES; | ||
806 | 807 | ||
807 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { | 808 | if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { |
808 | hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; | 809 | hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 1737a3e33685..cb5a65553ac7 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -173,8 +173,7 @@ static void ath_restart_work(struct ath_softc *sc) | |||
173 | { | 173 | { |
174 | ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); | 174 | ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); |
175 | 175 | ||
176 | if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) || | 176 | if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah)) |
177 | AR_SREV_9550(sc->sc_ah)) | ||
178 | ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, | 177 | ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, |
179 | msecs_to_jiffies(ATH_PLL_WORK_INTERVAL)); | 178 | msecs_to_jiffies(ATH_PLL_WORK_INTERVAL)); |
180 | 179 | ||
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 4a33c6e39ca2..349fa22a921a 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c | |||
@@ -1860,7 +1860,8 @@ void *carl9170_alloc(size_t priv_size) | |||
1860 | IEEE80211_HW_PS_NULLFUNC_STACK | | 1860 | IEEE80211_HW_PS_NULLFUNC_STACK | |
1861 | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | | 1861 | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | |
1862 | IEEE80211_HW_SUPPORTS_RC_TABLE | | 1862 | IEEE80211_HW_SUPPORTS_RC_TABLE | |
1863 | IEEE80211_HW_SIGNAL_DBM; | 1863 | IEEE80211_HW_SIGNAL_DBM | |
1864 | IEEE80211_HW_SUPPORTS_HT_CCK_RATES; | ||
1864 | 1865 | ||
1865 | if (!modparam_noht) { | 1866 | if (!modparam_noht) { |
1866 | /* | 1867 | /* |
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c index 7365674366f4..010b252be584 100644 --- a/drivers/net/wireless/cw1200/sta.c +++ b/drivers/net/wireless/cw1200/sta.c | |||
@@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv) | |||
1406 | if (!priv->join_status) | 1406 | if (!priv->join_status) |
1407 | goto done; | 1407 | goto done; |
1408 | 1408 | ||
1409 | if (priv->join_status > CW1200_JOIN_STATUS_IBSS) { | 1409 | if (priv->join_status == CW1200_JOIN_STATUS_AP) |
1410 | wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n", | 1410 | goto done; |
1411 | priv->join_status); | ||
1412 | BUG_ON(1); | ||
1413 | } | ||
1414 | 1411 | ||
1415 | cancel_work_sync(&priv->update_filtering_work); | 1412 | cancel_work_sync(&priv->update_filtering_work); |
1416 | cancel_work_sync(&priv->set_beacon_wakeup_period_work); | 1413 | cancel_work_sync(&priv->set_beacon_wakeup_period_work); |
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c index ac074731335a..e5090309824e 100644 --- a/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/hostap/hostap_ioctl.c | |||
@@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev, | |||
523 | 523 | ||
524 | data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); | 524 | data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); |
525 | 525 | ||
526 | memcpy(extra, &addr, sizeof(struct sockaddr) * data->length); | 526 | memcpy(extra, addr, sizeof(struct sockaddr) * data->length); |
527 | data->flags = 1; /* has quality information */ | 527 | data->flags = 1; /* has quality information */ |
528 | memcpy(extra + sizeof(struct sockaddr) * data->length, &qual, | 528 | memcpy(extra + sizeof(struct sockaddr) * data->length, qual, |
529 | sizeof(struct iw_quality) * data->length); | 529 | sizeof(struct iw_quality) * data->length); |
530 | 530 | ||
531 | kfree(addr); | 531 | kfree(addr); |
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index b9b2bb51e605..7acf5ee23582 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c | |||
@@ -4460,13 +4460,13 @@ il4965_irq_tasklet(struct il_priv *il) | |||
4460 | * is killed. Hence update the killswitch state here. The | 4460 | * is killed. Hence update the killswitch state here. The |
4461 | * rfkill handler will care about restarting if needed. | 4461 | * rfkill handler will care about restarting if needed. |
4462 | */ | 4462 | */ |
4463 | if (!test_bit(S_ALIVE, &il->status)) { | 4463 | if (hw_rf_kill) { |
4464 | if (hw_rf_kill) | 4464 | set_bit(S_RFKILL, &il->status); |
4465 | set_bit(S_RFKILL, &il->status); | 4465 | } else { |
4466 | else | 4466 | clear_bit(S_RFKILL, &il->status); |
4467 | clear_bit(S_RFKILL, &il->status); | 4467 | il_force_reset(il, true); |
4468 | wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); | ||
4469 | } | 4468 | } |
4469 | wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); | ||
4470 | 4470 | ||
4471 | handled |= CSR_INT_BIT_RF_KILL; | 4471 | handled |= CSR_INT_BIT_RF_KILL; |
4472 | } | 4472 | } |
@@ -5334,6 +5334,9 @@ il4965_alive_start(struct il_priv *il) | |||
5334 | 5334 | ||
5335 | il->active_rate = RATES_MASK; | 5335 | il->active_rate = RATES_MASK; |
5336 | 5336 | ||
5337 | il_power_update_mode(il, true); | ||
5338 | D_INFO("Updated power mode\n"); | ||
5339 | |||
5337 | if (il_is_associated(il)) { | 5340 | if (il_is_associated(il)) { |
5338 | struct il_rxon_cmd *active_rxon = | 5341 | struct il_rxon_cmd *active_rxon = |
5339 | (struct il_rxon_cmd *)&il->active; | 5342 | (struct il_rxon_cmd *)&il->active; |
@@ -5364,9 +5367,6 @@ il4965_alive_start(struct il_priv *il) | |||
5364 | D_INFO("ALIVE processing complete.\n"); | 5367 | D_INFO("ALIVE processing complete.\n"); |
5365 | wake_up(&il->wait_command_queue); | 5368 | wake_up(&il->wait_command_queue); |
5366 | 5369 | ||
5367 | il_power_update_mode(il, true); | ||
5368 | D_INFO("Updated power mode\n"); | ||
5369 | |||
5370 | return; | 5370 | return; |
5371 | 5371 | ||
5372 | restart: | 5372 | restart: |
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index 3195aad440dd..b03e22ef5462 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c | |||
@@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external) | |||
4660 | 4660 | ||
4661 | return 0; | 4661 | return 0; |
4662 | } | 4662 | } |
4663 | EXPORT_SYMBOL(il_force_reset); | ||
4663 | 4664 | ||
4664 | int | 4665 | int |
4665 | il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | 4666 | il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index 822f1a00efbb..319387263e12 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c | |||
@@ -1068,7 +1068,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) | |||
1068 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | 1068 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) |
1069 | return; | 1069 | return; |
1070 | 1070 | ||
1071 | if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) | 1071 | if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) |
1072 | return; | ||
1073 | |||
1074 | if (ctx->vif) | ||
1072 | ieee80211_chswitch_done(ctx->vif, is_success); | 1075 | ieee80211_chswitch_done(ctx->vif, is_success); |
1073 | } | 1076 | } |
1074 | 1077 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index a70c7b9d9bad..ff8cc75c189d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h | |||
@@ -97,8 +97,6 @@ | |||
97 | 97 | ||
98 | #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) | 98 | #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) |
99 | 99 | ||
100 | #define APMG_RTC_INT_STT_RFKILL (0x10000000) | ||
101 | |||
102 | /* Device system time */ | 100 | /* Device system time */ |
103 | #define DEVICE_SYSTEM_TIME_REG 0xA0206C | 101 | #define DEVICE_SYSTEM_TIME_REG 0xA0206C |
104 | 102 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index ad9bbca99213..7fd6fbfbc1b3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c | |||
@@ -138,6 +138,20 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) | |||
138 | schedule_work(&mvm->roc_done_wk); | 138 | schedule_work(&mvm->roc_done_wk); |
139 | } | 139 | } |
140 | 140 | ||
141 | static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, | ||
142 | struct ieee80211_vif *vif, | ||
143 | const char *errmsg) | ||
144 | { | ||
145 | if (vif->type != NL80211_IFTYPE_STATION) | ||
146 | return false; | ||
147 | if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) | ||
148 | return false; | ||
149 | if (errmsg) | ||
150 | IWL_ERR(mvm, "%s\n", errmsg); | ||
151 | ieee80211_connection_loss(vif); | ||
152 | return true; | ||
153 | } | ||
154 | |||
141 | /* | 155 | /* |
142 | * Handles a FW notification for an event that is known to the driver. | 156 | * Handles a FW notification for an event that is known to the driver. |
143 | * | 157 | * |
@@ -163,8 +177,13 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, | |||
163 | * P2P Device discoveribility, while there are other higher priority | 177 | * P2P Device discoveribility, while there are other higher priority |
164 | * events in the system). | 178 | * events in the system). |
165 | */ | 179 | */ |
166 | WARN_ONCE(!le32_to_cpu(notif->status), | 180 | if (WARN_ONCE(!le32_to_cpu(notif->status), |
167 | "Failed to schedule time event\n"); | 181 | "Failed to schedule time event\n")) { |
182 | if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) { | ||
183 | iwl_mvm_te_clear_data(mvm, te_data); | ||
184 | return; | ||
185 | } | ||
186 | } | ||
168 | 187 | ||
169 | if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) { | 188 | if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) { |
170 | IWL_DEBUG_TE(mvm, | 189 | IWL_DEBUG_TE(mvm, |
@@ -180,14 +199,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, | |||
180 | * By now, we should have finished association | 199 | * By now, we should have finished association |
181 | * and know the dtim period. | 200 | * and know the dtim period. |
182 | */ | 201 | */ |
183 | if (te_data->vif->type == NL80211_IFTYPE_STATION && | 202 | iwl_mvm_te_check_disconnect(mvm, te_data->vif, |
184 | (!te_data->vif->bss_conf.assoc || | 203 | "No assocation and the time event is over already..."); |
185 | !te_data->vif->bss_conf.dtim_period)) { | ||
186 | IWL_ERR(mvm, | ||
187 | "No assocation and the time event is over already...\n"); | ||
188 | ieee80211_connection_loss(te_data->vif); | ||
189 | } | ||
190 | |||
191 | iwl_mvm_te_clear_data(mvm, te_data); | 204 | iwl_mvm_te_clear_data(mvm, te_data); |
192 | } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { | 205 | } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { |
193 | te_data->running = true; | 206 | te_data->running = true; |
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index f600e68a410a..fd848cd1583e 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c | |||
@@ -888,14 +888,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) | |||
888 | 888 | ||
889 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | 889 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); |
890 | if (hw_rfkill) { | 890 | if (hw_rfkill) { |
891 | /* | ||
892 | * Clear the interrupt in APMG if the NIC is going down. | ||
893 | * Note that when the NIC exits RFkill (else branch), we | ||
894 | * can't access prph and the NIC will be reset in | ||
895 | * start_hw anyway. | ||
896 | */ | ||
897 | iwl_write_prph(trans, APMG_RTC_INT_STT_REG, | ||
898 | APMG_RTC_INT_STT_RFKILL); | ||
899 | set_bit(STATUS_RFKILL, &trans_pcie->status); | 891 | set_bit(STATUS_RFKILL, &trans_pcie->status); |
900 | if (test_and_clear_bit(STATUS_HCMD_ACTIVE, | 892 | if (test_and_clear_bit(STATUS_HCMD_ACTIVE, |
901 | &trans_pcie->status)) | 893 | &trans_pcie->status)) |
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 96cfcdd39079..390e2f058aff 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
@@ -1502,16 +1502,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
1502 | spin_lock_init(&trans_pcie->reg_lock); | 1502 | spin_lock_init(&trans_pcie->reg_lock); |
1503 | init_waitqueue_head(&trans_pcie->ucode_write_waitq); | 1503 | init_waitqueue_head(&trans_pcie->ucode_write_waitq); |
1504 | 1504 | ||
1505 | /* W/A - seems to solve weird behavior. We need to remove this if we | ||
1506 | * don't want to stay in L1 all the time. This wastes a lot of power */ | ||
1507 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
1508 | PCIE_LINK_STATE_CLKPM); | ||
1509 | |||
1510 | if (pci_enable_device(pdev)) { | 1505 | if (pci_enable_device(pdev)) { |
1511 | err = -ENODEV; | 1506 | err = -ENODEV; |
1512 | goto out_no_pci; | 1507 | goto out_no_pci; |
1513 | } | 1508 | } |
1514 | 1509 | ||
1510 | /* W/A - seems to solve weird behavior. We need to remove this if we | ||
1511 | * don't want to stay in L1 all the time. This wastes a lot of power */ | ||
1512 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
1513 | PCIE_LINK_STATE_CLKPM); | ||
1514 | |||
1515 | pci_set_master(pdev); | 1515 | pci_set_master(pdev); |
1516 | 1516 | ||
1517 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); | 1517 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); |
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 1f80ea5e29dd..1b41c8eda12d 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c | |||
@@ -6133,7 +6133,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) | |||
6133 | IEEE80211_HW_SUPPORTS_PS | | 6133 | IEEE80211_HW_SUPPORTS_PS | |
6134 | IEEE80211_HW_PS_NULLFUNC_STACK | | 6134 | IEEE80211_HW_PS_NULLFUNC_STACK | |
6135 | IEEE80211_HW_AMPDU_AGGREGATION | | 6135 | IEEE80211_HW_AMPDU_AGGREGATION | |
6136 | IEEE80211_HW_REPORTS_TX_ACK_STATUS; | 6136 | IEEE80211_HW_REPORTS_TX_ACK_STATUS | |
6137 | IEEE80211_HW_SUPPORTS_HT_CCK_RATES; | ||
6137 | 6138 | ||
6138 | /* | 6139 | /* |
6139 | * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices | 6140 | * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices |
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 4941f201d6c8..b8ba1f925e75 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c | |||
@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw) | |||
98 | goto exit; | 98 | goto exit; |
99 | 99 | ||
100 | err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, | 100 | err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, |
101 | USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT); | 101 | USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT); |
102 | if (err < 0) | 102 | if (err < 0) |
103 | goto exit; | 103 | goto exit; |
104 | 104 | ||
105 | memcpy(&ret, buf, sizeof(ret)); | ||
106 | |||
105 | if (ret & 0x80) { | 107 | if (ret & 0x80) { |
106 | err = -EIO; | 108 | err = -EIO; |
107 | goto exit; | 109 | goto exit; |