-rw-r--r--  MAINTAINERS | 18
-rw-r--r--  drivers/bcma/driver_pci_host.c | 2
-rw-r--r--  drivers/connector/cn_proc.c | 8
-rw-r--r--  drivers/isdn/hisax/st5481_usb.c | 12
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 64
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec.c | 85
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 18
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 27
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 16
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 25
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r--  drivers/net/phy/micrel.c | 3
-rw-r--r--  drivers/net/phy/phy_device.c | 10
-rw-r--r--  drivers/net/usb/Kconfig | 18
-rw-r--r--  drivers/net/usb/Makefile | 1
-rw-r--r--  drivers/net/usb/asix_devices.c | 31
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 1448
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/common.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 18
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-devtrace.h | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-phy-db.c | 16
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/d3.c | 104
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 75
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c | 6
-rw-r--r--  drivers/net/wireless/mwifiex/pcie.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 8
-rw-r--r--  include/linux/netfilter/ipset/ip_set_ahash.h | 4
-rw-r--r--  include/net/tcp.h | 4
-rw-r--r--  net/caif/caif_dev.c | 2
-rw-r--r--  net/caif/caif_usb.c | 4
-rw-r--r--  net/ipv4/ip_input.c | 6
-rw-r--r--  net/ipv4/tcp_input.c | 6
-rw-r--r--  net/ipv6/ip6_input.c | 6
-rw-r--r--  net/ipv6/route.c | 3
-rw-r--r--  net/irda/iriap.c | 7
-rw-r--r--  net/l2tp/l2tp_ppp.c | 1
-rw-r--r--  net/mac80211/cfg.c | 12
-rw-r--r--  net/mac80211/iface.c | 2
-rw-r--r--  net/mac80211/tx.c | 77
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 3
-rw-r--r--  net/rds/message.c | 8
-rw-r--r--  net/sctp/endpointola.c | 2
-rw-r--r--  net/sctp/socket.c | 6
-rw-r--r--  net/sctp/ssnmap.c | 8
-rw-r--r--  net/sctp/tsnmap.c | 13
-rw-r--r--  net/sctp/ulpqueue.c | 87
-rw-r--r--  net/wireless/nl80211.c | 61
59 files changed, 2083 insertions, 334 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index e95b1e944eb7..95616582c728 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -114,12 +114,6 @@ Maintainers List (try to look for most precise areas first)
114 114
115 ----------------------------------- 115 -----------------------------------
116 116
1173C505 NETWORK DRIVER
118M: Philip Blundell <philb@gnu.org>
119L: netdev@vger.kernel.org
120S: Maintained
121F: drivers/net/ethernet/i825xx/3c505*
122
1233C59X NETWORK DRIVER 1173C59X NETWORK DRIVER
124M: Steffen Klassert <klassert@mathematik.tu-chemnitz.de> 118M: Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
125L: netdev@vger.kernel.org 119L: netdev@vger.kernel.org
@@ -2361,12 +2355,6 @@ W: http://www.arm.linux.org.uk/
2361S: Maintained 2355S: Maintained
2362F: drivers/video/cyber2000fb.* 2356F: drivers/video/cyber2000fb.*
2363 2357
2364CYCLADES 2X SYNC CARD DRIVER
2365M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
2366W: http://oops.ghostprotocols.net:81/blog
2367S: Maintained
2368F: drivers/net/wan/cycx*
2369
2370CYCLADES ASYNC MUX DRIVER 2358CYCLADES ASYNC MUX DRIVER
2371W: http://www.cyclades.com/ 2359W: http://www.cyclades.com/
2372S: Orphan 2360S: Orphan
@@ -3067,12 +3055,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
3067F: drivers/video/s1d13xxxfb.c 3055F: drivers/video/s1d13xxxfb.c
3068F: include/video/s1d13xxxfb.h 3056F: include/video/s1d13xxxfb.h
3069 3057
3070ETHEREXPRESS-16 NETWORK DRIVER
3071M: Philip Blundell <philb@gnu.org>
3072L: netdev@vger.kernel.org
3073S: Maintained
3074F: drivers/net/ethernet/i825xx/eexpress.*
3075
3076ETHERNET BRIDGE 3058ETHERNET BRIDGE
3077M: Stephen Hemminger <stephen@networkplumber.org> 3059M: Stephen Hemminger <stephen@networkplumber.org>
3078L: bridge@lists.linux-foundation.org 3060L: bridge@lists.linux-foundation.org
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index d3bde6cec927..30629a3d44cc 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -404,6 +404,8 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
404 return; 404 return;
405 } 405 }
406 406
407 spin_lock_init(&pc_host->cfgspace_lock);
408
407 pc->host_controller = pc_host; 409 pc->host_controller = pc_host;
408 pc_host->pci_controller.io_resource = &pc_host->io_resource; 410 pc_host->pci_controller.io_resource = &pc_host->io_resource;
409 pc_host->pci_controller.mem_resource = &pc_host->mem_resource; 411 pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index fce2000eec31..1110478dd0fd 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -313,6 +313,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
313 (task_active_pid_ns(current) != &init_pid_ns)) 313 (task_active_pid_ns(current) != &init_pid_ns))
314 return; 314 return;
315 315
316 /* Can only change if privileged. */
317 if (!capable(CAP_NET_ADMIN)) {
318 err = EPERM;
319 goto out;
320 }
321
316 mc_op = (enum proc_cn_mcast_op *)msg->data; 322 mc_op = (enum proc_cn_mcast_op *)msg->data;
317 switch (*mc_op) { 323 switch (*mc_op) {
318 case PROC_CN_MCAST_LISTEN: 324 case PROC_CN_MCAST_LISTEN:
@@ -325,6 +331,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
325 err = EINVAL; 331 err = EINVAL;
326 break; 332 break;
327 } 333 }
334
335out:
328 cn_proc_ack(err, msg->seq, msg->ack); 336 cn_proc_ack(err, msg->seq, msg->ack);
329} 337}
330 338
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index 017c67ea3f4c..ead0a4fb7448 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -294,13 +294,13 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
294 // Allocate URBs and buffers for interrupt endpoint 294 // Allocate URBs and buffers for interrupt endpoint
295 urb = usb_alloc_urb(0, GFP_KERNEL); 295 urb = usb_alloc_urb(0, GFP_KERNEL);
296 if (!urb) { 296 if (!urb) {
297 return -ENOMEM; 297 goto err1;
298 } 298 }
299 intr->urb = urb; 299 intr->urb = urb;
300 300
301 buf = kmalloc(INT_PKT_SIZE, GFP_KERNEL); 301 buf = kmalloc(INT_PKT_SIZE, GFP_KERNEL);
302 if (!buf) { 302 if (!buf) {
303 return -ENOMEM; 303 goto err2;
304 } 304 }
305 305
306 endpoint = &altsetting->endpoint[EP_INT-1]; 306 endpoint = &altsetting->endpoint[EP_INT-1];
@@ -313,6 +313,14 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
313 endpoint->desc.bInterval); 313 endpoint->desc.bInterval);
314 314
315 return 0; 315 return 0;
316err2:
317 usb_free_urb(intr->urb);
318 intr->urb = NULL;
319err1:
320 usb_free_urb(ctrl->urb);
321 ctrl->urb = NULL;
322
323 return -ENOMEM;
316} 324}
317 325
318/* 326/*
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 11d01d67b3f5..7bd068a6056a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1629,7 +1629,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1629 1629
1630 /* If this is the first slave, then we need to set the master's hardware 1630 /* If this is the first slave, then we need to set the master's hardware
1631 * address to be the same as the slave's. */ 1631 * address to be the same as the slave's. */
1632 if (bond->dev_addr_from_first) 1632 if (bond->slave_cnt == 0 && bond->dev_addr_from_first)
1633 bond_set_dev_addr(bond->dev, slave_dev); 1633 bond_set_dev_addr(bond->dev, slave_dev);
1634 1634
1635 new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); 1635 new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 639049d7e92d..da5f4397f87c 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -301,12 +301,16 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
301 bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", 301 bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
302 ring->start); 302 ring->start);
303 } else { 303 } else {
304 /* Omit CRC. */
305 len -= ETH_FCS_LEN;
306
304 new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len); 307 new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
305 if (new_skb) { 308 if (new_skb) {
306 skb_put(new_skb, len); 309 skb_put(new_skb, len);
307 skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET, 310 skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
308 new_skb->data, 311 new_skb->data,
309 len); 312 len);
313 skb_checksum_none_assert(skb);
310 new_skb->protocol = 314 new_skb->protocol =
311 eth_type_trans(new_skb, bgmac->net_dev); 315 eth_type_trans(new_skb, bgmac->net_dev);
312 netif_receive_skb(new_skb); 316 netif_receive_skb(new_skb);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ecac04a3687c..a923bc4d5a1f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3142,7 +3142,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3142 tsum = ~csum_fold(csum_add((__force __wsum) csum, 3142 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3143 csum_partial(t_header, -fix, 0))); 3143 csum_partial(t_header, -fix, 0)));
3144 3144
3145 return bswab16(csum); 3145 return bswab16(tsum);
3146} 3146}
3147 3147
3148static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) 3148static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 9a674b14b403..edfa67adf2f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -281,6 +281,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
281 cmd->lp_advertising |= ADVERTISED_2500baseX_Full; 281 cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
282 if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) 282 if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
283 cmd->lp_advertising |= ADVERTISED_10000baseT_Full; 283 cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
284 if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
285 cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
284 } 286 }
285 287
286 cmd->maxtxpkt = 0; 288 cmd->maxtxpkt = 0;
@@ -463,6 +465,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
463 ADVERTISED_10000baseKR_Full)) 465 ADVERTISED_10000baseKR_Full))
464 bp->link_params.speed_cap_mask[cfg_idx] |= 466 bp->link_params.speed_cap_mask[cfg_idx] |=
465 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; 467 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
468
469 if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
470 bp->link_params.speed_cap_mask[cfg_idx] |=
471 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
466 } 472 }
467 } else { /* forced speed */ 473 } else { /* forced speed */
468 /* advertise the requested speed and duplex if supported */ 474 /* advertise the requested speed and duplex if supported */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 1663e0b6b5a0..31c5787970db 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -10422,6 +10422,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10422 MDIO_PMA_DEVAD, 10422 MDIO_PMA_DEVAD,
10423 MDIO_PMA_REG_8481_LED1_MASK, 10423 MDIO_PMA_REG_8481_LED1_MASK,
10424 0x0); 10424 0x0);
10425 if (phy->type ==
10426 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
10427 /* Disable MI_INT interrupt before setting LED4
10428 * source to constant off.
10429 */
10430 if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
10431 params->port*4) &
10432 NIG_MASK_MI_INT) {
10433 params->link_flags |=
10434 LINK_FLAGS_INT_DISABLED;
10435
10436 bnx2x_bits_dis(
10437 bp,
10438 NIG_REG_MASK_INTERRUPT_PORT0 +
10439 params->port*4,
10440 NIG_MASK_MI_INT);
10441 }
10442 bnx2x_cl45_write(bp, phy,
10443 MDIO_PMA_DEVAD,
10444 MDIO_PMA_REG_8481_SIGNAL_MASK,
10445 0x0);
10446 }
10425 } 10447 }
10426 break; 10448 break;
10427 case LED_MODE_ON: 10449 case LED_MODE_ON:
@@ -10468,6 +10490,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10468 MDIO_PMA_DEVAD, 10490 MDIO_PMA_DEVAD,
10469 MDIO_PMA_REG_8481_LED1_MASK, 10491 MDIO_PMA_REG_8481_LED1_MASK,
10470 0x20); 10492 0x20);
10493 if (phy->type ==
10494 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
10495 /* Disable MI_INT interrupt before setting LED4
10496 * source to constant on.
10497 */
10498 if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
10499 params->port*4) &
10500 NIG_MASK_MI_INT) {
10501 params->link_flags |=
10502 LINK_FLAGS_INT_DISABLED;
10503
10504 bnx2x_bits_dis(
10505 bp,
10506 NIG_REG_MASK_INTERRUPT_PORT0 +
10507 params->port*4,
10508 NIG_MASK_MI_INT);
10509 }
10510 bnx2x_cl45_write(bp, phy,
10511 MDIO_PMA_DEVAD,
10512 MDIO_PMA_REG_8481_SIGNAL_MASK,
10513 0x20);
10514 }
10471 } 10515 }
10472 break; 10516 break;
10473 10517
@@ -10532,6 +10576,22 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10532 MDIO_PMA_DEVAD, 10576 MDIO_PMA_DEVAD,
10533 MDIO_PMA_REG_8481_LINK_SIGNAL, 10577 MDIO_PMA_REG_8481_LINK_SIGNAL,
10534 val); 10578 val);
10579 if (phy->type ==
10580 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
10581 /* Restore LED4 source to external link,
10582 * and re-enable interrupts.
10583 */
10584 bnx2x_cl45_write(bp, phy,
10585 MDIO_PMA_DEVAD,
10586 MDIO_PMA_REG_8481_SIGNAL_MASK,
10587 0x40);
10588 if (params->link_flags &
10589 LINK_FLAGS_INT_DISABLED) {
10590 bnx2x_link_int_enable(params);
10591 params->link_flags &=
10592 ~LINK_FLAGS_INT_DISABLED;
10593 }
10594 }
10535 } 10595 }
10536 break; 10596 break;
10537 } 10597 }
@@ -11791,6 +11851,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
11791 phy->media_type = ETH_PHY_KR; 11851 phy->media_type = ETH_PHY_KR;
11792 phy->flags |= FLAGS_WC_DUAL_MODE; 11852 phy->flags |= FLAGS_WC_DUAL_MODE;
11793 phy->supported &= (SUPPORTED_20000baseKR2_Full | 11853 phy->supported &= (SUPPORTED_20000baseKR2_Full |
11854 SUPPORTED_10000baseT_Full |
11855 SUPPORTED_1000baseT_Full |
11794 SUPPORTED_Autoneg | 11856 SUPPORTED_Autoneg |
11795 SUPPORTED_FIBRE | 11857 SUPPORTED_FIBRE |
11796 SUPPORTED_Pause | 11858 SUPPORTED_Pause |
@@ -13437,7 +13499,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
13437 struct bnx2x_phy *phy = &params->phy[INT_PHY]; 13499 struct bnx2x_phy *phy = &params->phy[INT_PHY];
13438 bnx2x_set_aer_mmd(params, phy); 13500 bnx2x_set_aer_mmd(params, phy);
13439 if ((phy->supported & SUPPORTED_20000baseKR2_Full) && 13501 if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
13440 (phy->speed_cap_mask & SPEED_20000)) 13502 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
13441 bnx2x_check_kr2_wa(params, vars, phy); 13503 bnx2x_check_kr2_wa(params, vars, phy);
13442 bnx2x_check_over_curr(params, vars); 13504 bnx2x_check_over_curr(params, vars);
13443 if (vars->rx_tx_asic_rst) 13505 if (vars->rx_tx_asic_rst)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index d25c7d79787a..be5c195d03dd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -307,7 +307,8 @@ struct link_params {
307 struct bnx2x *bp; 307 struct bnx2x *bp;
308 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when 308 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
309 req_flow_ctrl is set to AUTO */ 309 req_flow_ctrl is set to AUTO */
310 u16 rsrv1; 310 u16 link_flags;
311#define LINK_FLAGS_INT_DISABLED (1<<0)
311 u32 lfa_base; 312 u32 lfa_base;
312}; 313};
313 314
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index fccc3bf2141d..069a155d16ed 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
246 struct bufdesc *bdp; 246 struct bufdesc *bdp;
247 void *bufaddr; 247 void *bufaddr;
248 unsigned short status; 248 unsigned short status;
249 unsigned long flags; 249 unsigned int index;
250 250
251 if (!fep->link) { 251 if (!fep->link) {
252 /* Link is down or autonegotiation is in progress. */ 252 /* Link is down or autonegotiation is in progress. */
253 return NETDEV_TX_BUSY; 253 return NETDEV_TX_BUSY;
254 } 254 }
255 255
256 spin_lock_irqsave(&fep->hw_lock, flags);
257 /* Fill in a Tx ring entry */ 256 /* Fill in a Tx ring entry */
258 bdp = fep->cur_tx; 257 bdp = fep->cur_tx;
259 258
@@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
264 * This should not happen, since ndev->tbusy should be set. 263 * This should not happen, since ndev->tbusy should be set.
265 */ 264 */
266 printk("%s: tx queue full!.\n", ndev->name); 265 printk("%s: tx queue full!.\n", ndev->name);
267 spin_unlock_irqrestore(&fep->hw_lock, flags);
268 return NETDEV_TX_BUSY; 266 return NETDEV_TX_BUSY;
269 } 267 }
270 268
@@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
280 * 4-byte boundaries. Use bounce buffers to copy data 278 * 4-byte boundaries. Use bounce buffers to copy data
281 * and get it aligned. Ugh. 279 * and get it aligned. Ugh.
282 */ 280 */
281 if (fep->bufdesc_ex)
282 index = (struct bufdesc_ex *)bdp -
283 (struct bufdesc_ex *)fep->tx_bd_base;
284 else
285 index = bdp - fep->tx_bd_base;
286
283 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { 287 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
284 unsigned int index;
285 if (fep->bufdesc_ex)
286 index = (struct bufdesc_ex *)bdp -
287 (struct bufdesc_ex *)fep->tx_bd_base;
288 else
289 index = bdp - fep->tx_bd_base;
290 memcpy(fep->tx_bounce[index], skb->data, skb->len); 288 memcpy(fep->tx_bounce[index], skb->data, skb->len);
291 bufaddr = fep->tx_bounce[index]; 289 bufaddr = fep->tx_bounce[index];
292 } 290 }
@@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
300 swap_buffer(bufaddr, skb->len); 298 swap_buffer(bufaddr, skb->len);
301 299
302 /* Save skb pointer */ 300 /* Save skb pointer */
303 fep->tx_skbuff[fep->skb_cur] = skb; 301 fep->tx_skbuff[index] = skb;
304
305 ndev->stats.tx_bytes += skb->len;
306 fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
307 302
308 /* Push the data cache so the CPM does not get stale memory 303 /* Push the data cache so the CPM does not get stale memory
309 * data. 304 * data.
@@ -331,26 +326,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
331 ebdp->cbd_esc = BD_ENET_TX_INT; 326 ebdp->cbd_esc = BD_ENET_TX_INT;
332 } 327 }
333 } 328 }
334 /* Trigger transmission start */
335 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
336
337 /* If this was the last BD in the ring, start at the beginning again. */ 329 /* If this was the last BD in the ring, start at the beginning again. */
338 if (status & BD_ENET_TX_WRAP) 330 if (status & BD_ENET_TX_WRAP)
339 bdp = fep->tx_bd_base; 331 bdp = fep->tx_bd_base;
340 else 332 else
341 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); 333 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
342 334
343 if (bdp == fep->dirty_tx) { 335 fep->cur_tx = bdp;
344 fep->tx_full = 1; 336
337 if (fep->cur_tx == fep->dirty_tx)
345 netif_stop_queue(ndev); 338 netif_stop_queue(ndev);
346 }
347 339
348 fep->cur_tx = bdp; 340 /* Trigger transmission start */
341 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
349 342
350 skb_tx_timestamp(skb); 343 skb_tx_timestamp(skb);
351 344
352 spin_unlock_irqrestore(&fep->hw_lock, flags);
353
354 return NETDEV_TX_OK; 345 return NETDEV_TX_OK;
355} 346}
356 347
@@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)
406 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) 397 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
407 * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); 398 * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
408 399
409 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
410 fep->cur_rx = fep->rx_bd_base; 400 fep->cur_rx = fep->rx_bd_base;
411 401
412 /* Reset SKB transmit buffers. */
413 fep->skb_cur = fep->skb_dirty = 0;
414 for (i = 0; i <= TX_RING_MOD_MASK; i++) { 402 for (i = 0; i <= TX_RING_MOD_MASK; i++) {
415 if (fep->tx_skbuff[i]) { 403 if (fep->tx_skbuff[i]) {
416 dev_kfree_skb_any(fep->tx_skbuff[i]); 404 dev_kfree_skb_any(fep->tx_skbuff[i]);
@@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)
573 struct bufdesc *bdp; 561 struct bufdesc *bdp;
574 unsigned short status; 562 unsigned short status;
575 struct sk_buff *skb; 563 struct sk_buff *skb;
564 int index = 0;
576 565
577 fep = netdev_priv(ndev); 566 fep = netdev_priv(ndev);
578 spin_lock(&fep->hw_lock);
579 bdp = fep->dirty_tx; 567 bdp = fep->dirty_tx;
580 568
569 /* get next bdp of dirty_tx */
570 if (bdp->cbd_sc & BD_ENET_TX_WRAP)
571 bdp = fep->tx_bd_base;
572 else
573 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
574
581 while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { 575 while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
582 if (bdp == fep->cur_tx && fep->tx_full == 0) 576
577 /* current queue is empty */
578 if (bdp == fep->cur_tx)
583 break; 579 break;
584 580
581 if (fep->bufdesc_ex)
582 index = (struct bufdesc_ex *)bdp -
583 (struct bufdesc_ex *)fep->tx_bd_base;
584 else
585 index = bdp - fep->tx_bd_base;
586
585 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, 587 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
586 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); 588 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
587 bdp->cbd_bufaddr = 0; 589 bdp->cbd_bufaddr = 0;
588 590
589 skb = fep->tx_skbuff[fep->skb_dirty]; 591 skb = fep->tx_skbuff[index];
592
590 /* Check for errors. */ 593 /* Check for errors. */
591 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 594 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
592 BD_ENET_TX_RL | BD_ENET_TX_UN | 595 BD_ENET_TX_RL | BD_ENET_TX_UN |
@@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)
631 634
632 /* Free the sk buffer associated with this last transmit */ 635 /* Free the sk buffer associated with this last transmit */
633 dev_kfree_skb_any(skb); 636 dev_kfree_skb_any(skb);
634 fep->tx_skbuff[fep->skb_dirty] = NULL; 637 fep->tx_skbuff[index] = NULL;
635 fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK; 638
639 fep->dirty_tx = bdp;
636 640
637 /* Update pointer to next buffer descriptor to be transmitted */ 641 /* Update pointer to next buffer descriptor to be transmitted */
638 if (status & BD_ENET_TX_WRAP) 642 if (status & BD_ENET_TX_WRAP)
@@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)
642 646
643 /* Since we have freed up a buffer, the ring is no longer full 647 /* Since we have freed up a buffer, the ring is no longer full
644 */ 648 */
645 if (fep->tx_full) { 649 if (fep->dirty_tx != fep->cur_tx) {
646 fep->tx_full = 0;
647 if (netif_queue_stopped(ndev)) 650 if (netif_queue_stopped(ndev))
648 netif_wake_queue(ndev); 651 netif_wake_queue(ndev);
649 } 652 }
650 } 653 }
651 fep->dirty_tx = bdp; 654 return;
652 spin_unlock(&fep->hw_lock);
653} 655}
654 656
655 657
@@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)
816 int_events = readl(fep->hwp + FEC_IEVENT); 818 int_events = readl(fep->hwp + FEC_IEVENT);
817 writel(int_events, fep->hwp + FEC_IEVENT); 819 writel(int_events, fep->hwp + FEC_IEVENT);
818 820
819 if (int_events & FEC_ENET_RXF) { 821 if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
820 ret = IRQ_HANDLED; 822 ret = IRQ_HANDLED;
821 823
822 /* Disable the RX interrupt */ 824 /* Disable the RX interrupt */
@@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)
827 } 829 }
828 } 830 }
829 831
830 /* Transmit OK, or non-fatal error. Update the buffer
831 * descriptors. FEC handles all errors, we just discover
832 * them as part of the transmit process.
833 */
834 if (int_events & FEC_ENET_TXF) {
835 ret = IRQ_HANDLED;
836 fec_enet_tx(ndev);
837 }
838
839 if (int_events & FEC_ENET_MII) { 832 if (int_events & FEC_ENET_MII) {
840 ret = IRQ_HANDLED; 833 ret = IRQ_HANDLED;
841 complete(&fep->mdio_done); 834 complete(&fep->mdio_done);
@@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
851 int pkts = fec_enet_rx(ndev, budget); 844 int pkts = fec_enet_rx(ndev, budget);
852 struct fec_enet_private *fep = netdev_priv(ndev); 845 struct fec_enet_private *fep = netdev_priv(ndev);
853 846
847 fec_enet_tx(ndev);
848
854 if (pkts < budget) { 849 if (pkts < budget) {
855 napi_complete(napi); 850 napi_complete(napi);
856 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 851 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1646,6 +1641,7 @@ static int fec_enet_init(struct net_device *ndev)
1646 1641
1647 /* ...and the same for transmit */ 1642 /* ...and the same for transmit */
1648 bdp = fep->tx_bd_base; 1643 bdp = fep->tx_bd_base;
1644 fep->cur_tx = bdp;
1649 for (i = 0; i < TX_RING_SIZE; i++) { 1645 for (i = 0; i < TX_RING_SIZE; i++) {
1650 1646
1651 /* Initialize the BD for every fragment in the page. */ 1647 /* Initialize the BD for every fragment in the page. */
@@ -1657,6 +1653,7 @@ static int fec_enet_init(struct net_device *ndev)
1657 /* Set the last buffer to wrap */ 1653 /* Set the last buffer to wrap */
1658 bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); 1654 bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
1659 bdp->cbd_sc |= BD_SC_WRAP; 1655 bdp->cbd_sc |= BD_SC_WRAP;
1656 fep->dirty_tx = bdp;
1660 1657
1661 fec_restart(ndev, 0); 1658 fec_restart(ndev, 0);
1662 1659
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 01579b8e37c4..f5390071efd0 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -97,6 +97,13 @@ struct bufdesc {
97 unsigned short cbd_sc; /* Control and status info */ 97 unsigned short cbd_sc; /* Control and status info */
98 unsigned long cbd_bufaddr; /* Buffer address */ 98 unsigned long cbd_bufaddr; /* Buffer address */
99}; 99};
100#else
101struct bufdesc {
102 unsigned short cbd_sc; /* Control and status info */
103 unsigned short cbd_datlen; /* Data length */
104 unsigned long cbd_bufaddr; /* Buffer address */
105};
106#endif
100 107
101struct bufdesc_ex { 108struct bufdesc_ex {
102 struct bufdesc desc; 109 struct bufdesc desc;
@@ -107,14 +114,6 @@ struct bufdesc_ex {
107 unsigned short res0[4]; 114 unsigned short res0[4];
108}; 115};
109 116
110#else
111struct bufdesc {
112 unsigned short cbd_sc; /* Control and status info */
113 unsigned short cbd_datlen; /* Data length */
114 unsigned long cbd_bufaddr; /* Buffer address */
115};
116#endif
117
118/* 117/*
119 * The following definitions courtesy of commproc.h, which where 118 * The following definitions courtesy of commproc.h, which where
120 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net). 119 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
@@ -214,8 +213,6 @@ struct fec_enet_private {
214 unsigned char *tx_bounce[TX_RING_SIZE]; 213 unsigned char *tx_bounce[TX_RING_SIZE];
215 struct sk_buff *tx_skbuff[TX_RING_SIZE]; 214 struct sk_buff *tx_skbuff[TX_RING_SIZE];
216 struct sk_buff *rx_skbuff[RX_RING_SIZE]; 215 struct sk_buff *rx_skbuff[RX_RING_SIZE];
217 ushort skb_cur;
218 ushort skb_dirty;
219 216
220 /* CPM dual port RAM relative addresses */ 217 /* CPM dual port RAM relative addresses */
221 dma_addr_t bd_dma; 218 dma_addr_t bd_dma;
@@ -227,7 +224,6 @@ struct fec_enet_private {
227 /* The ring entries to be free()ed */ 224 /* The ring entries to be free()ed */
228 struct bufdesc *dirty_tx; 225 struct bufdesc *dirty_tx;
229 226
230 uint tx_full;
231 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ 227 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
232 spinlock_t hw_lock; 228 spinlock_t hw_lock;
233 229
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 8900398ba103..28fb50a1e9c3 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4765,8 +4765,10 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
4765 4765
4766 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4766 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4767 4767
4768 rtl_tx_performance_tweak(pdev, 4768 if (tp->dev->mtu <= ETH_DATA_LEN) {
4769 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); 4769 rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
4770 PCI_EXP_DEVCTL_NOSNOOP_EN);
4771 }
4770} 4772}
4771 4773
4772static void rtl_hw_start_8168bef(struct rtl8169_private *tp) 4774static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
@@ -4789,7 +4791,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4789 4791
4790 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4792 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4791 4793
4792 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4794 if (tp->dev->mtu <= ETH_DATA_LEN)
4795 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4793 4796
4794 rtl_disable_clock_request(pdev); 4797 rtl_disable_clock_request(pdev);
4795 4798
@@ -4822,7 +4825,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
4822 4825
4823 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4826 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4824 4827
4825 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4828 if (tp->dev->mtu <= ETH_DATA_LEN)
4829 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4826 4830
4827 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4831 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4828} 4832}
@@ -4841,7 +4845,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
4841 4845
4842 RTL_W8(MaxTxPacketSize, TxPacketMax); 4846 RTL_W8(MaxTxPacketSize, TxPacketMax);
4843 4847
4844 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4848 if (tp->dev->mtu <= ETH_DATA_LEN)
4849 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4845 4850
4846 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4851 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4847} 4852}
@@ -4901,7 +4906,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
4901 4906
4902 RTL_W8(MaxTxPacketSize, TxPacketMax); 4907 RTL_W8(MaxTxPacketSize, TxPacketMax);
4903 4908
4904 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4909 if (tp->dev->mtu <= ETH_DATA_LEN)
4910 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4905 4911
4906 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4912 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4907} 4913}
@@ -4913,7 +4919,8 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4913 4919
4914 rtl_csi_access_enable_1(tp); 4920 rtl_csi_access_enable_1(tp);
4915 4921
4916 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4922 if (tp->dev->mtu <= ETH_DATA_LEN)
4923 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4917 4924
4918 RTL_W8(MaxTxPacketSize, TxPacketMax); 4925 RTL_W8(MaxTxPacketSize, TxPacketMax);
4919 4926
@@ -4972,7 +4979,8 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4972 4979
4973 rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); 4980 rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
4974 4981
4975 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4982 if (tp->dev->mtu <= ETH_DATA_LEN)
4983 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4976 4984
4977 RTL_W8(MaxTxPacketSize, TxPacketMax); 4985 RTL_W8(MaxTxPacketSize, TxPacketMax);
4978 4986
@@ -4998,7 +5006,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
4998 5006
4999 rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); 5007 rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
5000 5008
5001 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 5009 if (tp->dev->mtu <= ETH_DATA_LEN)
5010 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5002 5011
5003 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5012 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5004 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5013 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index bf57b3cb16ab..0bc00991d310 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
779 tx_queue->txd.entries); 779 tx_queue->txd.entries);
780 } 780 }
781 781
782 efx_device_detach_sync(efx);
782 efx_stop_all(efx); 783 efx_stop_all(efx);
783 efx_stop_interrupts(efx, true); 784 efx_stop_interrupts(efx, true);
784 785
@@ -832,6 +833,7 @@ out:
832 833
833 efx_start_interrupts(efx, true); 834 efx_start_interrupts(efx, true);
834 efx_start_all(efx); 835 efx_start_all(efx);
836 netif_device_attach(efx->net_dev);
835 return rc; 837 return rc;
836 838
837rollback: 839rollback:
@@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx)
1641 /* Flush efx_mac_work(), refill_workqueue, monitor_work */ 1643 /* Flush efx_mac_work(), refill_workqueue, monitor_work */
1642 efx_flush_all(efx); 1644 efx_flush_all(efx);
1643 1645
1644 /* Stop the kernel transmit interface late, so the watchdog 1646 /* Stop the kernel transmit interface. This is only valid if
1645 * timer isn't ticking over the flush */ 1647 * the device is stopped or detached; otherwise the watchdog
1648 * may fire immediately.
1649 */
1650 WARN_ON(netif_running(efx->net_dev) &&
1651 netif_device_present(efx->net_dev));
1646 netif_tx_disable(efx->net_dev); 1652 netif_tx_disable(efx->net_dev);
1647 1653
1648 efx_stop_datapath(efx); 1654 efx_stop_datapath(efx);
@@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1963 if (new_mtu > EFX_MAX_MTU) 1969 if (new_mtu > EFX_MAX_MTU)
1964 return -EINVAL; 1970 return -EINVAL;
1965 1971
1966 efx_stop_all(efx);
1967
1968 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); 1972 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
1969 1973
1974 efx_device_detach_sync(efx);
1975 efx_stop_all(efx);
1976
1970 mutex_lock(&efx->mac_lock); 1977 mutex_lock(&efx->mac_lock);
1971 net_dev->mtu = new_mtu; 1978 net_dev->mtu = new_mtu;
1972 efx->type->reconfigure_mac(efx); 1979 efx->type->reconfigure_mac(efx);
1973 mutex_unlock(&efx->mac_lock); 1980 mutex_unlock(&efx->mac_lock);
1974 1981
1975 efx_start_all(efx); 1982 efx_start_all(efx);
1983 netif_device_attach(efx->net_dev);
1976 return 0; 1984 return 0;
1977} 1985}
1978 1986
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 2d756c1d7142..0a90abd2421b 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -210,6 +210,7 @@ struct efx_tx_queue {
210 * Will be %NULL if the buffer slot is currently free. 210 * Will be %NULL if the buffer slot is currently free.
211 * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE. 211 * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
212 * Will be %NULL if the buffer slot is currently free. 212 * Will be %NULL if the buffer slot is currently free.
213 * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
213 * @len: Buffer length, in bytes. 214 * @len: Buffer length, in bytes.
214 * @flags: Flags for buffer and packet state. 215 * @flags: Flags for buffer and packet state.
215 */ 216 */
@@ -219,7 +220,8 @@ struct efx_rx_buffer {
219 struct sk_buff *skb; 220 struct sk_buff *skb;
220 struct page *page; 221 struct page *page;
221 } u; 222 } u;
222 unsigned int len; 223 u16 page_offset;
224 u16 len;
223 u16 flags; 225 u16 flags;
224}; 226};
225#define EFX_RX_BUF_PAGE 0x0001 227#define EFX_RX_BUF_PAGE 0x0001
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d780a0d096b4..879ff5849bbd 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
90static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx, 90static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
91 struct efx_rx_buffer *buf) 91 struct efx_rx_buffer *buf)
92{ 92{
93 /* Offset is always within one page, so we don't need to consider 93 return buf->page_offset + efx->type->rx_buffer_hash_size;
94 * the page order.
95 */
96 return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
97 efx->type->rx_buffer_hash_size;
98} 94}
99static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) 95static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
100{ 96{
@@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
187 struct efx_nic *efx = rx_queue->efx; 183 struct efx_nic *efx = rx_queue->efx;
188 struct efx_rx_buffer *rx_buf; 184 struct efx_rx_buffer *rx_buf;
189 struct page *page; 185 struct page *page;
186 unsigned int page_offset;
190 struct efx_rx_page_state *state; 187 struct efx_rx_page_state *state;
191 dma_addr_t dma_addr; 188 dma_addr_t dma_addr;
192 unsigned index, count; 189 unsigned index, count;
@@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
211 state->dma_addr = dma_addr; 208 state->dma_addr = dma_addr;
212 209
213 dma_addr += sizeof(struct efx_rx_page_state); 210 dma_addr += sizeof(struct efx_rx_page_state);
211 page_offset = sizeof(struct efx_rx_page_state);
214 212
215 split: 213 split:
216 index = rx_queue->added_count & rx_queue->ptr_mask; 214 index = rx_queue->added_count & rx_queue->ptr_mask;
217 rx_buf = efx_rx_buffer(rx_queue, index); 215 rx_buf = efx_rx_buffer(rx_queue, index);
218 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; 216 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
219 rx_buf->u.page = page; 217 rx_buf->u.page = page;
218 rx_buf->page_offset = page_offset;
220 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; 219 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
221 rx_buf->flags = EFX_RX_BUF_PAGE; 220 rx_buf->flags = EFX_RX_BUF_PAGE;
222 ++rx_queue->added_count; 221 ++rx_queue->added_count;
@@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
227 /* Use the second half of the page */ 226 /* Use the second half of the page */
228 get_page(page); 227 get_page(page);
229 dma_addr += (PAGE_SIZE >> 1); 228 dma_addr += (PAGE_SIZE >> 1);
229 page_offset += (PAGE_SIZE >> 1);
230 ++count; 230 ++count;
231 goto split; 231 goto split;
232 } 232 }
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
236} 236}
237 237
238static void efx_unmap_rx_buffer(struct efx_nic *efx, 238static void efx_unmap_rx_buffer(struct efx_nic *efx,
239 struct efx_rx_buffer *rx_buf) 239 struct efx_rx_buffer *rx_buf,
240 unsigned int used_len)
240{ 241{
241 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) { 242 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
242 struct efx_rx_page_state *state; 243 struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
247 state->dma_addr, 248 state->dma_addr,
248 efx_rx_buf_size(efx), 249 efx_rx_buf_size(efx),
249 DMA_FROM_DEVICE); 250 DMA_FROM_DEVICE);
251 } else if (used_len) {
252 dma_sync_single_for_cpu(&efx->pci_dev->dev,
253 rx_buf->dma_addr, used_len,
254 DMA_FROM_DEVICE);
250 } 255 }
251 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) { 256 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
252 dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr, 257 dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
269static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, 274static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
270 struct efx_rx_buffer *rx_buf) 275 struct efx_rx_buffer *rx_buf)
271{ 276{
272 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); 277 efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
273 efx_free_rx_buffer(rx_queue->efx, rx_buf); 278 efx_free_rx_buffer(rx_queue->efx, rx_buf);
274} 279}
275 280
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
535 goto out; 540 goto out;
536 } 541 }
537 542
538 /* Release card resources - assumes all RX buffers consumed in-order 543 /* Release and/or sync DMA mapping - assumes all RX buffers
539 * per RX queue 544 * consumed in-order per RX queue
540 */ 545 */
541 efx_unmap_rx_buffer(efx, rx_buf); 546 efx_unmap_rx_buffer(efx, rx_buf, len);
542 547
543 /* Prefetch nice and early so data will (hopefully) be in cache by 548 /* Prefetch nice and early so data will (hopefully) be in cache by
544 * the time we look at it. 549 * the time we look at it.
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7e93df6585e7..01ffbc486982 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -731,7 +731,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
731 731
732 writel(vlan, &priv->host_port_regs->port_vlan); 732 writel(vlan, &priv->host_port_regs->port_vlan);
733 733
734 for (i = 0; i < 2; i++) 734 for (i = 0; i < priv->data.slaves; i++)
735 slave_write(priv->slaves + i, vlan, reg); 735 slave_write(priv->slaves + i, vlan, reg);
736 736
737 cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, 737 cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 29934446436a..abf7b6153d00 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -257,8 +257,7 @@ static struct phy_driver ksphy_driver[] = {
257 .phy_id = PHY_ID_KSZ9021, 257 .phy_id = PHY_ID_KSZ9021,
258 .phy_id_mask = 0x000ffffe, 258 .phy_id_mask = 0x000ffffe,
259 .name = "Micrel KSZ9021 Gigabit PHY", 259 .name = "Micrel KSZ9021 Gigabit PHY",
260 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause 260 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
261 | SUPPORTED_Asym_Pause),
262 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 261 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
263 .config_init = kszphy_config_init, 262 .config_init = kszphy_config_init,
264 .config_aneg = genphy_config_aneg, 263 .config_aneg = genphy_config_aneg,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 9930f9999561..3657b4a29124 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -44,13 +44,13 @@ MODULE_LICENSE("GPL");
44 44
45void phy_device_free(struct phy_device *phydev) 45void phy_device_free(struct phy_device *phydev)
46{ 46{
47 kfree(phydev); 47 put_device(&phydev->dev);
48} 48}
49EXPORT_SYMBOL(phy_device_free); 49EXPORT_SYMBOL(phy_device_free);
50 50
51static void phy_device_release(struct device *dev) 51static void phy_device_release(struct device *dev)
52{ 52{
53 phy_device_free(to_phy_device(dev)); 53 kfree(to_phy_device(dev));
54} 54}
55 55
56static struct phy_driver genphy_driver; 56static struct phy_driver genphy_driver;
@@ -201,6 +201,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
201 there's no driver _already_ loaded. */ 201 there's no driver _already_ loaded. */
202 request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id)); 202 request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
203 203
204 device_initialize(&dev->dev);
205
204 return dev; 206 return dev;
205} 207}
206EXPORT_SYMBOL(phy_device_create); 208EXPORT_SYMBOL(phy_device_create);
@@ -363,9 +365,9 @@ int phy_device_register(struct phy_device *phydev)
363 /* Run all of the fixups for this PHY */ 365 /* Run all of the fixups for this PHY */
364 phy_scan_fixups(phydev); 366 phy_scan_fixups(phydev);
365 367
366 err = device_register(&phydev->dev); 368 err = device_add(&phydev->dev);
367 if (err) { 369 if (err) {
368 pr_err("phy %d failed to register\n", phydev->addr); 370 pr_err("PHY %d failed to add\n", phydev->addr);
369 goto out; 371 goto out;
370 } 372 }
371 373
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index da92ed3797aa..3b6e9b83342d 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -156,6 +156,24 @@ config USB_NET_AX8817X
156 This driver creates an interface named "ethX", where X depends on 156 This driver creates an interface named "ethX", where X depends on
157 what other networking devices you have in use. 157 what other networking devices you have in use.
158 158
159config USB_NET_AX88179_178A
160 tristate "ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet"
161 depends on USB_USBNET
162 select CRC32
163 select PHYLIB
164 default y
165 help
166 This option adds support for ASIX AX88179 based USB 3.0/2.0
167 to Gigabit Ethernet adapters.
168
169 This driver should work with at least the following devices:
170 * ASIX AX88179
171 * ASIX AX88178A
172 * Sitcomm LN-032
173
174 This driver creates an interface named "ethX", where X depends on
175 what other networking devices you have in use.
176
159config USB_NET_CDCETHER 177config USB_NET_CDCETHER
160 tristate "CDC Ethernet support (smart devices such as cable modems)" 178 tristate "CDC Ethernet support (smart devices such as cable modems)"
161 depends on USB_USBNET 179 depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 478691326f37..119b06c9aa16 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_RTL8150) += rtl8150.o
9obj-$(CONFIG_USB_HSO) += hso.o 9obj-$(CONFIG_USB_HSO) += hso.o
10obj-$(CONFIG_USB_NET_AX8817X) += asix.o 10obj-$(CONFIG_USB_NET_AX8817X) += asix.o
11asix-y := asix_devices.o asix_common.o ax88172a.o 11asix-y := asix_devices.o asix_common.o ax88172a.o
12obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
12obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o 13obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
13obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o 14obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
14obj-$(CONFIG_USB_NET_DM9601) += dm9601.o 15obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 2205dbc8d32f..709753469099 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -924,6 +924,29 @@ static const struct driver_info ax88178_info = {
924 .tx_fixup = asix_tx_fixup, 924 .tx_fixup = asix_tx_fixup,
925}; 925};
926 926
927/*
928 * USBLINK 20F9 "USB 2.0 LAN" USB ethernet adapter, typically found in
929 * no-name packaging.
930 * USB device strings are:
931 * 1: Manufacturer: USBLINK
932 * 2: Product: HG20F9 USB2.0
933 * 3: Serial: 000003
934 * Appears to be compatible with Asix 88772B.
935 */
936static const struct driver_info hg20f9_info = {
937 .description = "HG20F9 USB 2.0 Ethernet",
938 .bind = ax88772_bind,
939 .unbind = ax88772_unbind,
940 .status = asix_status,
941 .link_reset = ax88772_link_reset,
942 .reset = ax88772_reset,
943 .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
944 FLAG_MULTI_PACKET,
945 .rx_fixup = asix_rx_fixup_common,
946 .tx_fixup = asix_tx_fixup,
947 .data = FLAG_EEPROM_MAC,
948};
949
927extern const struct driver_info ax88172a_info; 950extern const struct driver_info ax88172a_info;
928 951
929static const struct usb_device_id products [] = { 952static const struct usb_device_id products [] = {
@@ -1063,6 +1086,14 @@ static const struct usb_device_id products [] = {
1063 /* ASIX 88172a demo board */ 1086 /* ASIX 88172a demo board */
1064 USB_DEVICE(0x0b95, 0x172a), 1087 USB_DEVICE(0x0b95, 0x172a),
1065 .driver_info = (unsigned long) &ax88172a_info, 1088 .driver_info = (unsigned long) &ax88172a_info,
1089}, {
1090 /*
1091 * USBLINK HG20F9 "USB 2.0 LAN"
1092 * Appears to have gazumped Linksys's manufacturer ID but
1093 * doesn't (yet) conflict with any known Linksys product.
1094 */
1095 USB_DEVICE(0x066b, 0x20f9),
1096 .driver_info = (unsigned long) &hg20f9_info,
1066}, 1097},
1067 { }, // END 1098 { }, // END
1068}; 1099};
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
new file mode 100644
index 000000000000..71c27d8d214f
--- /dev/null
+++ b/drivers/net/usb/ax88179_178a.c
@@ -0,0 +1,1448 @@
1/*
2 * ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet Devices
3 *
4 * Copyright (C) 2011-2013 ASIX
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/etherdevice.h>
23#include <linux/mii.h>
24#include <linux/usb.h>
25#include <linux/crc32.h>
26#include <linux/usb/usbnet.h>
27
28#define AX88179_PHY_ID 0x03
29#define AX_EEPROM_LEN 0x100
30#define AX88179_EEPROM_MAGIC 0x17900b95
31#define AX_MCAST_FLTSIZE 8
32#define AX_MAX_MCAST 64
33#define AX_INT_PPLS_LINK ((u32)BIT(16))
34#define AX_RXHDR_L4_TYPE_MASK 0x1c
35#define AX_RXHDR_L4_TYPE_UDP 4
36#define AX_RXHDR_L4_TYPE_TCP 16
37#define AX_RXHDR_L3CSUM_ERR 2
38#define AX_RXHDR_L4CSUM_ERR 1
39#define AX_RXHDR_CRC_ERR ((u32)BIT(31))
40#define AX_RXHDR_DROP_ERR ((u32)BIT(30))
41#define AX_ACCESS_MAC 0x01
42#define AX_ACCESS_PHY 0x02
43#define AX_ACCESS_EEPROM 0x04
44#define AX_ACCESS_EFUS 0x05
45#define AX_PAUSE_WATERLVL_HIGH 0x54
46#define AX_PAUSE_WATERLVL_LOW 0x55
47
48#define PHYSICAL_LINK_STATUS 0x02
49 #define AX_USB_SS 0x04
50 #define AX_USB_HS 0x02
51
52#define GENERAL_STATUS 0x03
53/* Check AX88179 version. UA1:Bit2 = 0, UA2:Bit2 = 1 */
54 #define AX_SECLD 0x04
55
56#define AX_SROM_ADDR 0x07
57#define AX_SROM_CMD 0x0a
58 #define EEP_RD 0x04
59 #define EEP_BUSY 0x10
60
61#define AX_SROM_DATA_LOW 0x08
62#define AX_SROM_DATA_HIGH 0x09
63
64#define AX_RX_CTL 0x0b
65 #define AX_RX_CTL_DROPCRCERR 0x0100
66 #define AX_RX_CTL_IPE 0x0200
67 #define AX_RX_CTL_START 0x0080
68 #define AX_RX_CTL_AP 0x0020
69 #define AX_RX_CTL_AM 0x0010
70 #define AX_RX_CTL_AB 0x0008
71 #define AX_RX_CTL_AMALL 0x0002
72 #define AX_RX_CTL_PRO 0x0001
73 #define AX_RX_CTL_STOP 0x0000
74
75#define AX_NODE_ID 0x10
76#define AX_MULFLTARY 0x16
77
78#define AX_MEDIUM_STATUS_MODE 0x22
79 #define AX_MEDIUM_GIGAMODE 0x01
80 #define AX_MEDIUM_FULL_DUPLEX 0x02
81 #define AX_MEDIUM_ALWAYS_ONE 0x04
82 #define AX_MEDIUM_EN_125MHZ 0x08
83 #define AX_MEDIUM_RXFLOW_CTRLEN 0x10
84 #define AX_MEDIUM_TXFLOW_CTRLEN 0x20
85 #define AX_MEDIUM_RECEIVE_EN 0x100
86 #define AX_MEDIUM_PS 0x200
87 #define AX_MEDIUM_JUMBO_EN 0x8040
88
89#define AX_MONITOR_MOD 0x24
90 #define AX_MONITOR_MODE_RWLC 0x02
91 #define AX_MONITOR_MODE_RWMP 0x04
92 #define AX_MONITOR_MODE_PMEPOL 0x20
93 #define AX_MONITOR_MODE_PMETYPE 0x40
94
95#define AX_GPIO_CTRL 0x25
96 #define AX_GPIO_CTRL_GPIO3EN 0x80
97 #define AX_GPIO_CTRL_GPIO2EN 0x40
98 #define AX_GPIO_CTRL_GPIO1EN 0x20
99
100#define AX_PHYPWR_RSTCTL 0x26
101 #define AX_PHYPWR_RSTCTL_BZ 0x0010
102 #define AX_PHYPWR_RSTCTL_IPRL 0x0020
103 #define AX_PHYPWR_RSTCTL_AT 0x1000
104
105#define AX_RX_BULKIN_QCTRL 0x2e
106#define AX_CLK_SELECT 0x33
107 #define AX_CLK_SELECT_BCS 0x01
108 #define AX_CLK_SELECT_ACS 0x02
109 #define AX_CLK_SELECT_ULR 0x08
110
111#define AX_RXCOE_CTL 0x34
112 #define AX_RXCOE_IP 0x01
113 #define AX_RXCOE_TCP 0x02
114 #define AX_RXCOE_UDP 0x04
115 #define AX_RXCOE_TCPV6 0x20
116 #define AX_RXCOE_UDPV6 0x40
117
118#define AX_TXCOE_CTL 0x35
119 #define AX_TXCOE_IP 0x01
120 #define AX_TXCOE_TCP 0x02
121 #define AX_TXCOE_UDP 0x04
122 #define AX_TXCOE_TCPV6 0x20
123 #define AX_TXCOE_UDPV6 0x40
124
125#define AX_LEDCTRL 0x73
126
127#define GMII_PHY_PHYSR 0x11
128 #define GMII_PHY_PHYSR_SMASK 0xc000
129 #define GMII_PHY_PHYSR_GIGA 0x8000
130 #define GMII_PHY_PHYSR_100 0x4000
131 #define GMII_PHY_PHYSR_FULL 0x2000
132 #define GMII_PHY_PHYSR_LINK 0x400
133
134#define GMII_LED_ACT 0x1a
135 #define GMII_LED_ACTIVE_MASK 0xff8f
136 #define GMII_LED0_ACTIVE BIT(4)
137 #define GMII_LED1_ACTIVE BIT(5)
138 #define GMII_LED2_ACTIVE BIT(6)
139
140#define GMII_LED_LINK 0x1c
141 #define GMII_LED_LINK_MASK 0xf888
142 #define GMII_LED0_LINK_10 BIT(0)
143 #define GMII_LED0_LINK_100 BIT(1)
144 #define GMII_LED0_LINK_1000 BIT(2)
145 #define GMII_LED1_LINK_10 BIT(4)
146 #define GMII_LED1_LINK_100 BIT(5)
147 #define GMII_LED1_LINK_1000 BIT(6)
148 #define GMII_LED2_LINK_10 BIT(8)
149 #define GMII_LED2_LINK_100 BIT(9)
150 #define GMII_LED2_LINK_1000 BIT(10)
151 #define LED0_ACTIVE BIT(0)
152 #define LED0_LINK_10 BIT(1)
153 #define LED0_LINK_100 BIT(2)
154 #define LED0_LINK_1000 BIT(3)
155 #define LED0_FD BIT(4)
156 #define LED0_USB3_MASK 0x001f
157 #define LED1_ACTIVE BIT(5)
158 #define LED1_LINK_10 BIT(6)
159 #define LED1_LINK_100 BIT(7)
160 #define LED1_LINK_1000 BIT(8)
161 #define LED1_FD BIT(9)
162 #define LED1_USB3_MASK 0x03e0
163 #define LED2_ACTIVE BIT(10)
164 #define LED2_LINK_1000 BIT(13)
165 #define LED2_LINK_100 BIT(12)
166 #define LED2_LINK_10 BIT(11)
167 #define LED2_FD BIT(14)
168 #define LED_VALID BIT(15)
169 #define LED2_USB3_MASK 0x7c00
170
171#define GMII_PHYPAGE 0x1e
172#define GMII_PHY_PAGE_SELECT 0x1f
173 #define GMII_PHY_PGSEL_EXT 0x0007
174 #define GMII_PHY_PGSEL_PAGE0 0x0000
175
176struct ax88179_data {
177 u16 rxctl;
178 u16 reserved;
179};
180
181struct ax88179_int_data {
182 __le32 intdata1;
183 __le32 intdata2;
184};
185
186static const struct {
187 unsigned char ctrl, timer_l, timer_h, size, ifg;
188} AX88179_BULKIN_SIZE[] = {
189 {7, 0x4f, 0, 0x12, 0xff},
190 {7, 0x20, 3, 0x16, 0xff},
191 {7, 0xae, 7, 0x18, 0xff},
192 {7, 0xcc, 0x4c, 0x18, 8},
193};
194
195static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
196 u16 size, void *data, int in_pm)
197{
198 int ret;
199 int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
200
201 BUG_ON(!dev);
202
203 if (!in_pm)
204 fn = usbnet_read_cmd;
205 else
206 fn = usbnet_read_cmd_nopm;
207
208 ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
209 value, index, data, size);
210
211 if (unlikely(ret < 0))
212 netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
213 index, ret);
214
215 return ret;
216}
217
218static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
219 u16 size, void *data, int in_pm)
220{
221 int ret;
222 int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
223
224 BUG_ON(!dev);
225
226 if (!in_pm)
227 fn = usbnet_write_cmd;
228 else
229 fn = usbnet_write_cmd_nopm;
230
231 ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
232 value, index, data, size);
233
234 if (unlikely(ret < 0))
235 netdev_warn(dev->net, "Failed to write reg index 0x%04x: %d\n",
236 index, ret);
237
238 return ret;
239}
240
241static void ax88179_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
242 u16 index, u16 size, void *data)
243{
244 u16 buf;
245
246 if (2 == size) {
247 buf = *((u16 *)data);
248 cpu_to_le16s(&buf);
249 usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
250 USB_RECIP_DEVICE, value, index, &buf,
251 size);
252 } else {
253 usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
254 USB_RECIP_DEVICE, value, index, data,
255 size);
256 }
257}
258
259static int ax88179_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
260 u16 index, u16 size, void *data)
261{
262 int ret;
263
264 if (2 == size) {
265 u16 buf;
266 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
267 le16_to_cpus(&buf);
268 *((u16 *)data) = buf;
269 } else if (4 == size) {
270 u32 buf;
271 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
272 le32_to_cpus(&buf);
273 *((u32 *)data) = buf;
274 } else {
275 ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 1);
276 }
277
278 return ret;
279}
280
281static int ax88179_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
282 u16 index, u16 size, void *data)
283{
284 int ret;
285
286 if (2 == size) {
287 u16 buf;
288 buf = *((u16 *)data);
289 cpu_to_le16s(&buf);
290 ret = __ax88179_write_cmd(dev, cmd, value, index,
291 size, &buf, 1);
292 } else {
293 ret = __ax88179_write_cmd(dev, cmd, value, index,
294 size, data, 1);
295 }
296
297 return ret;
298}
299
300static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
301 u16 size, void *data)
302{
303 int ret;
304
305 if (2 == size) {
306 u16 buf;
307 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
308 le16_to_cpus(&buf);
309 *((u16 *)data) = buf;
310 } else if (4 == size) {
311 u32 buf;
312 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
313 le32_to_cpus(&buf);
314 *((u32 *)data) = buf;
315 } else {
316 ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 0);
317 }
318
319 return ret;
320}
321
322static int ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
323 u16 size, void *data)
324{
325 int ret;
326
327 if (2 == size) {
328 u16 buf;
329 buf = *((u16 *)data);
330 cpu_to_le16s(&buf);
331 ret = __ax88179_write_cmd(dev, cmd, value, index,
332 size, &buf, 0);
333 } else {
334 ret = __ax88179_write_cmd(dev, cmd, value, index,
335 size, data, 0);
336 }
337
338 return ret;
339}
340
341static void ax88179_status(struct usbnet *dev, struct urb *urb)
342{
343 struct ax88179_int_data *event;
344 u32 link;
345
346 if (urb->actual_length < 8)
347 return;
348
349 event = urb->transfer_buffer;
350 le32_to_cpus((void *)&event->intdata1);
351
352 link = (((__force u32)event->intdata1) & AX_INT_PPLS_LINK) >> 16;
353
354 if (netif_carrier_ok(dev->net) != link) {
355 if (link)
356 usbnet_defer_kevent(dev, EVENT_LINK_RESET);
357 else
358 netif_carrier_off(dev->net);
359
360 netdev_info(dev->net, "ax88179 - Link status is: %d\n", link);
361 }
362}
363
364static int ax88179_mdio_read(struct net_device *netdev, int phy_id, int loc)
365{
366 struct usbnet *dev = netdev_priv(netdev);
367 u16 res;
368
369 ax88179_read_cmd(dev, AX_ACCESS_PHY, phy_id, (__u16)loc, 2, &res);
370 return res;
371}
372
373static void ax88179_mdio_write(struct net_device *netdev, int phy_id, int loc,
374 int val)
375{
376 struct usbnet *dev = netdev_priv(netdev);
377 u16 res = (u16) val;
378
379 ax88179_write_cmd(dev, AX_ACCESS_PHY, phy_id, (__u16)loc, 2, &res);
380}
381
382static int ax88179_suspend(struct usb_interface *intf, pm_message_t message)
383{
384 struct usbnet *dev = usb_get_intfdata(intf);
385 u16 tmp16;
386 u8 tmp8;
387
388 usbnet_suspend(intf, message);
389
390 /* Disable RX path */
391 ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
392 2, 2, &tmp16);
393 tmp16 &= ~AX_MEDIUM_RECEIVE_EN;
394 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
395 2, 2, &tmp16);
396
397 /* Force bulk-in zero length */
398 ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
399 2, 2, &tmp16);
400
401 tmp16 |= AX_PHYPWR_RSTCTL_BZ | AX_PHYPWR_RSTCTL_IPRL;
402 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
403 2, 2, &tmp16);
404
405 /* change clock */
406 tmp8 = 0;
407 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
408
409 /* Configure RX control register => stop operation */
410 tmp16 = AX_RX_CTL_STOP;
411 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
412
413 return 0;
414}
415
416/* This function is used to enable the autodetach function. */
417/* Whether it is enabled is determined by offset 0x43 of the EEPROM */
418static int ax88179_auto_detach(struct usbnet *dev, int in_pm)
419{
420 u16 tmp16;
421 u8 tmp8;
422 int (*fnr)(struct usbnet *, u8, u16, u16, u16, void *);
423 int (*fnw)(struct usbnet *, u8, u16, u16, u16, void *);
424
425 if (!in_pm) {
426 fnr = ax88179_read_cmd;
427 fnw = ax88179_write_cmd;
428 } else {
429 fnr = ax88179_read_cmd_nopm;
430 fnw = ax88179_write_cmd_nopm;
431 }
432
433 if (fnr(dev, AX_ACCESS_EEPROM, 0x43, 1, 2, &tmp16) < 0)
434 return 0;
435
436 if ((tmp16 == 0xFFFF) || (!(tmp16 & 0x0100)))
437 return 0;
438
439 /* Enable Auto Detach bit */
440 tmp8 = 0;
441 fnr(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
442 tmp8 |= AX_CLK_SELECT_ULR;
443 fnw(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
444
445 fnr(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
446 tmp16 |= AX_PHYPWR_RSTCTL_AT;
447 fnw(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
448
449 return 0;
450}
451
452static int ax88179_resume(struct usb_interface *intf)
453{
454 struct usbnet *dev = usb_get_intfdata(intf);
455 u16 tmp16;
456 u8 tmp8;
457
458 netif_carrier_off(dev->net);
459
460 /* Power up ethernet PHY */
461 tmp16 = 0;
462 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
463 2, 2, &tmp16);
464 udelay(1000);
465
466 tmp16 = AX_PHYPWR_RSTCTL_IPRL;
467 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL,
468 2, 2, &tmp16);
469 msleep(200);
470
471 /* Ethernet PHY Auto Detach */
472 ax88179_auto_detach(dev, 1);
473
474 /* Enable clock */
475 ax88179_read_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
476 tmp8 |= AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
477 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp8);
478 msleep(100);
479
480 /* Configure RX control register => start operation */
481 tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
482 AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
483 ax88179_write_cmd_nopm(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
484
485 return usbnet_resume(intf);
486}
487
488static void
489ax88179_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
490{
491 struct usbnet *dev = netdev_priv(net);
492 u8 opt;
493
494 if (ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
495 1, 1, &opt) < 0) {
496 wolinfo->supported = 0;
497 wolinfo->wolopts = 0;
498 return;
499 }
500
501 wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
502 wolinfo->wolopts = 0;
503 if (opt & AX_MONITOR_MODE_RWLC)
504 wolinfo->wolopts |= WAKE_PHY;
505 if (opt & AX_MONITOR_MODE_RWMP)
506 wolinfo->wolopts |= WAKE_MAGIC;
507}
508
509static int
510ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
511{
512 struct usbnet *dev = netdev_priv(net);
513 u8 opt = 0;
514
515 if (wolinfo->wolopts & WAKE_PHY)
516 opt |= AX_MONITOR_MODE_RWLC;
517 if (wolinfo->wolopts & WAKE_MAGIC)
518 opt |= AX_MONITOR_MODE_RWMP;
519
520 if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD,
521 1, 1, &opt) < 0)
522 return -EINVAL;
523
524 return 0;
525}
526
527static int ax88179_get_eeprom_len(struct net_device *net)
528{
529 return AX_EEPROM_LEN;
530}
531
532static int
533ax88179_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
534 u8 *data)
535{
536 struct usbnet *dev = netdev_priv(net);
537 u16 *eeprom_buff;
538 int first_word, last_word;
539 int i, ret;
540
541 if (eeprom->len == 0)
542 return -EINVAL;
543
544 eeprom->magic = AX88179_EEPROM_MAGIC;
545
546 first_word = eeprom->offset >> 1;
547 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
548 eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
549 GFP_KERNEL);
550 if (!eeprom_buff)
551 return -ENOMEM;
552
553 /* ax88179/178A returns 2 bytes from eeprom on read */
554 for (i = first_word; i <= last_word; i++) {
555 ret = __ax88179_read_cmd(dev, AX_ACCESS_EEPROM, i, 1, 2,
556 &eeprom_buff[i - first_word],
557 0);
558 if (ret < 0) {
559 kfree(eeprom_buff);
560 return -EIO;
561 }
562 }
563
564 memcpy(data, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
565 kfree(eeprom_buff);
566 return 0;
567}
568
569static int ax88179_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
570{
571 struct usbnet *dev = netdev_priv(net);
572 return mii_ethtool_gset(&dev->mii, cmd);
573}
574
575static int ax88179_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
576{
577 struct usbnet *dev = netdev_priv(net);
578 return mii_ethtool_sset(&dev->mii, cmd);
579}
580
581
582static int ax88179_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
583{
584 struct usbnet *dev = netdev_priv(net);
585 return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
586}
587
588static const struct ethtool_ops ax88179_ethtool_ops = {
589 .get_link = ethtool_op_get_link,
590 .get_msglevel = usbnet_get_msglevel,
591 .set_msglevel = usbnet_set_msglevel,
592 .get_wol = ax88179_get_wol,
593 .set_wol = ax88179_set_wol,
594 .get_eeprom_len = ax88179_get_eeprom_len,
595 .get_eeprom = ax88179_get_eeprom,
596 .get_settings = ax88179_get_settings,
597 .set_settings = ax88179_set_settings,
598 .nway_reset = usbnet_nway_reset,
599};
600
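/* Receive filter setup.  Multicast addresses are hashed into a 64-bit
 * filter using the top six bits of the Ethernet CRC and written to the
 * AX_MULFLTARY registers; the writes go through the async helper,
 * presumably because ndo_set_rx_mode runs in atomic context.
 */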
601static void ax88179_set_multicast(struct net_device *net)
602{
603 struct usbnet *dev = netdev_priv(net);
604 struct ax88179_data *data = (struct ax88179_data *)dev->data;
605 u8 *m_filter = ((u8 *)dev->data) + 12;
606
607 data->rxctl = (AX_RX_CTL_START | AX_RX_CTL_AB | AX_RX_CTL_IPE);
608
609 if (net->flags & IFF_PROMISC) {
610 data->rxctl |= AX_RX_CTL_PRO;
611 } else if (net->flags & IFF_ALLMULTI ||
612 netdev_mc_count(net) > AX_MAX_MCAST) {
613 data->rxctl |= AX_RX_CTL_AMALL;
614 } else if (netdev_mc_empty(net)) {
615 /* just broadcast and directed */
616 } else {
617 /* We use the 20 byte dev->data for our 8 byte filter buffer
618 * to avoid allocating memory that is tricky to free later
619 */
620 u32 crc_bits;
621 struct netdev_hw_addr *ha;
622
623 memset(m_filter, 0, AX_MCAST_FLTSIZE);
624
625 netdev_for_each_mc_addr(ha, net) {
626 crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
627 *(m_filter + (crc_bits >> 3)) |= (1 << (crc_bits & 7));
628 }
629
630 ax88179_write_cmd_async(dev, AX_ACCESS_MAC, AX_MULFLTARY,
631 AX_MCAST_FLTSIZE, AX_MCAST_FLTSIZE,
632 m_filter);
633
634 data->rxctl |= AX_RX_CTL_AM;
635 }
636
637 ax88179_write_cmd_async(dev, AX_ACCESS_MAC, AX_RX_CTL,
638 2, 2, &data->rxctl);
639}
640
641static int
642ax88179_set_features(struct net_device *net, netdev_features_t features)
643{
644 u8 tmp;
645 struct usbnet *dev = netdev_priv(net);
646 netdev_features_t changed = net->features ^ features;
647
648 if (changed & NETIF_F_IP_CSUM) {
649 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
650 tmp ^= AX_TXCOE_TCP | AX_TXCOE_UDP;
651 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
652 }
653
654 if (changed & NETIF_F_IPV6_CSUM) {
655 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
656 tmp ^= AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
657 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, &tmp);
658 }
659
660 if (changed & NETIF_F_RXCSUM) {
661 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, &tmp);
662 tmp ^= AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
663 AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
664 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, &tmp);
665 }
666
667 return 0;
668}
669
670static int ax88179_change_mtu(struct net_device *net, int new_mtu)
671{
672 struct usbnet *dev = netdev_priv(net);
673 u16 tmp16;
674
675 if (new_mtu <= 0 || new_mtu > 4088)
676 return -EINVAL;
677
678 net->mtu = new_mtu;
679 dev->hard_mtu = net->mtu + net->hard_header_len;
680
681 if (net->mtu > 1500) {
682 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
683 2, 2, &tmp16);
684 tmp16 |= AX_MEDIUM_JUMBO_EN;
685 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
686 2, 2, &tmp16);
687 } else {
688 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
689 2, 2, &tmp16);
690 tmp16 &= ~AX_MEDIUM_JUMBO_EN;
691 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
692 2, 2, &tmp16);
693 }
694
695 return 0;
696}
697
698static int ax88179_set_mac_addr(struct net_device *net, void *p)
699{
700 struct usbnet *dev = netdev_priv(net);
701 struct sockaddr *addr = p;
702
703 if (netif_running(net))
704 return -EBUSY;
705 if (!is_valid_ether_addr(addr->sa_data))
706 return -EADDRNOTAVAIL;
707
708 memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
709
710 /* Set the MAC address */
711 return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
712 ETH_ALEN, net->dev_addr);
713}
714
715static const struct net_device_ops ax88179_netdev_ops = {
716 .ndo_open = usbnet_open,
717 .ndo_stop = usbnet_stop,
718 .ndo_start_xmit = usbnet_start_xmit,
719 .ndo_tx_timeout = usbnet_tx_timeout,
720 .ndo_change_mtu = ax88179_change_mtu,
721 .ndo_set_mac_address = ax88179_set_mac_addr,
722 .ndo_validate_addr = eth_validate_addr,
723 .ndo_do_ioctl = ax88179_ioctl,
724 .ndo_set_rx_mode = ax88179_set_multicast,
725 .ndo_set_features = ax88179_set_features,
726};
727
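/* Sanity-check the EEPROM contents: the first six 16-bit words are read
 * through the SROM command/data registers, then bytes 6-9 are summed and
 * folded to 8 bits; that result plus the checksum byte at offset 10 is
 * expected to equal 0xff.
 */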
728static int ax88179_check_eeprom(struct usbnet *dev)
729{
730 u8 i, buf, eeprom[20];
731 u16 csum, delay = HZ / 10;
732 unsigned long jtimeout;
733
734 /* Read EEPROM content */
735 for (i = 0; i < 6; i++) {
736 buf = i;
737 if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_ADDR,
738 1, 1, &buf) < 0)
739 return -EINVAL;
740
741 buf = EEP_RD;
742 if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
743 1, 1, &buf) < 0)
744 return -EINVAL;
745
746 jtimeout = jiffies + delay;
747 do {
748 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
749 1, 1, &buf);
750
751 if (time_after(jiffies, jtimeout))
752 return -EINVAL;
753
754 } while (buf & EEP_BUSY);
755
756 __ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_LOW,
757 2, 2, &eeprom[i * 2], 0);
758
759 if ((i == 0) && (eeprom[0] == 0xFF))
760 return -EINVAL;
761 }
762
763 csum = eeprom[6] + eeprom[7] + eeprom[8] + eeprom[9];
764 csum = (csum >> 8) + (csum & 0xff);
765 if ((csum + eeprom[10]) != 0xff)
766 return -EINVAL;
767
768 return 0;
769}
770
771static int ax88179_check_efuse(struct usbnet *dev, u16 *ledmode)
772{
773 u8 i;
774 u8 efuse[64];
775 u16 csum = 0;
776
777 if (ax88179_read_cmd(dev, AX_ACCESS_EFUS, 0, 64, 64, efuse) < 0)
778 return -EINVAL;
779
780 if (*efuse == 0xFF)
781 return -EINVAL;
782
783 for (i = 0; i < 64; i++)
784 csum = csum + efuse[i];
785
786 while (csum > 255)
787 csum = (csum & 0x00FF) + ((csum >> 8) & 0x00FF);
788
789 if (csum != 0xFF)
790 return -EINVAL;
791
792 *ledmode = (efuse[51] << 8) | efuse[52];
793
794 return 0;
795}
796
797static int ax88179_convert_old_led(struct usbnet *dev, u16 *ledvalue)
798{
799 u16 led;
800
801 /* Load the old eFuse LED Mode */
802 if (ax88179_read_cmd(dev, AX_ACCESS_EEPROM, 0x3C, 1, 2, &led) < 0)
803 return -EINVAL;
804
805 led >>= 8;
806 switch (led) {
807 case 0xFF:
808 led = LED0_ACTIVE | LED1_LINK_10 | LED1_LINK_100 |
809 LED1_LINK_1000 | LED2_ACTIVE | LED2_LINK_10 |
810 LED2_LINK_100 | LED2_LINK_1000 | LED_VALID;
811 break;
812 case 0xFE:
813 led = LED0_ACTIVE | LED1_LINK_1000 | LED2_LINK_100 | LED_VALID;
814 break;
815 case 0xFD:
816 led = LED0_ACTIVE | LED1_LINK_1000 | LED2_LINK_100 |
817 LED2_LINK_10 | LED_VALID;
818 break;
819 case 0xFC:
820 led = LED0_ACTIVE | LED1_ACTIVE | LED1_LINK_1000 | LED2_ACTIVE |
821 LED2_LINK_100 | LED2_LINK_10 | LED_VALID;
822 break;
823 default:
824 led = LED0_ACTIVE | LED1_LINK_10 | LED1_LINK_100 |
825 LED1_LINK_1000 | LED2_ACTIVE | LED2_LINK_10 |
826 LED2_LINK_100 | LED2_LINK_1000 | LED_VALID;
827 break;
828 }
829
830 *ledvalue = led;
831
832 return 0;
833}
834
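/* Program the LED mode.  The LED map appears to come from EEPROM word
 * 0x42 or, failing that, from the eFuse block; ax88179_convert_old_led()
 * supplies a default when no valid map is found.  The map is then
 * translated into the extended-page PHY LED activity/link registers and
 * the MAC full-duplex LED control register.
 */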
835static int ax88179_led_setting(struct usbnet *dev)
836{
837 u8 ledfd, value = 0;
838 u16 tmp, ledact, ledlink, ledvalue = 0, delay = HZ / 10;
839 unsigned long jtimeout;
840
841 /* Check AX88179 version: UA1 or UA2 */
842 ax88179_read_cmd(dev, AX_ACCESS_MAC, GENERAL_STATUS, 1, 1, &value);
843
844 if (!(value & AX_SECLD)) { /* UA1 */
845 value = AX_GPIO_CTRL_GPIO3EN | AX_GPIO_CTRL_GPIO2EN |
846 AX_GPIO_CTRL_GPIO1EN;
847 if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_GPIO_CTRL,
848 1, 1, &value) < 0)
849 return -EINVAL;
850 }
851
852 /* Check EEPROM */
853 if (!ax88179_check_eeprom(dev)) {
854 value = 0x42;
855 if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_ADDR,
856 1, 1, &value) < 0)
857 return -EINVAL;
858
859 value = EEP_RD;
860 if (ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
861 1, 1, &value) < 0)
862 return -EINVAL;
863
864 jtimeout = jiffies + delay;
865 do {
866 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_CMD,
867 1, 1, &value);
868
869 if (time_after(jiffies, jtimeout))
870 return -EINVAL;
871
872 } while (value & EEP_BUSY);
873
874 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_HIGH,
875 1, 1, &value);
876 ledvalue = (value << 8);
877
878 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_SROM_DATA_LOW,
879 1, 1, &value);
880 ledvalue |= value;
881
882 /* load internal ROM for default setting */
883 if ((ledvalue == 0xFFFF) || ((ledvalue & LED_VALID) == 0))
884 ax88179_convert_old_led(dev, &ledvalue);
885
886 } else if (!ax88179_check_efuse(dev, &ledvalue)) {
887 if ((ledvalue == 0xFFFF) || ((ledvalue & LED_VALID) == 0))
888 ax88179_convert_old_led(dev, &ledvalue);
889 } else {
890 ax88179_convert_old_led(dev, &ledvalue);
891 }
892
893 tmp = GMII_PHY_PGSEL_EXT;
894 ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
895 GMII_PHY_PAGE_SELECT, 2, &tmp);
896
897 tmp = 0x2c;
898 ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
899 GMII_PHYPAGE, 2, &tmp);
900
901 ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
902 GMII_LED_ACT, 2, &ledact);
903
904 ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
905 GMII_LED_LINK, 2, &ledlink);
906
907 ledact &= GMII_LED_ACTIVE_MASK;
908 ledlink &= GMII_LED_LINK_MASK;
909
910 if (ledvalue & LED0_ACTIVE)
911 ledact |= GMII_LED0_ACTIVE;
912
913 if (ledvalue & LED1_ACTIVE)
914 ledact |= GMII_LED1_ACTIVE;
915
916 if (ledvalue & LED2_ACTIVE)
917 ledact |= GMII_LED2_ACTIVE;
918
919 if (ledvalue & LED0_LINK_10)
920 ledlink |= GMII_LED0_LINK_10;
921
922 if (ledvalue & LED1_LINK_10)
923 ledlink |= GMII_LED1_LINK_10;
924
925 if (ledvalue & LED2_LINK_10)
926 ledlink |= GMII_LED2_LINK_10;
927
928 if (ledvalue & LED0_LINK_100)
929 ledlink |= GMII_LED0_LINK_100;
930
931 if (ledvalue & LED1_LINK_100)
932 ledlink |= GMII_LED1_LINK_100;
933
934 if (ledvalue & LED2_LINK_100)
935 ledlink |= GMII_LED2_LINK_100;
936
937 if (ledvalue & LED0_LINK_1000)
938 ledlink |= GMII_LED0_LINK_1000;
939
940 if (ledvalue & LED1_LINK_1000)
941 ledlink |= GMII_LED1_LINK_1000;
942
943 if (ledvalue & LED2_LINK_1000)
944 ledlink |= GMII_LED2_LINK_1000;
945
946 tmp = ledact;
947 ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
948 GMII_LED_ACT, 2, &tmp);
949
950 tmp = ledlink;
951 ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
952 GMII_LED_LINK, 2, &tmp);
953
954 tmp = GMII_PHY_PGSEL_PAGE0;
955 ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
956 GMII_PHY_PAGE_SELECT, 2, &tmp);
957
958 /* LED full duplex setting */
959 ledfd = 0;
960 if (ledvalue & LED0_FD)
961 ledfd |= 0x01;
962 else if ((ledvalue & LED0_USB3_MASK) == 0)
963 ledfd |= 0x02;
964
965 if (ledvalue & LED1_FD)
966 ledfd |= 0x04;
967 else if ((ledvalue & LED1_USB3_MASK) == 0)
968 ledfd |= 0x08;
969
970 if (ledvalue & LED2_FD)
971 ledfd |= 0x10;
972 else if ((ledvalue & LED2_USB3_MASK) == 0)
973 ledfd |= 0x20;
974
975 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_LEDCTRL, 1, 1, &ledfd);
976
977 return 0;
978}
979
980static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
981{
982 u8 buf[5];
983 u16 *tmp16;
984 u8 *tmp;
985 struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
986
987 usbnet_get_endpoints(dev, intf);
988
989 tmp16 = (u16 *)buf;
990 tmp = (u8 *)buf;
991
992 memset(ax179_data, 0, sizeof(*ax179_data));
993
994 /* Power up ethernet PHY */
995 *tmp16 = 0;
996 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
997 *tmp16 = AX_PHYPWR_RSTCTL_IPRL;
998 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
999 msleep(200);
1000
1001 *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
1002 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
1003 msleep(100);
1004
1005 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
1006 ETH_ALEN, dev->net->dev_addr);
1007 memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
1008
1009 /* RX bulk configuration */
1010 memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
1011 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);
1012
1013 dev->rx_urb_size = 1024 * 20;
1014
1015 *tmp = 0x34;
1016 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_LOW, 1, 1, tmp);
1017
1018 *tmp = 0x52;
1019 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_HIGH,
1020 1, 1, tmp);
1021
1022 dev->net->netdev_ops = &ax88179_netdev_ops;
1023 dev->net->ethtool_ops = &ax88179_ethtool_ops;
1024 dev->net->needed_headroom = 8;
1025
1026 /* Initialize MII structure */
1027 dev->mii.dev = dev->net;
1028 dev->mii.mdio_read = ax88179_mdio_read;
1029 dev->mii.mdio_write = ax88179_mdio_write;
1030 dev->mii.phy_id_mask = 0xff;
1031 dev->mii.reg_num_mask = 0xff;
1032 dev->mii.phy_id = 0x03;
1033 dev->mii.supports_gmii = 1;
1034
1035 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1036 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
1037
1038 dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1039 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
1040
1041 /* Enable checksum offload */
1042 *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
1043 AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
1044 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, tmp);
1045
1046 *tmp = AX_TXCOE_IP | AX_TXCOE_TCP | AX_TXCOE_UDP |
1047 AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
1048 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, tmp);
1049
1050 /* Configure RX control register => start operation */
1051 *tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
1052 AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
1053 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, tmp16);
1054
1055 *tmp = AX_MONITOR_MODE_PMETYPE | AX_MONITOR_MODE_PMEPOL |
1056 AX_MONITOR_MODE_RWMP;
1057 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD, 1, 1, tmp);
1058
1059 /* Configure default medium type => giga */
1060 *tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
1061 AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE |
1062 AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE;
1063 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
1064 2, 2, tmp16);
1065
1066 ax88179_led_setting(dev);
1067
1068 /* Restart autoneg */
1069 mii_nway_restart(&dev->mii);
1070
1071 netif_carrier_off(dev->net);
1072
1073 return 0;
1074}
1075
1076static void ax88179_unbind(struct usbnet *dev, struct usb_interface *intf)
1077{
1078 u16 tmp16;
1079
1080 /* Configure RX control register => stop operation */
1081 tmp16 = AX_RX_CTL_STOP;
1082 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &tmp16);
1083
1084 tmp16 = 0;
1085 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, &tmp16);
1086
1087 /* Power down ethernet PHY */
1088 tmp16 = 0;
1089 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, &tmp16);
1090}
1091
1092static void
1093ax88179_rx_checksum(struct sk_buff *skb, u32 *pkt_hdr)
1094{
1095 skb->ip_summed = CHECKSUM_NONE;
1096
1097 /* checksum error bit is set */
1098 if ((*pkt_hdr & AX_RXHDR_L3CSUM_ERR) ||
1099 (*pkt_hdr & AX_RXHDR_L4CSUM_ERR))
1100 return;
1101
1102 /* It must be a TCP or UDP packet with a valid checksum */
1103 if (((*pkt_hdr & AX_RXHDR_L4_TYPE_MASK) == AX_RXHDR_L4_TYPE_TCP) ||
1104 ((*pkt_hdr & AX_RXHDR_L4_TYPE_MASK) == AX_RXHDR_L4_TYPE_UDP))
1105 skb->ip_summed = CHECKSUM_UNNECESSARY;
1106}
1107
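/* The device packs several received frames into one bulk-in URB.  The
 * last four bytes of the URB (stripped below) appear to hold the frame
 * count in the low 16 bits and the offset of a per-frame header array in
 * the high 16 bits; each 32-bit per-frame header carries the frame
 * length in bits 16-28 plus checksum/error flags.  Every frame starts
 * with a 2-byte alignment pad and is rounded up to an 8-byte boundary.
 */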
1108static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1109{
1110 struct sk_buff *ax_skb;
1111 int pkt_cnt;
1112 u32 rx_hdr;
1113 u16 hdr_off;
1114 u32 *pkt_hdr;
1115
1116 skb_trim(skb, skb->len - 4);
1117 memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
1118 le32_to_cpus(&rx_hdr);
1119
1120 pkt_cnt = (u16)rx_hdr;
1121 hdr_off = (u16)(rx_hdr >> 16);
1122 pkt_hdr = (u32 *)(skb->data + hdr_off);
1123
1124 while (pkt_cnt--) {
1125 u16 pkt_len;
1126
1127 le32_to_cpus(pkt_hdr);
1128 pkt_len = (*pkt_hdr >> 16) & 0x1fff;
1129
1130 /* Check CRC or runt packet */
1131 if ((*pkt_hdr & AX_RXHDR_CRC_ERR) ||
1132 (*pkt_hdr & AX_RXHDR_DROP_ERR)) {
1133 skb_pull(skb, (pkt_len + 7) & 0xFFF8);
1134 pkt_hdr++;
1135 continue;
1136 }
1137
1138 if (pkt_cnt == 0) {
1139 /* Skip IP alignment pseudo header */
1140 skb_pull(skb, 2);
1141 skb->len = pkt_len;
1142 skb_set_tail_pointer(skb, pkt_len);
1143 skb->truesize = pkt_len + sizeof(struct sk_buff);
1144 ax88179_rx_checksum(skb, pkt_hdr);
1145 return 1;
1146 }
1147
1148 ax_skb = skb_clone(skb, GFP_ATOMIC);
1149 if (ax_skb) {
1150 ax_skb->len = pkt_len;
1151 ax_skb->data = skb->data + 2;
1152 skb_set_tail_pointer(ax_skb, pkt_len);
1153 ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
1154 ax88179_rx_checksum(ax_skb, pkt_hdr);
1155 usbnet_skb_return(dev, ax_skb);
1156 } else {
1157 return 0;
1158 }
1159
1160 skb_pull(skb, (pkt_len + 7) & 0xFFF8);
1161 pkt_hdr++;
1162 }
1163 return 1;
1164}
1165
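/* On transmit, two little-endian 32-bit words are pushed in front of the
 * frame: the first carries the packet length, the second the TSO MSS.
 * Bits 15 and 31 of the second word appear to request extra padding and
 * are set when the frame plus the 8-byte header would end exactly on a
 * USB packet boundary.
 */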
1166static struct sk_buff *
1167ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
1168{
1169 u32 tx_hdr1, tx_hdr2;
1170 int frame_size = dev->maxpacket;
1171 int mss = skb_shinfo(skb)->gso_size;
1172 int headroom;
1173 int tailroom;
1174
1175 tx_hdr1 = skb->len;
1176 tx_hdr2 = mss;
1177 if (((skb->len + 8) % frame_size) == 0)
1178 tx_hdr2 |= 0x80008000; /* Enable padding */
1179
1180 skb_linearize(skb);
1181 headroom = skb_headroom(skb);
1182 tailroom = skb_tailroom(skb);
1183
1184 if (!skb_header_cloned(skb) &&
1185 !skb_cloned(skb) &&
1186 (headroom + tailroom) >= 8) {
1187 if (headroom < 8) {
1188 skb->data = memmove(skb->head + 8, skb->data, skb->len);
1189 skb_set_tail_pointer(skb, skb->len);
1190 }
1191 } else {
1192 struct sk_buff *skb2;
1193
1194 skb2 = skb_copy_expand(skb, 8, 0, flags);
1195 dev_kfree_skb_any(skb);
1196 skb = skb2;
1197 if (!skb)
1198 return NULL;
1199 }
1200
1201 skb_push(skb, 4);
1202 cpu_to_le32s(&tx_hdr2);
1203 skb_copy_to_linear_data(skb, &tx_hdr2, 4);
1204
1205 skb_push(skb, 4);
1206 cpu_to_le32s(&tx_hdr1);
1207 skb_copy_to_linear_data(skb, &tx_hdr1, 4);
1208
1209 return skb;
1210}
1211
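/* On a link change the RX control register is toggled and the vendor
 * register at cmd 0x81/value 0x8c is polled (for up to HZ/10) until bit
 * 30 clears, which presumably means the device-side TX FIFO has drained;
 * the medium mode and bulk-in aggregation are then reprogrammed for the
 * newly negotiated speed.
 */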
1212static int ax88179_link_reset(struct usbnet *dev)
1213{
1214 struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
1215 u8 tmp[5], link_sts;
1216 u16 mode, tmp16, delay = HZ / 10;
1217 u32 tmp32 = 0x40000000;
1218 unsigned long jtimeout;
1219
1220 jtimeout = jiffies + delay;
1221 while (tmp32 & 0x40000000) {
1222 mode = 0;
1223 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, &mode);
1224 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2,
1225 &ax179_data->rxctl);
1226
1227 /* link up: check whether the USB device control TX FIFO is full or empty */
1228 ax88179_read_cmd(dev, 0x81, 0x8c, 0, 4, &tmp32);
1229
1230 if (time_after(jiffies, jtimeout))
1231 return 0;
1232 }
1233
1234 mode = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
1235 AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE;
1236
1237 ax88179_read_cmd(dev, AX_ACCESS_MAC, PHYSICAL_LINK_STATUS,
1238 1, 1, &link_sts);
1239
1240 ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
1241 GMII_PHY_PHYSR, 2, &tmp16);
1242
1243 if (!(tmp16 & GMII_PHY_PHYSR_LINK)) {
1244 return 0;
1245 } else if (GMII_PHY_PHYSR_GIGA == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
1246 mode |= AX_MEDIUM_GIGAMODE | AX_MEDIUM_EN_125MHZ;
1247 if (dev->net->mtu > 1500)
1248 mode |= AX_MEDIUM_JUMBO_EN;
1249
1250 if (link_sts & AX_USB_SS)
1251 memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
1252 else if (link_sts & AX_USB_HS)
1253 memcpy(tmp, &AX88179_BULKIN_SIZE[1], 5);
1254 else
1255 memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
1256 } else if (GMII_PHY_PHYSR_100 == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
1257 mode |= AX_MEDIUM_PS;
1258
1259 if (link_sts & (AX_USB_SS | AX_USB_HS))
1260 memcpy(tmp, &AX88179_BULKIN_SIZE[2], 5);
1261 else
1262 memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
1263 } else {
1264 memcpy(tmp, &AX88179_BULKIN_SIZE[3], 5);
1265 }
1266
1267 /* RX bulk configuration */
1268 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);
1269
1270 dev->rx_urb_size = (1024 * (tmp[3] + 2));
1271
1272 if (tmp16 & GMII_PHY_PHYSR_FULL)
1273 mode |= AX_MEDIUM_FULL_DUPLEX;
1274 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
1275 2, 2, &mode);
1276
1277 netif_carrier_on(dev->net);
1278
1279 return 0;
1280}
1281
1282static int ax88179_reset(struct usbnet *dev)
1283{
1284 u8 buf[5];
1285 u16 *tmp16;
1286 u8 *tmp;
1287
1288 tmp16 = (u16 *)buf;
1289 tmp = (u8 *)buf;
1290
1291 /* Power up ethernet PHY */
1292 *tmp16 = 0;
1293 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
1294
1295 *tmp16 = AX_PHYPWR_RSTCTL_IPRL;
1296 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
1297 msleep(200);
1298
1299 *tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
1300 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
1301 msleep(100);
1302
1303 /* Ethernet PHY Auto Detach */
1304 ax88179_auto_detach(dev, 0);
1305
1306 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
1307 dev->net->dev_addr);
1308 memcpy(dev->net->perm_addr, dev->net->dev_addr, ETH_ALEN);
1309
1310 /* RX bulk configuration */
1311 memcpy(tmp, &AX88179_BULKIN_SIZE[0], 5);
1312 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_BULKIN_QCTRL, 5, 5, tmp);
1313
1314 dev->rx_urb_size = 1024 * 20;
1315
1316 *tmp = 0x34;
1317 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_LOW, 1, 1, tmp);
1318
1319 *tmp = 0x52;
1320 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PAUSE_WATERLVL_HIGH,
1321 1, 1, tmp);
1322
1323 dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1324 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
1325
1326 dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1327 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
1328
1329 /* Enable checksum offload */
1330 *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
1331 AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
1332 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RXCOE_CTL, 1, 1, tmp);
1333
1334 *tmp = AX_TXCOE_IP | AX_TXCOE_TCP | AX_TXCOE_UDP |
1335 AX_TXCOE_TCPV6 | AX_TXCOE_UDPV6;
1336 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_TXCOE_CTL, 1, 1, tmp);
1337
1338 /* Configure RX control register => start operation */
1339 *tmp16 = AX_RX_CTL_DROPCRCERR | AX_RX_CTL_IPE | AX_RX_CTL_START |
1340 AX_RX_CTL_AP | AX_RX_CTL_AMALL | AX_RX_CTL_AB;
1341 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_RX_CTL, 2, 2, tmp16);
1342
1343 *tmp = AX_MONITOR_MODE_PMETYPE | AX_MONITOR_MODE_PMEPOL |
1344 AX_MONITOR_MODE_RWMP;
1345 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MONITOR_MOD, 1, 1, tmp);
1346
1347 /* Configure default medium type => giga */
1348 *tmp16 = AX_MEDIUM_RECEIVE_EN | AX_MEDIUM_TXFLOW_CTRLEN |
1349 AX_MEDIUM_RXFLOW_CTRLEN | AX_MEDIUM_ALWAYS_ONE |
1350 AX_MEDIUM_FULL_DUPLEX | AX_MEDIUM_GIGAMODE;
1351 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
1352 2, 2, tmp16);
1353
1354 ax88179_led_setting(dev);
1355
1356 /* Restart autoneg */
1357 mii_nway_restart(&dev->mii);
1358
1359 netif_carrier_off(dev->net);
1360
1361 return 0;
1362}
1363
1364static int ax88179_stop(struct usbnet *dev)
1365{
1366 u16 tmp16;
1367
1368 ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
1369 2, 2, &tmp16);
1370 tmp16 &= ~AX_MEDIUM_RECEIVE_EN;
1371 ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
1372 2, 2, &tmp16);
1373
1374 return 0;
1375}
1376
1377static const struct driver_info ax88179_info = {
1378 .description = "ASIX AX88179 USB 3.0 Gigabit Ethernet",
1379 .bind = ax88179_bind,
1380 .unbind = ax88179_unbind,
1381 .status = ax88179_status,
1382 .link_reset = ax88179_link_reset,
1383 .reset = ax88179_reset,
1384 .stop = ax88179_stop,
1385 .flags = FLAG_ETHER | FLAG_FRAMING_AX,
1386 .rx_fixup = ax88179_rx_fixup,
1387 .tx_fixup = ax88179_tx_fixup,
1388};
1389
1390static const struct driver_info ax88178a_info = {
1391 .description = "ASIX AX88178A USB 2.0 Gigabit Ethernet",
1392 .bind = ax88179_bind,
1393 .unbind = ax88179_unbind,
1394 .status = ax88179_status,
1395 .link_reset = ax88179_link_reset,
1396 .reset = ax88179_reset,
1397 .stop = ax88179_stop,
1398 .flags = FLAG_ETHER | FLAG_FRAMING_AX,
1399 .rx_fixup = ax88179_rx_fixup,
1400 .tx_fixup = ax88179_tx_fixup,
1401};
1402
1403static const struct driver_info sitecom_info = {
1404 .description = "Sitecom USB 3.0 to Gigabit Adapter",
1405 .bind = ax88179_bind,
1406 .unbind = ax88179_unbind,
1407 .status = ax88179_status,
1408 .link_reset = ax88179_link_reset,
1409 .reset = ax88179_reset,
1410 .stop = ax88179_stop,
1411 .flags = FLAG_ETHER | FLAG_FRAMING_AX,
1412 .rx_fixup = ax88179_rx_fixup,
1413 .tx_fixup = ax88179_tx_fixup,
1414};
1415
1416static const struct usb_device_id products[] = {
1417{
1418 /* ASIX AX88179 10/100/1000 */
1419 USB_DEVICE(0x0b95, 0x1790),
1420 .driver_info = (unsigned long)&ax88179_info,
1421}, {
1422 /* ASIX AX88178A 10/100/1000 */
1423 USB_DEVICE(0x0b95, 0x178a),
1424 .driver_info = (unsigned long)&ax88178a_info,
1425}, {
1426 /* Sitecom USB 3.0 to Gigabit Adapter */
1427 USB_DEVICE(0x0df6, 0x0072),
1428 .driver_info = (unsigned long)&sitecom_info,
1429},
1430 { },
1431};
1432MODULE_DEVICE_TABLE(usb, products);
1433
1434static struct usb_driver ax88179_178a_driver = {
1435 .name = "ax88179_178a",
1436 .id_table = products,
1437 .probe = usbnet_probe,
1438 .suspend = ax88179_suspend,
1439 .resume = ax88179_resume,
1440 .disconnect = usbnet_disconnect,
1441 .supports_autosuspend = 1,
1442 .disable_hub_initiated_lpm = 1,
1443};
1444
1445module_usb_driver(ax88179_178a_driver);
1446
1447MODULE_DESCRIPTION("ASIX AX88179/178A based USB 3.0/2.0 Gigabit Ethernet Devices");
1448MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 4a8c25a22294..61b74a2b89ac 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1213,6 +1213,14 @@ static const struct usb_device_id cdc_devs[] = {
1213 .driver_info = (unsigned long) &wwan_info, 1213 .driver_info = (unsigned long) &wwan_info,
1214 }, 1214 },
1215 1215
1216 /* tag Huawei devices as wwan */
1217 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1,
1218 USB_CLASS_COMM,
1219 USB_CDC_SUBCLASS_NCM,
1220 USB_CDC_PROTO_NONE),
1221 .driver_info = (unsigned long)&wwan_info,
1222 },
1223
1216 /* Huawei NCM devices disguised as vendor specific */ 1224 /* Huawei NCM devices disguised as vendor specific */
1217 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16), 1225 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
1218 .driver_info = (unsigned long)&wwan_info, 1226 .driver_info = (unsigned long)&wwan_info,
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 5f845beeb18b..050ca4a4850d 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -27,7 +27,7 @@
27#define WME_MAX_BA WME_BA_BMP_SIZE 27#define WME_MAX_BA WME_BA_BMP_SIZE
28#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA) 28#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
29 29
30#define ATH_RSSI_DUMMY_MARKER 0x127 30#define ATH_RSSI_DUMMY_MARKER 127
31#define ATH_RSSI_LPF_LEN 10 31#define ATH_RSSI_LPF_LEN 10
32#define RSSI_LPF_THRESHOLD -20 32#define RSSI_LPF_THRESHOLD -20
33#define ATH_RSSI_EP_MULTIPLIER (1<<7) 33#define ATH_RSSI_EP_MULTIPLIER (1<<7)
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 96bfb18078fa..d3b099d7898b 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -22,6 +22,7 @@
22#include <linux/firmware.h> 22#include <linux/firmware.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
25#include <linux/leds.h> 26#include <linux/leds.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27#include <net/mac80211.h> 28#include <net/mac80211.h>
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 3ad1fd05c5e7..bd8251c1c749 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1067,15 +1067,19 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
1067 1067
1068 last_rssi = priv->rx.last_rssi; 1068 last_rssi = priv->rx.last_rssi;
1069 1069
1070 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 1070 if (ieee80211_is_beacon(hdr->frame_control) &&
1071 rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi, 1071 !is_zero_ether_addr(common->curbssid) &&
1072 ATH_RSSI_EP_MULTIPLIER); 1072 ether_addr_equal(hdr->addr3, common->curbssid)) {
1073 s8 rssi = rxbuf->rxstatus.rs_rssi;
1073 1074
1074 if (rxbuf->rxstatus.rs_rssi < 0) 1075 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
1075 rxbuf->rxstatus.rs_rssi = 0; 1076 rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
1076 1077
1077 if (ieee80211_is_beacon(fc)) 1078 if (rssi < 0)
1078 priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi; 1079 rssi = 0;
1080
1081 priv->ah->stats.avgbrssi = rssi;
1082 }
1079 1083
1080 rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp); 1084 rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
1081 rx_status->band = hw->conf.channel->band; 1085 rx_status->band = hw->conf.channel->band;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2a2ae403e0e5..07e25260c31d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1463,7 +1463,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
1463 reset_type = ATH9K_RESET_POWER_ON; 1463 reset_type = ATH9K_RESET_POWER_ON;
1464 else 1464 else
1465 reset_type = ATH9K_RESET_COLD; 1465 reset_type = ATH9K_RESET_COLD;
1466 } 1466 } else if (ah->chip_fullsleep || REG_READ(ah, AR_Q_TXE) ||
1467 (REG_READ(ah, AR_CR) & AR_CR_RXE))
1468 reset_type = ATH9K_RESET_COLD;
1467 1469
1468 if (!ath9k_hw_set_reset_reg(ah, reset_type)) 1470 if (!ath9k_hw_set_reset_reg(ah, reset_type))
1469 return false; 1471 return false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 9a0f45ec9e01..10f01793d7a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -349,25 +349,23 @@ TRACE_EVENT(iwlwifi_dev_rx_data,
349TRACE_EVENT(iwlwifi_dev_hcmd, 349TRACE_EVENT(iwlwifi_dev_hcmd,
350 TP_PROTO(const struct device *dev, 350 TP_PROTO(const struct device *dev,
351 struct iwl_host_cmd *cmd, u16 total_size, 351 struct iwl_host_cmd *cmd, u16 total_size,
352 const void *hdr, size_t hdr_len), 352 struct iwl_cmd_header *hdr),
353 TP_ARGS(dev, cmd, total_size, hdr, hdr_len), 353 TP_ARGS(dev, cmd, total_size, hdr),
354 TP_STRUCT__entry( 354 TP_STRUCT__entry(
355 DEV_ENTRY 355 DEV_ENTRY
356 __dynamic_array(u8, hcmd, total_size) 356 __dynamic_array(u8, hcmd, total_size)
357 __field(u32, flags) 357 __field(u32, flags)
358 ), 358 ),
359 TP_fast_assign( 359 TP_fast_assign(
360 int i, offset = hdr_len; 360 int i, offset = sizeof(*hdr);
361 361
362 DEV_ASSIGN; 362 DEV_ASSIGN;
363 __entry->flags = cmd->flags; 363 __entry->flags = cmd->flags;
364 memcpy(__get_dynamic_array(hcmd), hdr, hdr_len); 364 memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
365 365
366 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 366 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
367 if (!cmd->len[i]) 367 if (!cmd->len[i])
368 continue; 368 continue;
369 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
370 continue;
371 memcpy((u8 *)__get_dynamic_array(hcmd) + offset, 369 memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
372 cmd->data[i], cmd->len[i]); 370 cmd->data[i], cmd->len[i]);
373 offset += cmd->len[i]; 371 offset += cmd->len[i];
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index 14fc8d39fc28..3392011a8768 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -136,12 +136,6 @@ struct iwl_calib_res_notif_phy_db {
136 u8 data[]; 136 u8 data[];
137} __packed; 137} __packed;
138 138
139#define IWL_PHY_DB_STATIC_PIC cpu_to_le32(0x21436587)
140static inline void iwl_phy_db_test_pic(__le32 pic)
141{
142 WARN_ON(IWL_PHY_DB_STATIC_PIC != pic);
143}
144
145struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans) 139struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
146{ 140{
147 struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db), 141 struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
@@ -260,11 +254,6 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
260 (size - CHANNEL_NUM_SIZE) / phy_db->channel_num; 254 (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
261 } 255 }
262 256
263 /* Test PIC */
264 if (type != IWL_PHY_DB_CFG)
265 iwl_phy_db_test_pic(*(((__le32 *)phy_db_notif->data) +
266 (size / sizeof(__le32)) - 1));
267
268 IWL_DEBUG_INFO(phy_db->trans, 257 IWL_DEBUG_INFO(phy_db->trans,
269 "%s(%d): [PHYDB]SET: Type %d , Size: %d\n", 258 "%s(%d): [PHYDB]SET: Type %d , Size: %d\n",
270 __func__, __LINE__, type, size); 259 __func__, __LINE__, type, size);
@@ -372,11 +361,6 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
372 *size = entry->size; 361 *size = entry->size;
373 } 362 }
374 363
375 /* Test PIC */
376 if (type != IWL_PHY_DB_CFG)
377 iwl_phy_db_test_pic(*(((__le32 *)*data) +
378 (*size / sizeof(__le32)) - 1));
379
380 IWL_DEBUG_INFO(phy_db->trans, 364 IWL_DEBUG_INFO(phy_db->trans,
381 "%s(%d): [PHYDB] GET: Type %d , Size: %d\n", 365 "%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
382 __func__, __LINE__, type, *size); 366 __func__, __LINE__, type, *size);
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index c64d864799cd..994c8c263dc0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -61,6 +61,7 @@
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63 63
64#include <linux/etherdevice.h>
64#include <net/cfg80211.h> 65#include <net/cfg80211.h>
65#include <net/ipv6.h> 66#include <net/ipv6.h>
66#include "iwl-modparams.h" 67#include "iwl-modparams.h"
@@ -192,6 +193,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
192 sizeof(wkc), &wkc); 193 sizeof(wkc), &wkc);
193 data->error = ret != 0; 194 data->error = ret != 0;
194 195
196 mvm->ptk_ivlen = key->iv_len;
197 mvm->ptk_icvlen = key->icv_len;
198 mvm->gtk_ivlen = key->iv_len;
199 mvm->gtk_icvlen = key->icv_len;
200
195 /* don't upload key again */ 201 /* don't upload key again */
196 goto out_unlock; 202 goto out_unlock;
197 } 203 }
@@ -304,9 +310,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
304 */ 310 */
305 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 311 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
306 key->hw_key_idx = 0; 312 key->hw_key_idx = 0;
313 mvm->ptk_ivlen = key->iv_len;
314 mvm->ptk_icvlen = key->icv_len;
307 } else { 315 } else {
308 data->gtk_key_idx++; 316 data->gtk_key_idx++;
309 key->hw_key_idx = data->gtk_key_idx; 317 key->hw_key_idx = data->gtk_key_idx;
318 mvm->gtk_ivlen = key->iv_len;
319 mvm->gtk_icvlen = key->icv_len;
310 } 320 }
311 321
312 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true); 322 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
@@ -649,6 +659,11 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
649 /* We reprogram keys and shouldn't allocate new key indices */ 659 /* We reprogram keys and shouldn't allocate new key indices */
650 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); 660 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
651 661
662 mvm->ptk_ivlen = 0;
663 mvm->ptk_icvlen = 0;
 664 mvm->gtk_ivlen = 0;
 665 mvm->gtk_icvlen = 0;
666
652 /* 667 /*
653 * The D3 firmware still hardcodes the AP station ID for the 668 * The D3 firmware still hardcodes the AP station ID for the
654 * BSS we're associated with as 0. As a result, we have to move 669 * BSS we're associated with as 0. As a result, we have to move
@@ -783,7 +798,6 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
783 struct iwl_wowlan_status *status; 798 struct iwl_wowlan_status *status;
784 u32 reasons; 799 u32 reasons;
785 int ret, len; 800 int ret, len;
786 bool pkt8023 = false;
787 struct sk_buff *pkt = NULL; 801 struct sk_buff *pkt = NULL;
788 802
789 iwl_trans_read_mem_bytes(mvm->trans, base, 803 iwl_trans_read_mem_bytes(mvm->trans, base,
@@ -824,7 +838,8 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
824 status = (void *)cmd.resp_pkt->data; 838 status = (void *)cmd.resp_pkt->data;
825 839
826 if (len - sizeof(struct iwl_cmd_header) != 840 if (len - sizeof(struct iwl_cmd_header) !=
827 sizeof(*status) + le32_to_cpu(status->wake_packet_bufsize)) { 841 sizeof(*status) +
842 ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
828 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); 843 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
829 goto out; 844 goto out;
830 } 845 }
@@ -836,61 +851,96 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
836 goto report; 851 goto report;
837 } 852 }
838 853
839 if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) { 854 if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
840 wakeup.magic_pkt = true; 855 wakeup.magic_pkt = true;
841 pkt8023 = true;
842 }
843 856
844 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) { 857 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
845 wakeup.pattern_idx = 858 wakeup.pattern_idx =
846 le16_to_cpu(status->pattern_number); 859 le16_to_cpu(status->pattern_number);
847 pkt8023 = true;
848 }
849 860
850 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | 861 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
851 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) 862 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
852 wakeup.disconnect = true; 863 wakeup.disconnect = true;
853 864
854 if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) { 865 if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
855 wakeup.gtk_rekey_failure = true; 866 wakeup.gtk_rekey_failure = true;
856 pkt8023 = true;
857 }
858 867
859 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) { 868 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
860 wakeup.rfkill_release = true; 869 wakeup.rfkill_release = true;
861 pkt8023 = true;
862 }
863 870
864 if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) { 871 if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
865 wakeup.eap_identity_req = true; 872 wakeup.eap_identity_req = true;
866 pkt8023 = true;
867 }
868 873
869 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) { 874 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
870 wakeup.four_way_handshake = true; 875 wakeup.four_way_handshake = true;
871 pkt8023 = true;
872 }
873 876
874 if (status->wake_packet_bufsize) { 877 if (status->wake_packet_bufsize) {
875 u32 pktsize = le32_to_cpu(status->wake_packet_bufsize); 878 int pktsize = le32_to_cpu(status->wake_packet_bufsize);
876 u32 pktlen = le32_to_cpu(status->wake_packet_length); 879 int pktlen = le32_to_cpu(status->wake_packet_length);
880 const u8 *pktdata = status->wake_packet;
881 struct ieee80211_hdr *hdr = (void *)pktdata;
882 int truncated = pktlen - pktsize;
883
884 /* this would be a firmware bug */
885 if (WARN_ON_ONCE(truncated < 0))
886 truncated = 0;
887
888 if (ieee80211_is_data(hdr->frame_control)) {
889 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
890 int ivlen = 0, icvlen = 4; /* also FCS */
877 891
878 if (pkt8023) {
879 pkt = alloc_skb(pktsize, GFP_KERNEL); 892 pkt = alloc_skb(pktsize, GFP_KERNEL);
880 if (!pkt) 893 if (!pkt)
881 goto report; 894 goto report;
882 memcpy(skb_put(pkt, pktsize), status->wake_packet, 895
883 pktsize); 896 memcpy(skb_put(pkt, hdrlen), pktdata, hdrlen);
897 pktdata += hdrlen;
898 pktsize -= hdrlen;
899
900 if (ieee80211_has_protected(hdr->frame_control)) {
901 if (is_multicast_ether_addr(hdr->addr1)) {
902 ivlen = mvm->gtk_ivlen;
903 icvlen += mvm->gtk_icvlen;
904 } else {
905 ivlen = mvm->ptk_ivlen;
906 icvlen += mvm->ptk_icvlen;
907 }
908 }
909
910 /* if truncated, FCS/ICV is (partially) gone */
911 if (truncated >= icvlen) {
 912 truncated -= icvlen;
 913 icvlen = 0;
914 } else {
915 icvlen -= truncated;
916 truncated = 0;
917 }
918
919 pktsize -= ivlen + icvlen;
920 pktdata += ivlen;
921
922 memcpy(skb_put(pkt, pktsize), pktdata, pktsize);
923
884 if (ieee80211_data_to_8023(pkt, vif->addr, vif->type)) 924 if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
885 goto report; 925 goto report;
886 wakeup.packet = pkt->data; 926 wakeup.packet = pkt->data;
887 wakeup.packet_present_len = pkt->len; 927 wakeup.packet_present_len = pkt->len;
888 wakeup.packet_len = pkt->len - (pktlen - pktsize); 928 wakeup.packet_len = pkt->len - truncated;
889 wakeup.packet_80211 = false; 929 wakeup.packet_80211 = false;
890 } else { 930 } else {
931 int fcslen = 4;
932
933 if (truncated >= 4) {
934 truncated -= 4;
935 fcslen = 0;
936 } else {
937 fcslen -= truncated;
938 truncated = 0;
939 }
940 pktsize -= fcslen;
891 wakeup.packet = status->wake_packet; 941 wakeup.packet = status->wake_packet;
892 wakeup.packet_present_len = pktsize; 942 wakeup.packet_present_len = pktsize;
893 wakeup.packet_len = pktlen; 943 wakeup.packet_len = pktlen - truncated;
894 wakeup.packet_80211 = true; 944 wakeup.packet_80211 = true;
895 } 945 }
896 } 946 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index e8264e11b12d..7e169b085afe 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -557,11 +557,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
557 return ret; 557 return ret;
558} 558}
559 559
560static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, 560static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
561 struct ieee80211_vif *vif) 561 struct ieee80211_vif *vif)
562{ 562{
563 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
564 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
565 u32 tfd_msk = 0, ac; 563 u32 tfd_msk = 0, ac;
566 564
567 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) 565 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -594,12 +592,21 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
594 */ 592 */
595 flush_work(&mvm->sta_drained_wk); 593 flush_work(&mvm->sta_drained_wk);
596 } 594 }
595}
596
597static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
598 struct ieee80211_vif *vif)
599{
600 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
601 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
602
603 iwl_mvm_prepare_mac_removal(mvm, vif);
597 604
598 mutex_lock(&mvm->mutex); 605 mutex_lock(&mvm->mutex);
599 606
600 /* 607 /*
601 * For AP/GO interface, the tear down of the resources allocated to the 608 * For AP/GO interface, the tear down of the resources allocated to the
 602 * interface should be handled as part of the bss_info_changed flow. 609 * interface is handled as part of the stop_ap flow.
603 */ 610 */
604 if (vif->type == NL80211_IFTYPE_AP) { 611 if (vif->type == NL80211_IFTYPE_AP) {
605 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); 612 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
@@ -763,6 +770,8 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
763 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 770 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
764 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 771 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
765 772
773 iwl_mvm_prepare_mac_removal(mvm, vif);
774
766 mutex_lock(&mvm->mutex); 775 mutex_lock(&mvm->mutex);
767 776
768 mvmvif->ap_active = false; 777 mvmvif->ap_active = false;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 4e339ccfa800..537711b10478 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -327,6 +327,10 @@ struct iwl_mvm {
327 struct led_classdev led; 327 struct led_classdev led;
328 328
329 struct ieee80211_vif *p2p_device_vif; 329 struct ieee80211_vif *p2p_device_vif;
330
331#ifdef CONFIG_PM_SLEEP
332 int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
333#endif
330}; 334};
331 335
332/* Extract MVM priv from op_mode and _hw */ 336/* Extract MVM priv from op_mode and _hw */
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index aa2a39a637dd..3d62e8055352 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -182,6 +182,15 @@ struct iwl_queue {
182#define TFD_TX_CMD_SLOTS 256 182#define TFD_TX_CMD_SLOTS 256
183#define TFD_CMD_SLOTS 32 183#define TFD_CMD_SLOTS 32
184 184
185/*
186 * The FH will write back to the first TB only, so we need
187 * to copy some data into the buffer regardless of whether
188 * it should be mapped or not. This indicates how much to
 189 * copy; even for HCMDs it must be big enough to fit the
190 * DRAM scratch from the TX cmd, at least 16 bytes.
191 */
192#define IWL_HCMD_MIN_COPY_SIZE 16
193
185struct iwl_pcie_txq_entry { 194struct iwl_pcie_txq_entry {
186 struct iwl_device_cmd *cmd; 195 struct iwl_device_cmd *cmd;
187 struct iwl_device_cmd *copy_cmd; 196 struct iwl_device_cmd *copy_cmd;
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8e9e3212fe78..8b625a7f5685 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1152,10 +1152,12 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1152 void *dup_buf = NULL; 1152 void *dup_buf = NULL;
1153 dma_addr_t phys_addr; 1153 dma_addr_t phys_addr;
1154 int idx; 1154 int idx;
1155 u16 copy_size, cmd_size; 1155 u16 copy_size, cmd_size, dma_size;
1156 bool had_nocopy = false; 1156 bool had_nocopy = false;
1157 int i; 1157 int i;
1158 u32 cmd_pos; 1158 u32 cmd_pos;
1159 const u8 *cmddata[IWL_MAX_CMD_TFDS];
1160 u16 cmdlen[IWL_MAX_CMD_TFDS];
1159 1161
1160 copy_size = sizeof(out_cmd->hdr); 1162 copy_size = sizeof(out_cmd->hdr);
1161 cmd_size = sizeof(out_cmd->hdr); 1163 cmd_size = sizeof(out_cmd->hdr);
@@ -1164,8 +1166,23 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1164 BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1); 1166 BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
1165 1167
1166 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1168 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
1169 cmddata[i] = cmd->data[i];
1170 cmdlen[i] = cmd->len[i];
1171
1167 if (!cmd->len[i]) 1172 if (!cmd->len[i])
1168 continue; 1173 continue;
1174
1175 /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
1176 if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
1177 int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
1178
1179 if (copy > cmdlen[i])
1180 copy = cmdlen[i];
1181 cmdlen[i] -= copy;
1182 cmddata[i] += copy;
1183 copy_size += copy;
1184 }
1185
1169 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { 1186 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
1170 had_nocopy = true; 1187 had_nocopy = true;
1171 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { 1188 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
@@ -1185,7 +1202,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1185 goto free_dup_buf; 1202 goto free_dup_buf;
1186 } 1203 }
1187 1204
1188 dup_buf = kmemdup(cmd->data[i], cmd->len[i], 1205 dup_buf = kmemdup(cmddata[i], cmdlen[i],
1189 GFP_ATOMIC); 1206 GFP_ATOMIC);
1190 if (!dup_buf) 1207 if (!dup_buf)
1191 return -ENOMEM; 1208 return -ENOMEM;
@@ -1195,7 +1212,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1195 idx = -EINVAL; 1212 idx = -EINVAL;
1196 goto free_dup_buf; 1213 goto free_dup_buf;
1197 } 1214 }
1198 copy_size += cmd->len[i]; 1215 copy_size += cmdlen[i];
1199 } 1216 }
1200 cmd_size += cmd->len[i]; 1217 cmd_size += cmd->len[i];
1201 } 1218 }
@@ -1242,14 +1259,31 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1242 1259
1243 /* and copy the data that needs to be copied */ 1260 /* and copy the data that needs to be copied */
1244 cmd_pos = offsetof(struct iwl_device_cmd, payload); 1261 cmd_pos = offsetof(struct iwl_device_cmd, payload);
1262 copy_size = sizeof(out_cmd->hdr);
1245 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1263 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
1246 if (!cmd->len[i]) 1264 int copy = 0;
1265
 1266 if (!cmd->len[i])
1247 continue; 1267 continue;
1248 if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1268
1249 IWL_HCMD_DFL_DUP)) 1269 /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
1250 break; 1270 if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
1251 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]); 1271 copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
1252 cmd_pos += cmd->len[i]; 1272
1273 if (copy > cmd->len[i])
1274 copy = cmd->len[i];
1275 }
1276
1277 /* copy everything if not nocopy/dup */
1278 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1279 IWL_HCMD_DFL_DUP)))
1280 copy = cmd->len[i];
1281
1282 if (copy) {
1283 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1284 cmd_pos += copy;
1285 copy_size += copy;
1286 }
1253 } 1287 }
1254 1288
1255 WARN_ON_ONCE(txq->entries[idx].copy_cmd); 1289 WARN_ON_ONCE(txq->entries[idx].copy_cmd);
@@ -1275,7 +1309,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1275 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), 1309 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
1276 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); 1310 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
1277 1311
1278 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size, 1312 /*
1313 * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
1314 * still map at least that many bytes for the hardware to write back to.
1315 * We have enough space, so that's not a problem.
1316 */
1317 dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);
1318
1319 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
1279 DMA_BIDIRECTIONAL); 1320 DMA_BIDIRECTIONAL);
1280 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { 1321 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
1281 idx = -ENOMEM; 1322 idx = -ENOMEM;
@@ -1283,14 +1324,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1283 } 1324 }
1284 1325
1285 dma_unmap_addr_set(out_meta, mapping, phys_addr); 1326 dma_unmap_addr_set(out_meta, mapping, phys_addr);
1286 dma_unmap_len_set(out_meta, len, copy_size); 1327 dma_unmap_len_set(out_meta, len, dma_size);
1287 1328
1288 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1); 1329 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
1289 1330
1331 /* map the remaining (adjusted) nocopy/dup fragments */
1290 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1332 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
1291 const void *data = cmd->data[i]; 1333 const void *data = cmddata[i];
1292 1334
1293 if (!cmd->len[i]) 1335 if (!cmdlen[i])
1294 continue; 1336 continue;
1295 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1337 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1296 IWL_HCMD_DFL_DUP))) 1338 IWL_HCMD_DFL_DUP)))
@@ -1298,7 +1340,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1298 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1340 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1299 data = dup_buf; 1341 data = dup_buf;
1300 phys_addr = dma_map_single(trans->dev, (void *)data, 1342 phys_addr = dma_map_single(trans->dev, (void *)data,
1301 cmd->len[i], DMA_BIDIRECTIONAL); 1343 cmdlen[i], DMA_BIDIRECTIONAL);
1302 if (dma_mapping_error(trans->dev, phys_addr)) { 1344 if (dma_mapping_error(trans->dev, phys_addr)) {
1303 iwl_pcie_tfd_unmap(trans, out_meta, 1345 iwl_pcie_tfd_unmap(trans, out_meta,
1304 &txq->tfds[q->write_ptr], 1346 &txq->tfds[q->write_ptr],
@@ -1307,7 +1349,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1307 goto out; 1349 goto out;
1308 } 1350 }
1309 1351
1310 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0); 1352 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
1311 } 1353 }
1312 1354
1313 out_meta->flags = cmd->flags; 1355 out_meta->flags = cmd->flags;
@@ -1317,8 +1359,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1317 1359
1318 txq->need_update = 1; 1360 txq->need_update = 1;
1319 1361
1320 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, 1362 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
1321 &out_cmd->hdr, copy_size);
1322 1363
1323 /* start timer if queue currently empty */ 1364 /* start timer if queue currently empty */
1324 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) 1365 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
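Note on the tx.c change above: it guarantees that at least IWL_HCMD_MIN_COPY_SIZE (16) bytes of every host command live in the copied command buffer, stealing the head of nocopy/dup fragments when the copied part is shorter and remembering the adjusted pointers and lengths (cmddata[]/cmdlen[]) for the later DMA mappings; the buffer is then mapped for max(copy_size, 16) bytes so the hardware always has room to write back its scratch data. A minimal standalone sketch of that head-stealing adjustment, with invented names (pull_min_copy, struct frag, MIN_COPY) and no driver dependencies:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MIN_COPY   16
#define MAX_FRAGS  2

struct frag {
	const uint8_t *data;
	size_t len;
};

/*
 * Sketch only, not the driver code.  'buf' is assumed to already hold
 * hdr_len bytes of command header and to be at least MIN_COPY bytes
 * long.  Returns how many bytes ended up in buf; frags[] is adjusted in
 * place so that only the remainder of each fragment is mapped later.
 */
static size_t pull_min_copy(uint8_t *buf, size_t hdr_len,
			    struct frag *frags, int nfrags)
{
	size_t copied = hdr_len;
	int i;

	for (i = 0; i < nfrags; i++) {
		if (!frags[i].len)
			continue;
		if (copied < MIN_COPY) {
			size_t take = MIN_COPY - copied;

			if (take > frags[i].len)
				take = frags[i].len;
			memcpy(buf + copied, frags[i].data, take);
			copied += take;
			frags[i].data += take;	/* map only the remainder */
			frags[i].len -= take;
		}
	}
	return copied;
}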
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 739309e70d8b..45578335e420 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -825,6 +825,11 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
825 825
826 sdio_release_host(func); 826 sdio_release_host(func);
827 827
828 /* Set fw_ready before queuing any commands so that
829 * lbs_thread won't block from sending them to firmware.
830 */
831 priv->fw_ready = 1;
832
828 /* 833 /*
829 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions 834 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions
830 */ 835 */
@@ -839,7 +844,6 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
839 netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n"); 844 netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n");
840 } 845 }
841 846
842 priv->fw_ready = 1;
843 wake_up(&card->pwron_waitq); 847 wake_up(&card->pwron_waitq);
844 848
845 if (!card->started) { 849 if (!card->started) {
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 35c79722c361..5c395e2e6a2b 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -302,7 +302,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
302 i++; 302 i++;
303 usleep_range(10, 20); 303 usleep_range(10, 20);
304 /* 50ms max wait */ 304 /* 50ms max wait */
305 if (i == 50000) 305 if (i == 5000)
306 break; 306 break;
307 } 307 }
308 308
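For context on the mwifiex constant above: each pass through the loop sleeps 10-20 us, so the bound is simple arithmetic (a sanity check, not driver code):

	 5000 iterations x 10 us =  50 ms   (matches the "50ms max wait" comment)
	50000 iterations x 10 us = 500 ms   (the old bound overshot by a factor of ten)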
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 1031db66474a..189744db65e0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1236,8 +1236,10 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
1236 */ 1236 */
1237 if_limit = &rt2x00dev->if_limits_ap; 1237 if_limit = &rt2x00dev->if_limits_ap;
1238 if_limit->max = rt2x00dev->ops->max_ap_intf; 1238 if_limit->max = rt2x00dev->ops->max_ap_intf;
1239 if_limit->types = BIT(NL80211_IFTYPE_AP) | 1239 if_limit->types = BIT(NL80211_IFTYPE_AP);
1240 BIT(NL80211_IFTYPE_MESH_POINT); 1240#ifdef CONFIG_MAC80211_MESH
1241 if_limit->types |= BIT(NL80211_IFTYPE_MESH_POINT);
1242#endif
1241 1243
1242 /* 1244 /*
1243 * Build up AP interface combinations structure. 1245 * Build up AP interface combinations structure.
@@ -1309,7 +1311,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1309 rt2x00dev->hw->wiphy->interface_modes |= 1311 rt2x00dev->hw->wiphy->interface_modes |=
1310 BIT(NL80211_IFTYPE_ADHOC) | 1312 BIT(NL80211_IFTYPE_ADHOC) |
1311 BIT(NL80211_IFTYPE_AP) | 1313 BIT(NL80211_IFTYPE_AP) |
1314#ifdef CONFIG_MAC80211_MESH
1312 BIT(NL80211_IFTYPE_MESH_POINT) | 1315 BIT(NL80211_IFTYPE_MESH_POINT) |
1316#endif
1313 BIT(NL80211_IFTYPE_WDS); 1317 BIT(NL80211_IFTYPE_WDS);
1314 1318
1315 rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 1319 rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index ef9acd3c8450..01d25e6fc792 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -854,6 +854,8 @@ type_pf_tresize(struct ip_set *set, bool retried)
854retry: 854retry:
855 ret = 0; 855 ret = 0;
856 htable_bits++; 856 htable_bits++;
857 pr_debug("attempt to resize set %s from %u to %u, t %p\n",
858 set->name, orig->htable_bits, htable_bits, orig);
857 if (!htable_bits) { 859 if (!htable_bits) {
858 /* In case we have plenty of memory :-) */ 860 /* In case we have plenty of memory :-) */
859 pr_warning("Cannot increase the hashsize of set %s further\n", 861 pr_warning("Cannot increase the hashsize of set %s further\n",
@@ -873,7 +875,7 @@ retry:
873 data = ahash_tdata(n, j); 875 data = ahash_tdata(n, j);
874 m = hbucket(t, HKEY(data, h->initval, htable_bits)); 876 m = hbucket(t, HKEY(data, h->initval, htable_bits));
875 ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0, 877 ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0,
876 type_pf_data_timeout(data)); 878 ip_set_timeout_get(type_pf_data_timeout(data)));
877 if (ret < 0) { 879 if (ret < 0) {
878 read_unlock_bh(&set->lock); 880 read_unlock_bh(&set->lock);
879 ahash_destroy(t); 881 ahash_destroy(t);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 23f2e98d4b65..cf0694d4ad60 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1045,6 +1045,10 @@ static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1045 if (sysctl_tcp_low_latency || !tp->ucopy.task) 1045 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1046 return false; 1046 return false;
1047 1047
1048 if (skb->len <= tcp_hdrlen(skb) &&
1049 skb_queue_len(&tp->ucopy.prequeue) == 0)
1050 return false;
1051
1048 __skb_queue_tail(&tp->ucopy.prequeue, skb); 1052 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1049 tp->ucopy.memory += skb->truesize; 1053 tp->ucopy.memory += skb->truesize;
1050 if (tp->ucopy.memory > sk->sk_rcvbuf) { 1054 if (tp->ucopy.memory > sk->sk_rcvbuf) {
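The tcp_prequeue() change above stops pure ACKs (segments with no payload beyond the TCP header) from being parked on an empty prequeue, where they would sit until the user task next runs; returning false pushes them through the normal receive path immediately. An illustrative restatement of the added early-return as a standalone predicate (names invented here, not kernel structures):

#include <stdbool.h>

static bool should_prequeue(unsigned int skb_len,
			    unsigned int tcp_header_len,
			    unsigned int prequeue_len)
{
	/* No data and nothing queued yet: no batching benefit, handle now. */
	if (skb_len <= tcp_header_len && prequeue_len == 0)
		return false;
	return true;
}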
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 1ae1d9cb278d..21760f008974 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -118,7 +118,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
118 return NULL; 118 return NULL;
119} 119}
120 120
121void caif_flow_cb(struct sk_buff *skb) 121static void caif_flow_cb(struct sk_buff *skb)
122{ 122{
123 struct caif_device_entry *caifd; 123 struct caif_device_entry *caifd;
124 void (*dtor)(struct sk_buff *skb) = NULL; 124 void (*dtor)(struct sk_buff *skb) = NULL;
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index 3ebc8cbc91ff..ef8ebaa993cf 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -81,8 +81,8 @@ static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
81 layr->up->ctrlcmd(layr->up, ctrl, layr->id); 81 layr->up->ctrlcmd(layr->up, ctrl, layr->id);
82} 82}
83 83
84struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN], 84static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
85 u8 braddr[ETH_ALEN]) 85 u8 braddr[ETH_ALEN])
86{ 86{
87 struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC); 87 struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
88 88
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 87abd3e2bd32..2bdf802e28e2 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -228,9 +228,11 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
228 icmp_send(skb, ICMP_DEST_UNREACH, 228 icmp_send(skb, ICMP_DEST_UNREACH,
229 ICMP_PROT_UNREACH, 0); 229 ICMP_PROT_UNREACH, 0);
230 } 230 }
231 } else 231 kfree_skb(skb);
232 } else {
232 IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS); 233 IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
233 kfree_skb(skb); 234 consume_skb(skb);
235 }
234 } 236 }
235 } 237 }
236 out: 238 out:
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a759e19496d2..0d9bdacce99f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5485,6 +5485,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5485 if (tcp_checksum_complete_user(sk, skb)) 5485 if (tcp_checksum_complete_user(sk, skb))
5486 goto csum_error; 5486 goto csum_error;
5487 5487
5488 if ((int)skb->truesize > sk->sk_forward_alloc)
5489 goto step5;
5490
5488 /* Predicted packet is in window by definition. 5491 /* Predicted packet is in window by definition.
5489 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 5492 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
5490 * Hence, check seq<=rcv_wup reduces to: 5493 * Hence, check seq<=rcv_wup reduces to:
@@ -5496,9 +5499,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5496 5499
5497 tcp_rcv_rtt_measure_ts(sk, skb); 5500 tcp_rcv_rtt_measure_ts(sk, skb);
5498 5501
5499 if ((int)skb->truesize > sk->sk_forward_alloc)
5500 goto step5;
5501
5502 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); 5502 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
5503 5503
5504 /* Bulk data transfer: receiver */ 5504 /* Bulk data transfer: receiver */
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 5b10414e619e..b1876e52091e 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -241,9 +241,11 @@ resubmit:
241 icmpv6_send(skb, ICMPV6_PARAMPROB, 241 icmpv6_send(skb, ICMPV6_PARAMPROB,
242 ICMPV6_UNK_NEXTHDR, nhoff); 242 ICMPV6_UNK_NEXTHDR, nhoff);
243 } 243 }
244 } else 244 kfree_skb(skb);
245 } else {
245 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); 246 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
246 kfree_skb(skb); 247 consume_skb(skb);
248 }
247 } 249 }
248 rcu_read_unlock(); 250 rcu_read_unlock();
249 return 0; 251 return 0;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 928266569689..e5fe0041adfa 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1915,7 +1915,8 @@ void rt6_purge_dflt_routers(struct net *net)
1915restart: 1915restart:
1916 read_lock_bh(&table->tb6_lock); 1916 read_lock_bh(&table->tb6_lock);
1917 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) { 1917 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
1918 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) { 1918 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
1919 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
1919 dst_hold(&rt->dst); 1920 dst_hold(&rt->dst);
1920 read_unlock_bh(&table->tb6_lock); 1921 read_unlock_bh(&table->tb6_lock);
1921 ip6_del_rt(rt); 1922 ip6_del_rt(rt);
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index e71e85ba2bf1..29340a9a6fb9 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
495/* case CS_ISO_8859_9: */ 495/* case CS_ISO_8859_9: */
496/* case CS_UNICODE: */ 496/* case CS_UNICODE: */
497 default: 497 default:
498 IRDA_DEBUG(0, "%s(), charset %s, not supported\n", 498 IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
499 __func__, ias_charset_types[charset]); 499 __func__, charset,
500 charset < ARRAY_SIZE(ias_charset_types) ?
501 ias_charset_types[charset] :
502 "(unknown)");
500 503
501 /* Aborting, close connection! */ 504 /* Aborting, close connection! */
502 iriap_disconnect_request(self); 505 iriap_disconnect_request(self);
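The iriap hunk above adds the missing bounds check before indexing ias_charset_types[] with a value taken from the wire. The same pattern in isolation (placeholder table and a hypothetical charset_name() helper, not the IrDA code):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const charset_names[] = { "ASCII", "ISO-8859-1" };

/* Never index a lookup table with an untrusted value without a range check. */
static const char *charset_name(unsigned int charset)
{
	return charset < ARRAY_SIZE(charset_names) ?
	       charset_names[charset] : "(unknown)";
}

int main(void)
{
	printf("%s %s\n", charset_name(1), charset_name(200));
	return 0;
}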
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 3f4e3afc191a..6a53371dba1f 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -355,6 +355,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
355 l2tp_xmit_skb(session, skb, session->hdr_len); 355 l2tp_xmit_skb(session, skb, session->hdr_len);
356 356
357 sock_put(ps->tunnel_sock); 357 sock_put(ps->tunnel_sock);
358 sock_put(sk);
358 359
359 return error; 360 return error;
360 361
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 09d96a8f6c2c..808f5fcd1ced 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3285,13 +3285,19 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
3285 struct cfg80211_chan_def *chandef) 3285 struct cfg80211_chan_def *chandef)
3286{ 3286{
3287 struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); 3287 struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
3288 struct ieee80211_local *local = wiphy_priv(wiphy);
3288 struct ieee80211_chanctx_conf *chanctx_conf; 3289 struct ieee80211_chanctx_conf *chanctx_conf;
3289 int ret = -ENODATA; 3290 int ret = -ENODATA;
3290 3291
3291 rcu_read_lock(); 3292 rcu_read_lock();
3292 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 3293 if (local->use_chanctx) {
3293 if (chanctx_conf) { 3294 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
3294 *chandef = chanctx_conf->def; 3295 if (chanctx_conf) {
3296 *chandef = chanctx_conf->def;
3297 ret = 0;
3298 }
3299 } else if (local->open_count == local->monitors) {
3300 *chandef = local->monitor_chandef;
3295 ret = 0; 3301 ret = 0;
3296 } 3302 }
3297 rcu_read_unlock(); 3303 rcu_read_unlock();
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 2c059e54e885..640afab304d7 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -107,7 +107,7 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
107 107
108 lockdep_assert_held(&local->mtx); 108 lockdep_assert_held(&local->mtx);
109 109
110 active = !list_empty(&local->chanctx_list); 110 active = !list_empty(&local->chanctx_list) || local->monitors;
111 111
112 if (!local->ops->remain_on_channel) { 112 if (!local->ops->remain_on_channel) {
113 list_for_each_entry(roc, &local->roc_list, list) { 113 list_for_each_entry(roc, &local->roc_list, list) {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index de8548bf0a7f..ce78d1149f1d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1231,34 +1231,40 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
1231 if (local->queue_stop_reasons[q] || 1231 if (local->queue_stop_reasons[q] ||
1232 (!txpending && !skb_queue_empty(&local->pending[q]))) { 1232 (!txpending && !skb_queue_empty(&local->pending[q]))) {
1233 if (unlikely(info->flags & 1233 if (unlikely(info->flags &
1234 IEEE80211_TX_INTFL_OFFCHAN_TX_OK && 1234 IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
1235 local->queue_stop_reasons[q] & 1235 if (local->queue_stop_reasons[q] &
1236 ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL))) { 1236 ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
1237 /*
1238 * Drop off-channel frames if queues
1239 * are stopped for any reason other
1240 * than off-channel operation. Never
1241 * queue them.
1242 */
1243 spin_unlock_irqrestore(
1244 &local->queue_stop_reason_lock,
1245 flags);
1246 ieee80211_purge_tx_queue(&local->hw,
1247 skbs);
1248 return true;
1249 }
1250 } else {
1251
1237 /* 1252 /*
1238 * Drop off-channel frames if queues are stopped 1253 * Since queue is stopped, queue up frames for
1239 * for any reason other than off-channel 1254 * later transmission from the tx-pending
1240 * operation. Never queue them. 1255 * tasklet when the queue is woken again.
1241 */ 1256 */
1242 spin_unlock_irqrestore( 1257 if (txpending)
1243 &local->queue_stop_reason_lock, flags); 1258 skb_queue_splice_init(skbs,
1244 ieee80211_purge_tx_queue(&local->hw, skbs); 1259 &local->pending[q]);
1245 return true; 1260 else
1261 skb_queue_splice_tail_init(skbs,
1262 &local->pending[q]);
1263
1264 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1265 flags);
1266 return false;
1246 } 1267 }
1247
1248 /*
1249 * Since queue is stopped, queue up frames for later
1250 * transmission from the tx-pending tasklet when the
1251 * queue is woken again.
1252 */
1253 if (txpending)
1254 skb_queue_splice_init(skbs, &local->pending[q]);
1255 else
1256 skb_queue_splice_tail_init(skbs,
1257 &local->pending[q]);
1258
1259 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1260 flags);
1261 return false;
1262 } 1268 }
1263 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 1269 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1264 1270
@@ -1844,9 +1850,24 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1844 } 1850 }
1845 1851
1846 if (!is_multicast_ether_addr(skb->data)) { 1852 if (!is_multicast_ether_addr(skb->data)) {
1853 struct sta_info *next_hop;
1854 bool mpp_lookup = true;
1855
1847 mpath = mesh_path_lookup(sdata, skb->data); 1856 mpath = mesh_path_lookup(sdata, skb->data);
1848 if (!mpath) 1857 if (mpath) {
1858 mpp_lookup = false;
1859 next_hop = rcu_dereference(mpath->next_hop);
1860 if (!next_hop ||
1861 !(mpath->flags & (MESH_PATH_ACTIVE |
1862 MESH_PATH_RESOLVING)))
1863 mpp_lookup = true;
1864 }
1865
1866 if (mpp_lookup)
1849 mppath = mpp_path_lookup(sdata, skb->data); 1867 mppath = mpp_path_lookup(sdata, skb->data);
1868
1869 if (mppath && mpath)
1870 mesh_path_del(mpath->sdata, mpath->dst);
1850 } 1871 }
1851 1872
1852 /* 1873 /*
@@ -2350,9 +2371,9 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
2350 if (local->tim_in_locked_section) { 2371 if (local->tim_in_locked_section) {
2351 __ieee80211_beacon_add_tim(sdata, ps, skb); 2372 __ieee80211_beacon_add_tim(sdata, ps, skb);
2352 } else { 2373 } else {
2353 spin_lock(&local->tim_lock); 2374 spin_lock_bh(&local->tim_lock);
2354 __ieee80211_beacon_add_tim(sdata, ps, skb); 2375 __ieee80211_beacon_add_tim(sdata, ps, skb);
2355 spin_unlock(&local->tim_lock); 2376 spin_unlock_bh(&local->tim_lock);
2356 } 2377 }
2357 2378
2358 return 0; 2379 return 0;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index f82b2e606cfd..1ba9dbc0e107 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1470,7 +1470,8 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
1470 if (ret == -EAGAIN) 1470 if (ret == -EAGAIN)
1471 ret = 1; 1471 ret = 1;
1472 1472
1473 return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST; 1473 return (ret < 0 && ret != -ENOTEMPTY) ? ret :
1474 ret > 0 ? 0 : -IPSET_ERR_EXIST;
1474} 1475}
1475 1476
1476/* Get headed data of a set */ 1477/* Get headed data of a set */
diff --git a/net/rds/message.c b/net/rds/message.c
index f0a4658f3273..aba232f9f308 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -82,10 +82,7 @@ static void rds_message_purge(struct rds_message *rm)
82void rds_message_put(struct rds_message *rm) 82void rds_message_put(struct rds_message *rm)
83{ 83{
84 rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount)); 84 rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
85 if (atomic_read(&rm->m_refcount) == 0) { 85 WARN(!atomic_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
86printk(KERN_CRIT "danger refcount zero on %p\n", rm);
87WARN_ON(1);
88 }
89 if (atomic_dec_and_test(&rm->m_refcount)) { 86 if (atomic_dec_and_test(&rm->m_refcount)) {
90 BUG_ON(!list_empty(&rm->m_sock_item)); 87 BUG_ON(!list_empty(&rm->m_sock_item));
91 BUG_ON(!list_empty(&rm->m_conn_item)); 88 BUG_ON(!list_empty(&rm->m_conn_item));
@@ -197,6 +194,9 @@ struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
197{ 194{
198 struct rds_message *rm; 195 struct rds_message *rm;
199 196
197 if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
198 return NULL;
199
200 rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp); 200 rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
201 if (!rm) 201 if (!rm)
202 goto out; 202 goto out;
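The rds_message_alloc() hunk above rejects an extra_len that, added to the structure size, would exceed the allocator limit; comparing against KMALLOC_MAX_SIZE - sizeof(struct rds_message) also keeps the addition itself from wrapping. A standalone sketch of that guard, with an invented ALLOC_MAX standing in for KMALLOC_MAX_SIZE:

#include <stddef.h>
#include <stdlib.h>

#define ALLOC_MAX (1u << 22)	/* illustrative stand-in for KMALLOC_MAX_SIZE */

struct msg {
	size_t extra_len;
	unsigned char payload[];	/* extra_len bytes follow */
};

static struct msg *msg_alloc(size_t extra_len)
{
	struct msg *m;

	/* Reject before adding, so the sum can neither wrap nor exceed ALLOC_MAX. */
	if (extra_len > ALLOC_MAX - sizeof(struct msg))
		return NULL;

	m = calloc(1, sizeof(*m) + extra_len);
	if (m)
		m->extra_len = extra_len;
	return m;
}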
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 2b3ef03c6098..12ed45dbe75d 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -155,7 +155,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
155 155
156 /* SCTP-AUTH extensions*/ 156 /* SCTP-AUTH extensions*/
157 INIT_LIST_HEAD(&ep->endpoint_shared_keys); 157 INIT_LIST_HEAD(&ep->endpoint_shared_keys);
158 null_key = sctp_auth_shkey_create(0, GFP_KERNEL); 158 null_key = sctp_auth_shkey_create(0, gfp);
159 if (!null_key) 159 if (!null_key)
160 goto nomem; 160 goto nomem;
161 161
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c99458df3f3f..b9070736b8d9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5653,6 +5653,9 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
5653 if (len < sizeof(sctp_assoc_t)) 5653 if (len < sizeof(sctp_assoc_t))
5654 return -EINVAL; 5654 return -EINVAL;
5655 5655
5656 /* Allow the struct to grow and fill in as much as possible */
5657 len = min_t(size_t, len, sizeof(sas));
5658
5656 if (copy_from_user(&sas, optval, len)) 5659 if (copy_from_user(&sas, optval, len))
5657 return -EFAULT; 5660 return -EFAULT;
5658 5661
@@ -5686,9 +5689,6 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
5686 /* Mark beginning of a new observation period */ 5689 /* Mark beginning of a new observation period */
5687 asoc->stats.max_obs_rto = asoc->rto_min; 5690 asoc->stats.max_obs_rto = asoc->rto_min;
5688 5691
5689 /* Allow the struct to grow and fill in as much as possible */
5690 len = min_t(size_t, len, sizeof(sas));
5691
5692 if (put_user(len, optlen)) 5692 if (put_user(len, optlen))
5693 return -EFAULT; 5693 return -EFAULT;
5694 5694
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
index 442ad4ed6315..825ea94415b3 100644
--- a/net/sctp/ssnmap.c
+++ b/net/sctp/ssnmap.c
@@ -41,8 +41,6 @@
41#include <net/sctp/sctp.h> 41#include <net/sctp/sctp.h>
42#include <net/sctp/sm.h> 42#include <net/sctp/sm.h>
43 43
44#define MAX_KMALLOC_SIZE 131072
45
46static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in, 44static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
47 __u16 out); 45 __u16 out);
48 46
@@ -65,7 +63,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
65 int size; 63 int size;
66 64
67 size = sctp_ssnmap_size(in, out); 65 size = sctp_ssnmap_size(in, out);
68 if (size <= MAX_KMALLOC_SIZE) 66 if (size <= KMALLOC_MAX_SIZE)
69 retval = kmalloc(size, gfp); 67 retval = kmalloc(size, gfp);
70 else 68 else
71 retval = (struct sctp_ssnmap *) 69 retval = (struct sctp_ssnmap *)
@@ -82,7 +80,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
82 return retval; 80 return retval;
83 81
84fail_map: 82fail_map:
85 if (size <= MAX_KMALLOC_SIZE) 83 if (size <= KMALLOC_MAX_SIZE)
86 kfree(retval); 84 kfree(retval);
87 else 85 else
88 free_pages((unsigned long)retval, get_order(size)); 86 free_pages((unsigned long)retval, get_order(size));
@@ -124,7 +122,7 @@ void sctp_ssnmap_free(struct sctp_ssnmap *map)
124 int size; 122 int size;
125 123
126 size = sctp_ssnmap_size(map->in.len, map->out.len); 124 size = sctp_ssnmap_size(map->in.len, map->out.len);
127 if (size <= MAX_KMALLOC_SIZE) 125 if (size <= KMALLOC_MAX_SIZE)
128 kfree(map); 126 kfree(map);
129 else 127 else
130 free_pages((unsigned long)map, get_order(size)); 128 free_pages((unsigned long)map, get_order(size));
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 5f25e0c92c31..396c45174e5b 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -51,7 +51,7 @@
51static void sctp_tsnmap_update(struct sctp_tsnmap *map); 51static void sctp_tsnmap_update(struct sctp_tsnmap *map);
52static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off, 52static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
53 __u16 len, __u16 *start, __u16 *end); 53 __u16 len, __u16 *start, __u16 *end);
54static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap); 54static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size);
55 55
56/* Initialize a block of memory as a tsnmap. */ 56/* Initialize a block of memory as a tsnmap. */
57struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len, 57struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
@@ -124,7 +124,7 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
124 124
125 gap = tsn - map->base_tsn; 125 gap = tsn - map->base_tsn;
126 126
127 if (gap >= map->len && !sctp_tsnmap_grow(map, gap)) 127 if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1))
128 return -ENOMEM; 128 return -ENOMEM;
129 129
130 if (!sctp_tsnmap_has_gap(map) && gap == 0) { 130 if (!sctp_tsnmap_has_gap(map) && gap == 0) {
@@ -360,23 +360,24 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
360 return ngaps; 360 return ngaps;
361} 361}
362 362
363static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap) 363static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size)
364{ 364{
365 unsigned long *new; 365 unsigned long *new;
366 unsigned long inc; 366 unsigned long inc;
367 u16 len; 367 u16 len;
368 368
369 if (gap >= SCTP_TSN_MAP_SIZE) 369 if (size > SCTP_TSN_MAP_SIZE)
370 return 0; 370 return 0;
371 371
372 inc = ALIGN((gap - map->len),BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT; 372 inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
373 len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE); 373 len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE);
374 374
375 new = kzalloc(len>>3, GFP_ATOMIC); 375 new = kzalloc(len>>3, GFP_ATOMIC);
376 if (!new) 376 if (!new)
377 return 0; 377 return 0;
378 378
379 bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn); 379 bitmap_copy(new, map->tsn_map,
380 map->max_tsn_seen - map->cumulative_tsn_ack_point);
380 kfree(map->tsn_map); 381 kfree(map->tsn_map);
381 map->tsn_map = new; 382 map->tsn_map = new;
382 map->len = len; 383 map->len = len;
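The sctp_tsnmap_grow() change above switches the argument from a bit offset (gap) to a required size (gap + 1): marking offset gap needs gap + 1 entries, so both the old "gap >= SCTP_TSN_MAP_SIZE" test and the "gap - map->len" sizing were off by one. A standalone model of the corrected sizing, with illustrative constants rather than the SCTP ones:

#include <stdio.h>

#define MAP_SIZE	4096
#define MAP_INCREMENT	8
#define BITS_PER_LONG	64
#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

/* New map length in bits for a map that must hold 'size' entries; 0 if impossible. */
static unsigned int grow_len(unsigned int cur_len, unsigned int size)
{
	unsigned int inc, len;

	if (size > MAP_SIZE)
		return 0;
	if (size <= cur_len)
		return cur_len;		/* already large enough */
	inc = ALIGN_UP(size - cur_len, BITS_PER_LONG) + MAP_INCREMENT;
	len = cur_len + inc;
	return len < MAP_SIZE ? len : MAP_SIZE;
}

int main(void)
{
	/* Offset 'gap' needs gap + 1 entries: growing a 128-bit map for gap == 128. */
	printf("%u\n", grow_len(128, 128 + 1));
	return 0;
}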
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ada17464b65b..0fd5b3d2df03 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
106{ 106{
107 struct sk_buff_head temp; 107 struct sk_buff_head temp;
108 struct sctp_ulpevent *event; 108 struct sctp_ulpevent *event;
109 int event_eor = 0;
109 110
110 /* Create an event from the incoming chunk. */ 111 /* Create an event from the incoming chunk. */
111 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); 112 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
127 /* Send event to the ULP. 'event' is the sctp_ulpevent for 128 /* Send event to the ULP. 'event' is the sctp_ulpevent for
128 * very first SKB on the 'temp' list. 129 * very first SKB on the 'temp' list.
129 */ 130 */
130 if (event) 131 if (event) {
132 event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
131 sctp_ulpq_tail_event(ulpq, event); 133 sctp_ulpq_tail_event(ulpq, event);
134 }
132 135
133 return 0; 136 return event_eor;
134} 137}
135 138
136/* Add a new event for propagation to the ULP. */ 139/* Add a new event for propagation to the ULP. */
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
540 ctsn = cevent->tsn; 543 ctsn = cevent->tsn;
541 544
542 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { 545 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
546 case SCTP_DATA_FIRST_FRAG:
547 if (!first_frag)
548 return NULL;
549 goto done;
543 case SCTP_DATA_MIDDLE_FRAG: 550 case SCTP_DATA_MIDDLE_FRAG:
544 if (!first_frag) { 551 if (!first_frag) {
545 first_frag = pos; 552 first_frag = pos;
546 next_tsn = ctsn + 1; 553 next_tsn = ctsn + 1;
547 last_frag = pos; 554 last_frag = pos;
548 } else if (next_tsn == ctsn) 555 } else if (next_tsn == ctsn) {
549 next_tsn++; 556 next_tsn++;
550 else 557 last_frag = pos;
558 } else
551 goto done; 559 goto done;
552 break; 560 break;
553 case SCTP_DATA_LAST_FRAG: 561 case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
651 } else 659 } else
652 goto done; 660 goto done;
653 break; 661 break;
662
663 case SCTP_DATA_LAST_FRAG:
664 if (!first_frag)
665 return NULL;
666 else
667 goto done;
668 break;
669
654 default: 670 default:
655 return NULL; 671 return NULL;
656 } 672 }
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
962 struct sk_buff_head *list, __u16 needed) 978 struct sk_buff_head *list, __u16 needed)
963{ 979{
964 __u16 freed = 0; 980 __u16 freed = 0;
965 __u32 tsn; 981 __u32 tsn, last_tsn;
966 struct sk_buff *skb; 982 struct sk_buff *skb, *flist, *last;
967 struct sctp_ulpevent *event; 983 struct sctp_ulpevent *event;
968 struct sctp_tsnmap *tsnmap; 984 struct sctp_tsnmap *tsnmap;
969 985
970 tsnmap = &ulpq->asoc->peer.tsn_map; 986 tsnmap = &ulpq->asoc->peer.tsn_map;
971 987
972 while ((skb = __skb_dequeue_tail(list)) != NULL) { 988 while ((skb = skb_peek_tail(list)) != NULL) {
973 freed += skb_headlen(skb);
974 event = sctp_skb2event(skb); 989 event = sctp_skb2event(skb);
975 tsn = event->tsn; 990 tsn = event->tsn;
976 991
992 /* Don't renege below the Cumulative TSN ACK Point. */
993 if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
994 break;
995
996 /* Events in ordering queue may have multiple fragments
997 * corresponding to additional TSNs. Sum the total
998 * freed space; find the last TSN.
999 */
1000 freed += skb_headlen(skb);
1001 flist = skb_shinfo(skb)->frag_list;
1002 for (last = flist; flist; flist = flist->next) {
1003 last = flist;
1004 freed += skb_headlen(last);
1005 }
1006 if (last)
1007 last_tsn = sctp_skb2event(last)->tsn;
1008 else
1009 last_tsn = tsn;
1010
1011 /* Unlink the event, then renege all applicable TSNs. */
1012 __skb_unlink(skb, list);
977 sctp_ulpevent_free(event); 1013 sctp_ulpevent_free(event);
978 sctp_tsnmap_renege(tsnmap, tsn); 1014 while (TSN_lte(tsn, last_tsn)) {
1015 sctp_tsnmap_renege(tsnmap, tsn);
1016 tsn++;
1017 }
979 if (freed >= needed) 1018 if (freed >= needed)
980 return freed; 1019 return freed;
981 } 1020 }
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1002 struct sctp_ulpevent *event; 1041 struct sctp_ulpevent *event;
1003 struct sctp_association *asoc; 1042 struct sctp_association *asoc;
1004 struct sctp_sock *sp; 1043 struct sctp_sock *sp;
1044 __u32 ctsn;
1045 struct sk_buff *skb;
1005 1046
1006 asoc = ulpq->asoc; 1047 asoc = ulpq->asoc;
1007 sp = sctp_sk(asoc->base.sk); 1048 sp = sctp_sk(asoc->base.sk);
1008 1049
1009 /* If the association is already in Partial Delivery mode 1050 /* If the association is already in Partial Delivery mode
1010 * we have noting to do. 1051 * we have nothing to do.
1011 */ 1052 */
1012 if (ulpq->pd_mode) 1053 if (ulpq->pd_mode)
1013 return; 1054 return;
1014 1055
1056 /* Data must be at or below the Cumulative TSN ACK Point to
1057 * start partial delivery.
1058 */
1059 skb = skb_peek(&asoc->ulpq.reasm);
1060 if (skb != NULL) {
1061 ctsn = sctp_skb2event(skb)->tsn;
1062 if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
1063 return;
1064 }
1065
1015 /* If the user enabled fragment interleave socket option, 1066 /* If the user enabled fragment interleave socket option,
1016 * multiple associations can enter partial delivery. 1067 * multiple associations can enter partial delivery.
1017 * Otherwise, we can only enter partial delivery if the 1068 * Otherwise, we can only enter partial delivery if the
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1054 } 1105 }
1055 /* If able to free enough room, accept this chunk. */ 1106 /* If able to free enough room, accept this chunk. */
1056 if (chunk && (freed >= needed)) { 1107 if (chunk && (freed >= needed)) {
1057 __u32 tsn; 1108 int retval;
1058 tsn = ntohl(chunk->subh.data_hdr->tsn); 1109 retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
1059 sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport); 1110 /*
1060 sctp_ulpq_tail_data(ulpq, chunk, gfp); 1111 * Enter partial delivery if chunk has not been
1061 1112 * delivered; otherwise, drain the reassembly queue.
1062 sctp_ulpq_partial_delivery(ulpq, gfp); 1113 */
1114 if (retval <= 0)
1115 sctp_ulpq_partial_delivery(ulpq, gfp);
1116 else if (retval == 1)
1117 sctp_ulpq_reasm_drain(ulpq);
1063 } 1118 }
1064 1119
1065 sk_mem_reclaim(asoc->base.sk); 1120 sk_mem_reclaim(asoc->base.sk);
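The renege rework above peeks at the tail event instead of blindly dequeuing it, refuses to renege at or below the cumulative TSN ACK point, and accounts for every fragment chained on the event's frag_list before reneging the whole TSN range the event covered. A standalone sketch of the fragment accounting only (types and names invented here, not the SCTP structures):

struct frag {
	struct frag *next;
	unsigned int len;
	unsigned int tsn;
};

/* Sum the space the whole chain frees and report the last TSN it covers. */
static unsigned int account_chain(const struct frag *head,
				  unsigned int *last_tsn)
{
	unsigned int freed = head->len;
	const struct frag *last = head;

	for (const struct frag *f = head->next; f; f = f->next) {
		last = f;
		freed += f->len;
	}
	*last_tsn = last->tsn;
	return freed;
}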
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 35545ccc30fd..e652d05ff712 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -554,16 +554,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
554 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && 554 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
555 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS)) 555 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
556 goto nla_put_failure; 556 goto nla_put_failure;
557 if (chan->flags & IEEE80211_CHAN_RADAR) { 557 if ((chan->flags & IEEE80211_CHAN_RADAR) &&
558 u32 time = elapsed_jiffies_msecs(chan->dfs_state_entered); 558 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
559 if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) 559 goto nla_put_failure;
560 goto nla_put_failure;
561 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE,
562 chan->dfs_state))
563 goto nla_put_failure;
564 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME, time))
565 goto nla_put_failure;
566 }
567 if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) && 560 if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) &&
568 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS)) 561 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS))
569 goto nla_put_failure; 562 goto nla_put_failure;
@@ -900,9 +893,6 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
900 nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, 893 nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
901 c->max_interfaces)) 894 c->max_interfaces))
902 goto nla_put_failure; 895 goto nla_put_failure;
903 if (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
904 c->radar_detect_widths))
905 goto nla_put_failure;
906 896
907 nla_nest_end(msg, nl_combi); 897 nla_nest_end(msg, nl_combi);
908 } 898 }
@@ -914,48 +904,6 @@ nla_put_failure:
914 return -ENOBUFS; 904 return -ENOBUFS;
915} 905}
916 906
917#ifdef CONFIG_PM
918static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
919 struct sk_buff *msg)
920{
921 const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp;
922 struct nlattr *nl_tcp;
923
924 if (!tcp)
925 return 0;
926
927 nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
928 if (!nl_tcp)
929 return -ENOBUFS;
930
931 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
932 tcp->data_payload_max))
933 return -ENOBUFS;
934
935 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
936 tcp->data_payload_max))
937 return -ENOBUFS;
938
939 if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ))
940 return -ENOBUFS;
941
942 if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
943 sizeof(*tcp->tok), tcp->tok))
944 return -ENOBUFS;
945
946 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
947 tcp->data_interval_max))
948 return -ENOBUFS;
949
950 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
951 tcp->wake_payload_max))
952 return -ENOBUFS;
953
954 nla_nest_end(msg, nl_tcp);
955 return 0;
956}
957#endif
958
959static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags, 907static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags,
960 struct cfg80211_registered_device *dev) 908 struct cfg80211_registered_device *dev)
961{ 909{
@@ -1330,9 +1278,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
1330 goto nla_put_failure; 1278 goto nla_put_failure;
1331 } 1279 }
1332 1280
1333 if (nl80211_send_wowlan_tcp_caps(dev, msg))
1334 goto nla_put_failure;
1335
1336 nla_nest_end(msg, nl_wowlan); 1281 nla_nest_end(msg, nl_wowlan);
1337 } 1282 }
1338#endif 1283#endif