-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 36
-rw-r--r--  drivers/net/bonding/bond_alb.c | 27
-rw-r--r--  drivers/net/dsa/mv88e6060.c | 1
-rw-r--r--  drivers/net/dsa/mv88e6123_61_65.c | 22
-rw-r--r--  drivers/net/dsa/mv88e6131.c | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 111
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 11
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 44
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 11
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 9
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 38
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 3
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 1
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 32
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 41
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 14
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 109
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 159
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/pd.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/profile.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c | 2
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 17
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 6
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 5
-rw-r--r--  drivers/net/macvlan.c | 1
-rw-r--r--  drivers/net/phy/mdio_bus.c | 5
-rw-r--r--  drivers/net/team/team.c | 136
-rw-r--r--  drivers/net/wireless/b43/Kconfig | 6
-rw-r--r--  drivers/net/wireless/b43/main.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/main.c | 10
-rw-r--r--  include/linux/if_team.h | 10
-rw-r--r--  include/linux/mlx4/device.h | 1
-rw-r--r--  include/linux/res_counter.h | 11
-rw-r--r--  include/linux/snmp.h | 1
-rw-r--r--  include/net/bluetooth/hci.h | 2
-rw-r--r--  include/net/netprio_cgroup.h | 1
-rw-r--r--  include/net/sock.h | 13
-rw-r--r--  kernel/res_counter.c | 25
-rw-r--r--  mm/memcontrol.c | 4
-rw-r--r--  net/bluetooth/hci_core.c | 2
-rw-r--r--  net/core/ethtool.c | 1
-rw-r--r--  net/core/flow_dissector.c | 1
-rw-r--r--  net/core/pktgen.c | 4
-rw-r--r--  net/core/sock.c | 4
-rw-r--r--  net/ipv4/proc.c | 1
-rw-r--r--  net/ipv4/tcp_bic.c | 11
-rw-r--r--  net/ipv4/tcp_cubic.c | 10
-rw-r--r--  net/ipv4/tcp_input.c | 41
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 61
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/llc/af_llc.c | 5
-rw-r--r--  net/mac80211/debugfs_key.c | 7
-rw-r--r--  net/mac80211/mesh_hwmp.c | 8
-rw-r--r--  net/mac80211/mesh_plink.c | 4
-rw-r--r--  net/mac80211/mlme.c | 38
-rw-r--r--  net/rds/af_rds.c | 20
-rw-r--r--  net/sched/sch_netem.c | 2
102 files changed, 904 insertions(+), 512 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 7b65f752c5f8..7c1b765ecc59 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -151,17 +151,18 @@ void bpf_jit_compile(struct sk_filter *fp)
 	cleanup_addr = proglen; /* epilogue address */
 
 	for (pass = 0; pass < 10; pass++) {
+		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
 		/* no prologue/epilogue for trivial filters (RET something) */
 		proglen = 0;
 		prog = temp;
 
-		if (seen) {
+		if (seen_or_pass0) {
 			EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
 			EMIT4(0x48, 0x83, 0xec, 96);	/* subq $96,%rsp */
 			/* note : must save %rbx in case bpf_error is hit */
-			if (seen & (SEEN_XREG | SEEN_DATAREF))
+			if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
 				EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
-			if (seen & SEEN_XREG)
+			if (seen_or_pass0 & SEEN_XREG)
 				CLEAR_X(); /* make sure we dont leek kernel memory */
 
 			/*
@@ -170,7 +171,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 			 * r9 = skb->len - skb->data_len
 			 * r8 = skb->data
 			 */
-			if (seen & SEEN_DATAREF) {
+			if (seen_or_pass0 & SEEN_DATAREF) {
 				if (offsetof(struct sk_buff, len) <= 127)
 					/* mov off8(%rdi),%r9d */
 					EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
@@ -260,9 +261,14 @@ void bpf_jit_compile(struct sk_filter *fp)
 			case BPF_S_ALU_DIV_X: /* A /= X; */
 				seen |= SEEN_XREG;
 				EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
-				if (pc_ret0 != -1)
-					EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4));
-				else {
+				if (pc_ret0 > 0) {
+					/* addrs[pc_ret0 - 1] is start address of target
+					 * (addrs[i] - 4) is the address following this jmp
+					 * ("xor %edx,%edx; div %ebx" being 4 bytes long)
+					 */
+					EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
+								(addrs[i] - 4));
+				} else {
 					EMIT_COND_JMP(X86_JNE, 2 + 5);
 					CLEAR_A();
 					EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
@@ -335,12 +341,12 @@ void bpf_jit_compile(struct sk_filter *fp)
 				}
 				/* fallinto */
 			case BPF_S_RET_A:
-				if (seen) {
+				if (seen_or_pass0) {
 					if (i != flen - 1) {
 						EMIT_JMP(cleanup_addr - addrs[i]);
 						break;
 					}
-					if (seen & SEEN_XREG)
+					if (seen_or_pass0 & SEEN_XREG)
 						EMIT4(0x48, 0x8b, 0x5d, 0xf8);  /* mov  -8(%rbp),%rbx */
 					EMIT1(0xc9);		/* leaveq */
 				}
@@ -483,8 +489,9 @@ common_load: seen |= SEEN_DATAREF;
 				goto common_load;
 			case BPF_S_LDX_B_MSH:
 				if ((int)K < 0) {
-					if (pc_ret0 != -1) {
-						EMIT_JMP(addrs[pc_ret0] - addrs[i]);
+					if (pc_ret0 > 0) {
+						/* addrs[pc_ret0 - 1] is the start address */
+						EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]);
 						break;
 					}
 					CLEAR_A();
@@ -599,13 +606,14 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 		 * use it to give the cleanup instruction(s) addr
 		 */
 		cleanup_addr = proglen - 1; /* ret */
-		if (seen)
+		if (seen_or_pass0)
 			cleanup_addr -= 1; /* leaveq */
-		if (seen & SEEN_XREG)
+		if (seen_or_pass0 & SEEN_XREG)
 			cleanup_addr -= 4; /* mov  -8(%rbp),%rbx */
 
 		if (image) {
-			WARN_ON(proglen != oldproglen);
+			if (proglen != oldproglen)
+				pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
 			break;
 		}
 		if (proglen == oldproglen) {
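The hunks above lean on one convention: addrs[i] records the offset of the end of BPF instruction i in the emitted image, so the start of the instruction a "RET 0" shortcut jumps to is addrs[pc_ret0 - 1], and the new "pc_ret0 > 0" test keeps that index valid. Below is a standalone sketch of that bookkeeping; it is not part of the patch, the offsets are invented, and it assumes the 1-based pc_ret0 encoding implied by the new comments.

#include <stdio.h>

/* addrs[i] = offset of the *end* of insn i, so the start of insn i is the end
 * of insn i - 1.  A conditional jump's displacement is taken relative to the
 * address following the jump; in the DIV_X case that is addrs[i] - 4, because
 * the 4 bytes of "xor %edx,%edx; div %ebx" still follow the jcc.
 */
static int je_displacement(const unsigned int *addrs, int i, int pc_ret0)
{
	unsigned int target_start = addrs[pc_ret0 - 1]; /* start of the RET 0 insn */
	unsigned int after_jmp = addrs[i] - 4;          /* address following the jcc */

	return (int)(target_start - after_jmp);
}

int main(void)
{
	unsigned int addrs[] = { 4, 10, 16, 22, 23 };   /* hypothetical end offsets */

	printf("disp = %d\n", je_displacement(addrs, 3, 3));
	return 0;
}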
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 342626f4bc46..f820b26b9db3 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -909,16 +909,12 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
 	}
 }
 
-/* hw is a boolean parameter that determines whether we should try and
- * set the hw address of the device as well as the hw address of the
- * net_device
- */
-static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
+static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
 {
 	struct net_device *dev = slave->dev;
 	struct sockaddr s_addr;
 
-	if (!hw) {
+	if (slave->bond->params.mode == BOND_MODE_TLB) {
 		memcpy(dev->dev_addr, addr, dev->addr_len);
 		return 0;
 	}
@@ -948,8 +944,8 @@ static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct
 	u8 tmp_mac_addr[ETH_ALEN];
 
 	memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
-	alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled);
-	alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled);
+	alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
+	alb_set_slave_mac_addr(slave2, tmp_mac_addr);
 
 }
 
@@ -1096,8 +1092,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
 
 		/* Try setting slave mac to bond address and fall-through
 		   to code handling that situation below... */
-		alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
-				       bond->alb_info.rlb_enabled);
+		alb_set_slave_mac_addr(slave, bond->dev->dev_addr);
 	}
 
 	/* The slave's address is equal to the address of the bond.
@@ -1133,8 +1128,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
 	}
 
 	if (free_mac_slave) {
-		alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
-				       bond->alb_info.rlb_enabled);
+		alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
 
 		pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
 			   bond->dev->name, slave->dev->name,
@@ -1491,8 +1485,7 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
 {
 	int res;
 
-	res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
-				     bond->alb_info.rlb_enabled);
+	res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
 	if (res) {
 		return res;
 	}
@@ -1643,8 +1636,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 		alb_swap_mac_addr(bond, swap_slave, new_slave);
 	} else {
 		/* set the new_slave to the bond mac address */
-		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
-				       bond->alb_info.rlb_enabled);
+		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
 	}
 
 	if (swap_slave) {
@@ -1704,8 +1696,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 		alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
 		alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
 	} else {
-		alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
-				       bond->alb_info.rlb_enabled);
+		alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
 
 		read_lock(&bond->lock);
 		alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
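With the hw flag gone, the helper derives its behaviour from the bonding mode alone. A minimal userspace model of that decision, not kernel code and with made-up types, assuming the TLB/ALB split described in the hunks above:

#include <stdio.h>
#include <string.h>

enum bond_mode { BOND_MODE_TLB, BOND_MODE_ALB };

struct fake_slave {
	enum bond_mode mode;        /* stands in for slave->bond->params.mode */
	unsigned char dev_addr[6];  /* stands in for slave->dev->dev_addr */
};

/* TLB: only the software copy of the MAC is updated; ALB: the device itself
 * would also be programmed (dev_set_mac_address() in the real driver). */
static int set_slave_mac(struct fake_slave *s, const unsigned char *addr)
{
	memcpy(s->dev_addr, addr, sizeof(s->dev_addr));
	return s->mode == BOND_MODE_TLB ? 0 : 1;    /* 1 = "hardware touched" */
}

int main(void)
{
	static const unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 0x01 };
	struct fake_slave s = { .mode = BOND_MODE_TLB };

	printf("hw write needed: %d\n", set_slave_mac(&s, mac));
	return 0;
}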
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 7fc4e81d4d43..325391d19bad 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/list.h>
+#include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <net/dsa.h>
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index c0a458fc698f..c17c75b9f531 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/list.h>
+#include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <net/dsa.h>
@@ -20,12 +21,25 @@ static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr)
 
 	ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
 	if (ret >= 0) {
-		ret &= 0xfff0;
-		if (ret == 0x1210)
+		if (ret == 0x1212)
+			return "Marvell 88E6123 (A1)";
+		if (ret == 0x1213)
+			return "Marvell 88E6123 (A2)";
+		if ((ret & 0xfff0) == 0x1210)
 			return "Marvell 88E6123";
-		if (ret == 0x1610)
+
+		if (ret == 0x1612)
+			return "Marvell 88E6161 (A1)";
+		if (ret == 0x1613)
+			return "Marvell 88E6161 (A2)";
+		if ((ret & 0xfff0) == 0x1610)
 			return "Marvell 88E6161";
-		if (ret == 0x1650)
+
+		if (ret == 0x1652)
+			return "Marvell 88E6165 (A1)";
+		if (ret == 0x1653)
+			return "Marvell 88e6165 (A2)";
+		if ((ret & 0xfff0) == 0x1650)
 			return "Marvell 88E6165";
 	}
 
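The probe change distinguishes silicon steppings before falling back to a family match. A standalone model of that decode order, not driver code, assuming the low nibble of the product ID register is the revision field as the masks suggest:

#include <stdio.h>

static const char *name_88e6123(int id)
{
	if (id == 0x1212)
		return "Marvell 88E6123 (A1)";
	if (id == 0x1213)
		return "Marvell 88E6123 (A2)";
	if ((id & 0xfff0) == 0x1210)    /* any other revision of the same part */
		return "Marvell 88E6123";
	return "unknown";
}

int main(void)
{
	printf("%s\n", name_88e6123(0x1215));
	return 0;
}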
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index e0eb68243834..55888b06d8b4 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/list.h>
+#include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <net/dsa.h>
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 5467c040824a..a2c62c2f30ee 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/list.h>
+#include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <net/dsa.h>
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 2b731b253598..03f3935fd8c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3117,7 +3117,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 	int rx_ring_size = 0;
 
 #ifdef BCM_CNIC
-	if (IS_MF_ISCSI_SD(bp)) {
+	if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
 		rx_ring_size = MIN_RX_SIZE_NONTPA;
 		bp->rx_ring_size = rx_ring_size;
 	} else
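The one-line change only forces the minimal non-TPA ring size when bp->rx_ring_size is still zero, i.e. when no size has been configured yet (presumably one set via ethtool -G). A trivial model of that precedence, not driver code:

#include <stdio.h>

static int pick_rx_ring_size(int configured, int min_nontpa)
{
	return configured ? configured : min_nontpa;    /* keep the user's value */
}

int main(void)
{
	printf("%d\n", pick_rx_ring_size(0, 64));       /* falls back to minimum */
	printf("%d\n", pick_rx_ring_size(4096, 64));    /* configured size wins */
	return 0;
}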
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index f99c6e312a5d..31a8b38ab15e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1738,7 +1738,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
 	struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
 	u16 tx_start_idx, tx_idx;
 	u16 rx_start_idx, rx_idx;
-	u16 pkt_prod, bd_prod, rx_comp_cons;
+	u16 pkt_prod, bd_prod;
 	struct sw_tx_bd *tx_buf;
 	struct eth_tx_start_bd *tx_start_bd;
 	struct eth_tx_parse_bd_e1x  *pbd_e1x = NULL;
@@ -1873,8 +1873,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
 	if (rx_idx != rx_start_idx + num_pkts)
 		goto test_loopback_exit;
 
-	rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons);
-	cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)];
+	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
 	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
 	cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 	if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
@@ -2121,18 +2120,16 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
 	case ETH_SS_STATS:
 		if (is_multi(bp)) {
 			num_stats = bnx2x_num_stat_queues(bp) *
 				BNX2X_NUM_Q_STATS;
-			if (!IS_MF_MODE_STAT(bp))
-				num_stats += BNX2X_NUM_STATS;
-		} else {
-			if (IS_MF_MODE_STAT(bp)) {
-				num_stats = 0;
-				for (i = 0; i < BNX2X_NUM_STATS; i++)
-					if (IS_FUNC_STAT(i))
-						num_stats++;
-			} else
-				num_stats = BNX2X_NUM_STATS;
-		}
+		} else
+			num_stats = 0;
+		if (IS_MF_MODE_STAT(bp)) {
+			for (i = 0; i < BNX2X_NUM_STATS; i++)
+				if (IS_FUNC_STAT(i))
+					num_stats++;
+		} else
+			num_stats += BNX2X_NUM_STATS;
+
 		return num_stats;
 
 	case ETH_SS_TEST:
@@ -2151,8 +2148,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 
 	switch (stringset) {
 	case ETH_SS_STATS:
+		k = 0;
 		if (is_multi(bp)) {
-			k = 0;
 			for_each_eth_queue(bp, i) {
 				memset(queue_name, 0, sizeof(queue_name));
 				sprintf(queue_name, "%d", i);
@@ -2163,20 +2160,17 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 					queue_name);
 				k += BNX2X_NUM_Q_STATS;
 			}
-			if (IS_MF_MODE_STAT(bp))
-				break;
-			for (j = 0; j < BNX2X_NUM_STATS; j++)
-				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
-				       bnx2x_stats_arr[j].string);
-		} else {
-			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-				if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
-					continue;
-				strcpy(buf + j*ETH_GSTRING_LEN,
-				       bnx2x_stats_arr[i].string);
-				j++;
-			}
 		}
+
+
+		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+			if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+				continue;
+			strcpy(buf + (k + j)*ETH_GSTRING_LEN,
+			       bnx2x_stats_arr[i].string);
+			j++;
+		}
+
 		break;
 
 	case ETH_SS_TEST:
@@ -2190,10 +2184,9 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	u32 *hw_stats, *offset;
-	int i, j, k;
+	int i, j, k = 0;
 
 	if (is_multi(bp)) {
-		k = 0;
 		for_each_eth_queue(bp, i) {
 			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
 			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
@@ -2214,46 +2207,28 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 			}
 			k += BNX2X_NUM_Q_STATS;
 		}
-		if (IS_MF_MODE_STAT(bp))
-			return;
-		hw_stats = (u32 *)&bp->eth_stats;
-		for (j = 0; j < BNX2X_NUM_STATS; j++) {
-			if (bnx2x_stats_arr[j].size == 0) {
-				/* skip this counter */
-				buf[k + j] = 0;
-				continue;
-			}
-			offset = (hw_stats + bnx2x_stats_arr[j].offset);
-			if (bnx2x_stats_arr[j].size == 4) {
-				/* 4-byte counter */
-				buf[k + j] = (u64) *offset;
-				continue;
-			}
-			/* 8-byte counter */
-			buf[k + j] = HILO_U64(*offset, *(offset + 1));
+	}
+
+	hw_stats = (u32 *)&bp->eth_stats;
+	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+		if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+			continue;
+		if (bnx2x_stats_arr[i].size == 0) {
+			/* skip this counter */
+			buf[k + j] = 0;
+			j++;
+			continue;
 		}
-	} else {
-		hw_stats = (u32 *)&bp->eth_stats;
-		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-			if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
-				continue;
-			if (bnx2x_stats_arr[i].size == 0) {
-				/* skip this counter */
-				buf[j] = 0;
-				j++;
-				continue;
-			}
-			offset = (hw_stats + bnx2x_stats_arr[i].offset);
-			if (bnx2x_stats_arr[i].size == 4) {
-				/* 4-byte counter */
-				buf[j] = (u64) *offset;
-				j++;
-				continue;
-			}
-			/* 8-byte counter */
-			buf[j] = HILO_U64(*offset, *(offset + 1));
+		offset = (hw_stats + bnx2x_stats_arr[i].offset);
+		if (bnx2x_stats_arr[i].size == 4) {
+			/* 4-byte counter */
+			buf[k + j] = (u64) *offset;
 			j++;
+			continue;
 		}
+		/* 8-byte counter */
+		buf[k + j] = HILO_U64(*offset, *(offset + 1));
+		j++;
 	}
 }
 
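After the rework, both the string and value paths share one layout: per-queue counters occupy buf[0..k) and the global counters are appended at buf[k + j], with port-only counters skipped in multi-function mode. A standalone model of that walk, not driver code, with invented counts and a made-up IS_PORT_STAT rule:

#include <stdio.h>
#include <stdbool.h>

#define NUM_Q		2
#define NUM_Q_STATS	3
#define NUM_STATS	4

static bool is_port_stat(int i)
{
	return i % 2 == 0;			/* made-up classification */
}

int main(void)
{
	unsigned long long buf[NUM_Q * NUM_Q_STATS + NUM_STATS] = { 0 };
	bool mf_mode = true;
	int i, j, k = 0;

	for (i = 0; i < NUM_Q; i++) {		/* per-queue block */
		for (j = 0; j < NUM_Q_STATS; j++)
			buf[k + j] = 100 * i + j;
		k += NUM_Q_STATS;
	}

	for (i = 0, j = 0; i < NUM_STATS; i++) {	/* global block at offset k */
		if (mf_mode && is_port_stat(i))
			continue;			/* hidden in MF mode */
		buf[k + j] = 1000 + i;
		j++;
	}

	for (i = 0; i < k + j; i++)
		printf("buf[%d] = %llu\n", i, buf[i]);
	return 0;
}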
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ffeaaa95ed96..1e3f978ee6da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -941,7 +941,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
 			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
 
 			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
-				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
+				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
 		}
 
 		start = RX_SGE(fp->rx_sge_prod);
@@ -10536,6 +10536,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 {
 	struct bnx2x *bp;
 	int rc;
+	bool chip_is_e1x = (board_type == BCM57710 ||
+			    board_type == BCM57711 ||
+			    board_type == BCM57711E);
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	bp = netdev_priv(dev);
@@ -10624,7 +10627,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
 	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
 
-	if (CHIP_IS_E1x(bp)) {
+	if (chip_is_e1x) {
 		REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
 		REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
 		REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
@@ -10635,9 +10638,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 	 * Enable internal target-read (in case we are probed after PF FLR).
 	 * Must be done prior to any BAR read access. Only for 57712 and up
 	 */
-	if (board_type != BCM57710 &&
-	    board_type != BCM57711 &&
-	    board_type != BCM57711E)
+	if (!chip_is_e1x)
 		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 
 	/* Reset the load counter */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 5ac616093f9f..cb6339c35571 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -50,6 +50,7 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
 					    int exe_len,
 					    union bnx2x_qable_obj *owner,
 					    exe_q_validate validate,
+					    exe_q_remove remove,
 					    exe_q_optimize optimize,
 					    exe_q_execute exec,
 					    exe_q_get get)
@@ -66,6 +67,7 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
 
 	/* Owner specific callbacks */
 	o->validate = validate;
+	o->remove = remove;
 	o->optimize = optimize;
 	o->execute = exec;
 	o->get = get;
@@ -1340,6 +1342,35 @@ static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
 	}
 }
 
+static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
+				 union bnx2x_qable_obj *qo,
+				 struct bnx2x_exeq_elem *elem)
+{
+	int rc = 0;
+
+	/* If consumption wasn't required, nothing to do */
+	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
+		return 0;
+
+	switch (elem->cmd_data.vlan_mac.cmd) {
+	case BNX2X_VLAN_MAC_ADD:
+	case BNX2X_VLAN_MAC_MOVE:
+		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
+		break;
+	case BNX2X_VLAN_MAC_DEL:
+		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (rc != true)
+		return -EINVAL;
+
+	return 0;
+}
+
 /**
  * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes.
  *
@@ -1801,8 +1832,14 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 
 	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
 		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
-		    *vlan_mac_flags)
+		    *vlan_mac_flags) {
+			rc = exeq->remove(bp, exeq->owner, exeq_pos);
+			if (rc) {
+				BNX2X_ERR("Failed to remove command\n");
+				return rc;
+			}
 			list_del(&exeq_pos->link);
+		}
 	}
 
 	spin_unlock_bh(&exeq->lock);
@@ -1908,6 +1945,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp,
 		bnx2x_exe_queue_init(bp,
 				     &mac_obj->exe_queue, 1, qable_obj,
 				     bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
 				     bnx2x_optimize_vlan_mac,
 				     bnx2x_execute_vlan_mac,
 				     bnx2x_exeq_get_mac);
@@ -1924,6 +1962,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp,
 		bnx2x_exe_queue_init(bp,
 				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
 				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
 				     bnx2x_optimize_vlan_mac,
 				     bnx2x_execute_vlan_mac,
 				     bnx2x_exeq_get_mac);
@@ -1963,6 +2002,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
 		bnx2x_exe_queue_init(bp,
 				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
 				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
 				     bnx2x_optimize_vlan_mac,
 				     bnx2x_execute_vlan_mac,
 				     bnx2x_exeq_get_vlan);
@@ -2009,6 +2049,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
 		bnx2x_exe_queue_init(bp,
 				     &vlan_mac_obj->exe_queue, 1, qable_obj,
 				     bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
 				     bnx2x_optimize_vlan_mac,
 				     bnx2x_execute_vlan_mac,
 				     bnx2x_exeq_get_vlan_mac);
@@ -2025,6 +2066,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
 				     &vlan_mac_obj->exe_queue,
 				     CLASSIFY_RULES_COUNT,
 				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
 				     bnx2x_optimize_vlan_mac,
 				     bnx2x_execute_vlan_mac,
 				     bnx2x_exeq_get_vlan_mac);
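The new remove() hook undoes whatever credit accounting validate() performed for a command that is dropped while still queued: a pending ADD or MOVE gives its CAM credit back, a pending DEL re-takes the one it had released. A toy model of that pairing, not driver code:

#include <stdio.h>

enum cmd { CMD_ADD, CMD_MOVE, CMD_DEL };

struct credit_pool { int free; };

static int put_credit(struct credit_pool *p) { p->free++; return 1; }

static int get_credit(struct credit_pool *p)
{
	if (!p->free)
		return 0;
	p->free--;
	return 1;
}

static int remove_pending(struct credit_pool *p, enum cmd c)
{
	switch (c) {
	case CMD_ADD:
	case CMD_MOVE:
		return put_credit(p) ? 0 : -1;	/* return what validate took */
	case CMD_DEL:
		return get_credit(p) ? 0 : -1;	/* re-take what validate freed */
	}
	return -1;
}

int main(void)
{
	struct credit_pool pool = { .free = 1 };

	printf("drop pending ADD -> rc=%d, free=%d\n",
	       remove_pending(&pool, CMD_ADD), pool.free);
	return 0;
}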
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 992308ff82e8..66da39f0c84a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -161,6 +161,10 @@ typedef int (*exe_q_validate)(struct bnx2x *bp,
 			      union bnx2x_qable_obj *o,
 			      struct bnx2x_exeq_elem *elem);
 
+typedef int (*exe_q_remove)(struct bnx2x *bp,
+			    union bnx2x_qable_obj *o,
+			    struct bnx2x_exeq_elem *elem);
+
 /**
  * @return positive is entry was optimized, 0 - if not, negative
  * in case of an error.
@@ -203,11 +207,18 @@ struct bnx2x_exe_queue_obj {
 	 */
 	exe_q_validate		validate;
 
+	/**
+	 * Called before removing pending commands, cleaning allocated
+	 * resources (e.g., credits from validate)
+	 */
+	exe_q_remove		remove;
 
 	/**
 	 * This will try to cancel the current pending commands list
 	 * considering the new command.
 	 *
+	 * Returns the number of optimized commands or a negative error code
+	 *
 	 * Must run under exe_queue->lock
 	 */
 	exe_q_optimize		optimize;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d529af99157d..a1f2e0fed78b 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6667,14 +6667,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		iph = ip_hdr(skb);
 		tcp_opt_len = tcp_optlen(skb);
 
-		if (skb_is_gso_v6(skb)) {
-			hdr_len = skb_headlen(skb) - ETH_HLEN;
-		} else {
-			u32 ip_tcp_len;
-
-			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
-			hdr_len = ip_tcp_len + tcp_opt_len;
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
 
+		if (!skb_is_gso_v6(skb)) {
 			iph->check = 0;
 			iph->tot_len = htons(mss + hdr_len);
 		}
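The tg3 hunk replaces two per-case computations with a single expression: the TCP payload offset into the packet minus the Ethernet header, which equals the IP-plus-TCP header length for both the IPv4 and the GSO-v6 paths. A worked example with invented header sizes, not driver code:

#include <stdio.h>

#define ETH_HLEN 14

int main(void)
{
	int transport_offset = ETH_HLEN + 20;	/* Ethernet + 20-byte IPv4 header */
	int tcp_header_len   = 20 + 12;		/* base TCP header + 12 bytes of options */
	int hdr_len          = transport_offset + tcp_header_len - ETH_HLEN;

	printf("hdr_len = %d\n", hdr_len);	/* 52 = IP (20) + TCP (32) */
	return 0;
}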
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index fe0c29acdbe6..ee93a2087fe6 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -32,7 +32,7 @@
 
 #define DRV_NAME		"enic"
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION		"2.1.1.28"
+#define DRV_VERSION		"2.1.1.31"
 #define DRV_COPYRIGHT		"Copyright 2008-2011 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX		6
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 2fd9db4b1be5..ab3f67f980d8 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -57,11 +57,13 @@
 
 #define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
 #define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
+#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */
 
 /* Supported devices */
 static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
 	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
 	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
+	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
 	{ 0, }	/* end of table */
 };
 
@@ -132,6 +134,11 @@ int enic_sriov_enabled(struct enic *enic)
 	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
 }
 
+static int enic_is_sriov_vf(struct enic *enic)
+{
+	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
+}
+
 int enic_is_valid_vf(struct enic *enic, int vf)
 {
 #ifdef CONFIG_PCI_IOV
@@ -437,7 +444,7 @@ static void enic_mtu_check(struct enic *enic)
 
 	if (mtu && mtu != enic->port_mtu) {
 		enic->port_mtu = mtu;
-		if (enic_is_dynamic(enic)) {
+		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
 			mtu = max_t(int, ENIC_MIN_MTU,
 				    min_t(int, ENIC_MAX_MTU, mtu));
 			if (mtu != netdev->mtu)
@@ -849,7 +856,7 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
 {
 	struct enic *enic = netdev_priv(netdev);
 
-	if (enic_is_dynamic(enic)) {
+	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
 		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
 			return -EADDRNOTAVAIL;
 	} else {
@@ -1608,7 +1615,7 @@ static int enic_open(struct net_device *netdev)
 	for (i = 0; i < enic->rq_count; i++)
 		vnic_rq_enable(&enic->rq[i]);
 
-	if (!enic_is_dynamic(enic))
+	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
 		enic_dev_add_station_addr(enic);
 
 	enic_set_rx_mode(netdev);
@@ -1659,7 +1666,7 @@ static int enic_stop(struct net_device *netdev)
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 
-	if (!enic_is_dynamic(enic))
+	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
 		enic_dev_del_station_addr(enic);
 
 	for (i = 0; i < enic->wq_count; i++) {
@@ -1696,7 +1703,7 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
 	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
 		return -EINVAL;
 
-	if (enic_is_dynamic(enic))
+	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
 		return -EOPNOTSUPP;
 
 	if (running)
@@ -2263,10 +2270,10 @@ static int __devinit enic_probe(struct pci_dev *pdev,
 	int using_dac = 0;
 	unsigned int i;
 	int err;
-	int num_pps = 1;
 #ifdef CONFIG_PCI_IOV
 	int pos = 0;
 #endif
+	int num_pps = 1;
 
 	/* Allocate net device structure and initialize.  Private
 	 * instance data is initialized to zero.
@@ -2376,14 +2383,14 @@ static int __devinit enic_probe(struct pci_dev *pdev,
 			num_pps = enic->num_vfs;
 		}
 	}
-
 #endif
+
 	/* Allocate structure for port profiles */
 	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
 	if (!enic->pp) {
 		pr_err("port profile alloc failed, aborting\n");
 		err = -ENOMEM;
-		goto err_out_disable_sriov;
+		goto err_out_disable_sriov_pp;
 	}
 
 	/* Issue device open to get device in known state
@@ -2392,7 +2399,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
 	err = enic_dev_open(enic);
 	if (err) {
 		dev_err(dev, "vNIC dev open failed, aborting\n");
-		goto err_out_free_pp;
+		goto err_out_disable_sriov;
 	}
 
 	/* Setup devcmd lock
@@ -2426,7 +2433,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
 	 * called later by an upper layer.
 	 */
 
-	if (!enic_is_dynamic(enic)) {
+	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) {
 		err = vnic_dev_init(enic->vdev, 0);
 		if (err) {
 			dev_err(dev, "vNIC dev init failed, aborting\n");
@@ -2460,8 +2467,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
 	(void)enic_change_mtu(netdev, enic->port_mtu);
 
 #ifdef CONFIG_PCI_IOV
-	if (enic_is_dynamic(enic) && pdev->is_virtfn &&
-		is_zero_ether_addr(enic->mac_addr))
+	if (enic_is_sriov_vf(enic) && is_zero_ether_addr(enic->mac_addr))
 		random_ether_addr(enic->mac_addr);
 #endif
 
@@ -2474,7 +2480,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
 	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
 	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
 
-	if (enic_is_dynamic(enic))
+	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
 		netdev->netdev_ops = &enic_netdev_dynamic_ops;
 	else
 		netdev->netdev_ops = &enic_netdev_ops;
@@ -2516,17 +2522,17 @@ err_out_dev_deinit:
 	enic_dev_deinit(enic);
 err_out_dev_close:
 	vnic_dev_close(enic->vdev);
-err_out_free_pp:
-	kfree(enic->pp);
 err_out_disable_sriov:
+	kfree(enic->pp);
+err_out_disable_sriov_pp:
 #ifdef CONFIG_PCI_IOV
 	if (enic_sriov_enabled(enic)) {
 		pci_disable_sriov(pdev);
 		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
 	}
 err_out_vnic_unregister:
-	vnic_dev_unregister(enic->vdev);
 #endif
+	vnic_dev_unregister(enic->vdev);
 err_out_iounmap:
 	enic_iounmap(enic);
 err_out_release_regions:
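All of the enic changes hinge on one predicate: a VF is recognised by its PCI device ID rather than by the old "dynamic vNIC on a virtual function" combination. A minimal sketch of that test, not driver code, with a stand-in pci_dev:

#include <stdio.h>

#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071

struct fake_pdev { unsigned short device; };

static int is_sriov_vf(const struct fake_pdev *pdev)
{
	return pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int main(void)
{
	struct fake_pdev pdev = { .device = PCI_DEVICE_ID_CISCO_VIC_ENET_VF };

	printf("sriov vf: %d\n", is_sriov_vf(&pdev));
	return 0;
}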
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a6bcdb5cd2be..e703d64434f8 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1786,8 +1786,7 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
 static u32 be_num_rxqs_want(struct be_adapter *adapter)
 {
 	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-	    !sriov_enabled(adapter) && be_physfn(adapter) &&
-	    !be_is_mc(adapter)) {
+	    !sriov_enabled(adapter) && be_physfn(adapter)) {
 		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
 	} else {
 		dev_warn(&adapter->pdev->dev,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index fb5579a3b19d..47f85c337cf7 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -25,6 +25,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index a127cb2476c7..bb336a0959c9 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -25,6 +25,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/mii.h>
 #include <linux/module.h>
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index c6e4621b6262..6565c463185c 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2011 Intel Corporation.
+# Copyright(c) 1999 - 2012 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index b8e20f037d0a..08bdc33715ee 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 08a757eb6608..b927d79ab536 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index f5fc5725ea94..aed217449f0d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 4519a1367170..f67cbd3fa307 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 73aac082c44d..f57338afd71f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -151,7 +151,7 @@ void igb_clear_vfta_i350(struct e1000_hw *hw)
  *  Writes value at the given offset in the register array which stores
  *  the VLAN filter table.
  **/
-void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
+static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
 {
 	int i;
 
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index e45996b4ea34..cbddc4e51e30 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 469d95eaa154..5988b8958baf 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index eddb0f83dcea..dbcfa3d5caec 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 40407124e722..fa2c6ba62139 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index a2a7ca9fa733..825b0228cac0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2011 Intel Corporation.
+  Copyright(c) 2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index b17d7c20f817..789de5b83aad 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 8510797b9d81..4c32ac66ff39 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation. 4 Copyright(c) 2007-2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 0a860bc1198e..ccdf36d503fd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation. 4 Copyright(c) 2007-2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 3d12e67eebb4..8e33bdd33eea 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation. 4 Copyright(c) 2007-2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7998bf4d5946..aa399a8a8f0d 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation. 4 Copyright(c) 2007-2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 01e5e89ef959..e91d73c8aa4e 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2011 Intel Corporation. 4 Copyright(c) 2007-2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -68,7 +68,7 @@ char igb_driver_name[] = "igb";
68char igb_driver_version[] = DRV_VERSION; 68char igb_driver_version[] = DRV_VERSION;
69static const char igb_driver_string[] = 69static const char igb_driver_string[] =
70 "Intel(R) Gigabit Ethernet Network Driver"; 70 "Intel(R) Gigabit Ethernet Network Driver";
71static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation."; 71static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";
72 72
73static const struct e1000_info *igb_info_tbl[] = { 73static const struct e1000_info *igb_info_tbl[] = {
74 [board_82575] = &e1000_82575_info, 74 [board_82575] = &e1000_82575_info,
@@ -4003,8 +4003,8 @@ set_itr_now:
4003 } 4003 }
4004} 4004}
4005 4005
4006void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, 4006static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4007 u32 type_tucmd, u32 mss_l4len_idx) 4007 u32 type_tucmd, u32 mss_l4len_idx)
4008{ 4008{
4009 struct e1000_adv_tx_context_desc *context_desc; 4009 struct e1000_adv_tx_context_desc *context_desc;
4010 u16 i = tx_ring->next_to_use; 4010 u16 i = tx_ring->next_to_use;
@@ -5623,7 +5623,7 @@ static irqreturn_t igb_intr(int irq, void *data)
5623 return IRQ_HANDLED; 5623 return IRQ_HANDLED;
5624} 5624}
5625 5625
5626void igb_ring_irq_enable(struct igb_q_vector *q_vector) 5626static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
5627{ 5627{
5628 struct igb_adapter *adapter = q_vector->adapter; 5628 struct igb_adapter *adapter = q_vector->adapter;
5629 struct e1000_hw *hw = &adapter->hw; 5629 struct e1000_hw *hw = &adapter->hw;
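The igb_main.c hunks above, besides the copyright bump, give igb_tx_ctxtdesc() and igb_ring_irq_enable() static linkage, since they evidently have no callers outside igb_main.c. A minimal stand-alone illustration of the idea follows; clamp_itr() is a made-up helper, not an igb function.

#include <stdio.h>

/* Used only inside this file: 'static' gives the helper internal linkage,
 * so it cannot clash with a same-named symbol in another object file and
 * the compiler can warn if it ever becomes unused. */
static unsigned int clamp_itr(unsigned int itr, unsigned int lo, unsigned int hi)
{
        if (itr < lo)
                return lo;
        if (itr > hi)
                return hi;
        return itr;
}

int main(void)
{
        printf("%u\n", clamp_itr(20000, 120, 8000));    /* prints 8000 */
        return 0;
}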
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 7b600a1f6366..2dba53446064 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -468,6 +468,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
468 468
469void igbvf_set_ethtool_ops(struct net_device *netdev) 469void igbvf_set_ethtool_ops(struct net_device *netdev)
470{ 470{
471 /* have to "undeclare" const on this struct to remove warnings */ 471 SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops);
472 SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops);
473} 472}
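The igbvf ethtool hunk above drops the cast, and the comment excusing it, that was only needed while the netdev ethtool_ops pointer could not take a const-qualified table; with the table declared const, SET_ETHTOOL_OPS() is passed its address directly. A minimal sketch of the pattern, assuming a hypothetical my_get_link() handler rather than the driver's real table:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static u32 my_get_link(struct net_device *netdev)
{
        return netif_carrier_ok(netdev);        /* report carrier as link state */
}

/* Read-only after build time, so the table can live in .rodata. */
static const struct ethtool_ops my_ethtool_ops = {
        .get_link = my_get_link,
};

void my_set_ethtool_ops(struct net_device *netdev)
{
        /* No cast: the ops pointer accepts a const table. */
        SET_ETHTOOL_OPS(netdev, &my_ethtool_ops);
}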
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index fd3da3076c2f..a4b20c865759 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1194,11 +1194,6 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1194 struct igbvf_adapter *adapter = netdev_priv(netdev); 1194 struct igbvf_adapter *adapter = netdev_priv(netdev);
1195 struct e1000_hw *hw = &adapter->hw; 1195 struct e1000_hw *hw = &adapter->hw;
1196 1196
1197 igbvf_irq_disable(adapter);
1198
1199 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1200 igbvf_irq_enable(adapter);
1201
1202 if (hw->mac.ops.set_vfta(hw, vid, false)) { 1197 if (hw->mac.ops.set_vfta(hw, vid, false)) {
1203 dev_err(&adapter->pdev->dev, 1198 dev_err(&adapter->pdev->dev,
1204 "Failed to remove vlan id %d\n", vid); 1199 "Failed to remove vlan id %d\n", vid);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 802bfa0f62cc..775602ef90e5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -161,19 +161,19 @@
161 161
162/* Receive DMA Registers */ 162/* Receive DMA Registers */
163#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ 163#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
164 (0x0D000 + ((_i - 64) * 0x40))) 164 (0x0D000 + (((_i) - 64) * 0x40)))
165#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ 165#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
166 (0x0D004 + ((_i - 64) * 0x40))) 166 (0x0D004 + (((_i) - 64) * 0x40)))
167#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ 167#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
168 (0x0D008 + ((_i - 64) * 0x40))) 168 (0x0D008 + (((_i) - 64) * 0x40)))
169#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ 169#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
170 (0x0D010 + ((_i - 64) * 0x40))) 170 (0x0D010 + (((_i) - 64) * 0x40)))
171#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ 171#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
172 (0x0D018 + ((_i - 64) * 0x40))) 172 (0x0D018 + (((_i) - 64) * 0x40)))
173#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ 173#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
174 (0x0D028 + ((_i - 64) * 0x40))) 174 (0x0D028 + (((_i) - 64) * 0x40)))
175#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ 175#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
176 (0x0D02C + ((_i - 64) * 0x40))) 176 (0x0D02C + (((_i) - 64) * 0x40)))
177#define IXGBE_RSCDBU 0x03028 177#define IXGBE_RSCDBU 0x03028
178#define IXGBE_RDDCC 0x02F20 178#define IXGBE_RDDCC 0x02F20
179#define IXGBE_RXMEMWRAP 0x03190 179#define IXGBE_RXMEMWRAP 0x03190
@@ -186,7 +186,7 @@
186 */ 186 */
187#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ 187#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
188 (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ 188 (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
189 (0x0D014 + ((_i - 64) * 0x40)))) 189 (0x0D014 + (((_i) - 64) * 0x40))))
190/* 190/*
191 * Rx DCA Control Register: 191 * Rx DCA Control Register:
192 * 00-15 : 0x02200 + n*4 192 * 00-15 : 0x02200 + n*4
@@ -195,7 +195,7 @@
195 */ 195 */
196#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ 196#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
197 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ 197 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
198 (0x0D00C + ((_i - 64) * 0x40)))) 198 (0x0D00C + (((_i) - 64) * 0x40))))
199#define IXGBE_RDRXCTL 0x02F00 199#define IXGBE_RDRXCTL 0x02F00
200#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) 200#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
201 /* 8 of these 0x03C00 - 0x03C1C */ 201 /* 8 of these 0x03C00 - 0x03C1C */
@@ -344,9 +344,9 @@
344 344
345#define IXGBE_WUPL 0x05900 345#define IXGBE_WUPL 0x05900
346#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ 346#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
347#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */ 347#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
348#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host 348#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host
349 * Filter Table */ 349 * Filter Table */
350 350
351#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 351#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
352#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 352#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
@@ -1485,7 +1485,7 @@ enum {
1485#define IXGBE_LED_BLINK_BASE 0x00000080 1485#define IXGBE_LED_BLINK_BASE 0x00000080
1486#define IXGBE_LED_MODE_MASK_BASE 0x0000000F 1486#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
1487#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) 1487#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
1488#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) 1488#define IXGBE_LED_MODE_SHIFT(_i) (8 * (_i))
1489#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) 1489#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
1490#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) 1490#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
1491#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) 1491#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
@@ -2068,9 +2068,9 @@ enum {
2068 2068
2069/* SR-IOV specific macros */ 2069/* SR-IOV specific macros */
2070#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) 2070#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
2071#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4)) 2071#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
2072#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600)) 2072#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600))
2073#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4)) 2073#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
2074 2074
2075enum ixgbe_fdir_pballoc_type { 2075enum ixgbe_fdir_pballoc_type {
2076 IXGBE_FDIR_PBALLOC_NONE = 0, 2076 IXGBE_FDIR_PBALLOC_NONE = 0,
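The ixgbe_type.h changes are pure macro hygiene: every use of a macro parameter gets its own parentheses so that an expression argument expands with the intended precedence. The IXGBE_FHFT() hunk is the clearest case, since '*' binds tighter than '+'. A stand-alone demonstration (FHFT_OLD/FHFT_NEW are local names mirroring the before/after forms):

#include <stdio.h>

#define FHFT_OLD(_n) (0x09000 + (_n * 0x100))
#define FHFT_NEW(_n) (0x09000 + ((_n) * 0x100))

int main(void)
{
        int i = 2;

        /* With an expression argument, the old form computes
         * 0x09000 + i + (1 * 0x100) instead of 0x09000 + (i + 1) * 0x100. */
        printf("old: 0x%x\n", FHFT_OLD(i + 1));         /* 0x9102 */
        printf("new: 0x%x\n", FHFT_NEW(i + 1));         /* 0x9300 */
        return 0;
}

The RDBAL/RDT/SRRCTL family and the SR-IOV macros get the same treatment for consistency, even where today's call sites only pass plain identifiers.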
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index dc8e6511c640..c85700318147 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -56,7 +56,8 @@ struct ixgbe_stats {
56 offsetof(struct ixgbevf_adapter, m), \ 56 offsetof(struct ixgbevf_adapter, m), \
57 offsetof(struct ixgbevf_adapter, b), \ 57 offsetof(struct ixgbevf_adapter, b), \
58 offsetof(struct ixgbevf_adapter, r) 58 offsetof(struct ixgbevf_adapter, r)
59static struct ixgbe_stats ixgbe_gstrings_stats[] = { 59
60static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
60 {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc, 61 {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
61 stats.saved_reset_vfgprc)}, 62 stats.saved_reset_vfgprc)},
62 {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc, 63 {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
@@ -671,7 +672,7 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
671 return 0; 672 return 0;
672} 673}
673 674
674static struct ethtool_ops ixgbevf_ethtool_ops = { 675static const struct ethtool_ops ixgbevf_ethtool_ops = {
675 .get_settings = ixgbevf_get_settings, 676 .get_settings = ixgbevf_get_settings,
676 .get_drvinfo = ixgbevf_get_drvinfo, 677 .get_drvinfo = ixgbevf_get_drvinfo,
677 .get_regs_len = ixgbevf_get_regs_len, 678 .get_regs_len = ixgbevf_get_regs_len,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index e6c9d1a927a9..9075c1d61039 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -279,12 +279,12 @@ enum ixgbevf_boards {
279 board_X540_vf, 279 board_X540_vf,
280}; 280};
281 281
282extern struct ixgbevf_info ixgbevf_82599_vf_info; 282extern const struct ixgbevf_info ixgbevf_82599_vf_info;
283extern struct ixgbevf_info ixgbevf_X540_vf_info; 283extern const struct ixgbevf_info ixgbevf_X540_vf_info;
284extern struct ixgbe_mbx_operations ixgbevf_mbx_ops; 284extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
285 285
286/* needed by ethtool.c */ 286/* needed by ethtool.c */
287extern char ixgbevf_driver_name[]; 287extern const char ixgbevf_driver_name[];
288extern const char ixgbevf_driver_version[]; 288extern const char ixgbevf_driver_version[];
289 289
290extern int ixgbevf_up(struct ixgbevf_adapter *adapter); 290extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 891162d1610c..bed411bada21 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -53,7 +53,7 @@
53 53
54#include "ixgbevf.h" 54#include "ixgbevf.h"
55 55
56char ixgbevf_driver_name[] = "ixgbevf"; 56const char ixgbevf_driver_name[] = "ixgbevf";
57static const char ixgbevf_driver_string[] = 57static const char ixgbevf_driver_string[] =
58 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 58 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
59 59
@@ -917,31 +917,34 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
917 struct ixgbe_hw *hw = &adapter->hw; 917 struct ixgbe_hw *hw = &adapter->hw;
918 u32 eicr; 918 u32 eicr;
919 u32 msg; 919 u32 msg;
920 bool got_ack = false;
920 921
921 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS); 922 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
922 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr); 923 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
923 924
924 if (!hw->mbx.ops.check_for_ack(hw)) { 925 if (!hw->mbx.ops.check_for_ack(hw))
925 /* 926 got_ack = true;
926 * checking for the ack clears the PFACK bit. Place
927 * it back in the v2p_mailbox cache so that anyone
928 * polling for an ack will not miss it. Also
929 * avoid the read below because the code to read
930 * the mailbox will also clear the ack bit. This was
931 * causing lost acks. Just cache the bit and exit
932 * the IRQ handler.
933 */
934 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
935 goto out;
936 }
937 927
938 /* Not an ack interrupt, go ahead and read the message */ 928 if (!hw->mbx.ops.check_for_msg(hw)) {
939 hw->mbx.ops.read(hw, &msg, 1); 929 hw->mbx.ops.read(hw, &msg, 1);
940 930
941 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) 931 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
942 mod_timer(&adapter->watchdog_timer, 932 mod_timer(&adapter->watchdog_timer,
943 round_jiffies(jiffies + 1)); 933 round_jiffies(jiffies + 1));
944 934
935 if (msg & IXGBE_VT_MSGTYPE_NACK)
936 pr_warn("Last Request of type %2.2x to PF Nacked\n",
937 msg & 0xFF);
938 goto out;
939 }
940
941 /*
942 * checking for the ack clears the PFACK bit. Place
943 * it back in the v2p_mailbox cache so that anyone
944 * polling for an ack will not miss it
945 */
946 if (got_ack)
947 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
945out: 948out:
946 return IRQ_HANDLED; 949 return IRQ_HANDLED;
947} 950}
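The ixgbevf_main.c hunk reorders the MSI-X mailbox handler: the ack is only noted in got_ack, any pending PF message is read and handled first (a control message re-arms the watchdog, a NACK is logged), and the PFACK bit is then put back into the cached v2p_mailbox so a thread polling for the ack does not lose it, since check_for_ack() clears the bit as a side effect. The general shape of that "consume a clear-on-read bit, then republish it for other consumers" pattern is sketched below with made-up names; it is not the driver's code.

#include <stdbool.h>
#include <stdio.h>

#define STATUS_ACK 0x1u
#define STATUS_MSG 0x2u

struct mbx {
        unsigned int hw_status;         /* pretend clear-on-read register */
        unsigned int cached;            /* software cache that pollers check */
};

/* Reading the status clears it, like check_for_ack()/check_for_msg(). */
static unsigned int read_clear(struct mbx *m)
{
        unsigned int v = m->hw_status;

        m->hw_status = 0;
        return v;
}

static void irq_handler(struct mbx *m)
{
        unsigned int status = read_clear(m);
        bool got_ack = status & STATUS_ACK;

        if (status & STATUS_MSG)
                printf("handle PF message first\n");

        /* Republish the ack so a poller that never saw the clear-on-read
         * register still finds it in the cache. */
        if (got_ack)
                m->cached |= STATUS_ACK;
}

int main(void)
{
        struct mbx m = { .hw_status = STATUS_ACK | STATUS_MSG };

        irq_handler(&m);
        printf("cached ack: %u\n", m.cached & STATUS_ACK);      /* prints 1 */
        return 0;
}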
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 930fa83f2568..13532d9ba72d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -26,6 +26,7 @@
26*******************************************************************************/ 26*******************************************************************************/
27 27
28#include "mbx.h" 28#include "mbx.h"
29#include "ixgbevf.h"
29 30
30/** 31/**
31 * ixgbevf_poll_for_msg - Wait for message notification 32 * ixgbevf_poll_for_msg - Wait for message notification
@@ -328,7 +329,7 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
328 return 0; 329 return 0;
329} 330}
330 331
331struct ixgbe_mbx_operations ixgbevf_mbx_ops = { 332const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
332 .init_params = ixgbevf_init_mbx_params_vf, 333 .init_params = ixgbevf_init_mbx_params_vf,
333 .read = ixgbevf_read_mbx_vf, 334 .read = ixgbevf_read_mbx_vf,
334 .write = ixgbevf_write_mbx_vf, 335 .write = ixgbevf_write_mbx_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 21533e300367..d0138d7a31a1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -26,6 +26,7 @@
26*******************************************************************************/ 26*******************************************************************************/
27 27
28#include "vf.h" 28#include "vf.h"
29#include "ixgbevf.h"
29 30
30/** 31/**
31 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx 32 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
@@ -401,7 +402,7 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
401 return 0; 402 return 0;
402} 403}
403 404
404static struct ixgbe_mac_operations ixgbevf_mac_ops = { 405static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
405 .init_hw = ixgbevf_init_hw_vf, 406 .init_hw = ixgbevf_init_hw_vf,
406 .reset_hw = ixgbevf_reset_hw_vf, 407 .reset_hw = ixgbevf_reset_hw_vf,
407 .start_hw = ixgbevf_start_hw_vf, 408 .start_hw = ixgbevf_start_hw_vf,
@@ -415,12 +416,12 @@ static struct ixgbe_mac_operations ixgbevf_mac_ops = {
415 .set_vfta = ixgbevf_set_vfta_vf, 416 .set_vfta = ixgbevf_set_vfta_vf,
416}; 417};
417 418
418struct ixgbevf_info ixgbevf_82599_vf_info = { 419const struct ixgbevf_info ixgbevf_82599_vf_info = {
419 .mac = ixgbe_mac_82599_vf, 420 .mac = ixgbe_mac_82599_vf,
420 .mac_ops = &ixgbevf_mac_ops, 421 .mac_ops = &ixgbevf_mac_ops,
421}; 422};
422 423
423struct ixgbevf_info ixgbevf_X540_vf_info = { 424const struct ixgbevf_info ixgbevf_X540_vf_info = {
424 .mac = ixgbe_mac_X540_vf, 425 .mac = ixgbe_mac_X540_vf,
425 .mac_ops = &ixgbevf_mac_ops, 426 .mac_ops = &ixgbevf_mac_ops,
426}; 427};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 10306b492ee6..d556619a9212 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -167,7 +167,7 @@ struct ixgbevf_hw_stats {
167 167
168struct ixgbevf_info { 168struct ixgbevf_info {
169 enum ixgbe_mac_type mac; 169 enum ixgbe_mac_type mac;
170 struct ixgbe_mac_operations *mac_ops; 170 const struct ixgbe_mac_operations *mac_ops;
171}; 171};
172 172
173#endif /* __IXGBE_VF_H__ */ 173#endif /* __IXGBE_VF_H__ */
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 9c049d2cb97d..9edecfa1f0f4 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -136,6 +136,8 @@ static char mv643xx_eth_driver_version[] = "1.4";
136#define INT_MASK 0x0068 136#define INT_MASK 0x0068
137#define INT_MASK_EXT 0x006c 137#define INT_MASK_EXT 0x006c
138#define TX_FIFO_URGENT_THRESHOLD 0x0074 138#define TX_FIFO_URGENT_THRESHOLD 0x0074
139#define RX_DISCARD_FRAME_CNT 0x0084
140#define RX_OVERRUN_FRAME_CNT 0x0088
139#define TXQ_FIX_PRIO_CONF_MOVED 0x00dc 141#define TXQ_FIX_PRIO_CONF_MOVED 0x00dc
140#define TX_BW_RATE_MOVED 0x00e0 142#define TX_BW_RATE_MOVED 0x00e0
141#define TX_BW_MTU_MOVED 0x00e8 143#define TX_BW_MTU_MOVED 0x00e8
@@ -334,6 +336,9 @@ struct mib_counters {
334 u32 bad_crc_event; 336 u32 bad_crc_event;
335 u32 collision; 337 u32 collision;
336 u32 late_collision; 338 u32 late_collision;
339 /* Non MIB hardware counters */
340 u32 rx_discard;
341 u32 rx_overrun;
337}; 342};
338 343
339struct lro_counters { 344struct lro_counters {
@@ -1225,6 +1230,10 @@ static void mib_counters_clear(struct mv643xx_eth_private *mp)
1225 1230
1226 for (i = 0; i < 0x80; i += 4) 1231 for (i = 0; i < 0x80; i += 4)
1227 mib_read(mp, i); 1232 mib_read(mp, i);
1233
1234 /* Clear non MIB hw counters also */
1235 rdlp(mp, RX_DISCARD_FRAME_CNT);
1236 rdlp(mp, RX_OVERRUN_FRAME_CNT);
1228} 1237}
1229 1238
1230static void mib_counters_update(struct mv643xx_eth_private *mp) 1239static void mib_counters_update(struct mv643xx_eth_private *mp)
@@ -1262,6 +1271,9 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
1262 p->bad_crc_event += mib_read(mp, 0x74); 1271 p->bad_crc_event += mib_read(mp, 0x74);
1263 p->collision += mib_read(mp, 0x78); 1272 p->collision += mib_read(mp, 0x78);
1264 p->late_collision += mib_read(mp, 0x7c); 1273 p->late_collision += mib_read(mp, 0x7c);
1274 /* Non MIB hardware counters */
1275 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
1276 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
1265 spin_unlock_bh(&mp->mib_counters_lock); 1277 spin_unlock_bh(&mp->mib_counters_lock);
1266 1278
1267 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); 1279 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
@@ -1413,6 +1425,8 @@ static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
1413 MIBSTAT(bad_crc_event), 1425 MIBSTAT(bad_crc_event),
1414 MIBSTAT(collision), 1426 MIBSTAT(collision),
1415 MIBSTAT(late_collision), 1427 MIBSTAT(late_collision),
1428 MIBSTAT(rx_discard),
1429 MIBSTAT(rx_overrun),
1416 LROSTAT(lro_aggregated), 1430 LROSTAT(lro_aggregated),
1417 LROSTAT(lro_flushed), 1431 LROSTAT(lro_flushed),
1418 LROSTAT(lro_no_desc), 1432 LROSTAT(lro_no_desc),
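The mv643xx_eth change exports two non-MIB, clear-on-read hardware counters (RX_DISCARD_FRAME_CNT and RX_OVERRUN_FRAME_CNT): they are read once in mib_counters_clear() to discard stale values, accumulated into software totals under mib_counters_lock on every periodic update, and exposed through the existing MIBSTAT() ethtool table. A reduced stand-alone model of the accumulate-on-read idea (the simulated counter and names are illustrative):

#include <stdio.h>

/* Simulated clear-on-read hardware counter, like RX_DISCARD_FRAME_CNT. */
static unsigned int hw_rx_discard = 17;

static unsigned int read_hw_counter(void)
{
        unsigned int v = hw_rx_discard;

        hw_rx_discard = 0;              /* hardware clears the count on read */
        return v;
}

struct sw_stats {
        unsigned long rx_discard;       /* monotonically growing SW total */
};

static void counters_update(struct sw_stats *s)
{
        /* Each periodic pass folds the delta into the running total;
         * in the driver this happens under mib_counters_lock. */
        s->rx_discard += read_hw_counter();
}

int main(void)
{
        struct sw_stats s = { 0 };

        counters_update(&s);
        hw_rx_discard = 5;              /* more frames discarded later on */
        counters_update(&s);
        printf("rx_discard = %lu\n", s.rx_discard);     /* prints 22 */
        return 0;
}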
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 18a87a57fc0a..edb9bda55d55 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
931} 931}
932 932
933/* Allocate and setup a new buffer for receiving */ 933/* Allocate and setup a new buffer for receiving */
934static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, 934static int skge_rx_setup(struct pci_dev *pdev,
935 struct sk_buff *skb, unsigned int bufsize) 935 struct skge_element *e,
936 struct sk_buff *skb, unsigned int bufsize)
936{ 937{
937 struct skge_rx_desc *rd = e->desc; 938 struct skge_rx_desc *rd = e->desc;
938 u64 map; 939 dma_addr_t map;
939 940
940 map = pci_map_single(skge->hw->pdev, skb->data, bufsize, 941 map = pci_map_single(pdev, skb->data, bufsize,
941 PCI_DMA_FROMDEVICE); 942 PCI_DMA_FROMDEVICE);
943 if (pci_dma_mapping_error(pdev, map))
944 goto mapping_error;
942 945
943 rd->dma_lo = map; 946 rd->dma_lo = lower_32_bits(map);
944 rd->dma_hi = map >> 32; 947 rd->dma_hi = upper_32_bits(map);
945 e->skb = skb; 948 e->skb = skb;
946 rd->csum1_start = ETH_HLEN; 949 rd->csum1_start = ETH_HLEN;
947 rd->csum2_start = ETH_HLEN; 950 rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,13 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
953 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; 956 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
954 dma_unmap_addr_set(e, mapaddr, map); 957 dma_unmap_addr_set(e, mapaddr, map);
955 dma_unmap_len_set(e, maplen, bufsize); 958 dma_unmap_len_set(e, maplen, bufsize);
959 return 0;
960
961mapping_error:
962 if (net_ratelimit())
963 dev_warn(&pdev->dev, "%s: rx mapping error\n",
964 skb->dev->name);
965 return -EIO;
956} 966}
957 967
958/* Resume receiving using existing skb, 968/* Resume receiving using existing skb,
@@ -1014,7 +1024,11 @@ static int skge_rx_fill(struct net_device *dev)
1014 return -ENOMEM; 1024 return -ENOMEM;
1015 1025
1016 skb_reserve(skb, NET_IP_ALIGN); 1026 skb_reserve(skb, NET_IP_ALIGN);
1017 skge_rx_setup(skge, e, skb, skge->rx_buf_size); 1027 if (skge_rx_setup(skge->hw->pdev, e, skb, skge->rx_buf_size)) {
1028 kfree_skb(skb);
1029 return -ENOMEM;
1030 }
1031
1018 } while ((e = e->next) != ring->start); 1032 } while ((e = e->next) != ring->start);
1019 1033
1020 ring->to_clean = ring->start; 1034 ring->to_clean = ring->start;
@@ -2576,6 +2590,7 @@ static int skge_up(struct net_device *dev)
2576 } 2590 }
2577 2591
2578 /* Initialize MAC */ 2592 /* Initialize MAC */
2593 netif_carrier_off(dev);
2579 spin_lock_bh(&hw->phy_lock); 2594 spin_lock_bh(&hw->phy_lock);
2580 if (is_genesis(hw)) 2595 if (is_genesis(hw))
2581 genesis_mac_init(hw, port); 2596 genesis_mac_init(hw, port);
@@ -2728,7 +2743,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2728 struct skge_tx_desc *td; 2743 struct skge_tx_desc *td;
2729 int i; 2744 int i;
2730 u32 control, len; 2745 u32 control, len;
2731 u64 map; 2746 dma_addr_t map;
2732 2747
2733 if (skb_padto(skb, ETH_ZLEN)) 2748 if (skb_padto(skb, ETH_ZLEN))
2734 return NETDEV_TX_OK; 2749 return NETDEV_TX_OK;
@@ -2742,11 +2757,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2742 e->skb = skb; 2757 e->skb = skb;
2743 len = skb_headlen(skb); 2758 len = skb_headlen(skb);
2744 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); 2759 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
2760 if (pci_dma_mapping_error(hw->pdev, map))
2761 goto mapping_error;
2762
2745 dma_unmap_addr_set(e, mapaddr, map); 2763 dma_unmap_addr_set(e, mapaddr, map);
2746 dma_unmap_len_set(e, maplen, len); 2764 dma_unmap_len_set(e, maplen, len);
2747 2765
2748 td->dma_lo = map; 2766 td->dma_lo = lower_32_bits(map);
2749 td->dma_hi = map >> 32; 2767 td->dma_hi = upper_32_bits(map);
2750 2768
2751 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2769 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2752 const int offset = skb_checksum_start_offset(skb); 2770 const int offset = skb_checksum_start_offset(skb);
@@ -2777,14 +2795,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2777 2795
2778 map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, 2796 map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
2779 skb_frag_size(frag), DMA_TO_DEVICE); 2797 skb_frag_size(frag), DMA_TO_DEVICE);
2798 if (dma_mapping_error(&hw->pdev->dev, map))
2799 goto mapping_unwind;
2780 2800
2781 e = e->next; 2801 e = e->next;
2782 e->skb = skb; 2802 e->skb = skb;
2783 tf = e->desc; 2803 tf = e->desc;
2784 BUG_ON(tf->control & BMU_OWN); 2804 BUG_ON(tf->control & BMU_OWN);
2785 2805
2786 tf->dma_lo = map; 2806 tf->dma_lo = lower_32_bits(map);
2787 tf->dma_hi = (u64) map >> 32; 2807 tf->dma_hi = upper_32_bits(map);
2788 dma_unmap_addr_set(e, mapaddr, map); 2808 dma_unmap_addr_set(e, mapaddr, map);
2789 dma_unmap_len_set(e, maplen, skb_frag_size(frag)); 2809 dma_unmap_len_set(e, maplen, skb_frag_size(frag));
2790 2810
@@ -2797,6 +2817,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2797 td->control = BMU_OWN | BMU_SW | BMU_STF | control | len; 2817 td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
2798 wmb(); 2818 wmb();
2799 2819
2820 netdev_sent_queue(dev, skb->len);
2821
2800 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); 2822 skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
2801 2823
2802 netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev, 2824 netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
@@ -2812,15 +2834,35 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2812 } 2834 }
2813 2835
2814 return NETDEV_TX_OK; 2836 return NETDEV_TX_OK;
2837
2838mapping_unwind:
2839 /* unroll any pages that were already mapped. */
2840 if (e != skge->tx_ring.to_use) {
2841 struct skge_element *u;
2842
2843 for (u = skge->tx_ring.to_use->next; u != e; u = u->next)
2844 pci_unmap_page(hw->pdev, dma_unmap_addr(u, mapaddr),
2845 dma_unmap_len(u, maplen),
2846 PCI_DMA_TODEVICE);
2847 e = skge->tx_ring.to_use;
2848 }
2849 /* undo the mapping for the skb header */
2850 pci_unmap_single(hw->pdev, dma_unmap_addr(e, mapaddr),
2851 dma_unmap_len(e, maplen),
2852 PCI_DMA_TODEVICE);
2853mapping_error:
2854 /* mapping error causes error message and packet to be discarded. */
2855 if (net_ratelimit())
2856 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
2857 dev_kfree_skb(skb);
2858 return NETDEV_TX_OK;
2815} 2859}
2816 2860
2817 2861
2818/* Free resources associated with this reing element */ 2862/* Free resources associated with this reing element */
2819static void skge_tx_free(struct skge_port *skge, struct skge_element *e, 2863static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
2820 u32 control) 2864 u32 control)
2821{ 2865{
2822 struct pci_dev *pdev = skge->hw->pdev;
2823
2824 /* skb header vs. fragment */ 2866 /* skb header vs. fragment */
2825 if (control & BMU_STF) 2867 if (control & BMU_STF)
2826 pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr), 2868 pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
@@ -2830,13 +2872,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
2830 pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr), 2872 pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
2831 dma_unmap_len(e, maplen), 2873 dma_unmap_len(e, maplen),
2832 PCI_DMA_TODEVICE); 2874 PCI_DMA_TODEVICE);
2833
2834 if (control & BMU_EOF) {
2835 netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
2836 "tx done slot %td\n", e - skge->tx_ring.start);
2837
2838 dev_kfree_skb(e->skb);
2839 }
2840} 2875}
2841 2876
2842/* Free all buffers in transmit ring */ 2877/* Free all buffers in transmit ring */
@@ -2847,10 +2882,15 @@ static void skge_tx_clean(struct net_device *dev)
2847 2882
2848 for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { 2883 for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
2849 struct skge_tx_desc *td = e->desc; 2884 struct skge_tx_desc *td = e->desc;
2850 skge_tx_free(skge, e, td->control); 2885
2886 skge_tx_unmap(skge->hw->pdev, e, td->control);
2887
2888 if (td->control & BMU_EOF)
2889 dev_kfree_skb(e->skb);
2851 td->control = 0; 2890 td->control = 0;
2852 } 2891 }
2853 2892
2893 netdev_reset_queue(dev);
2854 skge->tx_ring.to_clean = e; 2894 skge->tx_ring.to_clean = e;
2855} 2895}
2856 2896
@@ -3059,13 +3099,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3059 if (!nskb) 3099 if (!nskb)
3060 goto resubmit; 3100 goto resubmit;
3061 3101
3102 if (unlikely(skge_rx_setup(skge->hw->pdev, e, nskb, skge->rx_buf_size))) {
3103 dev_kfree_skb(nskb);
3104 goto resubmit;
3105 }
3106
3062 pci_unmap_single(skge->hw->pdev, 3107 pci_unmap_single(skge->hw->pdev,
3063 dma_unmap_addr(e, mapaddr), 3108 dma_unmap_addr(e, mapaddr),
3064 dma_unmap_len(e, maplen), 3109 dma_unmap_len(e, maplen),
3065 PCI_DMA_FROMDEVICE); 3110 PCI_DMA_FROMDEVICE);
3066 skb = e->skb; 3111 skb = e->skb;
3067 prefetch(skb->data); 3112 prefetch(skb->data);
3068 skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
3069 } 3113 }
3070 3114
3071 skb_put(skb, len); 3115 skb_put(skb, len);
@@ -3111,6 +3155,7 @@ static void skge_tx_done(struct net_device *dev)
3111 struct skge_port *skge = netdev_priv(dev); 3155 struct skge_port *skge = netdev_priv(dev);
3112 struct skge_ring *ring = &skge->tx_ring; 3156 struct skge_ring *ring = &skge->tx_ring;
3113 struct skge_element *e; 3157 struct skge_element *e;
3158 unsigned int bytes_compl = 0, pkts_compl = 0;
3114 3159
3115 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); 3160 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
3116 3161
@@ -3120,8 +3165,20 @@ static void skge_tx_done(struct net_device *dev)
3120 if (control & BMU_OWN) 3165 if (control & BMU_OWN)
3121 break; 3166 break;
3122 3167
3123 skge_tx_free(skge, e, control); 3168 skge_tx_unmap(skge->hw->pdev, e, control);
3169
3170 if (control & BMU_EOF) {
3171 netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
3172 "tx done slot %td\n",
3173 e - skge->tx_ring.start);
3174
3175 pkts_compl++;
3176 bytes_compl += e->skb->len;
3177
3178 dev_kfree_skb(e->skb);
3179 }
3124 } 3180 }
3181 netdev_completed_queue(dev, pkts_compl, bytes_compl);
3125 skge->tx_ring.to_clean = e; 3182 skge->tx_ring.to_clean = e;
3126 3183
3127 /* Can run lockless until we need to synchronize to restart queue. */ 3184 /* Can run lockless until we need to synchronize to restart queue. */
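The skge.c diff bundles several independent fixes: DMA mappings from pci_map_single()/skb_frag_dma_map() are now checked with pci_dma_mapping_error()/dma_mapping_error() and unwound on failure instead of being handed to the chip, the address is carried in dma_addr_t and split with lower_32_bits()/upper_32_bits() rather than u64 shifts, the carrier is forced off before MAC init, and byte queue limits are wired up via netdev_sent_queue(), netdev_completed_queue() and netdev_reset_queue(). A condensed sketch of the transmit-side shape follows; struct my_port, my_post_desc() and my_kick_hw() are placeholders, and the fragment-unwind path of the real patch is omitted.

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

struct my_port {
        struct pci_dev *pdev;
};

static void my_post_desc(struct my_port *p, u32 lo, u32 hi, unsigned int len);
static void my_kick_hw(struct my_port *p);      /* bodies omitted in this sketch */

static netdev_tx_t my_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
        struct my_port *p = netdev_priv(dev);
        unsigned int len = skb_headlen(skb);
        dma_addr_t map;

        map = pci_map_single(p->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(p->pdev, map))
                goto drop;                      /* never post an unmapped buffer */

        /* Split the bus address without open-coded u64 casts and shifts. */
        my_post_desc(p, lower_32_bits(map), upper_32_bits(map), len);

        netdev_sent_queue(dev, skb->len);       /* BQL: bytes handed to hardware */
        my_kick_hw(p);
        return NETDEV_TX_OK;

drop:
        if (net_ratelimit())
                dev_warn(&p->pdev->dev, "tx mapping error\n");
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

/* Completion side: report what actually finished, as skge_tx_done() now does;
 * skge_tx_clean() pairs this with netdev_reset_queue(). */
static void my_tx_done(struct net_device *dev, unsigned int pkts, unsigned int bytes)
{
        netdev_completed_queue(dev, pkts, bytes);
}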
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 978f593094c0..405e6ac3faf6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1247,6 +1247,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1247 u32 reply; 1247 u32 reply;
1248 u32 slave_status = 0; 1248 u32 slave_status = 0;
1249 u8 is_going_down = 0; 1249 u8 is_going_down = 0;
1250 int i;
1250 1251
1251 slave_state[slave].comm_toggle ^= 1; 1252 slave_state[slave].comm_toggle ^= 1;
1252 reply = (u32) slave_state[slave].comm_toggle << 31; 1253 reply = (u32) slave_state[slave].comm_toggle << 31;
@@ -1258,6 +1259,10 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1258 if (cmd == MLX4_COMM_CMD_RESET) { 1259 if (cmd == MLX4_COMM_CMD_RESET) {
1259 mlx4_warn(dev, "Received reset from slave:%d\n", slave); 1260 mlx4_warn(dev, "Received reset from slave:%d\n", slave);
1260 slave_state[slave].active = false; 1261 slave_state[slave].active = false;
1262 for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
1263 slave_state[slave].event_eq[i].eqn = -1;
1264 slave_state[slave].event_eq[i].token = 0;
1265 }
1261 /*check if we are in the middle of FLR process, 1266 /*check if we are in the middle of FLR process,
1262 if so return "retry" status to the slave*/ 1267 if so return "retry" status to the slave*/
1263 if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { 1268 if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
@@ -1452,7 +1457,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
1452{ 1457{
1453 struct mlx4_priv *priv = mlx4_priv(dev); 1458 struct mlx4_priv *priv = mlx4_priv(dev);
1454 struct mlx4_slave_state *s_state; 1459 struct mlx4_slave_state *s_state;
1455 int i, err, port; 1460 int i, j, err, port;
1456 1461
1457 priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, 1462 priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
1458 &priv->mfunc.vhcr_dma, 1463 &priv->mfunc.vhcr_dma,
@@ -1485,6 +1490,8 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
1485 for (i = 0; i < dev->num_slaves; ++i) { 1490 for (i = 0; i < dev->num_slaves; ++i) {
1486 s_state = &priv->mfunc.master.slave_state[i]; 1491 s_state = &priv->mfunc.master.slave_state[i];
1487 s_state->last_cmd = MLX4_COMM_CMD_RESET; 1492 s_state->last_cmd = MLX4_COMM_CMD_RESET;
1493 for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
1494 s_state->event_eq[j].eqn = -1;
1488 __raw_writel((__force u32) 0, 1495 __raw_writel((__force u32) 0,
1489 &priv->mfunc.comm[i].slave_write); 1496 &priv->mfunc.comm[i].slave_write);
1490 __raw_writel((__force u32) 0, 1497 __raw_writel((__force u32) 0,
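The cmd.c hunks go with the mlx4.h and eq.c changes further down: per-slave event EQ bookkeeping becomes an array indexed by event type, and every entry is reset to an "unmapped" sentinel (eqn = -1, token = 0) both in mlx4_multi_func_init() and whenever a slave issues MLX4_COMM_CMD_RESET, so a re-initializing slave cannot inherit stale mappings. A minimal model of the sentinel reset, with made-up names (the kernel constant is MLX4_EVENT_TYPES_NUM; 64 below is only a placeholder):

#include <stdio.h>

#define EVENT_TYPES_NUM 64              /* placeholder for MLX4_EVENT_TYPES_NUM */

struct event_eq_info {
        int eqn;                        /* -1 means "no EQ mapped for this event" */
        unsigned short token;
};

static void reset_slave_events(struct event_eq_info eq[EVENT_TYPES_NUM])
{
        int i;

        for (i = 0; i < EVENT_TYPES_NUM; ++i) {
                eq[i].eqn = -1;
                eq[i].token = 0;
        }
}

int main(void)
{
        struct event_eq_info eq[EVENT_TYPES_NUM];

        reset_slave_events(eq);
        printf("event 0 mapped: %s\n", eq[0].eqn >= 0 ? "yes" : "no");  /* no */
        return 0;
}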
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 475f9d6af955..7e64033d7de3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -96,7 +96,7 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
96static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 96static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
97 int cq_num) 97 int cq_num)
98{ 98{
99 return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0, 99 return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
100 MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A, 100 MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
101 MLX4_CMD_WRAPPED); 101 MLX4_CMD_WRAPPED);
102} 102}
@@ -111,7 +111,7 @@ static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
111static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 111static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
112 int cq_num) 112 int cq_num)
113{ 113{
114 return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0, 114 return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
115 cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ, 115 cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
116 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 116 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
117} 117}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 7dbc6a230779..70346fd7f9c4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -183,10 +183,11 @@ static int mlx4_en_set_wol(struct net_device *netdev,
183static int mlx4_en_get_sset_count(struct net_device *dev, int sset) 183static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
184{ 184{
185 struct mlx4_en_priv *priv = netdev_priv(dev); 185 struct mlx4_en_priv *priv = netdev_priv(dev);
186 int bit_count = hweight64(priv->stats_bitmap);
186 187
187 switch (sset) { 188 switch (sset) {
188 case ETH_SS_STATS: 189 case ETH_SS_STATS:
189 return NUM_ALL_STATS + 190 return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
190 (priv->tx_ring_num + priv->rx_ring_num) * 2; 191 (priv->tx_ring_num + priv->rx_ring_num) * 2;
191 case ETH_SS_TEST: 192 case ETH_SS_TEST:
192 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags 193 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
@@ -201,14 +202,34 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
201{ 202{
202 struct mlx4_en_priv *priv = netdev_priv(dev); 203 struct mlx4_en_priv *priv = netdev_priv(dev);
203 int index = 0; 204 int index = 0;
204 int i; 205 int i, j = 0;
205 206
206 spin_lock_bh(&priv->stats_lock); 207 spin_lock_bh(&priv->stats_lock);
207 208
208 for (i = 0; i < NUM_MAIN_STATS; i++) 209 if (!(priv->stats_bitmap)) {
209 data[index++] = ((unsigned long *) &priv->stats)[i]; 210 for (i = 0; i < NUM_MAIN_STATS; i++)
210 for (i = 0; i < NUM_PORT_STATS; i++) 211 data[index++] =
211 data[index++] = ((unsigned long *) &priv->port_stats)[i]; 212 ((unsigned long *) &priv->stats)[i];
213 for (i = 0; i < NUM_PORT_STATS; i++)
214 data[index++] =
215 ((unsigned long *) &priv->port_stats)[i];
216 for (i = 0; i < NUM_PKT_STATS; i++)
217 data[index++] =
218 ((unsigned long *) &priv->pkstats)[i];
219 } else {
220 for (i = 0; i < NUM_MAIN_STATS; i++) {
221 if ((priv->stats_bitmap >> j) & 1)
222 data[index++] =
223 ((unsigned long *) &priv->stats)[i];
224 j++;
225 }
226 for (i = 0; i < NUM_PORT_STATS; i++) {
227 if ((priv->stats_bitmap >> j) & 1)
228 data[index++] =
229 ((unsigned long *) &priv->port_stats)[i];
230 j++;
231 }
232 }
212 for (i = 0; i < priv->tx_ring_num; i++) { 233 for (i = 0; i < priv->tx_ring_num; i++) {
213 data[index++] = priv->tx_ring[i].packets; 234 data[index++] = priv->tx_ring[i].packets;
214 data[index++] = priv->tx_ring[i].bytes; 235 data[index++] = priv->tx_ring[i].bytes;
@@ -217,8 +238,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
217 data[index++] = priv->rx_ring[i].packets; 238 data[index++] = priv->rx_ring[i].packets;
218 data[index++] = priv->rx_ring[i].bytes; 239 data[index++] = priv->rx_ring[i].bytes;
219 } 240 }
220 for (i = 0; i < NUM_PKT_STATS; i++)
221 data[index++] = ((unsigned long *) &priv->pkstats)[i];
222 spin_unlock_bh(&priv->stats_lock); 241 spin_unlock_bh(&priv->stats_lock);
223 242
224} 243}
@@ -247,11 +266,29 @@ static void mlx4_en_get_strings(struct net_device *dev,
247 266
248 case ETH_SS_STATS: 267 case ETH_SS_STATS:
249 /* Add main counters */ 268 /* Add main counters */
250 for (i = 0; i < NUM_MAIN_STATS; i++) 269 if (!priv->stats_bitmap) {
251 strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]); 270 for (i = 0; i < NUM_MAIN_STATS; i++)
252 for (i = 0; i< NUM_PORT_STATS; i++) 271 strcpy(data + (index++) * ETH_GSTRING_LEN,
253 strcpy(data + (index++) * ETH_GSTRING_LEN, 272 main_strings[i]);
254 main_strings[i + NUM_MAIN_STATS]); 273 for (i = 0; i < NUM_PORT_STATS; i++)
274 strcpy(data + (index++) * ETH_GSTRING_LEN,
275 main_strings[i +
276 NUM_MAIN_STATS]);
277 for (i = 0; i < NUM_PKT_STATS; i++)
278 strcpy(data + (index++) * ETH_GSTRING_LEN,
279 main_strings[i +
280 NUM_MAIN_STATS +
281 NUM_PORT_STATS]);
282 } else
283 for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) {
284 if ((priv->stats_bitmap >> i) & 1) {
285 strcpy(data +
286 (index++) * ETH_GSTRING_LEN,
287 main_strings[i]);
288 }
289 if (!(priv->stats_bitmap >> i))
290 break;
291 }
255 for (i = 0; i < priv->tx_ring_num; i++) { 292 for (i = 0; i < priv->tx_ring_num; i++) {
256 sprintf(data + (index++) * ETH_GSTRING_LEN, 293 sprintf(data + (index++) * ETH_GSTRING_LEN,
257 "tx%d_packets", i); 294 "tx%d_packets", i);
@@ -264,9 +301,6 @@ static void mlx4_en_get_strings(struct net_device *dev,
264 sprintf(data + (index++) * ETH_GSTRING_LEN, 301 sprintf(data + (index++) * ETH_GSTRING_LEN,
265 "rx%d_bytes", i); 302 "rx%d_bytes", i);
266 } 303 }
267 for (i = 0; i< NUM_PKT_STATS; i++)
268 strcpy(data + (index++) * ETH_GSTRING_LEN,
269 main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
270 break; 304 break;
271 } 305 }
272} 306}
@@ -479,6 +513,95 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
479 param->tx_pending = priv->tx_ring[0].size; 513 param->tx_pending = priv->tx_ring[0].size;
480} 514}
481 515
516static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
517{
518 struct mlx4_en_priv *priv = netdev_priv(dev);
519
520 return priv->rx_ring_num;
521}
522
523static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
524{
525 struct mlx4_en_priv *priv = netdev_priv(dev);
526 struct mlx4_en_rss_map *rss_map = &priv->rss_map;
527 int rss_rings;
528 size_t n = priv->rx_ring_num;
529 int err = 0;
530
531 rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
532
533 while (n--) {
534 ring_index[n] = rss_map->qps[n % rss_rings].qpn -
535 rss_map->base_qpn;
536 }
537
538 return err;
539}
540
541static int mlx4_en_set_rxfh_indir(struct net_device *dev,
542 const u32 *ring_index)
543{
544 struct mlx4_en_priv *priv = netdev_priv(dev);
545 struct mlx4_en_dev *mdev = priv->mdev;
546 int port_up = 0;
547 int err = 0;
548 int i;
549 int rss_rings = 0;
550
551 /* Calculate RSS table size and make sure flows are spread evenly
552 * between rings
553 */
554 for (i = 0; i < priv->rx_ring_num; i++) {
555 if (i > 0 && !ring_index[i] && !rss_rings)
556 rss_rings = i;
557
558 if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
559 return -EINVAL;
560 }
561
562 if (!rss_rings)
563 rss_rings = priv->rx_ring_num;
564
565 /* RSS table size must be an order of 2 */
566 if (!is_power_of_2(rss_rings))
567 return -EINVAL;
568
569 mutex_lock(&mdev->state_lock);
570 if (priv->port_up) {
571 port_up = 1;
572 mlx4_en_stop_port(dev);
573 }
574
575 priv->prof->rss_rings = rss_rings;
576
577 if (port_up) {
578 err = mlx4_en_start_port(dev);
579 if (err)
580 en_err(priv, "Failed starting port\n");
581 }
582
583 mutex_unlock(&mdev->state_lock);
584 return err;
585}
586
587static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
588 u32 *rule_locs)
589{
590 struct mlx4_en_priv *priv = netdev_priv(dev);
591 int err = 0;
592
593 switch (cmd->cmd) {
594 case ETHTOOL_GRXRINGS:
595 cmd->data = priv->rx_ring_num;
596 break;
597 default:
598 err = -EOPNOTSUPP;
599 break;
600 }
601
602 return err;
603}
604
482const struct ethtool_ops mlx4_en_ethtool_ops = { 605const struct ethtool_ops mlx4_en_ethtool_ops = {
483 .get_drvinfo = mlx4_en_get_drvinfo, 606 .get_drvinfo = mlx4_en_get_drvinfo,
484 .get_settings = mlx4_en_get_settings, 607 .get_settings = mlx4_en_get_settings,
@@ -498,6 +621,10 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
498 .set_pauseparam = mlx4_en_set_pauseparam, 621 .set_pauseparam = mlx4_en_set_pauseparam,
499 .get_ringparam = mlx4_en_get_ringparam, 622 .get_ringparam = mlx4_en_get_ringparam,
500 .set_ringparam = mlx4_en_set_ringparam, 623 .set_ringparam = mlx4_en_set_ringparam,
624 .get_rxnfc = mlx4_en_get_rxnfc,
625 .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
626 .get_rxfh_indir = mlx4_en_get_rxfh_indir,
627 .set_rxfh_indir = mlx4_en_set_rxfh_indir,
501}; 628};
502 629
503 630
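Besides filtering get_sset_count()/get_ethtool_stats()/get_strings() through the new stats_bitmap, the en_ethtool.c additions wire up ETHTOOL_GRXRINGS and the RSS indirection table hooks: get_rxfh_indir_size() reports rx_ring_num, get_rxfh_indir() reconstructs the table from the RSS QP numbers, and set_rxfh_indir() only accepts a table of the form index[i] == i % rss_rings with a power-of-two number of active rings, because the hardware is programmed with ilog2(rss_rings) (see the en_rx.c hunk below). A stand-alone sketch of that validation rule, using plain arrays instead of the ethtool structures:

#include <stdbool.h>
#include <stdio.h>

static bool is_pow2(unsigned int x)
{
        return x && !(x & (x - 1));
}

/* Returns the accepted ring count, or 0 if the table is not an even
 * "i % rss_rings" spread over a power-of-two number of rings. */
static unsigned int validate_indir(const unsigned int *tbl, unsigned int n)
{
        unsigned int i, rss_rings = 0;

        for (i = 0; i < n; i++) {
                if (i > 0 && !tbl[i] && !rss_rings)
                        rss_rings = i;          /* first wrap-around of the pattern */
                if (tbl[i] != i % (rss_rings ? rss_rings : n))
                        return 0;
        }
        if (!rss_rings)
                rss_rings = n;
        return is_pow2(rss_rings) ? rss_rings : 0;
}

int main(void)
{
        unsigned int ok[8]  = { 0, 1, 2, 3, 0, 1, 2, 3 };       /* 4 rings */
        unsigned int bad[8] = { 0, 1, 2, 0, 1, 2, 0, 1 };       /* 3 rings */

        printf("%u %u\n", validate_indir(ok, 8), validate_indir(bad, 8));  /* 4 0 */
        return 0;
}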
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index a06096fcc0b8..2097a7d3c5b8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -62,10 +62,6 @@ static const char mlx4_en_version[] =
62 * Device scope module parameters 62 * Device scope module parameters
63 */ 63 */
64 64
65
66/* Enable RSS TCP traffic */
67MLX4_EN_PARM_INT(tcp_rss, 1,
68 "Enable RSS for incomming TCP traffic or disabled (0)");
69/* Enable RSS UDP traffic */ 65/* Enable RSS UDP traffic */
70MLX4_EN_PARM_INT(udp_rss, 1, 66MLX4_EN_PARM_INT(udp_rss, 1,
71 "Enable RSS for incomming UDP traffic or disabled (0)"); 67 "Enable RSS for incomming UDP traffic or disabled (0)");
@@ -104,7 +100,6 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
104 struct mlx4_en_profile *params = &mdev->profile; 100 struct mlx4_en_profile *params = &mdev->profile;
105 int i; 101 int i;
106 102
107 params->tcp_rss = tcp_rss;
108 params->udp_rss = udp_rss; 103 params->udp_rss = udp_rss;
109 if (params->udp_rss && !(mdev->dev->caps.flags 104 if (params->udp_rss && !(mdev->dev->caps.flags
110 & MLX4_DEV_CAP_FLAG_UDP_RSS)) { 105 & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
@@ -120,6 +115,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
120 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; 115 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
121 params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS + 116 params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
122 (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS; 117 (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
118 params->prof[i].rss_rings = 0;
123 } 119 }
124 120
125 return 0; 121 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 72fa807b69ce..467ae5824875 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -702,6 +702,8 @@ int mlx4_en_start_port(struct net_device *dev)
702 /* Schedule multicast task to populate multicast list */ 702 /* Schedule multicast task to populate multicast list */
703 queue_work(mdev->workqueue, &priv->mcast_task); 703 queue_work(mdev->workqueue, &priv->mcast_task);
704 704
705 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
706
705 priv->port_up = true; 707 priv->port_up = true;
706 netif_tx_start_all_queues(dev); 708 netif_tx_start_all_queues(dev);
707 return 0; 709 return 0;
@@ -807,38 +809,50 @@ static void mlx4_en_restart(struct work_struct *work)
807 mutex_unlock(&mdev->state_lock); 809 mutex_unlock(&mdev->state_lock);
808} 810}
809 811
810 812static void mlx4_en_clear_stats(struct net_device *dev)
811static int mlx4_en_open(struct net_device *dev)
812{ 813{
813 struct mlx4_en_priv *priv = netdev_priv(dev); 814 struct mlx4_en_priv *priv = netdev_priv(dev);
814 struct mlx4_en_dev *mdev = priv->mdev; 815 struct mlx4_en_dev *mdev = priv->mdev;
815 int i; 816 int i;
816 int err = 0;
817
818 mutex_lock(&mdev->state_lock);
819
820 if (!mdev->device_up) {
821 en_err(priv, "Cannot open - device down/disabled\n");
822 err = -EBUSY;
823 goto out;
824 }
825 817
826 /* Reset HW statistics and performance counters */
827 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 818 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
828 en_dbg(HW, priv, "Failed dumping statistics\n"); 819 en_dbg(HW, priv, "Failed dumping statistics\n");
829 820
830 memset(&priv->stats, 0, sizeof(priv->stats)); 821 memset(&priv->stats, 0, sizeof(priv->stats));
831 memset(&priv->pstats, 0, sizeof(priv->pstats)); 822 memset(&priv->pstats, 0, sizeof(priv->pstats));
823 memset(&priv->pkstats, 0, sizeof(priv->pkstats));
824 memset(&priv->port_stats, 0, sizeof(priv->port_stats));
832 825
833 for (i = 0; i < priv->tx_ring_num; i++) { 826 for (i = 0; i < priv->tx_ring_num; i++) {
834 priv->tx_ring[i].bytes = 0; 827 priv->tx_ring[i].bytes = 0;
835 priv->tx_ring[i].packets = 0; 828 priv->tx_ring[i].packets = 0;
829 priv->tx_ring[i].tx_csum = 0;
836 } 830 }
837 for (i = 0; i < priv->rx_ring_num; i++) { 831 for (i = 0; i < priv->rx_ring_num; i++) {
838 priv->rx_ring[i].bytes = 0; 832 priv->rx_ring[i].bytes = 0;
839 priv->rx_ring[i].packets = 0; 833 priv->rx_ring[i].packets = 0;
834 priv->rx_ring[i].csum_ok = 0;
835 priv->rx_ring[i].csum_none = 0;
836 }
837}
838
839static int mlx4_en_open(struct net_device *dev)
840{
841 struct mlx4_en_priv *priv = netdev_priv(dev);
842 struct mlx4_en_dev *mdev = priv->mdev;
843 int err = 0;
844
845 mutex_lock(&mdev->state_lock);
846
847 if (!mdev->device_up) {
848 en_err(priv, "Cannot open - device down/disabled\n");
849 err = -EBUSY;
850 goto out;
840 } 851 }
841 852
853 /* Reset HW statistics and SW counters */
854 mlx4_en_clear_stats(dev);
855
842 err = mlx4_en_start_port(dev); 856 err = mlx4_en_start_port(dev);
843 if (err) 857 if (err)
844 en_err(priv, "Failed starting port:%d\n", priv->port); 858 en_err(priv, "Failed starting port:%d\n", priv->port);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index e8d6ad2dce0a..971d4b6b8dfe 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -853,6 +853,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
853 struct mlx4_en_rss_map *rss_map = &priv->rss_map; 853 struct mlx4_en_rss_map *rss_map = &priv->rss_map;
854 struct mlx4_qp_context context; 854 struct mlx4_qp_context context;
855 struct mlx4_rss_context *rss_context; 855 struct mlx4_rss_context *rss_context;
856 int rss_rings;
856 void *ptr; 857 void *ptr;
857 u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 | 858 u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
858 MLX4_RSS_TCP_IPV6); 859 MLX4_RSS_TCP_IPV6);
@@ -893,10 +894,15 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
893 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, 894 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
894 priv->rx_ring[0].cqn, &context); 895 priv->rx_ring[0].cqn, &context);
895 896
897 if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
898 rss_rings = priv->rx_ring_num;
899 else
900 rss_rings = priv->prof->rss_rings;
901
896 ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path) 902 ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
897 + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH; 903 + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
898 rss_context = ptr; 904 rss_context = ptr;
899 rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 | 905 rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
900 (rss_map->base_qpn)); 906 (rss_map->base_qpn));
901 rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn); 907 rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
902 if (priv->mdev->profile.udp_rss) { 908 if (priv->mdev->profile.udp_rss) {
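The en_rx.c hunk lets the profile's rss_rings (0 meaning "use all RX rings") override rx_ring_num when building the RSS QP context; the value is encoded as an exponent, ilog2(rss_rings) shifted into the base_qpn word, which is why only power-of-two ring counts are accepted by the ethtool path above. A tiny illustration of the encoding, with a local ilog2u() standing in for the kernel's ilog2() and the cpu_to_be32() conversion left out:

#include <stdio.h>

static unsigned int ilog2u(unsigned int x)
{
        unsigned int r = 0;

        while (x >>= 1)
                r++;
        return r;                       /* floor(log2(x)) for x >= 1 */
}

int main(void)
{
        unsigned int rss_rings = 4, base_qpn = 0x200;
        unsigned int field = (ilog2u(rss_rings) << 24) | base_qpn;

        printf("0x%08x\n", field);      /* 0x02000200: exponent 2, base QPN 0x200 */
        return 0;
}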
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 1e9b55eb7217..55d7bd4e210a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -513,25 +513,22 @@ int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
513{ 513{
514 struct mlx4_priv *priv = mlx4_priv(dev); 514 struct mlx4_priv *priv = mlx4_priv(dev);
515 struct mlx4_slave_event_eq_info *event_eq = 515 struct mlx4_slave_event_eq_info *event_eq =
516 &priv->mfunc.master.slave_state[slave].event_eq; 516 priv->mfunc.master.slave_state[slave].event_eq;
517 u32 in_modifier = vhcr->in_modifier; 517 u32 in_modifier = vhcr->in_modifier;
518 u32 eqn = in_modifier & 0x1FF; 518 u32 eqn = in_modifier & 0x1FF;
519 u64 in_param = vhcr->in_param; 519 u64 in_param = vhcr->in_param;
520 int err = 0; 520 int err = 0;
521 int i;
521 522
522 if (slave == dev->caps.function) 523 if (slave == dev->caps.function)
523 err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn, 524 err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
524 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, 525 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
525 MLX4_CMD_NATIVE); 526 MLX4_CMD_NATIVE);
526 if (!err) { 527 if (!err)
527 if (in_modifier >> 31) { 528 for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
528 /* unmap */ 529 if (in_param & (1LL << i))
529 event_eq->event_type &= ~in_param; 530 event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;
530 } else { 531
531 event_eq->eqn = eqn;
532 event_eq->event_type = in_param;
533 }
534 }
535 return err; 532 return err;
536} 533}
537 534
@@ -546,7 +543,7 @@ static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
546static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 543static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
547 int eq_num) 544 int eq_num)
548{ 545{
549 return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0, 546 return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
550 MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A, 547 MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
551 MLX4_CMD_WRAPPED); 548 MLX4_CMD_WRAPPED);
552} 549}
@@ -554,7 +551,7 @@ static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
554static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 551static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
555 int eq_num) 552 int eq_num)
556{ 553{
557 return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num, 554 return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
558 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A, 555 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
559 MLX4_CMD_WRAPPED); 556 MLX4_CMD_WRAPPED);
560} 557}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index a424a19280cc..8a21e10952ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -158,7 +158,6 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
158 158
159#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 159#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
160#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 160#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
161#define QUERY_FUNC_CAP_FUNCTION_OFFSET 0x3
162#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 161#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
163#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 162#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
164#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 163#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
@@ -182,9 +181,6 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
182 field = 1 << 7; /* enable only ethernet interface */ 181 field = 1 << 7; /* enable only ethernet interface */
183 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 182 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
184 183
185 field = slave;
186 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FUNCTION_OFFSET);
187
188 field = dev->caps.num_ports; 184 field = dev->caps.num_ports;
189 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); 185 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
190 186
@@ -249,9 +245,6 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
249 goto out; 245 goto out;
250 } 246 }
251 247
252 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FUNCTION_OFFSET);
253 func_cap->function = field;
254
255 MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); 248 MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
256 func_cap->num_ports = field; 249 func_cap->num_ports = field;
257 250
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 119e0cc9fab3..e1a5fa56bcbc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -119,7 +119,6 @@ struct mlx4_dev_cap {
119}; 119};
120 120
121struct mlx4_func_cap { 121struct mlx4_func_cap {
122 u8 function;
123 u8 num_ports; 122 u8 num_ports;
124 u8 flags; 123 u8 flags;
125 u32 pf_context_behaviour; 124 u32 pf_context_behaviour;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 6bb62c580e2d..678558b502fc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -108,7 +108,7 @@ static struct mlx4_profile default_profile = {
108 .num_cq = 1 << 16, 108 .num_cq = 1 << 16,
109 .num_mcg = 1 << 13, 109 .num_mcg = 1 << 13,
110 .num_mpt = 1 << 19, 110 .num_mpt = 1 << 19,
111 .num_mtt = 1 << 20, 111 .num_mtt = 1 << 20, /* It is really num mtt segements */
112}; 112};
113 113
114static int log_num_mac = 7; 114static int log_num_mac = 7;
@@ -471,7 +471,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
471 return -ENOSYS; 471 return -ENOSYS;
472 } 472 }
473 473
474 dev->caps.function = func_cap.function;
475 dev->caps.num_ports = func_cap.num_ports; 474 dev->caps.num_ports = func_cap.num_ports;
476 dev->caps.num_qps = func_cap.qp_quota; 475 dev->caps.num_qps = func_cap.qp_quota;
477 dev->caps.num_srqs = func_cap.srq_quota; 476 dev->caps.num_srqs = func_cap.srq_quota;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index a80121a2b519..c92269f8c057 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -388,9 +388,8 @@ struct mlx4_slave_eqe {
388}; 388};
389 389
390struct mlx4_slave_event_eq_info { 390struct mlx4_slave_event_eq_info {
391 u32 eqn; 391 int eqn;
392 u16 token; 392 u16 token;
393 u64 event_type;
394}; 393};
395 394
396struct mlx4_profile { 395struct mlx4_profile {
@@ -449,6 +448,8 @@ struct mlx4_steer_index {
449 struct list_head duplicates; 448 struct list_head duplicates;
450}; 449};
451 450
451#define MLX4_EVENT_TYPES_NUM 64
452
452struct mlx4_slave_state { 453struct mlx4_slave_state {
453 u8 comm_toggle; 454 u8 comm_toggle;
454 u8 last_cmd; 455 u8 last_cmd;
@@ -461,7 +462,8 @@ struct mlx4_slave_state {
461 struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES]; 462 struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
462 struct list_head mcast_filters[MLX4_MAX_PORTS + 1]; 463 struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
463 struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1]; 464 struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
464 struct mlx4_slave_event_eq_info event_eq; 465 /* event type to eq number lookup */
466 struct mlx4_slave_event_eq_info event_eq[MLX4_EVENT_TYPES_NUM];
465 u16 eq_pi; 467 u16 eq_pi;
466 u16 eq_ci; 468 u16 eq_ci;
467 spinlock_t lock; 469 spinlock_t lock;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f2a8e65f5f88..35f08840813c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -325,11 +325,11 @@ struct mlx4_en_port_profile {
325 u8 rx_ppp; 325 u8 rx_ppp;
326 u8 tx_pause; 326 u8 tx_pause;
327 u8 tx_ppp; 327 u8 tx_ppp;
328 int rss_rings;
328}; 329};
329 330
330struct mlx4_en_profile { 331struct mlx4_en_profile {
331 int rss_xor; 332 int rss_xor;
332 int tcp_rss;
333 int udp_rss; 333 int udp_rss;
334 u8 rss_mask; 334 u8 rss_mask;
335 u32 active_ports; 335 u32 active_ports;
@@ -476,6 +476,7 @@ struct mlx4_en_priv {
476 struct mlx4_en_perf_stats pstats; 476 struct mlx4_en_perf_stats pstats;
477 struct mlx4_en_pkt_stats pkstats; 477 struct mlx4_en_pkt_stats pkstats;
478 struct mlx4_en_port_stats port_stats; 478 struct mlx4_en_port_stats port_stats;
479 u64 stats_bitmap;
479 char *mc_addrs; 480 char *mc_addrs;
480 int mc_addrs_cnt; 481 int mc_addrs_cnt;
481 struct mlx4_en_stat_out_mbox hw_stats; 482 struct mlx4_en_stat_out_mbox hw_stats;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 01df5567e16e..8deeef98280c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -291,7 +291,7 @@ static u32 key_to_hw_index(u32 key)
291static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 291static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
292 int mpt_index) 292 int mpt_index)
293{ 293{
294 return mlx4_cmd(dev, mailbox->dma | dev->caps.function , mpt_index, 294 return mlx4_cmd(dev, mailbox->dma, mpt_index,
295 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B, 295 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
296 MLX4_CMD_WRAPPED); 296 MLX4_CMD_WRAPPED);
297} 297}
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 5c9a54df17ab..db4746d0dca7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -52,8 +52,7 @@ int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
52 *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap); 52 *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
53 if (*pdn == -1) 53 if (*pdn == -1)
54 return -ENOMEM; 54 return -ENOMEM;
55 if (mlx4_is_mfunc(dev)) 55
56 *pdn |= (dev->caps.function + 1) << NOT_MASKED_PD_BITS;
57 return 0; 56 return 0;
58} 57}
59EXPORT_SYMBOL_GPL(mlx4_pd_alloc); 58EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 88b52e547524..f44ae555bf43 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -44,6 +44,11 @@
44#define MLX4_VLAN_VALID (1u << 31) 44#define MLX4_VLAN_VALID (1u << 31)
45#define MLX4_VLAN_MASK 0xfff 45#define MLX4_VLAN_MASK 0xfff
46 46
47#define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
48#define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
49#define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
50#define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
51
47void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) 52void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
48{ 53{
49 int i; 54 int i;
@@ -898,6 +903,24 @@ int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
898 struct mlx4_cmd_mailbox *outbox, 903 struct mlx4_cmd_mailbox *outbox,
899 struct mlx4_cmd_info *cmd) 904 struct mlx4_cmd_info *cmd)
900{ 905{
906 if (slave != dev->caps.function)
907 return 0;
901 return mlx4_common_dump_eth_stats(dev, slave, 908 return mlx4_common_dump_eth_stats(dev, slave,
902 vhcr->in_modifier, outbox); 909 vhcr->in_modifier, outbox);
903} 910}
911
912void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
913{
914 if (!mlx4_is_mfunc(dev)) {
915 *stats_bitmap = 0;
916 return;
917 }
918
919 *stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
920 MLX4_STATS_TRAFFIC_DROPS_MASK |
921 MLX4_STATS_PORT_COUNTERS_MASK);
922
923 if (mlx4_is_master(dev))
924 *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
925}
926EXPORT_SYMBOL(mlx4_set_stats_bitmap);
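The new mlx4_set_stats_bitmap() above composes which counter groups a function is allowed to report: multi-function devices get traffic, drop and port counters, the master additionally gets error counters, and single-function devices get an empty mask. A short sketch of that composition, with the mask values copied from the hunk and everything else simplified:

#include <stdint.h>
#include <stdio.h>

#define STATS_TRAFFIC_COUNTERS_MASK 0xfULL
#define STATS_TRAFFIC_DROPS_MASK    0xc0ULL
#define STATS_ERROR_COUNTERS_MASK   0x1ffc30ULL
#define STATS_PORT_COUNTERS_MASK    0x1fe00000ULL

static uint64_t set_stats_bitmap(int is_mfunc, int is_master)
{
    uint64_t bitmap;

    if (!is_mfunc)
        return 0;                   /* single-function: no mask, as in the hunk */

    bitmap = STATS_TRAFFIC_COUNTERS_MASK |
             STATS_TRAFFIC_DROPS_MASK |
             STATS_PORT_COUNTERS_MASK;

    if (is_master)
        bitmap |= STATS_ERROR_COUNTERS_MASK;   /* master also sees error counters */

    return bitmap;
}

int main(void)
{
    printf("slave bitmap:  0x%llx\n", (unsigned long long)set_stats_bitmap(1, 0));
    printf("master bitmap: 0x%llx\n", (unsigned long long)set_stats_bitmap(1, 1));
    return 0;
}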
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 66f91ca7a7c6..1129677daa62 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -110,7 +110,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
110 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); 110 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
111 profile[MLX4_RES_DMPT].num = request->num_mpt; 111 profile[MLX4_RES_DMPT].num = request->num_mpt;
112 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 112 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
113 profile[MLX4_RES_MTT].num = request->num_mtt; 113 profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg);
114 profile[MLX4_RES_MCG].num = request->num_mcg; 114 profile[MLX4_RES_MCG].num = request->num_mcg;
115 115
116 for (i = 0; i < MLX4_RES_NUM; ++i) { 116 for (i = 0; i < MLX4_RES_NUM; ++i) {
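The profile.c change above keeps the requested num_mtt in segments and converts it to MTT entries by multiplying with 2^log_mtts_per_seg. A tiny sketch of that arithmetic (the log_mtts_per_seg value below is only an assumed module-parameter default for illustration):

#include <stdio.h>

int main(void)
{
    unsigned int num_mtt_segments = 1 << 20;   /* default_profile.num_mtt */
    int log_mtts_per_seg = 3;                  /* module parameter, assumed value */
    unsigned long long num_mtt_entries =
        (unsigned long long)num_mtt_segments * (1 << log_mtts_per_seg);

    printf("%u segments -> %llu MTT entries\n", num_mtt_segments, num_mtt_entries);
    return 0;
}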
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 6b03ac8b9002..738f950a1ce5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -162,7 +162,7 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
162 ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn = 162 ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
163 cpu_to_be32(qp->qpn); 163 cpu_to_be32(qp->qpn);
164 164
165 ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function, 165 ret = mlx4_cmd(dev, mailbox->dma,
166 qp->qpn | (!!sqd_event << 31), 166 qp->qpn | (!!sqd_event << 31),
167 new_state == MLX4_QP_STATE_RST ? 2 : 0, 167 new_state == MLX4_QP_STATE_RST ? 2 : 0,
168 op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native); 168 op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index ed20751a057d..dcd819bfb2f0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1561,11 +1561,6 @@ static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1561 return be32_to_cpu(mpt->mtt_sz); 1561 return be32_to_cpu(mpt->mtt_sz);
1562} 1562}
1563 1563
1564static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
1565{
1566 return be32_to_cpu(mpt->pd_flags) & 0xffffff;
1567}
1568
1569static int qp_get_mtt_addr(struct mlx4_qp_context *qpc) 1564static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1570{ 1565{
1571 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; 1566 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
@@ -1602,16 +1597,6 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1602 return total_pages; 1597 return total_pages;
1603} 1598}
1604 1599
1605static int qp_get_pdn(struct mlx4_qp_context *qpc)
1606{
1607 return be32_to_cpu(qpc->pd) & 0xffffff;
1608}
1609
1610static int pdn2slave(int pdn)
1611{
1612 return (pdn >> NOT_MASKED_PD_BITS) - 1;
1613}
1614
1615static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, 1600static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1616 int size, struct res_mtt *mtt) 1601 int size, struct res_mtt *mtt)
1617{ 1602{
@@ -1656,11 +1641,6 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1656 mpt->mtt = mtt; 1641 mpt->mtt = mtt;
1657 } 1642 }
1658 1643
1659 if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
1660 err = -EPERM;
1661 goto ex_put;
1662 }
1663
1664 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 1644 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1665 if (err) 1645 if (err)
1666 goto ex_put; 1646 goto ex_put;
@@ -1792,11 +1772,6 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1792 if (err) 1772 if (err)
1793 goto ex_put_mtt; 1773 goto ex_put_mtt;
1794 1774
1795 if (pdn2slave(qp_get_pdn(qpc)) != slave) {
1796 err = -EPERM;
1797 goto ex_put_mtt;
1798 }
1799
1800 err = get_res(dev, slave, rcqn, RES_CQ, &rcq); 1775 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
1801 if (err) 1776 if (err)
1802 goto ex_put_mtt; 1777 goto ex_put_mtt;
@@ -2048,10 +2023,10 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2048 if (!priv->mfunc.master.slave_state) 2023 if (!priv->mfunc.master.slave_state)
2049 return -EINVAL; 2024 return -EINVAL;
2050 2025
2051 event_eq = &priv->mfunc.master.slave_state[slave].event_eq; 2026 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2052 2027
2053 /* Create the event only if the slave is registered */ 2028 /* Create the event only if the slave is registered */
2054 if ((event_eq->event_type & (1 << eqe->type)) == 0) 2029 if (event_eq->eqn < 0)
2055 return 0; 2030 return 0;
2056 2031
2057 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); 2032 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
@@ -2289,11 +2264,6 @@ ex_put:
2289 return err; 2264 return err;
2290} 2265}
2291 2266
2292static int srq_get_pdn(struct mlx4_srq_context *srqc)
2293{
2294 return be32_to_cpu(srqc->pd) & 0xffffff;
2295}
2296
2297static int srq_get_mtt_size(struct mlx4_srq_context *srqc) 2267static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2298{ 2268{
2299 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf; 2269 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
@@ -2333,11 +2303,6 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2333 if (err) 2303 if (err)
2334 goto ex_put_mtt; 2304 goto ex_put_mtt;
2335 2305
2336 if (pdn2slave(srq_get_pdn(srqc)) != slave) {
2337 err = -EPERM;
2338 goto ex_put_mtt;
2339 }
2340
2341 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 2306 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2342 if (err) 2307 if (err)
2343 goto ex_put_mtt; 2308 goto ex_put_mtt;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 2823fffc6383..feda6c00829f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -67,7 +67,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
67static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 67static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
68 int srq_num) 68 int srq_num)
69{ 69{
70 return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0, 70 return mlx4_cmd(dev, mailbox->dma, srq_num, 0,
71 MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A, 71 MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
72 MLX4_CMD_WRAPPED); 72 MLX4_CMD_WRAPPED);
73} 73}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 964e9c0948bc..3ead111111e1 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1745,6 +1745,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
1745 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; 1745 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1746 int err; 1746 int err;
1747 1747
1748 /* Ensure we have a valid MAC */
1749 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1750 pr_err("Error: Invalid MAC address\n");
1751 return -EINVAL;
1752 }
1753
1748 /* hardware has been reset, we need to reload some things */ 1754 /* hardware has been reset, we need to reload some things */
1749 pch_gbe_set_multi(netdev); 1755 pch_gbe_set_multi(netdev);
1750 1756
@@ -2468,9 +2474,14 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2468 2474
2469 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 2475 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2470 if (!is_valid_ether_addr(netdev->dev_addr)) { 2476 if (!is_valid_ether_addr(netdev->dev_addr)) {
2471 dev_err(&pdev->dev, "Invalid MAC Address\n"); 2477 /*
2472 ret = -EIO; 2478 * If the MAC is invalid (or just missing), display a warning
2473 goto err_free_adapter; 2479 * but do not abort setting up the device. pch_gbe_up will
2480 * prevent the interface from being brought up until a valid MAC
2481 * is set.
2482 */
2483 dev_err(&pdev->dev, "Invalid MAC address, "
2484 "interface disabled.\n");
2474 } 2485 }
2475 setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog, 2486 setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
2476 (unsigned long)adapter); 2487 (unsigned long)adapter);
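The pch_gbe hunks above split MAC validation in two: probe only warns when the address is invalid and keeps the device registered, while pch_gbe_up() refuses to start until a valid address has been provided. A standalone sketch of that probe/open split, with the validity check and error codes simplified:

#include <stdio.h>
#include <string.h>

struct fake_adapter {
    unsigned char mac[6];
};

static int mac_is_valid(const unsigned char *mac)
{
    static const unsigned char zero[6];
    /* the driver uses is_valid_ether_addr(): not all-zero, not multicast */
    return memcmp(mac, zero, 6) != 0 && !(mac[0] & 1);
}

static int fake_probe(struct fake_adapter *a)
{
    if (!mac_is_valid(a->mac))
        fprintf(stderr, "probe: invalid MAC address, interface disabled\n");
    return 0;                       /* keep the device registered anyway */
}

static int fake_up(struct fake_adapter *a)
{
    if (!mac_is_valid(a->mac)) {
        fprintf(stderr, "up: refusing to start without a valid MAC\n");
        return -1;                  /* -EINVAL in the driver */
    }
    printf("up: interface started\n");
    return 0;
}

int main(void)
{
    struct fake_adapter a = { {0} };

    fake_probe(&a);
    fake_up(&a);                    /* fails until a MAC is set */
    memcpy(a.mac, (unsigned char[]){0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, 6);
    fake_up(&a);                    /* succeeds */
    return 0;
}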
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index da4a1042523a..73195329aa46 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -154,7 +154,7 @@ int stmmac_mdio_register(struct net_device *ndev)
154 else 154 else
155 irqlist = priv->mii_irq; 155 irqlist = priv->mii_irq;
156 156
157 new_bus->name = "STMMAC MII Bus"; 157 new_bus->name = "stmmac";
158 new_bus->read = &stmmac_mdio_read; 158 new_bus->read = &stmmac_mdio_read;
159 new_bus->write = &stmmac_mdio_write; 159 new_bus->write = &stmmac_mdio_write;
160 new_bus->reset = &stmmac_mdio_reset; 160 new_bus->reset = &stmmac_mdio_reset;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 54a819a36487..c796de9eed72 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -170,9 +170,9 @@ static int stmmac_pci_resume(struct pci_dev *pdev)
170#define STMMAC_DEVICE_ID 0x1108 170#define STMMAC_DEVICE_ID 0x1108
171 171
172static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = { 172static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
173 { 173 {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)},
174 PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, { 174 {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)},
175 } 175 {}
176}; 176};
177 177
178MODULE_DEVICE_TABLE(pci, stmmac_id_table); 178MODULE_DEVICE_TABLE(pci, stmmac_id_table);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 462d05f05e84..1a1ca6cfc74a 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -68,11 +68,11 @@ static void do_set_multicast(struct work_struct *w)
68 68
69 nvdev = hv_get_drvdata(ndevctx->device_ctx); 69 nvdev = hv_get_drvdata(ndevctx->device_ctx);
70 if (nvdev == NULL) 70 if (nvdev == NULL)
71 return; 71 goto out;
72 72
73 rdev = nvdev->extension; 73 rdev = nvdev->extension;
74 if (rdev == NULL) 74 if (rdev == NULL)
75 return; 75 goto out;
76 76
77 if (net->flags & IFF_PROMISC) 77 if (net->flags & IFF_PROMISC)
78 rndis_filter_set_packet_filter(rdev, 78 rndis_filter_set_packet_filter(rdev,
@@ -83,6 +83,7 @@ static void do_set_multicast(struct work_struct *w)
83 NDIS_PACKET_TYPE_ALL_MULTICAST | 83 NDIS_PACKET_TYPE_ALL_MULTICAST |
84 NDIS_PACKET_TYPE_DIRECTED); 84 NDIS_PACKET_TYPE_DIRECTED);
85 85
86out:
86 kfree(w); 87 kfree(w);
87} 88}
88 89
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index f2f820c4b40a..9ea99217f116 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -173,6 +173,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
173 skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN); 173 skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
174 if (!skb) 174 if (!skb)
175 return RX_HANDLER_CONSUMED; 175 return RX_HANDLER_CONSUMED;
176 eth = eth_hdr(skb);
176 src = macvlan_hash_lookup(port, eth->h_source); 177 src = macvlan_hash_lookup(port, eth->h_source);
177 if (!src) 178 if (!src)
178 /* frame comes from an external address */ 179 /* frame comes from an external address */
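The macvlan fix above re-reads eth_hdr(skb) after ip_check_defrag(), because defragmentation may copy or move the skb data and leave any previously computed header pointer stale. A minimal sketch of the same pitfall in plain C, with realloc standing in for the defrag helper:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct frame {
    unsigned char *data;
    size_t len;
};

static unsigned char *eth_header(struct frame *f)
{
    return f->data;                 /* header sits at the start of the buffer */
}

static void maybe_reassemble(struct frame *f)
{
    /* may move the data, as ip_check_defrag() may do with the skb */
    unsigned char *p = realloc(f->data, f->len + 1500);

    if (p)
        f->data = p;
}

int main(void)
{
    struct frame f = { malloc(64), 64 };
    unsigned char *eth;

    if (!f.data)
        return 1;
    memset(f.data, 0xab, 64);
    eth = eth_header(&f);           /* cached before the call ... */
    maybe_reassemble(&f);
    eth = eth_header(&f);           /* ... must be re-read afterwards */
    printf("source byte: %02x\n", eth[6]);
    free(f.data);
    return 0;
}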
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 88cc5db9affd..8985cc62cf41 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -38,12 +38,11 @@
38 38
39/** 39/**
40 * mdiobus_alloc_size - allocate a mii_bus structure 40 * mdiobus_alloc_size - allocate a mii_bus structure
41 * @size: extra amount of memory to allocate for private storage.
42 * If non-zero, then bus->priv is points to that memory.
41 * 43 *
42 * Description: called by a bus driver to allocate an mii_bus 44 * Description: called by a bus driver to allocate an mii_bus
43 * structure to fill in. 45 * structure to fill in.
44 *
45 * 'size' is an an extra amount of memory to allocate for private storage.
46 * If non-zero, then bus->priv is points to that memory.
47 */ 46 */
48struct mii_bus *mdiobus_alloc_size(size_t size) 47struct mii_bus *mdiobus_alloc_size(size_t size)
49{ 48{
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index ed2a862b835d..6b678f38e5ce 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -92,9 +92,9 @@ struct team_option *__team_find_option(struct team *team, const char *opt_name)
92 return NULL; 92 return NULL;
93} 93}
94 94
95int team_options_register(struct team *team, 95int __team_options_register(struct team *team,
96 const struct team_option *option, 96 const struct team_option *option,
97 size_t option_count) 97 size_t option_count)
98{ 98{
99 int i; 99 int i;
100 struct team_option **dst_opts; 100 struct team_option **dst_opts;
@@ -116,8 +116,11 @@ int team_options_register(struct team *team,
116 } 116 }
117 } 117 }
118 118
119 for (i = 0; i < option_count; i++) 119 for (i = 0; i < option_count; i++) {
120 dst_opts[i]->changed = true;
121 dst_opts[i]->removed = false;
120 list_add_tail(&dst_opts[i]->list, &team->option_list); 122 list_add_tail(&dst_opts[i]->list, &team->option_list);
123 }
121 124
122 kfree(dst_opts); 125 kfree(dst_opts);
123 return 0; 126 return 0;
@@ -130,10 +133,22 @@ rollback:
130 return err; 133 return err;
131} 134}
132 135
133EXPORT_SYMBOL(team_options_register); 136static void __team_options_mark_removed(struct team *team,
137 const struct team_option *option,
138 size_t option_count)
139{
140 int i;
141
142 for (i = 0; i < option_count; i++, option++) {
143 struct team_option *del_opt;
134 144
135static void __team_options_change_check(struct team *team, 145 del_opt = __team_find_option(team, option->name);
136 struct team_option *changed_option); 146 if (del_opt) {
147 del_opt->changed = true;
148 del_opt->removed = true;
149 }
150 }
151}
137 152
138static void __team_options_unregister(struct team *team, 153static void __team_options_unregister(struct team *team,
139 const struct team_option *option, 154 const struct team_option *option,
@@ -152,12 +167,29 @@ static void __team_options_unregister(struct team *team,
152 } 167 }
153} 168}
154 169
170static void __team_options_change_check(struct team *team);
171
172int team_options_register(struct team *team,
173 const struct team_option *option,
174 size_t option_count)
175{
176 int err;
177
178 err = __team_options_register(team, option, option_count);
179 if (err)
180 return err;
181 __team_options_change_check(team);
182 return 0;
183}
184EXPORT_SYMBOL(team_options_register);
185
155void team_options_unregister(struct team *team, 186void team_options_unregister(struct team *team,
156 const struct team_option *option, 187 const struct team_option *option,
157 size_t option_count) 188 size_t option_count)
158{ 189{
190 __team_options_mark_removed(team, option, option_count);
191 __team_options_change_check(team);
159 __team_options_unregister(team, option, option_count); 192 __team_options_unregister(team, option, option_count);
160 __team_options_change_check(team, NULL);
161} 193}
162EXPORT_SYMBOL(team_options_unregister); 194EXPORT_SYMBOL(team_options_unregister);
163 195
@@ -176,7 +208,8 @@ static int team_option_set(struct team *team, struct team_option *option,
176 if (err) 208 if (err)
177 return err; 209 return err;
178 210
179 __team_options_change_check(team, option); 211 option->changed = true;
212 __team_options_change_check(team);
180 return err; 213 return err;
181} 214}
182 215
@@ -653,6 +686,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
653 return -ENOENT; 686 return -ENOENT;
654 } 687 }
655 688
689 port->removed = true;
656 __team_port_change_check(port, false); 690 __team_port_change_check(port, false);
657 team_port_list_del_port(team, port); 691 team_port_list_del_port(team, port);
658 team_adjust_ops(team); 692 team_adjust_ops(team);
@@ -1200,10 +1234,9 @@ err_fill:
1200 return err; 1234 return err;
1201} 1235}
1202 1236
1203static int team_nl_fill_options_get_changed(struct sk_buff *skb, 1237static int team_nl_fill_options_get(struct sk_buff *skb,
1204 u32 pid, u32 seq, int flags, 1238 u32 pid, u32 seq, int flags,
1205 struct team *team, 1239 struct team *team, bool fillall)
1206 struct team_option *changed_option)
1207{ 1240{
1208 struct nlattr *option_list; 1241 struct nlattr *option_list;
1209 void *hdr; 1242 void *hdr;
@@ -1223,12 +1256,19 @@ static int team_nl_fill_options_get_changed(struct sk_buff *skb,
1223 struct nlattr *option_item; 1256 struct nlattr *option_item;
1224 long arg; 1257 long arg;
1225 1258
1259 /* Include only changed options if fill all mode is not on */
1260 if (!fillall && !option->changed)
1261 continue;
1226 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); 1262 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
1227 if (!option_item) 1263 if (!option_item)
1228 goto nla_put_failure; 1264 goto nla_put_failure;
1229 NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name); 1265 NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
1230 if (option == changed_option) 1266 if (option->changed) {
1231 NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED); 1267 NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
1268 option->changed = false;
1269 }
1270 if (option->removed)
1271 NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_REMOVED);
1232 switch (option->type) { 1272 switch (option->type) {
1233 case TEAM_OPTION_TYPE_U32: 1273 case TEAM_OPTION_TYPE_U32:
1234 NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32); 1274 NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
@@ -1255,13 +1295,13 @@ nla_put_failure:
1255 return -EMSGSIZE; 1295 return -EMSGSIZE;
1256} 1296}
1257 1297
1258static int team_nl_fill_options_get(struct sk_buff *skb, 1298static int team_nl_fill_options_get_all(struct sk_buff *skb,
1259 struct genl_info *info, int flags, 1299 struct genl_info *info, int flags,
1260 struct team *team) 1300 struct team *team)
1261{ 1301{
1262 return team_nl_fill_options_get_changed(skb, info->snd_pid, 1302 return team_nl_fill_options_get(skb, info->snd_pid,
1263 info->snd_seq, NLM_F_ACK, 1303 info->snd_seq, NLM_F_ACK,
1264 team, NULL); 1304 team, true);
1265} 1305}
1266 1306
1267static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) 1307static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
@@ -1273,7 +1313,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
1273 if (!team) 1313 if (!team)
1274 return -EINVAL; 1314 return -EINVAL;
1275 1315
1276 err = team_nl_send_generic(info, team, team_nl_fill_options_get); 1316 err = team_nl_send_generic(info, team, team_nl_fill_options_get_all);
1277 1317
1278 team_nl_team_put(team); 1318 team_nl_team_put(team);
1279 1319
@@ -1365,10 +1405,10 @@ team_put:
1365 return err; 1405 return err;
1366} 1406}
1367 1407
1368static int team_nl_fill_port_list_get_changed(struct sk_buff *skb, 1408static int team_nl_fill_port_list_get(struct sk_buff *skb,
1369 u32 pid, u32 seq, int flags, 1409 u32 pid, u32 seq, int flags,
1370 struct team *team, 1410 struct team *team,
1371 struct team_port *changed_port) 1411 bool fillall)
1372{ 1412{
1373 struct nlattr *port_list; 1413 struct nlattr *port_list;
1374 void *hdr; 1414 void *hdr;
@@ -1387,12 +1427,19 @@ static int team_nl_fill_port_list_get_changed(struct sk_buff *skb,
1387 list_for_each_entry(port, &team->port_list, list) { 1427 list_for_each_entry(port, &team->port_list, list) {
1388 struct nlattr *port_item; 1428 struct nlattr *port_item;
1389 1429
1430 /* Include only changed ports if fill all mode is not on */
1431 if (!fillall && !port->changed)
1432 continue;
1390 port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); 1433 port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
1391 if (!port_item) 1434 if (!port_item)
1392 goto nla_put_failure; 1435 goto nla_put_failure;
1393 NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex); 1436 NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
1394 if (port == changed_port) 1437 if (port->changed) {
1395 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED); 1438 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
1439 port->changed = false;
1440 }
1441 if (port->removed)
1442 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_REMOVED);
1396 if (port->linkup) 1443 if (port->linkup)
1397 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP); 1444 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
1398 NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed); 1445 NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
@@ -1408,13 +1455,13 @@ nla_put_failure:
1408 return -EMSGSIZE; 1455 return -EMSGSIZE;
1409} 1456}
1410 1457
1411static int team_nl_fill_port_list_get(struct sk_buff *skb, 1458static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
1412 struct genl_info *info, int flags, 1459 struct genl_info *info, int flags,
1413 struct team *team) 1460 struct team *team)
1414{ 1461{
1415 return team_nl_fill_port_list_get_changed(skb, info->snd_pid, 1462 return team_nl_fill_port_list_get(skb, info->snd_pid,
1416 info->snd_seq, NLM_F_ACK, 1463 info->snd_seq, NLM_F_ACK,
1417 team, NULL); 1464 team, true);
1418} 1465}
1419 1466
1420static int team_nl_cmd_port_list_get(struct sk_buff *skb, 1467static int team_nl_cmd_port_list_get(struct sk_buff *skb,
@@ -1427,7 +1474,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
1427 if (!team) 1474 if (!team)
1428 return -EINVAL; 1475 return -EINVAL;
1429 1476
1430 err = team_nl_send_generic(info, team, team_nl_fill_port_list_get); 1477 err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all);
1431 1478
1432 team_nl_team_put(team); 1479 team_nl_team_put(team);
1433 1480
@@ -1464,8 +1511,7 @@ static struct genl_multicast_group team_change_event_mcgrp = {
1464 .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, 1511 .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
1465}; 1512};
1466 1513
1467static int team_nl_send_event_options_get(struct team *team, 1514static int team_nl_send_event_options_get(struct team *team)
1468 struct team_option *changed_option)
1469{ 1515{
1470 struct sk_buff *skb; 1516 struct sk_buff *skb;
1471 int err; 1517 int err;
@@ -1475,8 +1521,7 @@ static int team_nl_send_event_options_get(struct team *team,
1475 if (!skb) 1521 if (!skb)
1476 return -ENOMEM; 1522 return -ENOMEM;
1477 1523
1478 err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team, 1524 err = team_nl_fill_options_get(skb, 0, 0, 0, team, false);
1479 changed_option);
1480 if (err < 0) 1525 if (err < 0)
1481 goto err_fill; 1526 goto err_fill;
1482 1527
@@ -1489,18 +1534,17 @@ err_fill:
1489 return err; 1534 return err;
1490} 1535}
1491 1536
1492static int team_nl_send_event_port_list_get(struct team_port *port) 1537static int team_nl_send_event_port_list_get(struct team *team)
1493{ 1538{
1494 struct sk_buff *skb; 1539 struct sk_buff *skb;
1495 int err; 1540 int err;
1496 struct net *net = dev_net(port->team->dev); 1541 struct net *net = dev_net(team->dev);
1497 1542
1498 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1543 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1499 if (!skb) 1544 if (!skb)
1500 return -ENOMEM; 1545 return -ENOMEM;
1501 1546
1502 err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0, 1547 err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false);
1503 port->team, port);
1504 if (err < 0) 1548 if (err < 0)
1505 goto err_fill; 1549 goto err_fill;
1506 1550
@@ -1544,12 +1588,11 @@ static void team_nl_fini(void)
1544 * Change checkers 1588 * Change checkers
1545 ******************/ 1589 ******************/
1546 1590
1547static void __team_options_change_check(struct team *team, 1591static void __team_options_change_check(struct team *team)
1548 struct team_option *changed_option)
1549{ 1592{
1550 int err; 1593 int err;
1551 1594
1552 err = team_nl_send_event_options_get(team, changed_option); 1595 err = team_nl_send_event_options_get(team);
1553 if (err) 1596 if (err)
1554 netdev_warn(team->dev, "Failed to send options change via netlink\n"); 1597 netdev_warn(team->dev, "Failed to send options change via netlink\n");
1555} 1598}
@@ -1559,9 +1602,10 @@ static void __team_port_change_check(struct team_port *port, bool linkup)
1559{ 1602{
1560 int err; 1603 int err;
1561 1604
1562 if (port->linkup == linkup) 1605 if (!port->removed && port->linkup == linkup)
1563 return; 1606 return;
1564 1607
1608 port->changed = true;
1565 port->linkup = linkup; 1609 port->linkup = linkup;
1566 if (linkup) { 1610 if (linkup) {
1567 struct ethtool_cmd ecmd; 1611 struct ethtool_cmd ecmd;
@@ -1577,7 +1621,7 @@ static void __team_port_change_check(struct team_port *port, bool linkup)
1577 port->duplex = 0; 1621 port->duplex = 0;
1578 1622
1579send_event: 1623send_event:
1580 err = team_nl_send_event_port_list_get(port); 1624 err = team_nl_send_event_port_list_get(port->team);
1581 if (err) 1625 if (err)
1582 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n", 1626 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
1583 port->dev->name); 1627 port->dev->name);
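The team driver rework above replaces the "pass the changed option/port to the fill function" approach with per-entry changed/removed flags plus a fillall mode: event notifications dump only entries whose changed flag is set (clearing it as they go), while an explicit GET dumps everything. A standalone sketch of that dump pattern, with the netlink plumbing left out:

#include <stdbool.h>
#include <stdio.h>

struct opt {
    const char *name;
    bool changed;
    bool removed;
};

static void dump(struct opt *opts, int n, bool fillall)
{
    int i;

    for (i = 0; i < n; i++) {
        if (!fillall && !opts[i].changed)
            continue;               /* event mode: only changed entries */
        printf("%s%s%s\n", opts[i].name,
               opts[i].changed ? " [changed]" : "",
               opts[i].removed ? " [removed]" : "");
        opts[i].changed = false;    /* consumed by this dump */
    }
}

int main(void)
{
    struct opt opts[] = {
        { "mode",     true,  false },
        { "priority", false, false },
        { "oldopt",   true,  true  },
    };

    dump(opts, 3, false);           /* event: mode and oldopt only */
    dump(opts, 3, true);            /* explicit GET: everything */
    return 0;
}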
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index b97a40ed5fff..3876c7ea54f4 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -31,6 +31,12 @@ config B43_BCMA
31 depends on B43 && BCMA 31 depends on B43 && BCMA
32 default y 32 default y
33 33
34config B43_BCMA_EXTRA
35 bool "Hardware support that overlaps with the brcmsmac driver"
36 depends on B43_BCMA
37 default n if BRCMSMAC || BRCMSMAC_MODULE
38 default y
39
34config B43_SSB 40config B43_SSB
35 bool 41 bool
36 depends on B43 && SSB 42 depends on B43 && SSB
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index b91f28ef1032..23ffb1b9a86f 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -116,8 +116,10 @@ MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
116#ifdef CONFIG_B43_BCMA 116#ifdef CONFIG_B43_BCMA
117static const struct bcma_device_id b43_bcma_tbl[] = { 117static const struct bcma_device_id b43_bcma_tbl[] = {
118 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS), 118 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS),
119#ifdef CONFIG_B43_BCMA_EXTRA
119 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS), 120 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
120 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS), 121 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
122#endif
121 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS), 123 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS),
122 BCMA_CORETABLE_END 124 BCMA_CORETABLE_END
123}; 125};
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index f7ed34034f88..f6affc6fd12a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -7981,13 +7981,21 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
7981 7981
7982void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop) 7982void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
7983{ 7983{
7984 int timeout = 20;
7985
7984 /* flush packet queue when requested */ 7986 /* flush packet queue when requested */
7985 if (drop) 7987 if (drop)
7986 brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL); 7988 brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL);
7987 7989
7988 /* wait for queue and DMA fifos to run dry */ 7990 /* wait for queue and DMA fifos to run dry */
7989 while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0) 7991 while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0) {
7990 brcms_msleep(wlc->wl, 1); 7992 brcms_msleep(wlc->wl, 1);
7993
7994 if (--timeout == 0)
7995 break;
7996 }
7997
7998 WARN_ON_ONCE(timeout == 0);
7991} 7999}
7992 8000
7993void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval) 8001void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
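The brcmsmac change above bounds the previously unbounded drain loop: the condition is polled with a 1 ms sleep, the loop gives up after 20 iterations, and a one-time warning fires if the timeout was hit. A userspace sketch of that bounded-wait pattern, with usleep and a fake queue counter standing in for the driver primitives:

#include <stdio.h>
#include <unistd.h>

static int pending = 5;             /* pretend 5 packets are still queued */

static int tx_pending(void)
{
    if (pending > 0)
        pending--;                  /* hardware drains one packet per poll */
    return pending;
}

static void wait_for_tx_completion(void)
{
    int timeout = 20;

    while (tx_pending() > 0) {
        usleep(1000);               /* brcms_msleep(wlc->wl, 1) in the driver */
        if (--timeout == 0)
            break;
    }

    if (timeout == 0)
        fprintf(stderr, "warning: tx drain timed out\n");
}

int main(void)
{
    wait_for_tx_completion();
    printf("done, pending=%d\n", pending);
    return 0;
}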
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 828181fbad5d..58404b0c5010 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -46,6 +46,10 @@ struct team_port {
46 u32 speed; 46 u32 speed;
47 u8 duplex; 47 u8 duplex;
48 48
49 /* Custom gennetlink interface related flags */
50 bool changed;
51 bool removed;
52
49 struct rcu_head rcu; 53 struct rcu_head rcu;
50}; 54};
51 55
@@ -72,6 +76,10 @@ struct team_option {
72 enum team_option_type type; 76 enum team_option_type type;
73 int (*getter)(struct team *team, void *arg); 77 int (*getter)(struct team *team, void *arg);
74 int (*setter)(struct team *team, void *arg); 78 int (*setter)(struct team *team, void *arg);
79
80 /* Custom gennetlink interface related flags */
81 bool changed;
82 bool removed;
75}; 83};
76 84
77struct team_mode { 85struct team_mode {
@@ -207,6 +215,7 @@ enum {
207 TEAM_ATTR_OPTION_CHANGED, /* flag */ 215 TEAM_ATTR_OPTION_CHANGED, /* flag */
208 TEAM_ATTR_OPTION_TYPE, /* u8 */ 216 TEAM_ATTR_OPTION_TYPE, /* u8 */
209 TEAM_ATTR_OPTION_DATA, /* dynamic */ 217 TEAM_ATTR_OPTION_DATA, /* dynamic */
218 TEAM_ATTR_OPTION_REMOVED, /* flag */
210 219
211 __TEAM_ATTR_OPTION_MAX, 220 __TEAM_ATTR_OPTION_MAX,
212 TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1, 221 TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
@@ -227,6 +236,7 @@ enum {
227 TEAM_ATTR_PORT_LINKUP, /* flag */ 236 TEAM_ATTR_PORT_LINKUP, /* flag */
228 TEAM_ATTR_PORT_SPEED, /* u32 */ 237 TEAM_ATTR_PORT_SPEED, /* u32 */
229 TEAM_ATTR_PORT_DUPLEX, /* u8 */ 238 TEAM_ATTR_PORT_DUPLEX, /* u8 */
239 TEAM_ATTR_PORT_REMOVED, /* flag */
230 240
231 __TEAM_ATTR_PORT_MAX, 241 __TEAM_ATTR_PORT_MAX,
232 TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1, 242 TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 5c4fe8e5bfe5..aea61905499b 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -621,6 +621,7 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
621int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); 621int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
622int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn); 622int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
623void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn); 623void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
624void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
624 625
625int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); 626int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
626int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 627int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index c9d625ca659e..da81af086eaf 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -109,12 +109,18 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
109 * 109 *
110 * returns 0 on success and <0 if the counter->usage will exceed the 110 * returns 0 on success and <0 if the counter->usage will exceed the
111 * counter->limit _locked call expects the counter->lock to be taken 111 * counter->limit _locked call expects the counter->lock to be taken
112 *
113 * charge_nofail works the same, except that it charges the resource
114 * counter unconditionally, and returns < 0 if the after the current
115 * charge we are over limit.
112 */ 116 */
113 117
114int __must_check res_counter_charge_locked(struct res_counter *counter, 118int __must_check res_counter_charge_locked(struct res_counter *counter,
115 unsigned long val); 119 unsigned long val);
116int __must_check res_counter_charge(struct res_counter *counter, 120int __must_check res_counter_charge(struct res_counter *counter,
117 unsigned long val, struct res_counter **limit_fail_at); 121 unsigned long val, struct res_counter **limit_fail_at);
122int __must_check res_counter_charge_nofail(struct res_counter *counter,
123 unsigned long val, struct res_counter **limit_fail_at);
118 124
119/* 125/*
120 * uncharge - tell that some portion of the resource is released 126 * uncharge - tell that some portion of the resource is released
@@ -142,7 +148,10 @@ static inline unsigned long long res_counter_margin(struct res_counter *cnt)
142 unsigned long flags; 148 unsigned long flags;
143 149
144 spin_lock_irqsave(&cnt->lock, flags); 150 spin_lock_irqsave(&cnt->lock, flags);
145 margin = cnt->limit - cnt->usage; 151 if (cnt->limit > cnt->usage)
152 margin = cnt->limit - cnt->usage;
153 else
154 margin = 0;
146 spin_unlock_irqrestore(&cnt->lock, flags); 155 spin_unlock_irqrestore(&cnt->lock, flags);
147 return margin; 156 return margin;
148} 157}
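The res_counter_margin() clamp above matters because the new charge_nofail path can push usage past limit; with unsigned fields the old "limit - usage" would then wrap to an enormous margin instead of zero. A two-case sketch of the clamped computation:

#include <stdio.h>

static unsigned long long margin(unsigned long long limit,
                                 unsigned long long usage)
{
    return limit > usage ? limit - usage : 0;
}

int main(void)
{
    printf("%llu\n", margin(100, 30));    /* 70 */
    printf("%llu\n", margin(100, 130));   /* 0, not a wrapped huge value */
    return 0;
}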
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index e16557a357e5..c1241c428179 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -192,7 +192,6 @@ enum
192 LINUX_MIB_TCPPARTIALUNDO, /* TCPPartialUndo */ 192 LINUX_MIB_TCPPARTIALUNDO, /* TCPPartialUndo */
193 LINUX_MIB_TCPDSACKUNDO, /* TCPDSACKUndo */ 193 LINUX_MIB_TCPDSACKUNDO, /* TCPDSACKUndo */
194 LINUX_MIB_TCPLOSSUNDO, /* TCPLossUndo */ 194 LINUX_MIB_TCPLOSSUNDO, /* TCPLossUndo */
195 LINUX_MIB_TCPLOSS, /* TCPLoss */
196 LINUX_MIB_TCPLOSTRETRANSMIT, /* TCPLostRetransmit */ 195 LINUX_MIB_TCPLOSTRETRANSMIT, /* TCPLostRetransmit */
197 LINUX_MIB_TCPRENOFAILURES, /* TCPRenoFailures */ 196 LINUX_MIB_TCPRENOFAILURES, /* TCPRenoFailures */
198 LINUX_MIB_TCPSACKFAILURES, /* TCPSackFailures */ 197 LINUX_MIB_TCPSACKFAILURES, /* TCPSackFailures */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 5b2fed5eebf2..00596e816b4d 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1388,6 +1388,6 @@ struct hci_inquiry_req {
1388}; 1388};
1389#define IREQ_CACHE_FLUSH 0x0001 1389#define IREQ_CACHE_FLUSH 0x0001
1390 1390
1391extern int enable_hs; 1391extern bool enable_hs;
1392 1392
1393#endif /* __HCI_H */ 1393#endif /* __HCI_H */
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
index e503b87c4c1b..7b2d43139c8e 100644
--- a/include/net/netprio_cgroup.h
+++ b/include/net/netprio_cgroup.h
@@ -13,7 +13,6 @@
13 13
14#ifndef _NETPRIO_CGROUP_H 14#ifndef _NETPRIO_CGROUP_H
15#define _NETPRIO_CGROUP_H 15#define _NETPRIO_CGROUP_H
16#include <linux/module.h>
17#include <linux/cgroup.h> 16#include <linux/cgroup.h>
18#include <linux/hardirq.h> 17#include <linux/hardirq.h>
19#include <linux/rcupdate.h> 18#include <linux/rcupdate.h>
diff --git a/include/net/sock.h b/include/net/sock.h
index bb972d254dff..4c69ac165e6b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -226,6 +226,7 @@ struct cg_proto;
226 * @sk_ack_backlog: current listen backlog 226 * @sk_ack_backlog: current listen backlog
227 * @sk_max_ack_backlog: listen backlog set in listen() 227 * @sk_max_ack_backlog: listen backlog set in listen()
228 * @sk_priority: %SO_PRIORITY setting 228 * @sk_priority: %SO_PRIORITY setting
229 * @sk_cgrp_prioidx: socket group's priority map index
229 * @sk_type: socket type (%SOCK_STREAM, etc) 230 * @sk_type: socket type (%SOCK_STREAM, etc)
230 * @sk_protocol: which protocol this socket belongs in this network family 231 * @sk_protocol: which protocol this socket belongs in this network family
231 * @sk_peer_pid: &struct pid for this socket's peer 232 * @sk_peer_pid: &struct pid for this socket's peer
@@ -921,7 +922,7 @@ inline void sk_refcnt_debug_release(const struct sock *sk)
921#define sk_refcnt_debug_release(sk) do { } while (0) 922#define sk_refcnt_debug_release(sk) do { } while (0)
922#endif /* SOCK_REFCNT_DEBUG */ 923#endif /* SOCK_REFCNT_DEBUG */
923 924
924#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 925#if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET)
925extern struct jump_label_key memcg_socket_limit_enabled; 926extern struct jump_label_key memcg_socket_limit_enabled;
926static inline struct cg_proto *parent_cg_proto(struct proto *proto, 927static inline struct cg_proto *parent_cg_proto(struct proto *proto,
927 struct cg_proto *cg_proto) 928 struct cg_proto *cg_proto)
@@ -1007,9 +1008,8 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
1007 struct res_counter *fail; 1008 struct res_counter *fail;
1008 int ret; 1009 int ret;
1009 1010
1010 ret = res_counter_charge(prot->memory_allocated, 1011 ret = res_counter_charge_nofail(prot->memory_allocated,
1011 amt << PAGE_SHIFT, &fail); 1012 amt << PAGE_SHIFT, &fail);
1012
1013 if (ret < 0) 1013 if (ret < 0)
1014 *parent_status = OVER_LIMIT; 1014 *parent_status = OVER_LIMIT;
1015} 1015}
@@ -1053,12 +1053,11 @@ sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
1053} 1053}
1054 1054
1055static inline void 1055static inline void
1056sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status) 1056sk_memory_allocated_sub(struct sock *sk, int amt)
1057{ 1057{
1058 struct proto *prot = sk->sk_prot; 1058 struct proto *prot = sk->sk_prot;
1059 1059
1060 if (mem_cgroup_sockets_enabled && sk->sk_cgrp && 1060 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1061 parent_status != OVER_LIMIT) /* Otherwise was uncharged already */
1062 memcg_memory_allocated_sub(sk->sk_cgrp, amt); 1061 memcg_memory_allocated_sub(sk->sk_cgrp, amt);
1063 1062
1064 atomic_long_sub(amt, prot->memory_allocated); 1063 atomic_long_sub(amt, prot->memory_allocated);
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index 6d269cce7aa1..d508363858b3 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -66,6 +66,31 @@ done:
66 return ret; 66 return ret;
67} 67}
68 68
69int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
70 struct res_counter **limit_fail_at)
71{
72 int ret, r;
73 unsigned long flags;
74 struct res_counter *c;
75
76 r = ret = 0;
77 *limit_fail_at = NULL;
78 local_irq_save(flags);
79 for (c = counter; c != NULL; c = c->parent) {
80 spin_lock(&c->lock);
81 r = res_counter_charge_locked(c, val);
82 if (r)
83 c->usage += val;
84 spin_unlock(&c->lock);
85 if (r < 0 && ret == 0) {
86 *limit_fail_at = c;
87 ret = r;
88 }
89 }
90 local_irq_restore(flags);
91
92 return ret;
93}
69void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val) 94void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
70{ 95{
71 if (WARN_ON(counter->usage < val)) 96 if (WARN_ON(counter->usage < val))
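res_counter_charge_nofail() above walks the counter hierarchy, forces the charge at every level even when it exceeds the limit, and reports the first ancestor that ended up over limit while never failing the charge itself. A simplified userspace sketch of that net behaviour (locking, irq handling and the locked/unlocked split are omitted):

#include <stdio.h>
#include <stddef.h>

struct counter {
    unsigned long usage, limit;
    struct counter *parent;
};

static int charge_nofail(struct counter *c, unsigned long val,
                         struct counter **fail_at)
{
    int ret = 0;

    *fail_at = NULL;
    for (; c != NULL; c = c->parent) {
        c->usage += val;                    /* charge unconditionally */
        if (c->usage > c->limit && ret == 0) {
            *fail_at = c;                   /* remember the first offender */
            ret = -1;
        }
    }
    return ret;
}

int main(void)
{
    struct counter root  = { 0, 100, NULL };
    struct counter child = { 0, 10, &root };
    struct counter *fail;

    printf("ret=%d\n", charge_nofail(&child, 8, &fail));          /* 0 */
    printf("ret=%d fail=%s\n", charge_nofail(&child, 8, &fail),
           fail == &child ? "child" : "root");                    /* -1, child */
    return 0;
}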
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4baddbae94cc..556859fec4ef 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -379,7 +379,7 @@ static void mem_cgroup_put(struct mem_cgroup *memcg);
379static bool mem_cgroup_is_root(struct mem_cgroup *memcg); 379static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
380void sock_update_memcg(struct sock *sk) 380void sock_update_memcg(struct sock *sk)
381{ 381{
382 if (static_branch(&memcg_socket_limit_enabled)) { 382 if (mem_cgroup_sockets_enabled) {
383 struct mem_cgroup *memcg; 383 struct mem_cgroup *memcg;
384 384
385 BUG_ON(!sk->sk_prot->proto_cgroup); 385 BUG_ON(!sk->sk_prot->proto_cgroup);
@@ -411,7 +411,7 @@ EXPORT_SYMBOL(sock_update_memcg);
411 411
412void sock_release_memcg(struct sock *sk) 412void sock_release_memcg(struct sock *sk)
413{ 413{
414 if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) { 414 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
415 struct mem_cgroup *memcg; 415 struct mem_cgroup *memcg;
416 WARN_ON(!sk->sk_cgrp->memcg); 416 WARN_ON(!sk->sk_cgrp->memcg);
417 memcg = sk->sk_cgrp->memcg; 417 memcg = sk->sk_cgrp->memcg;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 845da3ee56a0..9de93714213a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -55,7 +55,7 @@
55 55
56#define AUTO_OFF_TIMEOUT 2000 56#define AUTO_OFF_TIMEOUT 2000
57 57
58int enable_hs; 58bool enable_hs;
59 59
60static void hci_rx_work(struct work_struct *work); 60static void hci_rx_work(struct work_struct *work);
61static void hci_cmd_work(struct work_struct *work); 61static void hci_cmd_work(struct work_struct *work);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 921aa2b4b415..369b41894527 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1311,6 +1311,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1311 case ETHTOOL_GRXCSUM: 1311 case ETHTOOL_GRXCSUM:
1312 case ETHTOOL_GTXCSUM: 1312 case ETHTOOL_GTXCSUM:
1313 case ETHTOOL_GSG: 1313 case ETHTOOL_GSG:
1314 case ETHTOOL_GSSET_INFO:
1314 case ETHTOOL_GSTRINGS: 1315 case ETHTOOL_GSTRINGS:
1315 case ETHTOOL_GTSO: 1316 case ETHTOOL_GTSO:
1316 case ETHTOOL_GPERMADDR: 1317 case ETHTOOL_GPERMADDR:
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 0985b9b14b80..a225089df5b6 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1,4 +1,5 @@
1#include <linux/skbuff.h> 1#include <linux/skbuff.h>
2#include <linux/export.h>
2#include <linux/ip.h> 3#include <linux/ip.h>
3#include <linux/ipv6.h> 4#include <linux/ipv6.h>
4#include <linux/if_vlan.h> 5#include <linux/if_vlan.h>
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 65f80c7b1656..4d8ce93cd503 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -767,8 +767,8 @@ done:
767 return i; 767 return i;
768} 768}
769 769
770static unsigned long num_arg(const char __user * user_buffer, 770static long num_arg(const char __user *user_buffer, unsigned long maxlen,
771 unsigned long maxlen, unsigned long *num) 771 unsigned long *num)
772{ 772{
773 int i; 773 int i;
774 *num = 0; 774 *num = 0;
diff --git a/net/core/sock.c b/net/core/sock.c
index 5c5af9988f94..3e81fd2e3c75 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1827,7 +1827,7 @@ suppress_allocation:
1827 /* Alas. Undo changes. */ 1827 /* Alas. Undo changes. */
1828 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; 1828 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
1829 1829
1830 sk_memory_allocated_sub(sk, amt, parent_status); 1830 sk_memory_allocated_sub(sk, amt);
1831 1831
1832 return 0; 1832 return 0;
1833} 1833}
@@ -1840,7 +1840,7 @@ EXPORT_SYMBOL(__sk_mem_schedule);
1840void __sk_mem_reclaim(struct sock *sk) 1840void __sk_mem_reclaim(struct sock *sk)
1841{ 1841{
1842 sk_memory_allocated_sub(sk, 1842 sk_memory_allocated_sub(sk,
1843 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0); 1843 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
1844 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; 1844 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1845 1845
1846 if (sk_under_memory_pressure(sk) && 1846 if (sk_under_memory_pressure(sk) &&
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 3569d8ecaeac..6afc807ee2ad 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -216,7 +216,6 @@ static const struct snmp_mib snmp4_net_list[] = {
216 SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO), 216 SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO),
217 SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO), 217 SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO),
218 SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO), 218 SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO),
219 SNMP_MIB_ITEM("TCPLoss", LINUX_MIB_TCPLOSS),
220 SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT), 219 SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT),
221 SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES), 220 SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES),
222 SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES), 221 SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES),
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 6187eb4d1dcf..f45e1c242440 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -63,7 +63,6 @@ static inline void bictcp_reset(struct bictcp *ca)
63{ 63{
64 ca->cnt = 0; 64 ca->cnt = 0;
65 ca->last_max_cwnd = 0; 65 ca->last_max_cwnd = 0;
66 ca->loss_cwnd = 0;
67 ca->last_cwnd = 0; 66 ca->last_cwnd = 0;
68 ca->last_time = 0; 67 ca->last_time = 0;
69 ca->epoch_start = 0; 68 ca->epoch_start = 0;
@@ -72,7 +71,11 @@ static inline void bictcp_reset(struct bictcp *ca)
72 71
73static void bictcp_init(struct sock *sk) 72static void bictcp_init(struct sock *sk)
74{ 73{
75 bictcp_reset(inet_csk_ca(sk)); 74 struct bictcp *ca = inet_csk_ca(sk);
75
76 bictcp_reset(ca);
77 ca->loss_cwnd = 0;
78
76 if (initial_ssthresh) 79 if (initial_ssthresh)
77 tcp_sk(sk)->snd_ssthresh = initial_ssthresh; 80 tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
78} 81}
@@ -127,7 +130,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
127 } 130 }
128 131
129 /* if in slow start or link utilization is very low */ 132 /* if in slow start or link utilization is very low */
130 if (ca->loss_cwnd == 0) { 133 if (ca->last_max_cwnd == 0) {
131 if (ca->cnt > 20) /* increase cwnd 5% per RTT */ 134 if (ca->cnt > 20) /* increase cwnd 5% per RTT */
132 ca->cnt = 20; 135 ca->cnt = 20;
133 } 136 }
@@ -185,7 +188,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
185{ 188{
186 const struct tcp_sock *tp = tcp_sk(sk); 189 const struct tcp_sock *tp = tcp_sk(sk);
187 const struct bictcp *ca = inet_csk_ca(sk); 190 const struct bictcp *ca = inet_csk_ca(sk);
188 return max(tp->snd_cwnd, ca->last_max_cwnd); 191 return max(tp->snd_cwnd, ca->loss_cwnd);
189} 192}
190 193
191static void bictcp_state(struct sock *sk, u8 new_state) 194static void bictcp_state(struct sock *sk, u8 new_state)
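The BIC change above (mirrored for CUBIC in the next file) stops bictcp_reset() from wiping loss_cwnd, so the snapshot of cwnd taken at the last loss (set in the ssthresh hook, not shown in this hunk) survives resets and can be used by the undo path, while the slow-start heuristic keys off last_max_cwnd instead. A minimal sketch of the undo side:

#include <stdio.h>

struct ca_state {
    unsigned int last_max_cwnd;     /* cleared by reset */
    unsigned int loss_cwnd;         /* now kept across resets */
};

static unsigned int undo_cwnd(unsigned int snd_cwnd, const struct ca_state *ca)
{
    /* was max(snd_cwnd, ca->last_max_cwnd) before the patch */
    return snd_cwnd > ca->loss_cwnd ? snd_cwnd : ca->loss_cwnd;
}

int main(void)
{
    /* after a reset last_max_cwnd is 0, but the loss snapshot survives */
    struct ca_state ca = { .last_max_cwnd = 0, .loss_cwnd = 40 };

    printf("undo -> %u\n", undo_cwnd(20, &ca));    /* 40 */
    return 0;
}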
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index f376b05cca81..a9077f441cb2 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -107,7 +107,6 @@ static inline void bictcp_reset(struct bictcp *ca)
107{ 107{
108 ca->cnt = 0; 108 ca->cnt = 0;
109 ca->last_max_cwnd = 0; 109 ca->last_max_cwnd = 0;
110 ca->loss_cwnd = 0;
111 ca->last_cwnd = 0; 110 ca->last_cwnd = 0;
112 ca->last_time = 0; 111 ca->last_time = 0;
113 ca->bic_origin_point = 0; 112 ca->bic_origin_point = 0;
@@ -142,7 +141,10 @@ static inline void bictcp_hystart_reset(struct sock *sk)
142 141
143static void bictcp_init(struct sock *sk) 142static void bictcp_init(struct sock *sk)
144{ 143{
145 bictcp_reset(inet_csk_ca(sk)); 144 struct bictcp *ca = inet_csk_ca(sk);
145
146 bictcp_reset(ca);
147 ca->loss_cwnd = 0;
146 148
147 if (hystart) 149 if (hystart)
148 bictcp_hystart_reset(sk); 150 bictcp_hystart_reset(sk);
@@ -275,7 +277,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
275 * The initial growth of cubic function may be too conservative 277 * The initial growth of cubic function may be too conservative
276 * when the available bandwidth is still unknown. 278 * when the available bandwidth is still unknown.
277 */ 279 */
278 if (ca->loss_cwnd == 0 && ca->cnt > 20) 280 if (ca->last_max_cwnd == 0 && ca->cnt > 20)
279 ca->cnt = 20; /* increase cwnd 5% per RTT */ 281 ca->cnt = 20; /* increase cwnd 5% per RTT */
280 282
281 /* TCP Friendly */ 283 /* TCP Friendly */
@@ -342,7 +344,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
342{ 344{
343 struct bictcp *ca = inet_csk_ca(sk); 345 struct bictcp *ca = inet_csk_ca(sk);
344 346
345 return max(tcp_sk(sk)->snd_cwnd, ca->last_max_cwnd); 347 return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
346} 348}
347 349
348static void bictcp_state(struct sock *sk, u8 new_state) 350static void bictcp_state(struct sock *sk, u8 new_state)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2877c3e09587..976034f82320 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -105,7 +105,6 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN. */
 #define FLAG_DATA_SACKED	0x20 /* New SACK. */
 #define FLAG_ECE		0x40 /* ECE in this ACK */
-#define FLAG_DATA_LOST		0x80 /* SACK detected data lossage. */
 #define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ONLY_ORIG_SACKED	0x200 /* SACKs only non-rexmit sent before RTO */
 #define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
@@ -1040,13 +1039,11 @@ static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
  * These 6 states form finite state machine, controlled by the following events:
  * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
  * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
- * 3. Loss detection event of one of three flavors:
+ * 3. Loss detection event of two flavors:
  *	A. Scoreboard estimator decided the packet is lost.
  *	   A'. Reno "three dupacks" marks head of queue lost.
- *	   A''. Its FACK modfication, head until snd.fack is lost.
- *	B. SACK arrives sacking data transmitted after never retransmitted
- *	   hole was sent out.
- *	C. SACK arrives sacking SND.NXT at the moment, when the
+ *	   A''. Its FACK modification, head until snd.fack is lost.
+ *	B. SACK arrives sacking SND.NXT at the moment, when the
  *	   segment was retransmitted.
  * 4. D-SACK added new rule: D-SACK changes any tag to S.
  *
@@ -1153,7 +1150,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
 }
 
 /* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
- * Event "C". Later note: FACK people cheated me again 8), we have to account
+ * Event "B". Later note: FACK people cheated me again 8), we have to account
  * for reordering! Ugly, but should help.
  *
  * Search retransmitted skbs from write_queue that were sent when snd_nxt was
@@ -1844,10 +1841,6 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 		if (found_dup_sack && ((i + 1) == first_sack_index))
 			next_dup = &sp[i + 1];
 
-		/* Event "B" in the comment above. */
-		if (after(end_seq, tp->high_seq))
-			state.flag |= FLAG_DATA_LOST;
-
 		/* Skip too early cached blocks */
 		while (tcp_sack_cache_ok(tp, cache) &&
 		       !before(start_seq, cache->end_seq))
@@ -2515,8 +2508,11 @@ static void tcp_timeout_skbs(struct sock *sk)
 	tcp_verify_left_out(tp);
 }
 
-/* Mark head of queue up as lost. With RFC3517 SACK, the packets is
- * is against sacked "cnt", otherwise it's against facked "cnt"
+/* Detect loss in event "A" above by marking head of queue up as lost.
+ * For FACK or non-SACK(Reno) senders, the first "packets" number of segments
+ * are considered lost. For RFC3517 SACK, a segment is considered lost if it
+ * has at least tp->reordering SACKed seqments above it; "packets" refers to
+ * the maximum SACKed segments to pass before reaching this limit.
  */
 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 {
@@ -2525,6 +2521,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 	int cnt, oldcnt;
 	int err;
 	unsigned int mss;
+	/* Use SACK to deduce losses of new sequences sent during recovery */
+	const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;
 
 	WARN_ON(packets > tp->packets_out);
 	if (tp->lost_skb_hint) {
@@ -2546,7 +2544,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 		tp->lost_skb_hint = skb;
 		tp->lost_cnt_hint = cnt;
 
-		if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
+		if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
 			break;
 
 		oldcnt = cnt;
@@ -3033,19 +3031,10 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 	if (tcp_check_sack_reneging(sk, flag))
 		return;
 
-	/* C. Process data loss notification, provided it is valid. */
-	if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) &&
-	    before(tp->snd_una, tp->high_seq) &&
-	    icsk->icsk_ca_state != TCP_CA_Open &&
-	    tp->fackets_out > tp->reordering) {
-		tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
-	}
-
-	/* D. Check consistency of the current state. */
+	/* C. Check consistency of the current state. */
 	tcp_verify_left_out(tp);
 
-	/* E. Check state exit conditions. State can be terminated
+	/* D. Check state exit conditions. State can be terminated
 	 * when high_seq is ACKed. */
 	if (icsk->icsk_ca_state == TCP_CA_Open) {
 		WARN_ON(tp->retrans_out != 0);
@@ -3077,7 +3066,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
 		}
 	}
 
-	/* F. Process state. */
+	/* E. Process state. */
 	switch (icsk->icsk_ca_state) {
 	case TCP_CA_Recovery:
 		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
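In tcp_input.c the scan bound of tcp_mark_head_lost() becomes loss_high, which is snd_nxt for SACK connections and high_seq otherwise, so data sent during recovery can also be deduced lost from SACK. The sketch below only illustrates that bound check with wrap-safe sequence comparison; seq_before()/seq_after() are local re-implementations for the example, not the kernel macros.

/* Minimal sketch of the new bound check: with SACK, scan up to snd_nxt;
 * otherwise stop at high_seq. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;	/* wrap-safe comparison */
}
#define seq_after(seq2, seq1)	seq_before(seq1, seq2)

int main(void)
{
	bool is_sack = true;
	uint32_t high_seq = 1000, snd_nxt = 5000, end_seq = 3000;
	uint32_t loss_high = is_sack ? snd_nxt : high_seq;

	/* with SACK the segment is still inside the scan range ... */
	printf("stop scan: %d\n", seq_after(end_seq, loss_high));	/* 0 */
	/* ... whereas the old high_seq bound would have stopped here */
	printf("stop scan (old bound): %d\n", seq_after(end_seq, high_seq)); /* 1 */
	return 0;
}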
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1eb4ad57670e..337ba4cca052 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -631,7 +631,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 	arg.iov[0].iov_len = sizeof(rep.th);
 
 #ifdef CONFIG_TCP_MD5SIG
-	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
+	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
 	if (key) {
 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
 				   (TCPOPT_NOP << 16) |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a225d5ee3c2f..c02280a4d126 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -502,29 +502,31 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
 	rcu_read_unlock();
 }
 
-static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
+static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
 {
 	struct net *net;
+	int old;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
 
 	net = (struct net *)table->extra2;
-	if (p == &net->ipv6.devconf_dflt->forwarding)
-		return 0;
+	old = *p;
+	*p = newf;
 
-	if (!rtnl_trylock()) {
-		/* Restore the original values before restarting */
-		*p = old;
-		return restart_syscall();
+	if (p == &net->ipv6.devconf_dflt->forwarding) {
+		rtnl_unlock();
+		return 0;
 	}
 
 	if (p == &net->ipv6.devconf_all->forwarding) {
-		__s32 newf = net->ipv6.devconf_all->forwarding;
 		net->ipv6.devconf_dflt->forwarding = newf;
 		addrconf_forward_change(net, newf);
-	} else if ((!*p) ^ (!old))
+	} else if ((!newf) ^ (!old))
 		dev_forward_change((struct inet6_dev *)table->extra1);
 	rtnl_unlock();
 
-	if (*p)
+	if (newf)
 		rt6_purge_dflt_routers(net);
 	return 1;
 }
@@ -4260,9 +4262,17 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
 	int *valp = ctl->data;
 	int val = *valp;
 	loff_t pos = *ppos;
+	ctl_table lctl;
 	int ret;
 
-	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+	/*
+	 * ctl->data points to idev->cnf.forwarding, we should
+	 * not modify it until we get the rtnl lock.
+	 */
+	lctl = *ctl;
+	lctl.data = &val;
+
+	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
 
 	if (write)
 		ret = addrconf_fixup_forwarding(ctl, valp, val);
@@ -4300,26 +4310,27 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
 	rcu_read_unlock();
 }
 
-static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
+static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
 {
 	struct net *net;
+	int old;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
 
 	net = (struct net *)table->extra2;
+	old = *p;
+	*p = newf;
 
-	if (p == &net->ipv6.devconf_dflt->disable_ipv6)
+	if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
+		rtnl_unlock();
 		return 0;
-
-	if (!rtnl_trylock()) {
-		/* Restore the original values before restarting */
-		*p = old;
-		return restart_syscall();
 	}
 
 	if (p == &net->ipv6.devconf_all->disable_ipv6) {
-		__s32 newf = net->ipv6.devconf_all->disable_ipv6;
 		net->ipv6.devconf_dflt->disable_ipv6 = newf;
 		addrconf_disable_change(net, newf);
-	} else if ((!*p) ^ (!old))
+	} else if ((!newf) ^ (!old))
 		dev_disable_change((struct inet6_dev *)table->extra1);
 
 	rtnl_unlock();
@@ -4333,9 +4344,17 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
 	int *valp = ctl->data;
 	int val = *valp;
 	loff_t pos = *ppos;
+	ctl_table lctl;
 	int ret;
 
-	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+	/*
+	 * ctl->data points to idev->cnf.disable_ipv6, we should
+	 * not modify it until we get the rtnl lock.
+	 */
+	lctl = *ctl;
+	lctl.data = &val;
+
+	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
 
 	if (write)
 		ret = addrconf_disable_ipv6(ctl, valp, val);
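Both addrconf sysctl handlers now follow the same pattern: copy the ctl_table, point .data at an on-stack value, let proc_dointvec() parse into that copy, and only commit to the shared devconf once the rtnl lock has been taken inside addrconf_fixup_forwarding() / addrconf_disable_ipv6(). Below is a loose userspace analogue of this "parse into a local, commit under the lock" idea, with invented names and a pthread mutex standing in for rtnl; it is a sketch, not the kernel mechanism.

/* Illustrative only: parse user input into a private copy first,
 * then take the lock and commit the change to shared state. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static int forwarding;				/* the live setting */
static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

static int sysctl_write(const char *buf)
{
	int val = atoi(buf);			/* parse the private copy */
	int old;

	pthread_mutex_lock(&cfg_lock);		/* only now touch shared state */
	old = forwarding;
	forwarding = val;
	pthread_mutex_unlock(&cfg_lock);

	return old != val;			/* report whether it changed */
}

int main(void)
{
	int changed = sysctl_write("1");

	printf("changed=%d forwarding=%d\n", changed, forwarding);
	return 0;
}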
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 906c7ca43542..3edd05ae4388 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1083,7 +1083,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 
 #ifdef CONFIG_TCP_MD5SIG
 	if (sk)
-		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
+		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr);
 #endif
 
 	if (th->ack)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index a18e6c3d36e3..b9bef2c75026 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -713,6 +713,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct sk_buff *skb = NULL;
 	struct sock *sk = sock->sk;
 	struct llc_sock *llc = llc_sk(sk);
+	unsigned long cpu_flags;
 	size_t copied = 0;
 	u32 peek_seq = 0;
 	u32 *seq;
@@ -838,7 +839,9 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 		goto copy_uaddr;
 
 	if (!(flags & MSG_PEEK)) {
+		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
 		sk_eat_skb(sk, skb, 0);
+		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
 		*seq = 0;
 	}
 
@@ -859,7 +862,9 @@ copy_uaddr:
 		llc_cmsg_rcv(msg, skb);
 
 	if (!(flags & MSG_PEEK)) {
+		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
 		sk_eat_skb(sk, skb, 0);
+		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
 		*seq = 0;
 	}
 
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 38e6101190d9..59edcd95a58d 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -225,9 +225,9 @@ KEY_OPS(key);
 		    key, &key_##name##_ops);
 
 void ieee80211_debugfs_key_add(struct ieee80211_key *key)
-  {
+{
 	static int keycount;
-	char buf[50];
+	char buf[100];
 	struct sta_info *sta;
 
 	if (!key->local->debugfs.keys)
@@ -244,7 +244,8 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
 
 	sta = key->sta;
 	if (sta) {
-		sprintf(buf, "../../stations/%pM", sta->sta.addr);
+		sprintf(buf, "../../netdev:%s/stations/%pM",
+			sta->sdata->name, sta->sta.addr);
 		key->debugfs.stalink =
 			debugfs_create_symlink("station", key->debugfs.dir, buf);
 	}
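The debugfs change also has to grow the on-stack target buffer (buf[50] -> buf[100]) because the symlink target now embeds the interface name. Below is a rough, purely illustrative length check using snprintf; the interface name and MAC bytes are made up, and the kernel itself uses sprintf with %pM.

/* Worst case is roughly "../../netdev:" (13) + ifname (<=15) +
 * "/stations/" (10) + MAC (17) = 55 bytes plus NUL: over 50, well under 100. */
#include <stdio.h>

int main(void)
{
	char buf[100];
	const char *ifname = "wlan0";
	int n;

	n = snprintf(buf, sizeof(buf),
		     "../../netdev:%s/stations/%02x:%02x:%02x:%02x:%02x:%02x",
		     ifname, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55);
	printf("%s (%d bytes, buffer %zu)\n", buf, n, sizeof(buf));
	return 0;
}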
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 73abb7524b2c..54df1b2bafd2 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -119,12 +119,12 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
 	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
 		      sizeof(mgmt->u.action.u.mesh_action);
 
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+	skb = dev_alloc_skb(local->tx_headroom +
 			    hdr_len +
 			    2 + 37); /* max HWMP IE */
 	if (!skb)
 		return -1;
-	skb_reserve(skb, local->hw.extra_tx_headroom);
+	skb_reserve(skb, local->tx_headroom);
 	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
 	memset(mgmt, 0, hdr_len);
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
@@ -250,12 +250,12 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
 	if (time_before(jiffies, ifmsh->next_perr))
 		return -EAGAIN;
 
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+	skb = dev_alloc_skb(local->tx_headroom +
 			    hdr_len +
 			    2 + 15 /* PERR IE */);
 	if (!skb)
 		return -1;
-	skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom);
+	skb_reserve(skb, local->tx_headroom);
 	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
 	memset(mgmt, 0, hdr_len);
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 41ef1b476442..a17251730b9e 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -172,7 +172,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
 	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
 		      sizeof(mgmt->u.action.u.self_prot);
 
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+	skb = dev_alloc_skb(local->tx_headroom +
 			    hdr_len +
 			    2 + /* capability info */
 			    2 + /* AID */
@@ -186,7 +186,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
 			    sdata->u.mesh.ie_len);
 	if (!skb)
 		return -1;
-	skb_reserve(skb, local->hw.extra_tx_headroom);
+	skb_reserve(skb, local->tx_headroom);
 	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
 	memset(mgmt, 0, hdr_len);
 	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ecb4c84c1bb3..295be92f7c77 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2750,7 +2750,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-	struct ieee80211_work *wk;
 	u8 bssid[ETH_ALEN];
 	bool assoc_bss = false;
 
@@ -2763,30 +2762,47 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
 		assoc_bss = true;
 	} else {
 		bool not_auth_yet = false;
+		struct ieee80211_work *tmp, *wk = NULL;
 
 		mutex_unlock(&ifmgd->mtx);
 
 		mutex_lock(&local->mtx);
-		list_for_each_entry(wk, &local->work_list, list) {
-			if (wk->sdata != sdata)
+		list_for_each_entry(tmp, &local->work_list, list) {
+			if (tmp->sdata != sdata)
 				continue;
 
-			if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
-			    wk->type != IEEE80211_WORK_AUTH &&
-			    wk->type != IEEE80211_WORK_ASSOC &&
-			    wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
+			if (tmp->type != IEEE80211_WORK_DIRECT_PROBE &&
+			    tmp->type != IEEE80211_WORK_AUTH &&
+			    tmp->type != IEEE80211_WORK_ASSOC &&
+			    tmp->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
 				continue;
 
-			if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
+			if (memcmp(req->bss->bssid, tmp->filter_ta, ETH_ALEN))
 				continue;
 
-			not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE;
-			list_del_rcu(&wk->list);
-			free_work(wk);
+			not_auth_yet = tmp->type == IEEE80211_WORK_DIRECT_PROBE;
+			list_del_rcu(&tmp->list);
+			synchronize_rcu();
+			wk = tmp;
 			break;
 		}
 		mutex_unlock(&local->mtx);
 
+		if (wk && wk->type == IEEE80211_WORK_ASSOC) {
+			/* clean up dummy sta & TX sync */
+			sta_info_destroy_addr(wk->sdata, wk->filter_ta);
+			if (wk->assoc.synced)
+				drv_finish_tx_sync(local, wk->sdata,
+						   wk->filter_ta,
+						   IEEE80211_TX_SYNC_ASSOC);
+		} else if (wk && wk->type == IEEE80211_WORK_AUTH) {
+			if (wk->probe_auth.synced)
+				drv_finish_tx_sync(local, wk->sdata,
+						   wk->filter_ta,
+						   IEEE80211_TX_SYNC_AUTH);
+		}
+		kfree(wk);
+
 		/*
 		 * If somebody requests authentication and we haven't
 		 * sent out an auth frame yet there's no need to send
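The mlme.c rework keeps only the search and list_del_rcu() (followed by synchronize_rcu()) inside local->mtx, remembers the matching work item, and performs the dummy-sta/TX-sync teardown and kfree() after the lock is dropped. Below is a userspace analogue of that "unlink under the lock, clean up afterwards" shape, using a plain singly-linked list and a pthread mutex; every name here is invented for the example.

/* Unlink the matching item while holding the lock; do the heavier
 * cleanup only after the lock has been released. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct work {
	struct work *next;
	char name[16];
};

static struct work *work_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void cancel_work(const char *name)
{
	struct work **pp, *found = NULL;

	pthread_mutex_lock(&list_lock);
	for (pp = &work_list; *pp; pp = &(*pp)->next) {
		if (!strcmp((*pp)->name, name)) {
			found = *pp;
			*pp = found->next;	/* unlink while locked */
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);

	if (found) {
		/* heavier teardown happens outside the lock */
		printf("cleaning up %s\n", found->name);
		free(found);
	}
}

int main(void)
{
	struct work *w = calloc(1, sizeof(*w));

	if (!w)
		return 1;
	strcpy(w->name, "assoc");
	w->next = work_list;
	work_list = w;
	cancel_work("assoc");
	return 0;
}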
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index bb6ad81b671d..424ff622ab5f 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -68,7 +68,6 @@ static int rds_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
 	struct rds_sock *rs;
-	unsigned long flags;
 
 	if (!sk)
 		goto out;
@@ -94,10 +93,10 @@ static int rds_release(struct socket *sock)
 	rds_rdma_drop_keys(rs);
 	rds_notify_queue_get(rs, NULL);
 
-	spin_lock_irqsave(&rds_sock_lock, flags);
+	spin_lock_bh(&rds_sock_lock);
 	list_del_init(&rs->rs_item);
 	rds_sock_count--;
-	spin_unlock_irqrestore(&rds_sock_lock, flags);
+	spin_unlock_bh(&rds_sock_lock);
 
 	rds_trans_put(rs->rs_transport);
 
@@ -409,7 +408,6 @@ static const struct proto_ops rds_proto_ops = {
 
 static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
 {
-	unsigned long flags;
 	struct rds_sock *rs;
 
 	sock_init_data(sock, sk);
@@ -426,10 +424,10 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
 	spin_lock_init(&rs->rs_rdma_lock);
 	rs->rs_rdma_keys = RB_ROOT;
 
-	spin_lock_irqsave(&rds_sock_lock, flags);
+	spin_lock_bh(&rds_sock_lock);
 	list_add_tail(&rs->rs_item, &rds_sock_list);
 	rds_sock_count++;
-	spin_unlock_irqrestore(&rds_sock_lock, flags);
+	spin_unlock_bh(&rds_sock_lock);
 
 	return 0;
 }
@@ -471,12 +469,11 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
 {
 	struct rds_sock *rs;
 	struct rds_incoming *inc;
-	unsigned long flags;
 	unsigned int total = 0;
 
 	len /= sizeof(struct rds_info_message);
 
-	spin_lock_irqsave(&rds_sock_lock, flags);
+	spin_lock_bh(&rds_sock_lock);
 
 	list_for_each_entry(rs, &rds_sock_list, rs_item) {
 		read_lock(&rs->rs_recv_lock);
@@ -492,7 +489,7 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
 		read_unlock(&rs->rs_recv_lock);
 	}
 
-	spin_unlock_irqrestore(&rds_sock_lock, flags);
+	spin_unlock_bh(&rds_sock_lock);
 
 	lens->nr = total;
 	lens->each = sizeof(struct rds_info_message);
@@ -504,11 +501,10 @@ static void rds_sock_info(struct socket *sock, unsigned int len,
 {
 	struct rds_info_socket sinfo;
 	struct rds_sock *rs;
-	unsigned long flags;
 
 	len /= sizeof(struct rds_info_socket);
 
-	spin_lock_irqsave(&rds_sock_lock, flags);
+	spin_lock_bh(&rds_sock_lock);
 
 	if (len < rds_sock_count)
 		goto out;
@@ -529,7 +525,7 @@ out:
 	lens->nr = rds_sock_count;
 	lens->each = sizeof(struct rds_info_socket);
 
-	spin_unlock_irqrestore(&rds_sock_lock, flags);
+	spin_unlock_bh(&rds_sock_lock);
 }
 
 static void rds_exit(void)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e7e1d0b57b3d..2776012132ea 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -419,7 +419,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cb = netem_skb_cb(skb);
 	if (q->gap == 0 ||		/* not doing reordering */
-	    q->counter < q->gap ||	/* inside last reordering gap */
+	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
 	    q->reorder < get_crandom(&q->reorder_cor)) {
 		psched_time_t now;
 		psched_tdiff_t delay;
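The sch_netem.c change fixes an off-by-one in the reordering gap: with the old check a packet took the reorder branch only every gap+1 packets instead of every gap packets. A quick simulation of the counter follows, assuming (as in the surrounding enqueue code) that the counter is incremented on the delayed path and reset to zero when a packet is sent for reordering; it is a toy model, not the qdisc.

/* Count how often the "reorder" branch is taken for gap = 5. */
#include <stdio.h>

static int simulate(int gap, int fixed)
{
	int counter = 0, reordered = 0, i;

	for (i = 0; i < 100; i++) {
		int limit = fixed ? gap - 1 : gap;

		if (counter < limit) {
			counter++;		/* normal, delayed path */
		} else {
			counter = 0;		/* reordered packet */
			reordered++;
		}
	}
	return reordered;
}

int main(void)
{
	/* old check: every 6th packet; fixed check: every 5th packet */
	printf("old: %d/100, fixed: %d/100\n", simulate(5, 0), simulate(5, 1));
	return 0;
}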