author    Linus Torvalds <torvalds@linux-foundation.org>    2016-08-17 20:26:58 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-08-17 20:26:58 -0400
commit    184ca823481c99dadd7d946e5afd4bb921eab30d
tree      fd636fe467bf11ae3a11ea5d36d860296705b58f
parent    5ff132c07aa155d759ab3da946c86351313d3020
parent    b96c22c071eb1126db4055de4bb75b02b05affd1
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Buffered powersave frame test is reversed in cfg80211, fix from
    Felix Fietkau.

 2) Remove bogus WARN_ON in openvswitch, from Jarno Rajahalme.

 3) Fix some tg3 ethtool logic bugs, and one that would cause no
    interrupts to be generated when rx-coalescing is set to 0. From
    Satish Baddipadige and Siva Reddy Kallam.

 4) QLCNIC mailbox corruption and napi budget handling fix from Manish
    Chopra.

 5) Fix fib_trie logic when walking the trie during /proc/net/route
    output that can access a stale node pointer. From David Forster.

 6) Several sctp_diag fixes from Phil Sutter.

 7) PAUSE frame handling fixes in mlxsw driver from Ido Schimmel.

 8) Checksum fixup fixes in bpf from Daniel Borkmann.

 9) Memory leaks in nfnetlink, from Liping Zhang.

10) Use after free in rxrpc, from David Howells.

11) Use after free in new skb_array code of macvtap driver, from Jason
    Wang.

12) Calipso resource leak, from Colin Ian King.

13) mediatek bug fixes (missing stats sync init, etc.) from Sean Wang.

14) Fix bpf non-linear packet write helpers, from Daniel Borkmann.

15) Fix lockdep splats in macsec, from Sabrina Dubroca.

16) hv_netvsc bug fixes from Vitaly Kuznetsov, mostly to do with VF
    handling.

17) Various tc-action bug fixes, from CONG Wang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (116 commits)
  net_sched: allow flushing tc police actions
  net_sched: unify the init logic for act_police
  net_sched: convert tcf_exts from list to pointer array
  net_sched: move tc offload macros to pkt_cls.h
  net_sched: fix a typo in tc_for_each_action()
  net_sched: remove an unnecessary list_del()
  net_sched: remove the leftover cleanup_a()
  mlxsw: spectrum: Allow packets to be trapped from any PG
  mlxsw: spectrum: Unmap 802.1Q FID before destroying it
  mlxsw: spectrum: Add missing rollbacks in error path
  mlxsw: reg: Fix missing op field fill-up
  mlxsw: spectrum: Trap loop-backed packets
  mlxsw: spectrum: Add missing packet traps
  mlxsw: spectrum: Mark port as active before registering it
  mlxsw: spectrum: Create PVID vPort before registering netdevice
  mlxsw: spectrum: Remove redundant errors from the code
  mlxsw: spectrum: Don't return upon error in removal path
  i40e: check for and deal with non-contiguous TCs
  ixgbe: Re-enable ability to toggle VLAN filtering
  ixgbe: Force VLNCTRL.VFE to be set in all VMDq paths
  ...
-rw-r--r--  Documentation/networking/rxrpc.txt | 21
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/dsa/b53/b53_regs.h | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 5
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 2
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 66
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 35
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 23
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 49
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 115
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 223
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 95
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c | 2
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 24
-rw-r--r--  drivers/net/hyperv/netvsc.c | 19
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 105
-rw-r--r--  drivers/net/macsec.c | 52
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/macvtap.c | 5
-rw-r--r--  drivers/net/phy/micrel.c | 27
-rw-r--r--  drivers/net/vxlan.c | 34
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 5
-rw-r--r--  fs/afs/cmservice.c | 78
-rw-r--r--  fs/afs/fsclient.c | 221
-rw-r--r--  fs/afs/internal.h | 14
-rw-r--r--  fs/afs/rxrpc.c | 73
-rw-r--r--  fs/afs/vlclient.c | 11
-rw-r--r--  include/linux/netdevice.h | 3
-rw-r--r--  include/linux/qed/qed_if.h | 8
-rw-r--r--  include/linux/sctp.h | 64
-rw-r--r--  include/linux/skbuff.h | 52
-rw-r--r--  include/net/act_api.h | 23
-rw-r--r--  include/net/af_rxrpc.h | 2
-rw-r--r--  include/net/gre.h | 1
-rw-r--r--  include/net/inet_ecn.h | 3
-rw-r--r--  include/net/mac80211.h | 3
-rw-r--r--  include/net/pkt_cls.h | 41
-rw-r--r--  include/uapi/linux/bpf.h | 4
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 2
-rw-r--r--  include/uapi/linux/sctp.h | 64
-rw-r--r--  kernel/bpf/hashtab.c | 84
-rw-r--r--  kernel/bpf/verifier.c | 7
-rw-r--r--  lib/rhashtable.c | 13
-rw-r--r--  lib/test_rhashtable.c | 2
-rw-r--r--  net/8021q/vlan.c | 2
-rw-r--r--  net/bridge/br_fdb.c | 52
-rw-r--r--  net/core/dev.c | 10
-rw-r--r--  net/core/filter.c | 109
-rw-r--r--  net/ipv4/fib_trie.c | 8
-rw-r--r--  net/ipv4/ip_gre.c | 1
-rw-r--r--  net/ipv4/ip_vti.c | 31
-rw-r--r--  net/ipv6/addrconf.c | 4
-rw-r--r--  net/ipv6/calipso.c | 4
-rw-r--r--  net/ipv6/ip6_gre.c | 2
-rw-r--r--  net/ipv6/ping.c | 33
-rw-r--r--  net/irda/iriap.c | 8
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/driver-ops.h | 2
-rw-r--r--  net/mac80211/mesh.c | 10
-rw-r--r--  net/mac80211/rx.c | 2
-rw-r--r--  net/mac80211/status.c | 14
-rw-r--r--  net/mac80211/tx.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_h323_main.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 4
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 6
-rw-r--r--  net/netfilter/nft_exthdr.c | 11
-rw-r--r--  net/netfilter/nft_rbtree.c | 10
-rw-r--r--  net/openvswitch/conntrack.c | 8
-rw-r--r--  net/openvswitch/vport-geneve.c | 9
-rw-r--r--  net/openvswitch/vport-gre.c | 11
-rw-r--r--  net/openvswitch/vport-internal_dev.c | 2
-rw-r--r--  net/openvswitch/vport-vxlan.c | 9
-rw-r--r--  net/rxrpc/ar-internal.h | 1
-rw-r--r--  net/rxrpc/call_accept.c | 1
-rw-r--r--  net/rxrpc/call_event.c | 7
-rw-r--r--  net/rxrpc/call_object.c | 11
-rw-r--r--  net/rxrpc/input.c | 39
-rw-r--r--  net/rxrpc/recvmsg.c | 25
-rw-r--r--  net/rxrpc/skbuff.c | 41
-rw-r--r--  net/sched/act_api.c | 34
-rw-r--r--  net/sched/act_police.c | 62
-rw-r--r--  net/sched/cls_api.c | 51
-rw-r--r--  net/sctp/proc.c | 1
-rw-r--r--  net/sctp/sctp_diag.c | 18
-rw-r--r--  net/sctp/ulpevent.c | 4
-rw-r--r--  net/tipc/monitor.c | 3
-rw-r--r--  net/tipc/socket.c | 3
-rw-r--r--  net/wireless/chan.c | 1
-rw-r--r--  net/wireless/nl80211.c | 34
-rw-r--r--  samples/bpf/bpf_helpers.h | 4
-rw-r--r--  samples/bpf/test_cgrp2_tc_kern.c | 2
-rw-r--r--  samples/bpf/test_maps.c | 15
116 files changed, 1486 insertions, 1109 deletions
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index 16a924c486bf..70c926ae212d 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -790,13 +790,12 @@ The kernel interface functions are as follows:
     Data messages can have their contents extracted with the usual bunch of
     socket buffer manipulation functions. A data message can be determined to
     be the last one in a sequence with rxrpc_kernel_is_data_last(). When a
-    data message has been used up, rxrpc_kernel_data_delivered() should be
-    called on it..
+    data message has been used up, rxrpc_kernel_data_consumed() should be
+    called on it.
 
-    Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose
-    of. It is possible to get extra refs on all types of message for later
-    freeing, but this may pin the state of a call until the message is finally
-    freed.
+    Messages should be handled to rxrpc_kernel_free_skb() to dispose of. It
+    is possible to get extra refs on all types of message for later freeing,
+    but this may pin the state of a call until the message is finally freed.
 
 (*) Accept an incoming call.
 
@@ -821,12 +820,14 @@ The kernel interface functions are as follows:
     Other errors may be returned if the call had been aborted (-ECONNABORTED)
     or had timed out (-ETIME).
 
- (*) Record the delivery of a data message and free it.
+ (*) Record the delivery of a data message.
 
-	void rxrpc_kernel_data_delivered(struct sk_buff *skb);
+	void rxrpc_kernel_data_consumed(struct rxrpc_call *call,
+					struct sk_buff *skb);
 
-    This is used to record a data message as having been delivered and to
-    update the ACK state for the call. The socket buffer will be freed.
+    This is used to record a data message as having been consumed and to
+    update the ACK state for the call. The message must still be passed to
+    rxrpc_kernel_free_skb() for disposal by the caller.
 
 (*) Free a message.
 
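The behavioural change documented here is easy to miss: rxrpc_kernel_data_consumed() no longer frees the socket buffer, so every data message must still be handed to rxrpc_kernel_free_skb() afterwards. A minimal sketch of the new calling order, using no-op stand-in types and functions rather than the real kernel API:

    /* Stand-ins only: struct rxrpc_call, struct sk_buff and the two
     * rxrpc_* functions below are toy mocks, not the kernel API. */
    #include <stdio.h>

    struct sk_buff { int seq; };
    struct rxrpc_call { int acked; };

    /* mock: record the message as consumed, advancing the ACK state */
    static void rxrpc_kernel_data_consumed(struct rxrpc_call *call,
                                           struct sk_buff *skb)
    {
        call->acked = skb->seq;
    }

    /* mock: disposal is now always the caller's job */
    static void rxrpc_kernel_free_skb(struct sk_buff *skb)
    {
        printf("freed message %d\n", skb->seq);
    }

    int main(void)
    {
        struct rxrpc_call call = { 0 };
        struct sk_buff msgs[] = { { 1 }, { 2 }, { 3 } };

        for (int i = 0; i < 3; i++) {
            /* ...extract the payload of msgs[i] here... */
            rxrpc_kernel_data_consumed(&call, &msgs[i]);
            rxrpc_kernel_free_skb(&msgs[i]);  /* still required */
        }
        printf("ACK state at %d\n", call.acked);
        return 0;
    }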
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1f276fa30ba6..217e8da0628c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -152,7 +152,7 @@ module_param(lacp_rate, charp, 0);
 MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
 			    "0 for slow, 1 for fast");
 module_param(ad_select, charp, 0);
-MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; "
+MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
 			    "0 for stable (default), 1 for bandwidth, "
 			    "2 for count");
 module_param(min_links, int, 0);
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index 8f12bddd5dc9..a0b453ea34c9 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -258,7 +258,7 @@
  * BCM5325 and BCM5365 share most definitions below
  */
 #define B53_ARLTBL_MAC_VID_ENTRY(n)	(0x10 * (n))
-#define ARLTBL_MAC_MASK			0xffffffffffff
+#define ARLTBL_MAC_MASK			0xffffffffffffULL
 #define ARLTBL_VID_S			48
 #define ARLTBL_VID_MASK_25		0xff
 #define ARLTBL_VID_MASK			0xfff
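The one-character ULL suffix matters once a mask or a neighbouring constant is shifted toward the VID bits: a plain hex constant gets an ordinary integer type, and shifting a 32-bit int by ARLTBL_VID_S (48) is undefined. A standalone illustration of the general hazard, reusing the VID constants from this header (the exact failure the driver hit is not reproduced here):

    #include <stdio.h>
    #include <stdint.h>

    #define ARLTBL_VID_S    48
    #define VID_MASK        0xfff     /* type int: "VID_MASK << 48" would be undefined */
    #define VID_MASK_ULL    0xfffULL  /* 64-bit from the start: the shift is well-defined */

    int main(void)
    {
        /* casting before the shift, or using a ULL constant, keeps
         * the whole computation in 64 bits */
        uint64_t a = (uint64_t)VID_MASK << ARLTBL_VID_S;
        uint64_t b = VID_MASK_ULL << ARLTBL_VID_S;

        printf("%llx %llx\n", (unsigned long long)a,
               (unsigned long long)b);
        return 0;
    }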
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index d36aedde8cb9..d1d9d3cf9139 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3187,6 +3187,7 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
 	return err;
 }
 
+#ifdef CONFIG_NET_DSA_HWMON
 static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page,
 				    int reg)
 {
@@ -3212,6 +3213,7 @@ static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page,
 
 	return ret;
 }
+#endif
 
 static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port)
 {
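The new #ifdef fence exists because these MDIO page helpers are only called by code compiled under CONFIG_NET_DSA_HWMON; with the option off, the unreferenced static functions trigger -Wunused-function warnings. A generic sketch of the pattern, with CONFIG_DEMO_HWMON as a made-up stand-in for the Kconfig symbol (compile with -DCONFIG_DEMO_HWMON to enable it):

    #include <stdio.h>

    #ifdef CONFIG_DEMO_HWMON
    /* only referenced by the optional feature, so fenced with it */
    static int demo_read_temp(void)
    {
        return 42; /* pretend register read */
    }
    #endif

    int main(void)
    {
    #ifdef CONFIG_DEMO_HWMON
        printf("temp: %d\n", demo_read_temp());
    #else
        printf("hwmon support compiled out\n");
    #endif
        return 0;
    }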
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 37a0f463b8de..18bb9556dd00 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -793,6 +793,8 @@ int xgene_enet_phy_connect(struct net_device *ndev)
 		netdev_err(ndev, "Could not connect to PHY\n");
 		return -ENODEV;
 	}
+#else
+	return -ENODEV;
 #endif
 	}
 
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 4bff0f3040df..b0da9693f28a 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -771,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface)
 	priv->dev = dev;
 
 	priv->regs = devm_ioremap_resource(dev, &res_regs);
-	if (IS_ERR(priv->regs))
-		return PTR_ERR(priv->regs);
+	if (IS_ERR(priv->regs)) {
+		err = PTR_ERR(priv->regs);
+		goto out_put_node;
+	}
 
 	dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);
 
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ff300f7cf529..659261218d9f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12552,10 +12552,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 			info->data = TG3_RSS_MAX_NUM_QS;
 		}
 
-		/* The first interrupt vector only
-		 * handles link interrupts.
-		 */
-		info->data -= 1;
 		return 0;
 
 	default:
@@ -14014,6 +14010,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
 	}
 
 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
+	    (!ec->rx_coalesce_usecs) ||
 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 36893d8958d4..b6fcf10621b6 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -403,11 +403,11 @@
 #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII	0x00000004
 #define MACB_CAPS_NO_GIGABIT_HALF		0x00000008
 #define MACB_CAPS_USRIO_DISABLED		0x00000010
+#define MACB_CAPS_JUMBO				0x00000020
 #define MACB_CAPS_FIFO_MODE			0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE	0x20000000
 #define MACB_CAPS_SG_DISABLED			0x40000000
 #define MACB_CAPS_MACB_IS_GEM			0x80000000
-#define MACB_CAPS_JUMBO				0x00000010
 
 /* Bit manipulation macros */
 #define MACB_BIT(name)					\
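The macb change is a bit-collision fix: MACB_CAPS_JUMBO previously shared 0x00000010 with MACB_CAPS_USRIO_DISABLED, so the two capabilities were indistinguishable. Collisions like this can be caught at compile time; a sketch with shortened stand-ins for the MACB_CAPS_* names (C11, for static_assert via <assert.h>):

    #include <assert.h>
    #include <stdio.h>

    #define CAPS_USRIO_DISABLED 0x00000010
    #define CAPS_JUMBO          0x00000020 /* was 0x00000010: collided */

    /* fails to compile if the two flags ever share a bit again */
    static_assert((CAPS_USRIO_DISABLED & CAPS_JUMBO) == 0,
                  "capability bits overlap");

    int main(void)
    {
        unsigned int caps = CAPS_JUMBO;

        printf("jumbo=%d usrio_disabled=%d\n",
               !!(caps & CAPS_JUMBO), !!(caps & CAPS_USRIO_DISABLED));
        return 0;
    }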
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 1471e16ba719..f45385f5c6e5 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1299,6 +1299,7 @@ static int
 dm9000_open(struct net_device *dev)
 {
 	struct board_info *db = netdev_priv(dev);
+	unsigned int irq_flags = irq_get_trigger_type(dev->irq);
 
 	if (netif_msg_ifup(db))
 		dev_dbg(db->dev, "enabling %s\n", dev->name);
@@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev)
 	/* If there is no IRQ type specified, tell the user that this is a
 	 * problem
 	 */
-	if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
+	if (irq_flags == IRQF_TRIGGER_NONE)
 		dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
 
+	irq_flags |= IRQF_SHARED;
+
 	/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
 	iow(db, DM9000_GPR, 0);	/* REG_1F bit0 activate phyxcer */
 	mdelay(1); /* delay needs by DM9000B */
@@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev)
 	/* Initialize DM9000 board */
 	dm9000_init_dm9000(dev);
 
-	if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
-			dev->name, dev))
+	if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
 		return -EAGAIN;
 	/* Now that we have an interrupt handler hooked up we can unmask
 	 * our interrupts
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 1235c7f2564b..1e1eb92998fb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = {
 	{"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
 	{"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
 	{"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
-	{"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+	{"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
 	{"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
 	{"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
 	{"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 7fd4d54599e4..6b03c8553e59 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = {
 				  | FLAG2_DISABLE_ASPM_L0S
 				  | FLAG2_DISABLE_ASPM_L1
 				  | FLAG2_NO_DISABLE_RX
-				  | FLAG2_DMA_BURST,
+				  | FLAG2_DMA_BURST
+				  | FLAG2_CHECK_SYSTIM_OVERFLOW,
 	.pba			= 32,
 	.max_hw_frame_size	= DEFAULT_JUMBO,
 	.get_variants		= e1000_get_variants_82571,
@@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = {
 				  | FLAG_HAS_CTRLEXT_ON_LOAD,
 	.flags2			= FLAG2_DISABLE_ASPM_L0S
 				  | FLAG2_DISABLE_ASPM_L1
-				  | FLAG2_NO_DISABLE_RX,
+				  | FLAG2_NO_DISABLE_RX
+				  | FLAG2_CHECK_SYSTIM_OVERFLOW,
 	.pba			= 32,
 	.max_hw_frame_size	= DEFAULT_JUMBO,
 	.get_variants		= e1000_get_variants_82571,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ef96cd11d6d2..879cca47b021 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -452,6 +452,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
 #define FLAG2_PCIM2PCI_ARBITER_WA	BIT(11)
 #define FLAG2_DFLT_CRC_STRIPPING	BIT(12)
 #define FLAG2_CHECK_RX_HWTSTAMP		BIT(13)
+#define FLAG2_CHECK_SYSTIM_OVERFLOW	BIT(14)
 
 #define E1000_RX_DESC_PS(R, i)	    \
 	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 3e11322d8d58..f3aaca743ea3 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = {
 				  | FLAG_HAS_JUMBO_FRAMES
 				  | FLAG_APME_IN_WUC,
 	.flags2			= FLAG2_HAS_PHY_STATS
-				  | FLAG2_HAS_EEE,
+				  | FLAG2_HAS_EEE
+				  | FLAG2_CHECK_SYSTIM_OVERFLOW,
 	.pba			= 26,
 	.max_hw_frame_size	= 9022,
 	.get_variants		= e1000_get_variants_ich8lan,
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 02f443958f31..7017281ba2dc 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4303,6 +4303,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
 }
 
 /**
+ * e1000e_sanitize_systim - sanitize raw cycle counter reads
+ * @hw: pointer to the HW structure
+ * @systim: cycle_t value read, sanitized and returned
+ *
+ * Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
+ * check to see that the time is incrementing at a reasonable
+ * rate and is a multiple of incvalue.
+ **/
+static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim)
+{
+	u64 time_delta, rem, temp;
+	cycle_t systim_next;
+	u32 incvalue;
+	int i;
+
+	incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+	for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+		/* latch SYSTIMH on read of SYSTIML */
+		systim_next = (cycle_t)er32(SYSTIML);
+		systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+		time_delta = systim_next - systim;
+		temp = time_delta;
+		/* VMWare users have seen incvalue of zero, don't div / 0 */
+		rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
+
+		systim = systim_next;
+
+		if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0))
+			break;
+	}
+
+	return systim;
+}
+
+/**
  * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
  * @cc: cyclecounter structure
  **/
@@ -4312,7 +4348,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
 						     cc);
 	struct e1000_hw *hw = &adapter->hw;
 	u32 systimel, systimeh;
-	cycle_t systim, systim_next;
+	cycle_t systim;
 	/* SYSTIMH latching upon SYSTIML read does not work well.
 	 * This means that if SYSTIML overflows after we read it but before
 	 * we read SYSTIMH, the value of SYSTIMH has been incremented and we
@@ -4335,33 +4371,9 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
 	systim = (cycle_t)systimel;
 	systim |= (cycle_t)systimeh << 32;
 
-	if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
-		u64 time_delta, rem, temp;
-		u32 incvalue;
-		int i;
-
-		/* errata for 82574/82583 possible bad bits read from SYSTIMH/L
-		 * check to see that the time is incrementing at a reasonable
-		 * rate and is a multiple of incvalue
-		 */
-		incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
-		for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
-			/* latch SYSTIMH on read of SYSTIML */
-			systim_next = (cycle_t)er32(SYSTIML);
-			systim_next |= (cycle_t)er32(SYSTIMH) << 32;
-
-			time_delta = systim_next - systim;
-			temp = time_delta;
-			/* VMWare users have seen incvalue of zero, don't div / 0 */
-			rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
-
-			systim = systim_next;
+	if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
+		systim = e1000e_sanitize_systim(hw, systim);
 
-			if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
-			    (rem == 0))
-				break;
-		}
-	}
 	return systim;
 }
 
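The refactored helper keeps the errata workaround intact: accept a SYSTIM read only once the delta from the previous read is small and an exact multiple of the timer increment, guarding the division because incvalue can be zero under VMware. A userspace model of that loop (fake_read() and both bounds are invented for the demo; do_div() is replaced by the % operator):

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_REREADS 10
    #define EPSILON     12800 /* plausibility bound on the delta */

    static uint64_t clock_now = 1000;

    static uint64_t fake_read(uint32_t incvalue)
    {
        static int glitch = 1;

        clock_now += incvalue;
        /* the first read returns a corrupted value, as the errata describes */
        return glitch-- > 0 ? clock_now | (1ULL << 40) : clock_now;
    }

    static uint64_t sanitize(uint64_t systim, uint32_t incvalue)
    {
        for (int i = 0; i < MAX_REREADS; i++) {
            uint64_t next = fake_read(incvalue);
            uint64_t delta = next - systim;
            /* guard the modulo: incvalue of zero has been seen in VMs */
            uint64_t rem = incvalue ? delta % incvalue : (delta != 0);

            systim = next;
            if (delta < EPSILON && rem == 0)
                break;
        }
        return systim;
    }

    int main(void)
    {
        printf("sanitized: %llu\n",
               (unsigned long long)sanitize(fake_read(8), 8));
        return 0;
    }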
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 81c99e1be708..c6ac7a61812f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4554,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
  **/
 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
 {
+	int i, tc_unused = 0;
 	u8 num_tc = 0;
-	int i;
+	u8 ret = 0;
 
 	/* Scan the ETS Config Priority Table to find
 	 * traffic class enabled for a given priority
-	 * and use the traffic class index to get the
-	 * number of traffic classes enabled
+	 * and create a bitmask of enabled TCs
 	 */
-	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
-		if (dcbcfg->etscfg.prioritytable[i] > num_tc)
-			num_tc = dcbcfg->etscfg.prioritytable[i];
-	}
+	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
 
-	/* Traffic class index starts from zero so
-	 * increment to return the actual count
+	/* Now scan the bitmask to check for
+	 * contiguous TCs starting with TC0
 	 */
-	return num_tc + 1;
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (num_tc & BIT(i)) {
+			if (!tc_unused) {
+				ret++;
+			} else {
+				pr_err("Non-contiguous TC - Disabling DCB\n");
+				return 1;
+			}
+		} else {
+			tc_unused = 1;
+		}
+	}
+
+	/* There is always at least TC0 */
+	if (!ret)
+		ret = 1;
+
+	return ret;
 }
 
 /**
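The rewritten i40e_dcb_get_num_tc() counts traffic classes only while the set bits run unbroken from TC0, and collapses to a single TC on any gap. The same check, lifted into a standalone function (MAX_TC and the sample masks are arbitrary for the demo):

    #include <stdio.h>

    #define MAX_TC 8

    static int num_contiguous_tc(unsigned int mask)
    {
        int ret = 0, tc_unused = 0;

        for (int i = 0; i < MAX_TC; i++) {
            if (mask & (1u << i)) {
                if (tc_unused)
                    return 1; /* gap before this bit: fall back to TC0 only */
                ret++;
            } else {
                tc_unused = 1;
            }
        }
        return ret ? ret : 1; /* there is always at least TC0 */
    }

    int main(void)
    {
        printf("%d\n", num_contiguous_tc(0x07)); /* TC0-2 -> 3 */
        printf("%d\n", num_contiguous_tc(0x05)); /* gap   -> 1 */
        printf("%d\n", num_contiguous_tc(0x00)); /* none  -> 1 */
        return 0;
    }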
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index e61b647f5f2a..336c103ae374 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -744,7 +744,8 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
 		}
 	}
 
-	shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust);
+	shhwtstamps.hwtstamp =
+		ktime_add_ns(shhwtstamps.hwtstamp, adjust);
 
 	skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
 	dev_kfree_skb_any(adapter->ptp_tx_skb);
@@ -767,13 +768,32 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
 			 struct sk_buff *skb)
 {
 	__le64 *regval = (__le64 *)va;
+	struct igb_adapter *adapter = q_vector->adapter;
+	int adjust = 0;
 
 	/* The timestamp is recorded in little endian format.
 	 * DWORD: 0        1        2        3
 	 * Field: Reserved Reserved SYSTIML  SYSTIMH
 	 */
-	igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+	igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
 				   le64_to_cpu(regval[1]));
+
+	/* adjust timestamp for the RX latency based on link speed */
+	if (adapter->hw.mac.type == e1000_i210) {
+		switch (adapter->link_speed) {
+		case SPEED_10:
+			adjust = IGB_I210_RX_LATENCY_10;
+			break;
+		case SPEED_100:
+			adjust = IGB_I210_RX_LATENCY_100;
+			break;
+		case SPEED_1000:
+			adjust = IGB_I210_RX_LATENCY_1000;
+			break;
+		}
+	}
+	skb_hwtstamps(skb)->hwtstamp =
+		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
 }
 
 /**
@@ -825,7 +845,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
 		}
 	}
 	skb_hwtstamps(skb)->hwtstamp =
-		ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
 
 	/* Update the last_rx_timestamp timer in order to enable watchdog check
 	 * for error case of latched timestamp on a dropped packet.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5418c69a7463..b4f03748adc0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4100,6 +4100,8 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 vlnctrl, i;
 
+	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
 	switch (hw->mac.type) {
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
@@ -4112,8 +4114,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
 		/* fall through */
 	case ixgbe_mac_82598EB:
 		/* legacy case, we can just disable VLAN filtering */
-		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 		return;
 	}
@@ -4125,6 +4126,10 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
 	/* Set flag so we don't redo unnecessary work */
 	adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
 
+	/* For VMDq and SR-IOV we must leave VLAN filtering enabled */
+	vlnctrl |= IXGBE_VLNCTRL_VFE;
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
 	/* Add PF to all active pools */
 	for (i = IXGBE_VLVF_ENTRIES; --i;) {
 		u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
@@ -4191,6 +4196,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 vlnctrl, i;
 
+	/* Set VLAN filtering to enabled */
+	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlnctrl |= IXGBE_VLNCTRL_VFE;
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
 	switch (hw->mac.type) {
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
@@ -4202,10 +4212,6 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
 		break;
 		/* fall through */
 	case ixgbe_mac_82598EB:
-		vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-		vlnctrl |= IXGBE_VLNCTRL_VFE;
-		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 		return;
 	}
 
@@ -8390,12 +8396,14 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
 			    struct tcf_exts *exts, u64 *action, u8 *queue)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 	int err;
 
 	if (tc_no_actions(exts))
 		return -EINVAL;
 
-	tc_for_each_action(a, exts) {
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 
 		/* Drop action */
 		if (is_tcf_gact_shot(a)) {
@@ -9517,6 +9525,7 @@ skip_sriov:
 
 	/* copy netdev features into list of user selectable features */
 	netdev->hw_features |= netdev->features |
+			       NETIF_F_HW_VLAN_CTAG_FILTER |
 			       NETIF_F_HW_VLAN_CTAG_RX |
 			       NETIF_F_HW_VLAN_CTAG_TX |
 			       NETIF_F_RXALL |
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b57ae3afb994..f1609542adf1 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -245,12 +245,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	case PHY_INTERFACE_MODE_MII:
 		ge_mode = 1;
 		break;
-	case PHY_INTERFACE_MODE_RMII:
+	case PHY_INTERFACE_MODE_REVMII:
 		ge_mode = 2;
 		break;
+	case PHY_INTERFACE_MODE_RMII:
+		if (!mac->id)
+			goto err_phy;
+		ge_mode = 3;
+		break;
 	default:
-		dev_err(eth->dev, "invalid phy_mode\n");
-		return -1;
+		goto err_phy;
 	}
 
 	/* put the gmac into the right mode */
@@ -263,13 +267,25 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	mac->phy_dev->autoneg = AUTONEG_ENABLE;
 	mac->phy_dev->speed = 0;
 	mac->phy_dev->duplex = 0;
+
+	if (of_phy_is_fixed_link(mac->of_node))
+		mac->phy_dev->supported |=
+			SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
 	mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
 				   SUPPORTED_Asym_Pause;
 	mac->phy_dev->advertising = mac->phy_dev->supported |
 				    ADVERTISED_Autoneg;
 	phy_start_aneg(mac->phy_dev);
 
+	of_node_put(np);
+
 	return 0;
+
+err_phy:
+	of_node_put(np);
+	dev_err(eth->dev, "invalid phy_mode\n");
+	return -EINVAL;
 }
 
 static int mtk_mdio_init(struct mtk_eth *eth)
@@ -542,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
 	return &ring->buf[idx];
 }
 
-static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
 {
 	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-		dma_unmap_single(dev,
+		dma_unmap_single(eth->dev,
 				 dma_unmap_addr(tx_buf, dma_addr0),
 				 dma_unmap_len(tx_buf, dma_len0),
 				 DMA_TO_DEVICE);
 	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-		dma_unmap_page(dev,
+		dma_unmap_page(eth->dev,
 			       dma_unmap_addr(tx_buf, dma_addr0),
 			       dma_unmap_len(tx_buf, dma_len0),
 			       DMA_TO_DEVICE);
@@ -595,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	if (skb_vlan_tag_present(skb))
 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
 
-	mapped_addr = dma_map_single(&dev->dev, skb->data,
+	mapped_addr = dma_map_single(eth->dev, skb->data,
 				     skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 		return -ENOMEM;
 
 	WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -623,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 		n_desc++;
 		frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
-		mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+		mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
 					       frag_map_size,
 					       DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+		if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 			goto err_dma;
 
 		if (i == nr_frags - 1 &&
@@ -679,7 +695,7 @@ err_dma:
 		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
 		/* unmap dma */
-		mtk_tx_unmap(&dev->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -836,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+		dma_addr = dma_map_single(eth->dev,
 					  new_data + NET_SKB_PAD,
 					  ring->buf_size,
 					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
 			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
@@ -855,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		}
 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 
-		dma_unmap_single(&netdev->dev, trxd.rxd1,
+		dma_unmap_single(eth->dev, trxd.rxd1,
 				 ring->buf_size, DMA_FROM_DEVICE);
 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
@@ -937,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 			done[mac]++;
 			budget--;
 		}
-		mtk_tx_unmap(eth->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		ring->last_free = desc;
 		atomic_inc(&ring->free_count);
@@ -1092,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 
 	if (ring->buf) {
 		for (i = 0; i < MTK_DMA_SIZE; i++)
-			mtk_tx_unmap(eth->dev, &ring->buf[i]);
+			mtk_tx_unmap(eth, &ring->buf[i]);
 		kfree(ring->buf);
 		ring->buf = NULL;
 	}
@@ -1751,6 +1767,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
 		goto free_netdev;
 	}
 	spin_lock_init(&mac->hw_stats->stats_lock);
+	u64_stats_init(&mac->hw_stats->syncp);
 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
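Several of the mtk_phy_connect() exits above were leaking the device-node reference; the fix funnels every failure through a single err_phy label that drops the reference before returning. The same cleanup shape in a self-contained sketch, with strdup()/free() standing in for of_node_get()/of_node_put():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int connect_phy(const char *mode)
    {
        char *node = strdup(mode); /* stand-in for taking a reference */

        if (!node)
            return -12; /* -ENOMEM */

        if (strcmp(mode, "rmii") && strcmp(mode, "revmii"))
            goto err_phy; /* every failure path drops the reference */

        printf("connected in %s mode\n", mode);
        free(node); /* release on the success path, too */
        return 0;

    err_phy:
        free(node); /* single release point for errors */
        fprintf(stderr, "invalid phy_mode\n");
        return -22; /* -EINVAL */
    }

    int main(void)
    {
        connect_phy("rmii");  /* succeeds */
        connect_phy("bogus"); /* fails, but still frees the node */
        return 0;
    }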
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 0f19b01e3fff..dc8b1cb0fdc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -318,6 +318,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				u32 *action, u32 *flow_tag)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 
 	if (tc_no_actions(exts))
 		return -EINVAL;
@@ -325,7 +326,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
 	*action = 0;
 
-	tc_for_each_action(a, exts) {
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 		/* Only support a single action per rule */
 		if (*action)
 			return -EINVAL;
@@ -362,13 +364,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				u32 *action, u32 *dest_vport)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 
 	if (tc_no_actions(exts))
 		return -EINVAL;
 
 	*action = 0;
 
-	tc_for_each_action(a, exts) {
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 		/* Only support a single action per rule */
 		if (*action)
 			return -EINVAL;
@@ -503,6 +507,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 	struct mlx5e_tc_flow *flow;
 	struct tc_action *a;
 	struct mlx5_fc *counter;
+	LIST_HEAD(actions);
 	u64 bytes;
 	u64 packets;
 	u64 lastuse;
@@ -518,7 +523,8 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 
 	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 
-	tc_for_each_action(a, f->exts)
+	tcf_exts_to_list(f->exts, &actions);
+	list_for_each_entry(a, &actions, list)
 		tcf_action_stats_update(a, bytes, packets, lastuse);
 
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 7ca9201f7dcb..1721098eef13 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3383,6 +3383,15 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
  */
 MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
 
+/* reg_ritr_lb_en
+ * Loop-back filter enable for unicast packets.
+ * If the flag is set then loop-back filter for unicast packets is
+ * implemented on the RIF. Multicast packets are always subject to
+ * loop-back filtering.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1);
+
 /* reg_ritr_virtual_router
  * Virtual router ID associated with the router interface.
  * Access: RW
@@ -3484,6 +3493,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
 	mlxsw_reg_ritr_op_set(payload, op);
 	mlxsw_reg_ritr_rif_set(payload, rif);
 	mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
+	mlxsw_reg_ritr_lb_en_set(payload, 1);
 	mlxsw_reg_ritr_mtu_set(payload, mtu);
 	mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
 }
@@ -4000,6 +4010,7 @@ static inline void mlxsw_reg_ralue_pack(char *payload,
 {
 	MLXSW_REG_ZERO(ralue, payload);
 	mlxsw_reg_ralue_protocol_set(payload, protocol);
+	mlxsw_reg_ralue_op_set(payload, op);
 	mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
 	mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
 	mlxsw_reg_ralue_entry_type_set(payload,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index c3e61500819d..1f8168906811 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -942,8 +942,8 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
 	kfree(mlxsw_sp_vport);
 }
 
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
-			  u16 vid)
+static int mlxsw_sp_port_add_vid(struct net_device *dev,
+				 __be16 __always_unused proto, u16 vid)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	struct mlxsw_sp_port *mlxsw_sp_vport;
@@ -956,16 +956,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
 	if (!vid)
 		return 0;
 
-	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
-		netdev_warn(dev, "VID=%d already configured\n", vid);
+	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
 		return 0;
-	}
 
 	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
-	if (!mlxsw_sp_vport) {
-		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
+	if (!mlxsw_sp_vport)
 		return -ENOMEM;
-	}
 
 	/* When adding the first VLAN interface on a bridged port we need to
 	 * transition all the active 802.1Q bridge VLANs to use explicit
@@ -973,24 +969,17 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
 	 */
 	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
 		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
-		if (err) {
-			netdev_err(dev, "Failed to set to Virtual mode\n");
+		if (err)
 			goto err_port_vp_mode_trans;
-		}
 	}
 
 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
-	if (err) {
-		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
+	if (err)
 		goto err_port_vid_learning_set;
-	}
 
 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
-	if (err) {
-		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
-			   vid);
+	if (err)
 		goto err_port_add_vid;
-	}
 
 	return 0;
 
@@ -1010,7 +999,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	struct mlxsw_sp_port *mlxsw_sp_vport;
 	struct mlxsw_sp_fid *f;
-	int err;
 
 	/* VLAN 0 is removed from HW filter when device goes down, but
 	 * it is reserved in our case, so simply return.
@@ -1019,23 +1007,12 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
 		return 0;
 
 	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
-	if (!mlxsw_sp_vport) {
-		netdev_warn(dev, "VID=%d does not exist\n", vid);
+	if (WARN_ON(!mlxsw_sp_vport))
 		return 0;
-	}
 
-	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
-	if (err) {
-		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
-			   vid);
-		return err;
-	}
+	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
 
-	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
-	if (err) {
-		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
-		return err;
-	}
+	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
 
 	/* Drop FID reference. If this was the last reference the
 	 * resources will be freed.
@@ -1048,13 +1025,8 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
 	 * transition all active 802.1Q bridge VLANs to use VID to FID
 	 * mappings and set port's mode to VLAN mode.
 	 */
-	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
-		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
-		if (err) {
-			netdev_err(dev, "Failed to set to VLAN mode\n");
-			return err;
-		}
-	}
+	if (list_is_singular(&mlxsw_sp_port->vports_list))
+		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
 
 	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
 
@@ -1149,6 +1121,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 					   bool ingress)
 {
 	const struct tc_action *a;
+	LIST_HEAD(actions);
 	int err;
 
 	if (!tc_single_action(cls->exts)) {
@@ -1156,7 +1129,8 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 		return -ENOTSUPP;
 	}
 
-	tc_for_each_action(a, cls->exts) {
+	tcf_exts_to_list(cls->exts, &actions);
+	list_for_each_entry(a, &actions, list) {
 		if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
 			return -ENOTSUPP;
 
@@ -2076,6 +2050,18 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
 	return 0;
 }
 
+static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	mlxsw_sp_port->pvid = 1;
+
+	return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
+static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				bool split, u8 module, u8 width, u8 lane)
 {
@@ -2191,7 +2177,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 		goto err_port_dcb_init;
 	}
 
+	err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_pvid_vport_create;
+	}
+
 	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
@@ -2208,24 +2202,23 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 		goto err_core_port_init;
 	}
 
-	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
-	if (err)
-		goto err_port_vlan_init;
-
-	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
 	return 0;
 
-err_port_vlan_init:
-	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
 err_core_port_init:
 	unregister_netdev(dev);
 err_register_netdev:
+	mlxsw_sp->ports[local_port] = NULL;
+	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+err_port_pvid_vport_create:
+	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 err_port_dcb_init:
 err_port_ets_init:
 err_port_buffers_init:
 err_port_admin_status_set:
 err_port_mtu_set:
 err_port_speed_by_width_set:
+	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
 err_port_swid_set:
 err_port_system_port_mapping_set:
 err_dev_addr_init:
@@ -2245,12 +2238,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 
 	if (!mlxsw_sp_port)
 		return;
-	mlxsw_sp->ports[local_port] = NULL;
 	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
 	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
-	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
-	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+	mlxsw_sp->ports[local_port] = NULL;
 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
 	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
 	free_percpu(mlxsw_sp_port->pcpu_stats);
@@ -2662,6 +2655,26 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
 	{
 		.func = mlxsw_sp_rx_listener_func,
 		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_MTUERROR,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_TTLERROR,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_LBERROR,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_OSPF,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
 		.trap_id = MLXSW_TRAP_ID_IP2ME,
 	},
 	{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index f69aa37d1521..ab3feb81bd43 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -536,8 +536,6 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
 				 u16 vid);
 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
 			   u16 vid_end, bool is_member, bool untagged);
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
-			  u16 vid);
 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
 			     bool set);
 void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 074cdda7b6f3..237418a0e6e0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -330,7 +330,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 01cfb7512827..b6ed7f7c531e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port,
 	char pfcc_pl[MLXSW_REG_PFCC_LEN];
 
 	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+	mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause);
+	mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause);
 	mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);
 
 	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
@@ -351,17 +353,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
 				      struct ieee_pfc *pfc)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
 	int err;
 
-	if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
-	    pfc->pfc_en) {
+	if (pause_en && pfc->pfc_en) {
 		netdev_err(dev, "PAUSE frames already enabled on port\n");
 		return -EINVAL;
 	}
 
 	err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
 					   mlxsw_sp_port->dcb.ets->prio_tc,
-					   false, pfc);
+					   pause_en, pfc);
 	if (err) {
 		netdev_err(dev, "Failed to configure port's headroom for PFC\n");
 		return err;
@@ -380,7 +382,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
 
 err_port_pfc_set:
 	__mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
-				     mlxsw_sp_port->dcb.ets->prio_tc, false,
+				     mlxsw_sp_port->dcb.ets->prio_tc, pause_en,
 				     mlxsw_sp_port->dcb.pfc);
 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 81418d629231..90bb93b037ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1651,9 +1651,10 @@ static void mlxsw_sp_router_fib4_add_info_destroy(void const *data)
 	const struct mlxsw_sp_router_fib4_add_info *info = data;
 	struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
 	struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
+	struct mlxsw_sp_vr *vr = fib_entry->vr;
 
 	mlxsw_sp_fib_entry_destroy(fib_entry);
-	mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr);
+	mlxsw_sp_vr_put(mlxsw_sp, vr);
 	kfree(info);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index a1ad5e6bdfa8..d1b59cdfacc1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -450,6 +450,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
 
 	kfree(f);
 
+	mlxsw_sp_fid_map(mlxsw_sp, fid, false);
+
 	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
 }
 
@@ -997,13 +999,13 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
 }
 
 static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
-				     u16 vid_begin, u16 vid_end, bool init)
+				     u16 vid_begin, u16 vid_end)
 {
 	struct net_device *dev = mlxsw_sp_port->dev;
 	u16 vid, pvid;
 	int err;
 
-	if (!init && !mlxsw_sp_port->bridged)
+	if (!mlxsw_sp_port->bridged)
 		return -EINVAL;
 
 	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
@@ -1014,9 +1016,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
 		return err;
 	}
 
-	if (init)
-		goto out;
-
 	pvid = mlxsw_sp_port->pvid;
 	if (pvid >= vid_begin && pvid <= vid_end) {
 		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
@@ -1028,7 +1027,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
 
 	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
 
-out:
 	/* Changing activity bits only if HW operation succeded */
 	for (vid = vid_begin; vid <= vid_end; vid++)
 		clear_bit(vid, mlxsw_sp_port->active_vlans);
@@ -1039,8 +1037,8 @@ out:
 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
 				   const struct switchdev_obj_port_vlan *vlan)
 {
-	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
-					 vlan->vid_begin, vlan->vid_end, false);
+	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
+					 vlan->vid_end);
 }
 
 void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -1048,7 +1046,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
 	u16 vid;
 
 	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
-		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
+		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
 }
 
 static int
@@ -1546,32 +1544,6 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
 	mlxsw_sp_fdb_fini(mlxsw_sp);
 }
 
-int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
-{
-	struct net_device *dev = mlxsw_sp_port->dev;
-	int err;
-
-	/* Allow only untagged packets to ingress and tag them internally
-	 * with VID 1.
-	 */
-	mlxsw_sp_port->pvid = 1;
-	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
-					true);
-	if (err) {
-		netdev_err(dev, "Unable to init VLANs\n");
-		return err;
-	}
-
-	/* Add implicit VLAN interface in the device, so that untagged
-	 * packets will be classified to the default vFID.
-	 */
-	err = mlxsw_sp_port_add_vid(dev, 0, 1);
-	if (err)
-		netdev_err(dev, "Failed to configure default vFID\n");
-
-	return err;
-}
-
 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
 {
 	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 470d7696e9fe..ed8e30186400 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -56,6 +56,10 @@ enum {
 	MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
 	MLXSW_TRAP_ID_ARPBC = 0x50,
 	MLXSW_TRAP_ID_ARPUC = 0x51,
+	MLXSW_TRAP_ID_MTUERROR = 0x52,
+	MLXSW_TRAP_ID_TTLERROR = 0x53,
+	MLXSW_TRAP_ID_LBERROR = 0x54,
+	MLXSW_TRAP_ID_OSPF = 0x55,
 	MLXSW_TRAP_ID_IP2ME = 0x5F,
 	MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
 	MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index d0dc28f93c0e..226cb08cc055 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -52,40 +52,94 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
 		  DCBX_APP_SF_ETHTYPE);
 }
 
+static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
+{
+	u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+	/* Old MFW */
+	if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+		return qed_dcbx_app_ethtype(app_info_bitmap);
+
+	return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
+}
+
 static bool qed_dcbx_app_port(u32 app_info_bitmap)
 {
 	return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
 		  DCBX_APP_SF_PORT);
 }
 
-static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
 {
-	return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
-		  proto_id == QED_ETH_TYPE_DEFAULT);
+	u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+	/* Old MFW */
+	if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+		return qed_dcbx_app_port(app_info_bitmap);
+
+	return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
 }
 
-static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
 {
-	return !!(qed_dcbx_app_port(app_info_bitmap) &&
-		  proto_id == QED_TCP_PORT_ISCSI);
+	bool ethtype;
+
+	if (ieee)
+		ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+	else
+		ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+	return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT));
 }
 
-static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
 {
-	return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
-		  proto_id == QED_ETH_TYPE_FCOE);
+	bool port;
+
+	if (ieee)
+		port = qed_dcbx_ieee_app_port(app_info_bitmap,
+					      DCBX_APP_SF_IEEE_TCP_PORT);
+	else
+		port = qed_dcbx_app_port(app_info_bitmap);
+
+	return !!(port && (proto_id == QED_TCP_PORT_ISCSI));
 }
 
-static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
 {
-	return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
-		  proto_id == QED_ETH_TYPE_ROCE);
+	bool ethtype;
+
+	if (ieee)
+		ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+	else
+		ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+	return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE));
 }
 
-static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
 {
-	return !!(qed_dcbx_app_port(app_info_bitmap) &&
-		  proto_id == QED_UDP_PORT_TYPE_ROCE_V2);
+	bool ethtype;
+
+	if (ieee)
+		ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+	else
+		ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+	return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE));
+}
+
+static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+	bool port;
+
+	if (ieee)
+		port = qed_dcbx_ieee_app_port(app_info_bitmap,
+					      DCBX_APP_SF_IEEE_UDP_PORT);
+	else
+		port = qed_dcbx_app_port(app_info_bitmap);
+
+	return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2));
 }
 
 static void
@@ -164,17 +218,17 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
 static bool
 qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
 			       u32 app_prio_bitmap,
-			       u16 id, enum dcbx_protocol_type *type)
+			       u16 id, enum dcbx_protocol_type *type, bool ieee)
 {
-	if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) {
+	if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) {
 		*type = DCBX_PROTOCOL_FCOE;
-	} else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) {
+	} else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) {
 		*type = DCBX_PROTOCOL_ROCE;
-	} else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) {
+	} else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) {
 		*type = DCBX_PROTOCOL_ISCSI;
-	} else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) {
+	} else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
 		*type = DCBX_PROTOCOL_ETH;
-	} else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) {
+	} else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) {
 		*type = DCBX_PROTOCOL_ROCE_V2;
 	} else {
 		*type = DCBX_MAX_PROTOCOL_TYPE;
@@ -194,17 +248,18 @@ static int
 qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
 		     struct qed_dcbx_results *p_data,
 		     struct dcbx_app_priority_entry *p_tbl,
-		     u32 pri_tc_tbl, int count, bool dcbx_enabled)
+		     u32 pri_tc_tbl, int count, u8 dcbx_version)
 {
 	u8 tc, priority_map;
 	enum dcbx_protocol_type type;
+	bool enable, ieee;
 	u16 protocol_id;
 	int priority;
-	bool enable;
 	int i;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
 
+	ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
 	/* Parse APP TLV */
 	for (i = 0; i < count; i++) {
 		protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
@@ -219,7 +274,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
 
 		tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
 		if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
-						   protocol_id, &type)) {
+						   protocol_id, &type, ieee)) {
 			/* ETH always have the enable bit reset, as it gets
 			 * vlan information per packet. For other protocols,
 			 * should be set according to the dcbx_enabled
@@ -275,15 +330,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
 	struct dcbx_ets_feature *p_ets;
 	struct qed_hw_info *p_info;
 	u32 pri_tc_tbl, flags;
-	bool dcbx_enabled;
+	u8 dcbx_version;
 	int num_entries;
 	int rc = 0;
 
-	/* If DCBx version is non zero, then negotiation was
-	 * successfuly performed
-	 */
 	flags = p_hwfn->p_dcbx_info->operational.flags;
-	dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+	dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
 
 	p_app = &p_hwfn->p_dcbx_info->operational.features.app;
 	p_tbl = p_app->app_pri_tbl;
@@ -295,13 +347,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
 	num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
 
 	rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
-				  num_entries, dcbx_enabled);
+				  num_entries, dcbx_version);
 	if (rc)
 		return rc;
 
 	p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
 	data.pf_id = p_hwfn->rel_pf_id;
-	data.dcbx_enabled = dcbx_enabled;
+	data.dcbx_enabled = !!dcbx_version;
 
 	qed_dcbx_dp_protocol(p_hwfn, &data);
 
@@ -400,7 +452,7 @@ static void
 qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
 		      struct dcbx_app_priority_feature *p_app,
 		      struct dcbx_app_priority_entry *p_tbl,
-		      struct qed_dcbx_params *p_params)
+		      struct qed_dcbx_params *p_params, bool ieee)
 {
 	struct qed_app_entry *entry;
 	u8 pri_map;
@@ -414,15 +466,46 @@ qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
 				  DCBX_APP_NUM_ENTRIES);
 	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
 		entry = &p_params->app_entry[i];
-		entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
-						     DCBX_APP_SF));
+		if (ieee) {
+			u8 sf_ieee;
+			u32 val;
+
+			sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry,
+						    DCBX_APP_SF_IEEE);
+			switch (sf_ieee) {
+			case DCBX_APP_SF_IEEE_RESERVED:
+				/* Old MFW */
+				val = QED_MFW_GET_FIELD(p_tbl[i].entry,
+							DCBX_APP_SF);
+				entry->sf_ieee = val ?
+				    QED_DCBX_SF_IEEE_TCP_UDP_PORT :
+				    QED_DCBX_SF_IEEE_ETHTYPE;
+				break;
+			case DCBX_APP_SF_IEEE_ETHTYPE:
+				entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
+				break;
+			case DCBX_APP_SF_IEEE_TCP_PORT:
+				entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
+				break;
+			case DCBX_APP_SF_IEEE_UDP_PORT:
+				entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
+				break;
+			case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
+				entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
+				break;
+			}
+		} else {
+			entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
+							     DCBX_APP_SF));
+		}
+
 		pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
 		entry->prio = ffs(pri_map) - 1;
 		entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
 						    DCBX_APP_PROTOCOL_ID);
 		qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
 					       entry->proto_id,
-					       &entry->proto_type);
+					       &entry->proto_type, ieee);
 	}
 
 	DP_VERBOSE(p_hwfn, QED_MSG_DCB,
@@ -483,7 +566,7 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
 	bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
 	tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
 	tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
-	pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]);
+	pri_map = p_ets->pri_tc_tbl[0];
 	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
 		p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
 		p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
@@ -500,9 +583,9 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
 			   struct dcbx_app_priority_feature *p_app,
 			   struct dcbx_app_priority_entry *p_tbl,
 			   struct dcbx_ets_feature *p_ets,
-			   u32 pfc, struct qed_dcbx_params *p_params)
+			   u32 pfc, struct qed_dcbx_params *p_params, bool ieee)
 {
-	qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params);
+	qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
 	qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
 	qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
 }
@@ -516,7 +599,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
 	p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
 	qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
 				   p_feat->app.app_pri_tbl, &p_feat->ets,
-				   p_feat->pfc, &params->local.params);
+				   p_feat->pfc, &params->local.params, false);
 	params->local.valid = true;
 }
 
@@ -529,7 +612,7 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
 	p_feat = &p_hwfn->p_dcbx_info->remote.features;
 	qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
 				   p_feat->app.app_pri_tbl, &p_feat->ets,
-				   p_feat->pfc, &params->remote.params);
+				   p_feat->pfc, &params->remote.params, false);
 	params->remote.valid = true;
 }
 
@@ -574,7 +657,8 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
 
 	qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
 				   p_feat->app.app_pri_tbl, &p_feat->ets,
-				   p_feat->pfc, &params->operational.params);
+				   p_feat->pfc, &params->operational.params,
+				   p_operational->ieee);
 	qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
 	err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
 	p_operational->err = err;
@@ -944,7 +1028,6 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
 		val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
 		p_ets->pri_tc_tbl[0] |= val;
 	}
-	p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]);
 	for (i = 0; i < 2; i++) {
 		p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
 		p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
@@ -954,7 +1037,7 @@ static void
 static void
 qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
 		      struct dcbx_app_priority_feature *p_app,
-		      struct qed_dcbx_params *p_params)
+		      struct qed_dcbx_params *p_params, bool ieee)
 {
 	u32 *entry;
 	int i;
@@ -975,12 +1058,36 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
 
 	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
 		entry = &p_app->app_pri_tbl[i].entry;
-		*entry &= ~DCBX_APP_SF_MASK;
-		if (p_params->app_entry[i].ethtype)
-			*entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
-				   DCBX_APP_SF_SHIFT);
-		else
-			*entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT);
+		if (ieee) {
+			*entry &= ~DCBX_APP_SF_IEEE_MASK;
+			switch (p_params->app_entry[i].sf_ieee) {
+			case QED_DCBX_SF_IEEE_ETHTYPE:
+				*entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
+					   DCBX_APP_SF_IEEE_SHIFT);
+				break;
+			case QED_DCBX_SF_IEEE_TCP_PORT:
+				*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
+					   DCBX_APP_SF_IEEE_SHIFT);
+				break;
+			case QED_DCBX_SF_IEEE_UDP_PORT:
+				*entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
+					   DCBX_APP_SF_IEEE_SHIFT);
+				break;
+			case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
+				*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
+					   DCBX_APP_SF_IEEE_SHIFT);
+				break;
+			}
+		} else {
+			*entry &= ~DCBX_APP_SF_MASK;
+			if (p_params->app_entry[i].ethtype)
+				*entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+					   DCBX_APP_SF_SHIFT);
+			else
+				*entry |= ((u32)DCBX_APP_SF_PORT <<
+					   DCBX_APP_SF_SHIFT);
+		}
+
 		*entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
 		*entry |= ((u32)p_params->app_entry[i].proto_id <<
 			   DCBX_APP_PROTOCOL_ID_SHIFT);
@@ -995,15 +1102,19 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
 			  struct dcbx_local_params *local_admin,
 			  struct qed_dcbx_set *params)
 {
+	bool ieee = false;
+
 	local_admin->flags = 0;
 	memcpy(&local_admin->features,
 	       &p_hwfn->p_dcbx_info->operational.features,
 	       sizeof(local_admin->features));
 
-	if (params->enabled)
+	if (params->enabled) {
 		local_admin->config = params->ver_num;
-	else
+		ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
+	} else {
 		local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+	}
 
 	if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
 		qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
@@ -1015,7 +1126,7 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
 
 	if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
 		qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
-				      &params->config.params);
+				      &params->config.params, ieee);
 }
 
 int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
@@ -1596,8 +1707,10 @@ static int qed_dcbnl_setapp(struct qed_dev *cdev,
 		if ((entry->ethtype == ethtype) && (entry->proto_id == idval))
 			break;
 		/* First empty slot */
-		if (!entry->proto_id)
+		if (!entry->proto_id) {
+			dcbx_set.config.params.num_app_entries++;
 			break;
+		}
 	}
 
 	if (i == QED_DCBX_MAX_APP_PROTOCOL) {
@@ -2117,8 +2230,10 @@ int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
 		    (entry->proto_id == app->protocol))
 			break;
 		/* First empty slot */
-		if (!entry->proto_id)
+		if (!entry->proto_id) {
+			dcbx_set.config.params.num_app_entries++;
 			break;
+		}
 	}
 
 	if (i == QED_DCBX_MAX_APP_PROTOCOL) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 592784019994..6f9d3b831a2a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -6850,6 +6850,14 @@ struct dcbx_app_priority_entry {
 #define DCBX_APP_SF_SHIFT		8
 #define DCBX_APP_SF_ETHTYPE		0
 #define DCBX_APP_SF_PORT		1
+#define DCBX_APP_SF_IEEE_MASK		0x0000f000
+#define DCBX_APP_SF_IEEE_SHIFT		12
+#define DCBX_APP_SF_IEEE_RESERVED	0
+#define DCBX_APP_SF_IEEE_ETHTYPE	1
+#define DCBX_APP_SF_IEEE_TCP_PORT	2
+#define DCBX_APP_SF_IEEE_UDP_PORT	3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT	4
+
 #define DCBX_APP_PROTOCOL_ID_MASK	0xffff0000
 #define DCBX_APP_PROTOCOL_ID_SHIFT	16
 };
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index fd973f4f16c7..49bad00a0f8f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 64
-#define QLCNIC_LINUX_VERSIONID  "5.3.64"
+#define _QLCNIC_LINUX_SUBVERSION 65
+#define QLCNIC_LINUX_VERSIONID  "5.3.65"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
 	 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 87c642d3b075..fedd7366713c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -102,7 +102,6 @@
 #define QLCNIC_RESPONSE_DESC	0x05
 #define QLCNIC_LRO_DESC		0x12
 
-#define QLCNIC_TX_POLL_BUDGET		128
 #define QLCNIC_TCP_HDR_SIZE		20
 #define QLCNIC_TCP_TS_OPTION_SIZE	12
 #define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
@@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
 	struct qlcnic_host_tx_ring *tx_ring;
 	struct qlcnic_adapter *adapter;
 
-	budget = QLCNIC_TX_POLL_BUDGET;
 	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
 	adapter = tx_ring->adapter;
 	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 017d8c2c8285..24061b9b92e8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -156,10 +156,8 @@ struct qlcnic_vf_info {
 	spinlock_t			vlan_list_lock;	/* Lock for VLAN list */
 };
 
-struct qlcnic_async_work_list {
+struct qlcnic_async_cmd {
 	struct list_head	list;
-	struct work_struct	work;
-	void			*ptr;
 	struct qlcnic_cmd_args	*cmd;
 };
 
@@ -168,7 +166,10 @@ struct qlcnic_back_channel {
 	struct workqueue_struct *bc_trans_wq;
 	struct workqueue_struct *bc_async_wq;
 	struct workqueue_struct *bc_flr_wq;
-	struct list_head	async_list;
+	struct qlcnic_adapter	*adapter;
+	struct list_head	async_cmd_list;
+	struct work_struct	vf_async_work;
+	spinlock_t		queue_lock; /* async_cmd_list queue lock */
 };
 
 struct qlcnic_sriov {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 7327b729ba2e..d7107055ec60 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -29,6 +29,7 @@
 #define QLC_83XX_VF_RESET_FAIL_THRESH	8
 #define QLC_BC_CMD_MAX_RETRY_CNT	5
 
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
@@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
 	}
 
 	bc->bc_async_wq = wq;
-	INIT_LIST_HEAD(&bc->async_list);
+	INIT_LIST_HEAD(&bc->async_cmd_list);
+	INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
+	spin_lock_init(&bc->queue_lock);
+	bc->adapter = adapter;
 
 	for (i = 0; i < num_vfs; i++) {
 		vf = &sriov->vf_info[i];
@@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
 
 void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
 {
-	struct list_head *head = &bc->async_list;
-	struct qlcnic_async_work_list *entry;
+	struct list_head *head = &bc->async_cmd_list;
+	struct qlcnic_async_cmd *entry;
 
 	flush_workqueue(bc->bc_async_wq);
+	cancel_work_sync(&bc->vf_async_work);
+
+	spin_lock(&bc->queue_lock);
 	while (!list_empty(head)) {
-		entry = list_entry(head->next, struct qlcnic_async_work_list,
+		entry = list_entry(head->next, struct qlcnic_async_cmd,
 				   list);
-		cancel_work_sync(&entry->work);
 		list_del(&entry->list);
+		kfree(entry->cmd);
 		kfree(entry);
 	}
+	spin_unlock(&bc->queue_lock);
 }
 
 void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
@@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
 
 static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
 {
-	struct qlcnic_async_work_list *entry;
-	struct qlcnic_adapter *adapter;
+	struct qlcnic_async_cmd *entry, *tmp;
+	struct qlcnic_back_channel *bc;
 	struct qlcnic_cmd_args *cmd;
+	struct list_head *head;
+	LIST_HEAD(del_list);
+
+	bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
+	head = &bc->async_cmd_list;
+
+	spin_lock(&bc->queue_lock);
+	list_splice_init(head, &del_list);
+	spin_unlock(&bc->queue_lock);
+
+	list_for_each_entry_safe(entry, tmp, &del_list, list) {
+		list_del(&entry->list);
+		cmd = entry->cmd;
+		__qlcnic_sriov_issue_cmd(bc->adapter, cmd);
+		kfree(entry);
+	}
+
+	if (!list_empty(head))
+		queue_work(bc->bc_async_wq, &bc->vf_async_work);
 
-	entry = container_of(work, struct qlcnic_async_work_list, work);
-	adapter = entry->ptr;
-	cmd = entry->cmd;
-	__qlcnic_sriov_issue_cmd(adapter, cmd);
 	return;
 }
 
-static struct qlcnic_async_work_list *
-qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
+static struct qlcnic_async_cmd *
+qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
+			     struct qlcnic_cmd_args *cmd)
 {
-	struct list_head *node;
-	struct qlcnic_async_work_list *entry = NULL;
-	u8 empty = 0;
+	struct qlcnic_async_cmd *entry = NULL;
 
-	list_for_each(node, &bc->async_list) {
-		entry = list_entry(node, struct qlcnic_async_work_list, list);
-		if (!work_pending(&entry->work)) {
-			empty = 1;
-			break;
-		}
-	}
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		return NULL;
 
-	if (!empty) {
-		entry = kzalloc(sizeof(struct qlcnic_async_work_list),
-				GFP_ATOMIC);
-		if (entry == NULL)
-			return NULL;
-		list_add_tail(&entry->list, &bc->async_list);
-	}
+	entry->cmd = cmd;
+
+	spin_lock(&bc->queue_lock);
+	list_add_tail(&entry->list, &bc->async_cmd_list);
+	spin_unlock(&bc->queue_lock);
 
 	return entry;
 }
 
 static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
-					    work_func_t func, void *data,
 					    struct qlcnic_cmd_args *cmd)
 {
-	struct qlcnic_async_work_list *entry = NULL;
+	struct qlcnic_async_cmd *entry = NULL;
 
-	entry = qlcnic_sriov_get_free_node_async_work(bc);
-	if (!entry)
+	entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
+	if (!entry) {
+		qlcnic_free_mbx_args(cmd);
+		kfree(cmd);
 		return;
+	}
 
-	entry->ptr = data;
-	entry->cmd = cmd;
-	INIT_WORK(&entry->work, func);
-	queue_work(bc->bc_async_wq, &entry->work);
+	queue_work(bc->bc_async_wq, &bc->vf_async_work);
 }
 
 static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
@@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
 	if (adapter->need_fw_reset)
 		return -EIO;
 
-	qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
-					adapter, cmd);
+	qlcnic_sriov_schedule_async_cmd(bc, cmd);
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c51f34693eae..f85d605e4560 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -734,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
 		netif_receive_skb(skb);
 		ndev->stats.rx_bytes += len;
 		ndev->stats.rx_packets++;
+		kmemleak_not_leak(new_skb);
 	} else {
 		ndev->stats.rx_dropped++;
 		new_skb = skb;
@@ -1325,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
 			kfree_skb(skb);
 			goto err_cleanup;
 		}
+		kmemleak_not_leak(skb);
 	}
 	/* continue even if we didn't manage to submit all
 	 * receive descs
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 01a77145a0fa..8fd131207ee1 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = {
 
 static void tsi108_timed_checker(unsigned long dev_ptr);
 
+#ifdef DEBUG
 static void dump_eth_one(struct net_device *dev)
 {
 	struct tsi108_prv_data *data = netdev_priv(dev);
@@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev)
 	       TSI_READ(TSI108_EC_RXESTAT),
 	       TSI_READ(TSI108_EC_RXERR), data->rxpending);
 }
+#endif
 
 /* Synchronization is needed between the thread and up/down events.
  * Note that the PHY is accessed through the same registers for both
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 467fb8b4d083..591af71eae56 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -644,12 +644,6 @@ struct netvsc_reconfig {
 	u32 event;
 };
 
-struct garp_wrk {
-	struct work_struct dwrk;
-	struct net_device *netdev;
-	struct netvsc_device *netvsc_dev;
-};
-
 /* The context of the netvsc device  */
 struct net_device_context {
 	/* point back to our device context */
@@ -667,7 +661,6 @@ struct net_device_context {
 
 	struct work_struct work;
 	u32 msg_enable; /* debug level */
-	struct garp_wrk gwrk;
 
 	struct netvsc_stats __percpu *tx_stats;
 	struct netvsc_stats __percpu *rx_stats;
@@ -678,6 +671,15 @@ struct net_device_context {
 
 	/* the device is going away */
 	bool start_remove;
+
+	/* State to manage the associated VF interface. */
+	struct net_device *vf_netdev;
+	bool vf_inject;
+	atomic_t vf_use_cnt;
+	/* 1: allocated, serial number is valid. 0: not allocated */
+	u32 vf_alloc;
+	/* Serial number of the VF to team with */
+	u32 vf_serial;
 };
 
 /* Per netvsc device */
@@ -733,15 +735,7 @@ struct netvsc_device {
 	u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
 	u32 pkt_align; /* alignment bytes, e.g. 8 */
 
-	/* 1: allocated, serial number is valid. 0: not allocated */
-	u32 vf_alloc;
-	/* Serial number of the VF to team with */
-	u32 vf_serial;
 	atomic_t open_cnt;
-	/* State to manage the associated VF interface. */
-	bool vf_inject;
-	struct net_device *vf_netdev;
-	atomic_t vf_use_cnt;
 };
 
 static inline struct netvsc_device *
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 20e09174ff62..410fb8e81376 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -77,13 +77,9 @@ static struct netvsc_device *alloc_net_device(void)
 	init_waitqueue_head(&net_device->wait_drain);
 	net_device->destroy = false;
 	atomic_set(&net_device->open_cnt, 0);
-	atomic_set(&net_device->vf_use_cnt, 0);
 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 
-	net_device->vf_netdev = NULL;
-	net_device->vf_inject = false;
-
 	return net_device;
 }
 
@@ -1106,16 +1102,16 @@ static void netvsc_send_table(struct hv_device *hdev,
 		nvscdev->send_table[i] = tab[i];
 }
 
-static void netvsc_send_vf(struct netvsc_device *nvdev,
+static void netvsc_send_vf(struct net_device_context *net_device_ctx,
 			   struct nvsp_message *nvmsg)
 {
-	nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
-	nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
+	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
}
 
 static inline void netvsc_receive_inband(struct hv_device *hdev,
-					 struct netvsc_device *nvdev,
+					 struct net_device_context *net_device_ctx,
 					 struct nvsp_message *nvmsg)
 {
 	switch (nvmsg->hdr.msg_type) {
 	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
@@ -1123,7 +1119,7 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
 		break;
 
 	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
-		netvsc_send_vf(nvdev, nvmsg);
+		netvsc_send_vf(net_device_ctx, nvmsg);
 		break;
 	}
 }
@@ -1136,6 +1132,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
 				   struct vmpacket_descriptor *desc)
 {
 	struct nvsp_message *nvmsg;
+	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 
 	nvmsg = (struct nvsp_message *)((unsigned long)
 					desc + (desc->offset8 << 3));
@@ -1150,7 +1147,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
 		break;
 
 	case VM_PKT_DATA_INBAND:
-		netvsc_receive_inband(device, net_device, nvmsg);
+		netvsc_receive_inband(device, net_device_ctx, nvmsg);
 		break;
 
 	default:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 41bd952cc28d..3ba29fc80d05 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -658,20 +658,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
658 struct sk_buff *skb; 658 struct sk_buff *skb;
659 struct sk_buff *vf_skb; 659 struct sk_buff *vf_skb;
660 struct netvsc_stats *rx_stats; 660 struct netvsc_stats *rx_stats;
661 struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
662 u32 bytes_recvd = packet->total_data_buflen; 661 u32 bytes_recvd = packet->total_data_buflen;
663 int ret = 0; 662 int ret = 0;
664 663
665 if (!net || net->reg_state != NETREG_REGISTERED) 664 if (!net || net->reg_state != NETREG_REGISTERED)
666 return NVSP_STAT_FAIL; 665 return NVSP_STAT_FAIL;
667 666
668 if (READ_ONCE(netvsc_dev->vf_inject)) { 667 if (READ_ONCE(net_device_ctx->vf_inject)) {
669 atomic_inc(&netvsc_dev->vf_use_cnt); 668 atomic_inc(&net_device_ctx->vf_use_cnt);
670 if (!READ_ONCE(netvsc_dev->vf_inject)) { 669 if (!READ_ONCE(net_device_ctx->vf_inject)) {
671 /* 670 /*
672 * We raced; just move on. 671 * We raced; just move on.
673 */ 672 */
674 atomic_dec(&netvsc_dev->vf_use_cnt); 673 atomic_dec(&net_device_ctx->vf_use_cnt);
675 goto vf_injection_done; 674 goto vf_injection_done;
676 } 675 }
677 676
@@ -683,17 +682,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
683 * the host). Deliver these via the VF interface 682 * the host). Deliver these via the VF interface
684 * in the guest. 683 * in the guest.
685 */ 684 */
686 vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet, 685 vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
687 csum_info, *data, vlan_tci); 686 packet, csum_info, *data,
687 vlan_tci);
688 if (vf_skb != NULL) { 688 if (vf_skb != NULL) {
689 ++netvsc_dev->vf_netdev->stats.rx_packets; 689 ++net_device_ctx->vf_netdev->stats.rx_packets;
690 netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd; 690 net_device_ctx->vf_netdev->stats.rx_bytes +=
691 bytes_recvd;
691 netif_receive_skb(vf_skb); 692 netif_receive_skb(vf_skb);
692 } else { 693 } else {
693 ++net->stats.rx_dropped; 694 ++net->stats.rx_dropped;
694 ret = NVSP_STAT_FAIL; 695 ret = NVSP_STAT_FAIL;
695 } 696 }
696 atomic_dec(&netvsc_dev->vf_use_cnt); 697 atomic_dec(&net_device_ctx->vf_use_cnt);
697 return ret; 698 return ret;
698 } 699 }
699 700
@@ -1150,17 +1151,6 @@ static void netvsc_free_netdev(struct net_device *netdev)
1150 free_netdev(netdev); 1151 free_netdev(netdev);
1151} 1152}
1152 1153
1153static void netvsc_notify_peers(struct work_struct *wrk)
1154{
1155 struct garp_wrk *gwrk;
1156
1157 gwrk = container_of(wrk, struct garp_wrk, dwrk);
1158
1159 netdev_notify_peers(gwrk->netdev);
1160
1161 atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
1162}
1163
1164static struct net_device *get_netvsc_net_device(char *mac) 1154static struct net_device *get_netvsc_net_device(char *mac)
1165{ 1155{
1166 struct net_device *dev, *found = NULL; 1156 struct net_device *dev, *found = NULL;
@@ -1203,7 +1193,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
1203 1193
1204 net_device_ctx = netdev_priv(ndev); 1194 net_device_ctx = netdev_priv(ndev);
1205 netvsc_dev = net_device_ctx->nvdev; 1195 netvsc_dev = net_device_ctx->nvdev;
1206 if (netvsc_dev == NULL) 1196 if (!netvsc_dev || net_device_ctx->vf_netdev)
1207 return NOTIFY_DONE; 1197 return NOTIFY_DONE;
1208 1198
1209 netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); 1199 netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
@@ -1211,10 +1201,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
1211 * Take a reference on the module. 1201 * Take a reference on the module.
1212 */ 1202 */
1213 try_module_get(THIS_MODULE); 1203 try_module_get(THIS_MODULE);
1214 netvsc_dev->vf_netdev = vf_netdev; 1204 net_device_ctx->vf_netdev = vf_netdev;
1215 return NOTIFY_OK; 1205 return NOTIFY_OK;
1216} 1206}
1217 1207
1208static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
1209{
1210 net_device_ctx->vf_inject = true;
1211}
1212
1213static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
1214{
1215 net_device_ctx->vf_inject = false;
1216
1217 /* Wait for currently active users to drain out. */
1218 while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
1219 udelay(50);
1220}
1218 1221
1219static int netvsc_vf_up(struct net_device *vf_netdev) 1222static int netvsc_vf_up(struct net_device *vf_netdev)
1220{ 1223{
@@ -1233,11 +1236,11 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
1233 net_device_ctx = netdev_priv(ndev); 1236 net_device_ctx = netdev_priv(ndev);
1234 netvsc_dev = net_device_ctx->nvdev; 1237 netvsc_dev = net_device_ctx->nvdev;
1235 1238
1236 if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) 1239 if (!netvsc_dev || !net_device_ctx->vf_netdev)
1237 return NOTIFY_DONE; 1240 return NOTIFY_DONE;
1238 1241
1239 netdev_info(ndev, "VF up: %s\n", vf_netdev->name); 1242 netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
1240 netvsc_dev->vf_inject = true; 1243 netvsc_inject_enable(net_device_ctx);
1241 1244
1242 /* 1245 /*
1243 * Open the device before switching data path. 1246 * Open the device before switching data path.
@@ -1252,15 +1255,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
1252 1255
1253 netif_carrier_off(ndev); 1256 netif_carrier_off(ndev);
1254 1257
1255 /* 1258 /* Now notify peers through VF device. */
1256 * Now notify peers. We are scheduling work to 1259 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
1257 * notify peers; take a reference to prevent
1258 * the VF interface from vanishing.
1259 */
1260 atomic_inc(&netvsc_dev->vf_use_cnt);
1261 net_device_ctx->gwrk.netdev = vf_netdev;
1262 net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
1263 schedule_work(&net_device_ctx->gwrk.dwrk);
1264 1260
1265 return NOTIFY_OK; 1261 return NOTIFY_OK;
1266} 1262}
@@ -1283,29 +1279,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
1283 net_device_ctx = netdev_priv(ndev); 1279 net_device_ctx = netdev_priv(ndev);
1284 netvsc_dev = net_device_ctx->nvdev; 1280 netvsc_dev = net_device_ctx->nvdev;
1285 1281
1286 if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) 1282 if (!netvsc_dev || !net_device_ctx->vf_netdev)
1287 return NOTIFY_DONE; 1283 return NOTIFY_DONE;
1288 1284
1289 netdev_info(ndev, "VF down: %s\n", vf_netdev->name); 1285 netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
1290 netvsc_dev->vf_inject = false; 1286 netvsc_inject_disable(net_device_ctx);
1291 /*
1292 * Wait for currently active users to
1293 * drain out.
1294 */
1295
1296 while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
1297 udelay(50);
1298 netvsc_switch_datapath(ndev, false); 1287 netvsc_switch_datapath(ndev, false);
1299 netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); 1288 netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
1300 rndis_filter_close(netvsc_dev); 1289 rndis_filter_close(netvsc_dev);
1301 netif_carrier_on(ndev); 1290 netif_carrier_on(ndev);
1302 /* 1291
1303 * Notify peers. 1292 /* Now notify peers through the netvsc device. */
1304 */ 1293 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
1305 atomic_inc(&netvsc_dev->vf_use_cnt);
1306 net_device_ctx->gwrk.netdev = ndev;
1307 net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
1308 schedule_work(&net_device_ctx->gwrk.dwrk);
1309 1294
1310 return NOTIFY_OK; 1295 return NOTIFY_OK;
1311} 1296}
@@ -1327,11 +1312,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
1327 1312
1328 net_device_ctx = netdev_priv(ndev); 1313 net_device_ctx = netdev_priv(ndev);
1329 netvsc_dev = net_device_ctx->nvdev; 1314 netvsc_dev = net_device_ctx->nvdev;
1330 if (netvsc_dev == NULL) 1315 if (!netvsc_dev || !net_device_ctx->vf_netdev)
1331 return NOTIFY_DONE; 1316 return NOTIFY_DONE;
1332 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); 1317 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
1333 1318 netvsc_inject_disable(net_device_ctx);
1334 netvsc_dev->vf_netdev = NULL; 1319 net_device_ctx->vf_netdev = NULL;
1335 module_put(THIS_MODULE); 1320 module_put(THIS_MODULE);
1336 return NOTIFY_OK; 1321 return NOTIFY_OK;
1337} 1322}
@@ -1377,11 +1362,14 @@ static int netvsc_probe(struct hv_device *dev,
1377 1362
1378 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); 1363 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
1379 INIT_WORK(&net_device_ctx->work, do_set_multicast); 1364 INIT_WORK(&net_device_ctx->work, do_set_multicast);
1380 INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
1381 1365
1382 spin_lock_init(&net_device_ctx->lock); 1366 spin_lock_init(&net_device_ctx->lock);
1383 INIT_LIST_HEAD(&net_device_ctx->reconfig_events); 1367 INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
1384 1368
1369 atomic_set(&net_device_ctx->vf_use_cnt, 0);
1370 net_device_ctx->vf_netdev = NULL;
1371 net_device_ctx->vf_inject = false;
1372
1385 net->netdev_ops = &device_ops; 1373 net->netdev_ops = &device_ops;
1386 1374
1387 net->hw_features = NETVSC_HW_FEATURES; 1375 net->hw_features = NETVSC_HW_FEATURES;
@@ -1494,8 +1482,13 @@ static int netvsc_netdev_event(struct notifier_block *this,
1494{ 1482{
1495 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); 1483 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
1496 1484
1497 /* Avoid Vlan, Bonding dev with same MAC registering as VF */ 1485 /* Avoid Vlan dev with same MAC registering as VF */
1498 if (event_dev->priv_flags & (IFF_802_1Q_VLAN | IFF_BONDING)) 1486 if (event_dev->priv_flags & IFF_802_1Q_VLAN)
1487 return NOTIFY_DONE;
1488
1489 /* Avoid Bonding master dev with same MAC registering as VF */
1490 if (event_dev->priv_flags & IFF_BONDING &&
1491 event_dev->flags & IFF_MASTER)
1499 return NOTIFY_DONE; 1492 return NOTIFY_DONE;
1500 1493
1501 switch (event) { 1494 switch (event) {
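The netvsc hunks above move the VF bookkeeping (vf_netdev, vf_inject, vf_use_cnt) from the per-channel netvsc_device up into net_device_context and replace the scheduled GARP work item with direct NETDEV_NOTIFY_PEERS notifier calls. A minimal sketch of the injection gate that vf_use_cnt implements, with the reader side written out explicitly (kernel context assumed; netvsc_vf_inject_rx() and the netif_rx() hand-off are illustrative, not the driver's exact receive path):

static bool netvsc_vf_inject_rx(struct net_device_context *ctx,
				struct sk_buff *skb)
{
	bool injected = false;

	/* Readers bracket each injected packet with the use count;
	 * netvsc_inject_disable() clears vf_inject and then busy-waits
	 * for this count to drain before the VF may go away. */
	atomic_inc(&ctx->vf_use_cnt);
	if (ctx->vf_inject && ctx->vf_netdev) {
		skb->dev = ctx->vf_netdev;
		netif_rx(skb);
		injected = true;
	}
	atomic_dec(&ctx->vf_use_cnt);
	return injected;
}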
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index d13e6e15d7b5..351e701eb043 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -270,6 +270,7 @@ struct macsec_dev {
270 struct pcpu_secy_stats __percpu *stats; 270 struct pcpu_secy_stats __percpu *stats;
271 struct list_head secys; 271 struct list_head secys;
272 struct gro_cells gro_cells; 272 struct gro_cells gro_cells;
273 unsigned int nest_level;
273}; 274};
274 275
275/** 276/**
@@ -2699,6 +2700,8 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
2699 2700
2700#define MACSEC_FEATURES \ 2701#define MACSEC_FEATURES \
2701 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) 2702 (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
2703static struct lock_class_key macsec_netdev_addr_lock_key;
2704
2702static int macsec_dev_init(struct net_device *dev) 2705static int macsec_dev_init(struct net_device *dev)
2703{ 2706{
2704 struct macsec_dev *macsec = macsec_priv(dev); 2707 struct macsec_dev *macsec = macsec_priv(dev);
@@ -2910,6 +2913,13 @@ static int macsec_get_iflink(const struct net_device *dev)
2910 return macsec_priv(dev)->real_dev->ifindex; 2913 return macsec_priv(dev)->real_dev->ifindex;
2911} 2914}
2912 2915
2916
2917static int macsec_get_nest_level(struct net_device *dev)
2918{
2919 return macsec_priv(dev)->nest_level;
2920}
2921
2922
2913static const struct net_device_ops macsec_netdev_ops = { 2923static const struct net_device_ops macsec_netdev_ops = {
2914 .ndo_init = macsec_dev_init, 2924 .ndo_init = macsec_dev_init,
2915 .ndo_uninit = macsec_dev_uninit, 2925 .ndo_uninit = macsec_dev_uninit,
@@ -2923,6 +2933,7 @@ static const struct net_device_ops macsec_netdev_ops = {
2923 .ndo_start_xmit = macsec_start_xmit, 2933 .ndo_start_xmit = macsec_start_xmit,
2924 .ndo_get_stats64 = macsec_get_stats64, 2934 .ndo_get_stats64 = macsec_get_stats64,
2925 .ndo_get_iflink = macsec_get_iflink, 2935 .ndo_get_iflink = macsec_get_iflink,
2936 .ndo_get_lock_subclass = macsec_get_nest_level,
2926}; 2937};
2927 2938
2928static const struct device_type macsec_type = { 2939static const struct device_type macsec_type = {
@@ -3047,22 +3058,31 @@ static void macsec_del_dev(struct macsec_dev *macsec)
3047 } 3058 }
3048} 3059}
3049 3060
3061static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
3062{
3063 struct macsec_dev *macsec = macsec_priv(dev);
3064 struct net_device *real_dev = macsec->real_dev;
3065
3066 unregister_netdevice_queue(dev, head);
3067 list_del_rcu(&macsec->secys);
3068 macsec_del_dev(macsec);
3069 netdev_upper_dev_unlink(real_dev, dev);
3070
3071 macsec_generation++;
3072}
3073
3050static void macsec_dellink(struct net_device *dev, struct list_head *head) 3074static void macsec_dellink(struct net_device *dev, struct list_head *head)
3051{ 3075{
3052 struct macsec_dev *macsec = macsec_priv(dev); 3076 struct macsec_dev *macsec = macsec_priv(dev);
3053 struct net_device *real_dev = macsec->real_dev; 3077 struct net_device *real_dev = macsec->real_dev;
3054 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 3078 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3055 3079
3056 macsec_generation++; 3080 macsec_common_dellink(dev, head);
3057 3081
3058 unregister_netdevice_queue(dev, head);
3059 list_del_rcu(&macsec->secys);
3060 if (list_empty(&rxd->secys)) { 3082 if (list_empty(&rxd->secys)) {
3061 netdev_rx_handler_unregister(real_dev); 3083 netdev_rx_handler_unregister(real_dev);
3062 kfree(rxd); 3084 kfree(rxd);
3063 } 3085 }
3064
3065 macsec_del_dev(macsec);
3066} 3086}
3067 3087
3068static int register_macsec_dev(struct net_device *real_dev, 3088static int register_macsec_dev(struct net_device *real_dev,
@@ -3181,6 +3201,16 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3181 3201
3182 dev_hold(real_dev); 3202 dev_hold(real_dev);
3183 3203
3204 macsec->nest_level = dev_get_nest_level(real_dev) + 1;
3205 netdev_lockdep_set_classes(dev);
3206 lockdep_set_class_and_subclass(&dev->addr_list_lock,
3207 &macsec_netdev_addr_lock_key,
3208 macsec_get_nest_level(dev));
3209
3210 err = netdev_upper_dev_link(real_dev, dev);
3211 if (err < 0)
3212 goto unregister;
3213
3184 /* need to be already registered so that ->init has run and 3214 /* need to be already registered so that ->init has run and
3185 * the MAC addr is set 3215 * the MAC addr is set
3186 */ 3216 */
@@ -3193,12 +3223,12 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3193 3223
3194 if (rx_handler && sci_exists(real_dev, sci)) { 3224 if (rx_handler && sci_exists(real_dev, sci)) {
3195 err = -EBUSY; 3225 err = -EBUSY;
3196 goto unregister; 3226 goto unlink;
3197 } 3227 }
3198 3228
3199 err = macsec_add_dev(dev, sci, icv_len); 3229 err = macsec_add_dev(dev, sci, icv_len);
3200 if (err) 3230 if (err)
3201 goto unregister; 3231 goto unlink;
3202 3232
3203 if (data) 3233 if (data)
3204 macsec_changelink_common(dev, data); 3234 macsec_changelink_common(dev, data);
@@ -3213,6 +3243,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3213 3243
3214del_dev: 3244del_dev:
3215 macsec_del_dev(macsec); 3245 macsec_del_dev(macsec);
3246unlink:
3247 netdev_upper_dev_unlink(real_dev, dev);
3216unregister: 3248unregister:
3217 unregister_netdevice(dev); 3249 unregister_netdevice(dev);
3218 return err; 3250 return err;
@@ -3382,8 +3414,12 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
3382 3414
3383 rxd = macsec_data_rtnl(real_dev); 3415 rxd = macsec_data_rtnl(real_dev);
3384 list_for_each_entry_safe(m, n, &rxd->secys, secys) { 3416 list_for_each_entry_safe(m, n, &rxd->secys, secys) {
3385 macsec_dellink(m->secy.netdev, &head); 3417 macsec_common_dellink(m->secy.netdev, &head);
3386 } 3418 }
3419
3420 netdev_rx_handler_unregister(real_dev);
3421 kfree(rxd);
3422
3387 unregister_netdevice_many(&head); 3423 unregister_netdevice_many(&head);
3388 break; 3424 break;
3389 } 3425 }
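Two distinct fixes sit in the macsec hunks: a lockdep one (each macsec device gets an addr_list_lock class keyed by its nesting depth over the real device, reported through ndo_get_lock_subclass) and a teardown one (macsec_common_dellink() lets the NETDEV_UNREGISTER path unlink every secy first and then unregister the rx handler and free rxd exactly once). A sketch of the nesting idiom as a stacked virtual device would apply it (my_priv() and the key name are illustrative; the calls themselves appear in the hunks above):

static struct lock_class_key stacked_addr_list_lock_key;

static int stacked_get_nest_level(struct net_device *dev)
{
	return my_priv(dev)->nest_level;
}

static void stacked_setup_lockdep(struct net_device *dev,
				  struct net_device *real_dev)
{
	/* One more level of nesting than whatever we stack on. */
	my_priv(dev)->nest_level = dev_get_nest_level(real_dev) + 1;
	netdev_lockdep_set_classes(dev);
	lockdep_set_class_and_subclass(&dev->addr_list_lock,
				       &stacked_addr_list_lock_key,
				       stacked_get_nest_level(dev));
}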
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index cd9b53834bf6..3234fcdea317 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1315,7 +1315,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1315 vlan->dev = dev; 1315 vlan->dev = dev;
1316 vlan->port = port; 1316 vlan->port = port;
1317 vlan->set_features = MACVLAN_FEATURES; 1317 vlan->set_features = MACVLAN_FEATURES;
1318 vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1; 1318 vlan->nest_level = dev_get_nest_level(lowerdev) + 1;
1319 1319
1320 vlan->mode = MACVLAN_MODE_VEPA; 1320 vlan->mode = MACVLAN_MODE_VEPA;
1321 if (data && data[IFLA_MACVLAN_MODE]) 1321 if (data && data[IFLA_MACVLAN_MODE])
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index a38c0dac514b..070e3290aa6e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -275,7 +275,6 @@ static void macvtap_put_queue(struct macvtap_queue *q)
275 rtnl_unlock(); 275 rtnl_unlock();
276 276
277 synchronize_rcu(); 277 synchronize_rcu();
278 skb_array_cleanup(&q->skb_array);
279 sock_put(&q->sk); 278 sock_put(&q->sk);
280} 279}
281 280
@@ -533,10 +532,8 @@ static void macvtap_sock_write_space(struct sock *sk)
533static void macvtap_sock_destruct(struct sock *sk) 532static void macvtap_sock_destruct(struct sock *sk)
534{ 533{
535 struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk); 534 struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk);
536 struct sk_buff *skb;
537 535
538 while ((skb = skb_array_consume(&q->skb_array)) != NULL) 536 skb_array_cleanup(&q->skb_array);
539 kfree_skb(skb);
540} 537}
541 538
542static int macvtap_open(struct inode *inode, struct file *file) 539static int macvtap_open(struct inode *inode, struct file *file)
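The macvtap change moves skb_array_cleanup() from macvtap_put_queue() into the sock destructor, so the ring (and any skbs still queued in it) is freed exactly once, and only after the last socket reference drops; the destructor's manual consume loop becomes redundant. A sketch of the resulting teardown ordering (the example_* names are illustrative):

static void example_put_queue(struct macvtap_queue *q)
{
	/* ...unlink the queue under RTNL, then: */
	synchronize_rcu();
	sock_put(&q->sk);	/* not necessarily the final reference */
}

static void example_sock_destruct(struct sock *sk)
{
	struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk);

	/* Runs once the refcount hits zero, after all readers are gone;
	 * skb_array_cleanup() also frees skbs left in the ring. */
	skb_array_cleanup(&q->skb_array);
}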
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 1882d9828c99..053e87905b94 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -677,17 +677,28 @@ static void kszphy_get_stats(struct phy_device *phydev,
677 data[i] = kszphy_get_stat(phydev, i); 677 data[i] = kszphy_get_stat(phydev, i);
678} 678}
679 679
680static int kszphy_resume(struct phy_device *phydev) 680static int kszphy_suspend(struct phy_device *phydev)
681{ 681{
682 int value; 682 /* Disable PHY Interrupts */
683 if (phy_interrupt_is_valid(phydev)) {
684 phydev->interrupts = PHY_INTERRUPT_DISABLED;
685 if (phydev->drv->config_intr)
686 phydev->drv->config_intr(phydev);
687 }
683 688
684 mutex_lock(&phydev->lock); 689 return genphy_suspend(phydev);
690}
685 691
686 value = phy_read(phydev, MII_BMCR); 692static int kszphy_resume(struct phy_device *phydev)
687 phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); 693{
694 genphy_resume(phydev);
688 695
689 kszphy_config_intr(phydev); 696 /* Enable PHY Interrupts */
690 mutex_unlock(&phydev->lock); 697 if (phy_interrupt_is_valid(phydev)) {
698 phydev->interrupts = PHY_INTERRUPT_ENABLED;
699 if (phydev->drv->config_intr)
700 phydev->drv->config_intr(phydev);
701 }
691 702
692 return 0; 703 return 0;
693} 704}
@@ -900,7 +911,7 @@ static struct phy_driver ksphy_driver[] = {
900 .get_sset_count = kszphy_get_sset_count, 911 .get_sset_count = kszphy_get_sset_count,
901 .get_strings = kszphy_get_strings, 912 .get_strings = kszphy_get_strings,
902 .get_stats = kszphy_get_stats, 913 .get_stats = kszphy_get_stats,
903 .suspend = genphy_suspend, 914 .suspend = kszphy_suspend,
904 .resume = kszphy_resume, 915 .resume = kszphy_resume,
905}, { 916}, {
906 .phy_id = PHY_ID_KSZ8061, 917 .phy_id = PHY_ID_KSZ8061,
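The micrel hunks replace the hand-rolled BMCR poke with genphy_resume() and bracket suspend/resume with explicit interrupt masking: interrupts are disabled before genphy_suspend() powers the PHY down, and re-enabled only after genphy_resume() has powered it back up, with phydev->interrupts set before config_intr() runs (the old resume called kszphy_config_intr() without updating that state). A sketch of wiring the pair into a driver table (the ID values are illustrative, and mandatory fields such as .name are omitted for brevity):

static struct phy_driver example_phy_driver[] = { {
	.phy_id		= 0x00221610,
	.phy_id_mask	= 0x00fffff0,
	.suspend	= kszphy_suspend,
	.resume		= kszphy_resume,
} };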
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index da4e3d6632f6..c0dda6fc0921 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1811,7 +1811,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
1811 fl4.flowi4_mark = skb->mark; 1811 fl4.flowi4_mark = skb->mark;
1812 fl4.flowi4_proto = IPPROTO_UDP; 1812 fl4.flowi4_proto = IPPROTO_UDP;
1813 fl4.daddr = daddr; 1813 fl4.daddr = daddr;
1814 fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; 1814 fl4.saddr = *saddr;
1815 1815
1816 rt = ip_route_output_key(vxlan->net, &fl4); 1816 rt = ip_route_output_key(vxlan->net, &fl4);
1817 if (!IS_ERR(rt)) { 1817 if (!IS_ERR(rt)) {
@@ -1847,7 +1847,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1847 memset(&fl6, 0, sizeof(fl6)); 1847 memset(&fl6, 0, sizeof(fl6));
1848 fl6.flowi6_oif = oif; 1848 fl6.flowi6_oif = oif;
1849 fl6.daddr = *daddr; 1849 fl6.daddr = *daddr;
1850 fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; 1850 fl6.saddr = *saddr;
1851 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); 1851 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
1852 fl6.flowi6_mark = skb->mark; 1852 fl6.flowi6_mark = skb->mark;
1853 fl6.flowi6_proto = IPPROTO_UDP; 1853 fl6.flowi6_proto = IPPROTO_UDP;
@@ -1920,7 +1920,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1920 struct rtable *rt = NULL; 1920 struct rtable *rt = NULL;
1921 const struct iphdr *old_iph; 1921 const struct iphdr *old_iph;
1922 union vxlan_addr *dst; 1922 union vxlan_addr *dst;
1923 union vxlan_addr remote_ip; 1923 union vxlan_addr remote_ip, local_ip;
1924 union vxlan_addr *src;
1924 struct vxlan_metadata _md; 1925 struct vxlan_metadata _md;
1925 struct vxlan_metadata *md = &_md; 1926 struct vxlan_metadata *md = &_md;
1926 __be16 src_port = 0, dst_port; 1927 __be16 src_port = 0, dst_port;
@@ -1938,6 +1939,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1938 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; 1939 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
1939 vni = rdst->remote_vni; 1940 vni = rdst->remote_vni;
1940 dst = &rdst->remote_ip; 1941 dst = &rdst->remote_ip;
1942 src = &vxlan->cfg.saddr;
1941 dst_cache = &rdst->dst_cache; 1943 dst_cache = &rdst->dst_cache;
1942 } else { 1944 } else {
1943 if (!info) { 1945 if (!info) {
@@ -1948,11 +1950,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1948 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; 1950 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
1949 vni = vxlan_tun_id_to_vni(info->key.tun_id); 1951 vni = vxlan_tun_id_to_vni(info->key.tun_id);
1950 remote_ip.sa.sa_family = ip_tunnel_info_af(info); 1952 remote_ip.sa.sa_family = ip_tunnel_info_af(info);
1951 if (remote_ip.sa.sa_family == AF_INET) 1953 if (remote_ip.sa.sa_family == AF_INET) {
1952 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; 1954 remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
1953 else 1955 local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
1956 } else {
1954 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; 1957 remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
1958 local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
1959 }
1955 dst = &remote_ip; 1960 dst = &remote_ip;
1961 src = &local_ip;
1956 dst_cache = &info->dst_cache; 1962 dst_cache = &info->dst_cache;
1957 } 1963 }
1958 1964
@@ -1992,15 +1998,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1992 } 1998 }
1993 1999
1994 if (dst->sa.sa_family == AF_INET) { 2000 if (dst->sa.sa_family == AF_INET) {
1995 __be32 saddr;
1996
1997 if (!vxlan->vn4_sock) 2001 if (!vxlan->vn4_sock)
1998 goto drop; 2002 goto drop;
1999 sk = vxlan->vn4_sock->sock->sk; 2003 sk = vxlan->vn4_sock->sock->sk;
2000 2004
2001 rt = vxlan_get_route(vxlan, skb, 2005 rt = vxlan_get_route(vxlan, skb,
2002 rdst ? rdst->remote_ifindex : 0, tos, 2006 rdst ? rdst->remote_ifindex : 0, tos,
2003 dst->sin.sin_addr.s_addr, &saddr, 2007 dst->sin.sin_addr.s_addr,
2008 &src->sin.sin_addr.s_addr,
2004 dst_cache, info); 2009 dst_cache, info);
2005 if (IS_ERR(rt)) { 2010 if (IS_ERR(rt)) {
2006 netdev_dbg(dev, "no route to %pI4\n", 2011 netdev_dbg(dev, "no route to %pI4\n",
@@ -2017,7 +2022,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2017 } 2022 }
2018 2023
2019 /* Bypass encapsulation if the destination is local */ 2024 /* Bypass encapsulation if the destination is local */
2020 if (rt->rt_flags & RTCF_LOCAL && 2025 if (!info && rt->rt_flags & RTCF_LOCAL &&
2021 !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { 2026 !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
2022 struct vxlan_dev *dst_vxlan; 2027 struct vxlan_dev *dst_vxlan;
2023 2028
@@ -2043,13 +2048,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2043 if (err < 0) 2048 if (err < 0)
2044 goto xmit_tx_error; 2049 goto xmit_tx_error;
2045 2050
2046 udp_tunnel_xmit_skb(rt, sk, skb, saddr, 2051 udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
2047 dst->sin.sin_addr.s_addr, tos, ttl, df, 2052 dst->sin.sin_addr.s_addr, tos, ttl, df,
2048 src_port, dst_port, xnet, !udp_sum); 2053 src_port, dst_port, xnet, !udp_sum);
2049#if IS_ENABLED(CONFIG_IPV6) 2054#if IS_ENABLED(CONFIG_IPV6)
2050 } else { 2055 } else {
2051 struct dst_entry *ndst; 2056 struct dst_entry *ndst;
2052 struct in6_addr saddr;
2053 u32 rt6i_flags; 2057 u32 rt6i_flags;
2054 2058
2055 if (!vxlan->vn6_sock) 2059 if (!vxlan->vn6_sock)
@@ -2058,7 +2062,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2058 2062
2059 ndst = vxlan6_get_route(vxlan, skb, 2063 ndst = vxlan6_get_route(vxlan, skb,
2060 rdst ? rdst->remote_ifindex : 0, tos, 2064 rdst ? rdst->remote_ifindex : 0, tos,
2061 label, &dst->sin6.sin6_addr, &saddr, 2065 label, &dst->sin6.sin6_addr,
2066 &src->sin6.sin6_addr,
2062 dst_cache, info); 2067 dst_cache, info);
2063 if (IS_ERR(ndst)) { 2068 if (IS_ERR(ndst)) {
2064 netdev_dbg(dev, "no route to %pI6\n", 2069 netdev_dbg(dev, "no route to %pI6\n",
@@ -2077,7 +2082,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2077 2082
2078 /* Bypass encapsulation if the destination is local */ 2083 /* Bypass encapsulation if the destination is local */
2079 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; 2084 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
2080 if (rt6i_flags & RTF_LOCAL && 2085 if (!info && rt6i_flags & RTF_LOCAL &&
2081 !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { 2086 !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
2082 struct vxlan_dev *dst_vxlan; 2087 struct vxlan_dev *dst_vxlan;
2083 2088
@@ -2104,7 +2109,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2104 return; 2109 return;
2105 } 2110 }
2106 udp_tunnel6_xmit_skb(ndst, sk, skb, dev, 2111 udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
2107 &saddr, &dst->sin6.sin6_addr, tos, ttl, 2112 &src->sin6.sin6_addr,
2113 &dst->sin6.sin6_addr, tos, ttl,
2108 label, src_port, dst_port, !udp_sum); 2114 label, src_port, dst_port, !udp_sum);
2109#endif 2115#endif
2110 } 2116 }
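The vxlan hunks thread a caller-selected source address through vxlan_get_route()/vxlan6_get_route(): FDB-driven transmits keep using the configured cfg.saddr, while metadata-based (ip_tunnel_info) transmits now take the source from the tunnel key rather than the device config, and the local-delivery bypass is skipped whenever info is set. A compact model of the selection (vxlan_pick_src() is illustrative; the real logic is inline in vxlan_xmit_one() above):

static const union vxlan_addr *
vxlan_pick_src(const struct vxlan_dev *vxlan,
	       const struct ip_tunnel_info *info,
	       union vxlan_addr *local_ip)
{
	if (!info)
		return &vxlan->cfg.saddr;	/* FDB/default path */

	if (ip_tunnel_info_af(info) == AF_INET)
		local_ip->sin.sin_addr.s_addr = info->key.u.ipv4.src;
	else
		local_ip->sin6.sin6_addr = info->key.u.ipv6.src;
	return local_ip;
}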
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 1d689169da76..9e1f2d9c9865 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5700,10 +5700,11 @@ out:
5700 mutex_unlock(&wl->mutex); 5700 mutex_unlock(&wl->mutex);
5701} 5701}
5702 5702
5703static u32 wlcore_op_get_expected_throughput(struct ieee80211_sta *sta) 5703static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5704 struct ieee80211_sta *sta)
5704{ 5705{
5705 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv; 5706 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5706 struct wl1271 *wl = wl_sta->wl; 5707 struct wl1271 *wl = hw->priv;
5707 u8 hlid = wl_sta->hlid; 5708 u8 hlid = wl_sta->hlid;
5708 5709
5709 /* return in units of Kbps */ 5710 /* return in units of Kbps */
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 4b0eff6da674..85737e96ab8b 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -189,11 +189,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
189 case 1: 189 case 1:
190 _debug("extract FID count"); 190 _debug("extract FID count");
191 ret = afs_extract_data(call, skb, last, &call->tmp, 4); 191 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
192 switch (ret) { 192 if (ret < 0)
193 case 0: break; 193 return ret;
194 case -EAGAIN: return 0;
195 default: return ret;
196 }
197 194
198 call->count = ntohl(call->tmp); 195 call->count = ntohl(call->tmp);
199 _debug("FID count: %u", call->count); 196 _debug("FID count: %u", call->count);
@@ -210,11 +207,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
210 _debug("extract FID array"); 207 _debug("extract FID array");
211 ret = afs_extract_data(call, skb, last, call->buffer, 208 ret = afs_extract_data(call, skb, last, call->buffer,
212 call->count * 3 * 4); 209 call->count * 3 * 4);
213 switch (ret) { 210 if (ret < 0)
214 case 0: break; 211 return ret;
215 case -EAGAIN: return 0;
216 default: return ret;
217 }
218 212
219 _debug("unmarshall FID array"); 213 _debug("unmarshall FID array");
220 call->request = kcalloc(call->count, 214 call->request = kcalloc(call->count,
@@ -239,11 +233,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
239 case 3: 233 case 3:
240 _debug("extract CB count"); 234 _debug("extract CB count");
241 ret = afs_extract_data(call, skb, last, &call->tmp, 4); 235 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
242 switch (ret) { 236 if (ret < 0)
243 case 0: break; 237 return ret;
244 case -EAGAIN: return 0;
245 default: return ret;
246 }
247 238
248 tmp = ntohl(call->tmp); 239 tmp = ntohl(call->tmp);
249 _debug("CB count: %u", tmp); 240 _debug("CB count: %u", tmp);
@@ -258,11 +249,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
258 _debug("extract CB array"); 249 _debug("extract CB array");
259 ret = afs_extract_data(call, skb, last, call->request, 250 ret = afs_extract_data(call, skb, last, call->request,
260 call->count * 3 * 4); 251 call->count * 3 * 4);
261 switch (ret) { 252 if (ret < 0)
262 case 0: break; 253 return ret;
263 case -EAGAIN: return 0;
264 default: return ret;
265 }
266 254
267 _debug("unmarshall CB array"); 255 _debug("unmarshall CB array");
268 cb = call->request; 256 cb = call->request;
@@ -278,9 +266,9 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
278 call->unmarshall++; 266 call->unmarshall++;
279 267
280 case 5: 268 case 5:
281 _debug("trailer"); 269 ret = afs_data_complete(call, skb, last);
282 if (skb->len != 0) 270 if (ret < 0)
283 return -EBADMSG; 271 return ret;
284 272
285 /* Record that the message was unmarshalled successfully so 273 /* Record that the message was unmarshalled successfully so
286 * that the call destructor knows to do the callback breaking 274
@@ -294,8 +282,6 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
294 break; 282 break;
295 } 283 }
296 284
297 if (!last)
298 return 0;
299 285
300 call->state = AFS_CALL_REPLYING; 286 call->state = AFS_CALL_REPLYING;
301 287
@@ -335,13 +321,13 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call,
335{ 321{
336 struct afs_server *server; 322 struct afs_server *server;
337 struct in_addr addr; 323 struct in_addr addr;
324 int ret;
338 325
339 _enter(",{%u},%d", skb->len, last); 326 _enter(",{%u},%d", skb->len, last);
340 327
341 if (skb->len > 0) 328 ret = afs_data_complete(call, skb, last);
342 return -EBADMSG; 329 if (ret < 0)
343 if (!last) 330 return ret;
344 return 0;
345 331
346 /* no unmarshalling required */ 332 /* no unmarshalling required */
347 call->state = AFS_CALL_REPLYING; 333 call->state = AFS_CALL_REPLYING;
@@ -371,8 +357,10 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call,
371 357
372 _enter(",{%u},%d", skb->len, last); 358 _enter(",{%u},%d", skb->len, last);
373 359
360 /* There are some arguments that we ignore */
361 afs_data_consumed(call, skb);
374 if (!last) 362 if (!last)
375 return 0; 363 return -EAGAIN;
376 364
377 /* no unmarshalling required */ 365 /* no unmarshalling required */
378 call->state = AFS_CALL_REPLYING; 366 call->state = AFS_CALL_REPLYING;
@@ -408,12 +396,13 @@ static void SRXAFSCB_Probe(struct work_struct *work)
408static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb, 396static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb,
409 bool last) 397 bool last)
410{ 398{
399 int ret;
400
411 _enter(",{%u},%d", skb->len, last); 401 _enter(",{%u},%d", skb->len, last);
412 402
413 if (skb->len > 0) 403 ret = afs_data_complete(call, skb, last);
414 return -EBADMSG; 404 if (ret < 0)
415 if (!last) 405 return ret;
416 return 0;
417 406
418 /* no unmarshalling required */ 407 /* no unmarshalling required */
419 call->state = AFS_CALL_REPLYING; 408 call->state = AFS_CALL_REPLYING;
@@ -460,10 +449,9 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb,
460 449
461 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); 450 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
462 451
463 if (skb->len > 0) 452 ret = afs_data_complete(call, skb, last);
464 return -EBADMSG; 453 if (ret < 0)
465 if (!last) 454 return ret;
466 return 0;
467 455
468 switch (call->unmarshall) { 456 switch (call->unmarshall) {
469 case 0: 457 case 0:
@@ -509,8 +497,9 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb,
509 break; 497 break;
510 } 498 }
511 499
512 if (!last) 500 ret = afs_data_complete(call, skb, last);
513 return 0; 501 if (ret < 0)
502 return ret;
514 503
515 call->state = AFS_CALL_REPLYING; 504 call->state = AFS_CALL_REPLYING;
516 505
@@ -588,12 +577,13 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work)
588static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call, 577static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call,
589 struct sk_buff *skb, bool last) 578 struct sk_buff *skb, bool last)
590{ 579{
580 int ret;
581
591 _enter(",{%u},%d", skb->len, last); 582 _enter(",{%u},%d", skb->len, last);
592 583
593 if (skb->len > 0) 584 ret = afs_data_complete(call, skb, last);
594 return -EBADMSG; 585 if (ret < 0)
595 if (!last) 586 return ret;
596 return 0;
597 587
598 /* no unmarshalling required */ 588 /* no unmarshalling required */
599 call->state = AFS_CALL_REPLYING; 589 call->state = AFS_CALL_REPLYING;
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index c2e930ec2888..9312b92e54be 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -240,15 +240,13 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call,
240{ 240{
241 struct afs_vnode *vnode = call->reply; 241 struct afs_vnode *vnode = call->reply;
242 const __be32 *bp; 242 const __be32 *bp;
243 int ret;
243 244
244 _enter(",,%u", last); 245 _enter(",,%u", last);
245 246
246 afs_transfer_reply(call, skb); 247 ret = afs_transfer_reply(call, skb, last);
247 if (!last) 248 if (ret < 0)
248 return 0; 249 return ret;
249
250 if (call->reply_size != call->reply_max)
251 return -EBADMSG;
252 250
253 /* unmarshall the reply once we've received all of it */ 251 /* unmarshall the reply once we've received all of it */
254 bp = call->buffer; 252 bp = call->buffer;
@@ -335,11 +333,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
335 case 1: 333 case 1:
336 _debug("extract data length (MSW)"); 334 _debug("extract data length (MSW)");
337 ret = afs_extract_data(call, skb, last, &call->tmp, 4); 335 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
338 switch (ret) { 336 if (ret < 0)
339 case 0: break; 337 return ret;
340 case -EAGAIN: return 0;
341 default: return ret;
342 }
343 338
344 call->count = ntohl(call->tmp); 339 call->count = ntohl(call->tmp);
345 _debug("DATA length MSW: %u", call->count); 340 _debug("DATA length MSW: %u", call->count);
@@ -353,11 +348,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
353 case 2: 348 case 2:
354 _debug("extract data length"); 349 _debug("extract data length");
355 ret = afs_extract_data(call, skb, last, &call->tmp, 4); 350 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
356 switch (ret) { 351 if (ret < 0)
357 case 0: break; 352 return ret;
358 case -EAGAIN: return 0;
359 default: return ret;
360 }
361 353
362 call->count = ntohl(call->tmp); 354 call->count = ntohl(call->tmp);
363 _debug("DATA length: %u", call->count); 355 _debug("DATA length: %u", call->count);
@@ -375,11 +367,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
375 ret = afs_extract_data(call, skb, last, buffer, 367 ret = afs_extract_data(call, skb, last, buffer,
376 call->count); 368 call->count);
377 kunmap_atomic(buffer); 369 kunmap_atomic(buffer);
378 switch (ret) { 370 if (ret < 0)
379 case 0: break; 371 return ret;
380 case -EAGAIN: return 0;
381 default: return ret;
382 }
383 } 372 }
384 373
385 call->offset = 0; 374 call->offset = 0;
@@ -389,11 +378,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
389 case 4: 378 case 4:
390 ret = afs_extract_data(call, skb, last, call->buffer, 379 ret = afs_extract_data(call, skb, last, call->buffer,
391 (21 + 3 + 6) * 4); 380 (21 + 3 + 6) * 4);
392 switch (ret) { 381 if (ret < 0)
393 case 0: break; 382 return ret;
394 case -EAGAIN: return 0;
395 default: return ret;
396 }
397 383
398 bp = call->buffer; 384 bp = call->buffer;
399 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); 385 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL);
@@ -405,15 +391,12 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
405 call->unmarshall++; 391 call->unmarshall++;
406 392
407 case 5: 393 case 5:
408 _debug("trailer"); 394 ret = afs_data_complete(call, skb, last);
409 if (skb->len != 0) 395 if (ret < 0)
410 return -EBADMSG; 396 return ret;
411 break; 397 break;
412 } 398 }
413 399
414 if (!last)
415 return 0;
416
417 if (call->count < PAGE_SIZE) { 400 if (call->count < PAGE_SIZE) {
418 _debug("clear"); 401 _debug("clear");
419 page = call->reply3; 402 page = call->reply3;
@@ -537,9 +520,8 @@ static int afs_deliver_fs_give_up_callbacks(struct afs_call *call,
537{ 520{
538 _enter(",{%u},%d", skb->len, last); 521 _enter(",{%u},%d", skb->len, last);
539 522
540 if (skb->len > 0) 523 /* shouldn't be any reply data */
541 return -EBADMSG; /* shouldn't be any reply data */ 524 return afs_data_complete(call, skb, last);
542 return 0;
543} 525}
544 526
545/* 527/*
@@ -622,15 +604,13 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call,
622{ 604{
623 struct afs_vnode *vnode = call->reply; 605 struct afs_vnode *vnode = call->reply;
624 const __be32 *bp; 606 const __be32 *bp;
607 int ret;
625 608
626 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); 609 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
627 610
628 afs_transfer_reply(call, skb); 611 ret = afs_transfer_reply(call, skb, last);
629 if (!last) 612 if (ret < 0)
630 return 0; 613 return ret;
631
632 if (call->reply_size != call->reply_max)
633 return -EBADMSG;
634 614
635 /* unmarshall the reply once we've received all of it */ 615 /* unmarshall the reply once we've received all of it */
636 bp = call->buffer; 616 bp = call->buffer;
@@ -721,15 +701,13 @@ static int afs_deliver_fs_remove(struct afs_call *call,
721{ 701{
722 struct afs_vnode *vnode = call->reply; 702 struct afs_vnode *vnode = call->reply;
723 const __be32 *bp; 703 const __be32 *bp;
704 int ret;
724 705
725 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); 706 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
726 707
727 afs_transfer_reply(call, skb); 708 ret = afs_transfer_reply(call, skb, last);
728 if (!last) 709 if (ret < 0)
729 return 0; 710 return ret;
730
731 if (call->reply_size != call->reply_max)
732 return -EBADMSG;
733 711
734 /* unmarshall the reply once we've received all of it */ 712 /* unmarshall the reply once we've received all of it */
735 bp = call->buffer; 713 bp = call->buffer;
@@ -804,15 +782,13 @@ static int afs_deliver_fs_link(struct afs_call *call,
804{ 782{
805 struct afs_vnode *dvnode = call->reply, *vnode = call->reply2; 783 struct afs_vnode *dvnode = call->reply, *vnode = call->reply2;
806 const __be32 *bp; 784 const __be32 *bp;
785 int ret;
807 786
808 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); 787 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
809 788
810 afs_transfer_reply(call, skb); 789 ret = afs_transfer_reply(call, skb, last);
811 if (!last) 790 if (ret < 0)
812 return 0; 791 return ret;
813
814 if (call->reply_size != call->reply_max)
815 return -EBADMSG;
816 792
817 /* unmarshall the reply once we've received all of it */ 793 /* unmarshall the reply once we've received all of it */
818 bp = call->buffer; 794 bp = call->buffer;
@@ -892,15 +868,13 @@ static int afs_deliver_fs_symlink(struct afs_call *call,
892{ 868{
893 struct afs_vnode *vnode = call->reply; 869 struct afs_vnode *vnode = call->reply;
894 const __be32 *bp; 870 const __be32 *bp;
871 int ret;
895 872
896 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); 873 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
897 874
898 afs_transfer_reply(call, skb); 875 ret = afs_transfer_reply(call, skb, last);
899 if (!last) 876 if (ret < 0)
900 return 0; 877 return ret;
901
902 if (call->reply_size != call->reply_max)
903 return -EBADMSG;
904 878
905 /* unmarshall the reply once we've received all of it */ 879 /* unmarshall the reply once we've received all of it */
906 bp = call->buffer; 880 bp = call->buffer;
@@ -999,15 +973,13 @@ static int afs_deliver_fs_rename(struct afs_call *call,
999{ 973{
1000 struct afs_vnode *orig_dvnode = call->reply, *new_dvnode = call->reply2; 974 struct afs_vnode *orig_dvnode = call->reply, *new_dvnode = call->reply2;
1001 const __be32 *bp; 975 const __be32 *bp;
976 int ret;
1002 977
1003 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); 978 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
1004 979
1005 afs_transfer_reply(call, skb); 980 ret = afs_transfer_reply(call, skb, last);
1006 if (!last) 981 if (ret < 0)
1007 return 0; 982 return ret;
1008
1009 if (call->reply_size != call->reply_max)
1010 return -EBADMSG;
1011 983
1012 /* unmarshall the reply once we've received all of it */ 984 /* unmarshall the reply once we've received all of it */
1013 bp = call->buffer; 985 bp = call->buffer;
@@ -1105,20 +1077,13 @@ static int afs_deliver_fs_store_data(struct afs_call *call,
1105{ 1077{
1106 struct afs_vnode *vnode = call->reply; 1078 struct afs_vnode *vnode = call->reply;
1107 const __be32 *bp; 1079 const __be32 *bp;
1080 int ret;
1108 1081
1109 _enter(",,%u", last); 1082 _enter(",,%u", last);
1110 1083
1111 afs_transfer_reply(call, skb); 1084 ret = afs_transfer_reply(call, skb, last);
1112 if (!last) { 1085 if (ret < 0)
1113 _leave(" = 0 [more]"); 1086 return ret;
1114 return 0;
1115 }
1116
1117 if (call->reply_size != call->reply_max) {
1118 _leave(" = -EBADMSG [%u != %u]",
1119 call->reply_size, call->reply_max);
1120 return -EBADMSG;
1121 }
1122 1087
1123 /* unmarshall the reply once we've received all of it */ 1088 /* unmarshall the reply once we've received all of it */
1124 bp = call->buffer; 1089 bp = call->buffer;
@@ -1292,20 +1257,13 @@ static int afs_deliver_fs_store_status(struct afs_call *call,
1292 afs_dataversion_t *store_version; 1257 afs_dataversion_t *store_version;
1293 struct afs_vnode *vnode = call->reply; 1258 struct afs_vnode *vnode = call->reply;
1294 const __be32 *bp; 1259 const __be32 *bp;
1260 int ret;
1295 1261
1296 _enter(",,%u", last); 1262 _enter(",,%u", last);
1297 1263
1298 afs_transfer_reply(call, skb); 1264 ret = afs_transfer_reply(call, skb, last);
1299 if (!last) { 1265 if (ret < 0)
1300 _leave(" = 0 [more]"); 1266 return ret;
1301 return 0;
1302 }
1303
1304 if (call->reply_size != call->reply_max) {
1305 _leave(" = -EBADMSG [%u != %u]",
1306 call->reply_size, call->reply_max);
1307 return -EBADMSG;
1308 }
1309 1267
1310 /* unmarshall the reply once we've received all of it */ 1268 /* unmarshall the reply once we've received all of it */
1311 store_version = NULL; 1269 store_version = NULL;
@@ -1504,11 +1462,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1504 _debug("extract status"); 1462 _debug("extract status");
1505 ret = afs_extract_data(call, skb, last, call->buffer, 1463 ret = afs_extract_data(call, skb, last, call->buffer,
1506 12 * 4); 1464 12 * 4);
1507 switch (ret) { 1465 if (ret < 0)
1508 case 0: break; 1466 return ret;
1509 case -EAGAIN: return 0;
1510 default: return ret;
1511 }
1512 1467
1513 bp = call->buffer; 1468 bp = call->buffer;
1514 xdr_decode_AFSFetchVolumeStatus(&bp, call->reply2); 1469 xdr_decode_AFSFetchVolumeStatus(&bp, call->reply2);
@@ -1518,11 +1473,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1518 /* extract the volume name length */ 1473 /* extract the volume name length */
1519 case 2: 1474 case 2:
1520 ret = afs_extract_data(call, skb, last, &call->tmp, 4); 1475 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
1521 switch (ret) { 1476 if (ret < 0)
1522 case 0: break; 1477 return ret;
1523 case -EAGAIN: return 0;
1524 default: return ret;
1525 }
1526 1478
1527 call->count = ntohl(call->tmp); 1479 call->count = ntohl(call->tmp);
1528 _debug("volname length: %u", call->count); 1480 _debug("volname length: %u", call->count);
@@ -1537,11 +1489,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1537 if (call->count > 0) { 1489 if (call->count > 0) {
1538 ret = afs_extract_data(call, skb, last, call->reply3, 1490 ret = afs_extract_data(call, skb, last, call->reply3,
1539 call->count); 1491 call->count);
1540 switch (ret) { 1492 if (ret < 0)
1541 case 0: break; 1493 return ret;
1542 case -EAGAIN: return 0;
1543 default: return ret;
1544 }
1545 } 1494 }
1546 1495
1547 p = call->reply3; 1496 p = call->reply3;
@@ -1561,11 +1510,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1561 case 4: 1510 case 4:
1562 ret = afs_extract_data(call, skb, last, call->buffer, 1511 ret = afs_extract_data(call, skb, last, call->buffer,
1563 call->count); 1512 call->count);
1564 switch (ret) { 1513 if (ret < 0)
1565 case 0: break; 1514 return ret;
1566 case -EAGAIN: return 0;
1567 default: return ret;
1568 }
1569 1515
1570 call->offset = 0; 1516 call->offset = 0;
1571 call->unmarshall++; 1517 call->unmarshall++;
@@ -1574,11 +1520,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1574 /* extract the offline message length */ 1520 /* extract the offline message length */
1575 case 5: 1521 case 5:
1576 ret = afs_extract_data(call, skb, last, &call->tmp, 4); 1522 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
1577 switch (ret) { 1523 if (ret < 0)
1578 case 0: break; 1524 return ret;
1579 case -EAGAIN: return 0;
1580 default: return ret;
1581 }
1582 1525
1583 call->count = ntohl(call->tmp); 1526 call->count = ntohl(call->tmp);
1584 _debug("offline msg length: %u", call->count); 1527 _debug("offline msg length: %u", call->count);
@@ -1593,11 +1536,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1593 if (call->count > 0) { 1536 if (call->count > 0) {
1594 ret = afs_extract_data(call, skb, last, call->reply3, 1537 ret = afs_extract_data(call, skb, last, call->reply3,
1595 call->count); 1538 call->count);
1596 switch (ret) { 1539 if (ret < 0)
1597 case 0: break; 1540 return ret;
1598 case -EAGAIN: return 0;
1599 default: return ret;
1600 }
1601 } 1541 }
1602 1542
1603 p = call->reply3; 1543 p = call->reply3;
@@ -1617,11 +1557,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1617 case 7: 1557 case 7:
1618 ret = afs_extract_data(call, skb, last, call->buffer, 1558 ret = afs_extract_data(call, skb, last, call->buffer,
1619 call->count); 1559 call->count);
1620 switch (ret) { 1560 if (ret < 0)
1621 case 0: break; 1561 return ret;
1622 case -EAGAIN: return 0;
1623 default: return ret;
1624 }
1625 1562
1626 call->offset = 0; 1563 call->offset = 0;
1627 call->unmarshall++; 1564 call->unmarshall++;
@@ -1630,11 +1567,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1630 /* extract the message of the day length */ 1567 /* extract the message of the day length */
1631 case 8: 1568 case 8:
1632 ret = afs_extract_data(call, skb, last, &call->tmp, 4); 1569 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
1633 switch (ret) { 1570 if (ret < 0)
1634 case 0: break; 1571 return ret;
1635 case -EAGAIN: return 0;
1636 default: return ret;
1637 }
1638 1572
1639 call->count = ntohl(call->tmp); 1573 call->count = ntohl(call->tmp);
1640 _debug("motd length: %u", call->count); 1574 _debug("motd length: %u", call->count);
@@ -1649,11 +1583,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1649 if (call->count > 0) { 1583 if (call->count > 0) {
1650 ret = afs_extract_data(call, skb, last, call->reply3, 1584 ret = afs_extract_data(call, skb, last, call->reply3,
1651 call->count); 1585 call->count);
1652 switch (ret) { 1586 if (ret < 0)
1653 case 0: break; 1587 return ret;
1654 case -EAGAIN: return 0;
1655 default: return ret;
1656 }
1657 } 1588 }
1658 1589
1659 p = call->reply3; 1590 p = call->reply3;
@@ -1673,26 +1604,20 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call,
1673 case 10: 1604 case 10:
1674 ret = afs_extract_data(call, skb, last, call->buffer, 1605 ret = afs_extract_data(call, skb, last, call->buffer,
1675 call->count); 1606 call->count);
1676 switch (ret) { 1607 if (ret < 0)
1677 case 0: break; 1608 return ret;
1678 case -EAGAIN: return 0;
1679 default: return ret;
1680 }
1681 1609
1682 call->offset = 0; 1610 call->offset = 0;
1683 call->unmarshall++; 1611 call->unmarshall++;
1684 no_motd_padding: 1612 no_motd_padding:
1685 1613
1686 case 11: 1614 case 11:
1687 _debug("trailer %d", skb->len); 1615 ret = afs_data_complete(call, skb, last);
1688 if (skb->len != 0) 1616 if (ret < 0)
1689 return -EBADMSG; 1617 return ret;
1690 break; 1618 break;
1691 } 1619 }
1692 1620
1693 if (!last)
1694 return 0;
1695
1696 _leave(" = 0 [done]"); 1621 _leave(" = 0 [done]");
1697 return 0; 1622 return 0;
1698} 1623}
@@ -1764,15 +1689,13 @@ static int afs_deliver_fs_xxxx_lock(struct afs_call *call,
1764 struct sk_buff *skb, bool last) 1689 struct sk_buff *skb, bool last)
1765{ 1690{
1766 const __be32 *bp; 1691 const __be32 *bp;
1692 int ret;
1767 1693
1768 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); 1694 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
1769 1695
1770 afs_transfer_reply(call, skb); 1696 ret = afs_transfer_reply(call, skb, last);
1771 if (!last) 1697 if (ret < 0)
1772 return 0; 1698 return ret;
1773
1774 if (call->reply_size != call->reply_max)
1775 return -EBADMSG;
1776 1699
1777 /* unmarshall the reply once we've received all of it */ 1700 /* unmarshall the reply once we've received all of it */
1778 bp = call->buffer; 1701 bp = call->buffer;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 71d5982312f3..df976b2a7f40 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -609,17 +609,29 @@ extern void afs_proc_cell_remove(struct afs_cell *);
609 */ 609 */
610extern int afs_open_socket(void); 610extern int afs_open_socket(void);
611extern void afs_close_socket(void); 611extern void afs_close_socket(void);
612extern void afs_data_consumed(struct afs_call *, struct sk_buff *);
612extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t, 613extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t,
613 const struct afs_wait_mode *); 614 const struct afs_wait_mode *);
614extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *, 615extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *,
615 size_t, size_t); 616 size_t, size_t);
616extern void afs_flat_call_destructor(struct afs_call *); 617extern void afs_flat_call_destructor(struct afs_call *);
617extern void afs_transfer_reply(struct afs_call *, struct sk_buff *); 618extern int afs_transfer_reply(struct afs_call *, struct sk_buff *, bool);
618extern void afs_send_empty_reply(struct afs_call *); 619extern void afs_send_empty_reply(struct afs_call *);
619extern void afs_send_simple_reply(struct afs_call *, const void *, size_t); 620extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
620extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *, 621extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *,
621 size_t); 622 size_t);
622 623
624static inline int afs_data_complete(struct afs_call *call, struct sk_buff *skb,
625 bool last)
626{
627 if (skb->len > 0)
628 return -EBADMSG;
629 afs_data_consumed(call, skb);
630 if (!last)
631 return -EAGAIN;
632 return 0;
633}
634
623/* 635/*
624 * security.c 636 * security.c
625 */ 637 */
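afs_data_complete() above encodes the new delivery convention in one place: non-empty trailing data is -EBADMSG, anything short of the final packet is consumed and reported as -EAGAIN, and only the last, empty packet yields 0. A minimal deliver op under this convention (example_deliver_noop() is illustrative; compare afs_deliver_cb_probe in the cmservice.c hunks):

static int example_deliver_noop(struct afs_call *call, struct sk_buff *skb,
				bool last)
{
	int ret;

	ret = afs_data_complete(call, skb, last);  /* 0, -EAGAIN or -EBADMSG */
	if (ret < 0)
		return ret;

	/* No arguments to unmarshal; go straight to replying. */
	call->state = AFS_CALL_REPLYING;
	return 0;
}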
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 4832de84d52c..14d04c848465 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -150,10 +150,9 @@ void afs_close_socket(void)
150} 150}
151 151
152/* 152/*
153 * note that the data in a socket buffer is now delivered and that the buffer 153 * Note that the data in a socket buffer is now consumed.
154 * should be freed
155 */ 154 */
156static void afs_data_delivered(struct sk_buff *skb) 155void afs_data_consumed(struct afs_call *call, struct sk_buff *skb)
157{ 156{
158 if (!skb) { 157 if (!skb) {
159 _debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs)); 158 _debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs));
@@ -161,9 +160,7 @@ static void afs_data_delivered(struct sk_buff *skb)
161 } else { 160 } else {
162 _debug("DLVR %p{%u} [%d]", 161 _debug("DLVR %p{%u} [%d]",
163 skb, skb->mark, atomic_read(&afs_outstanding_skbs)); 162 skb, skb->mark, atomic_read(&afs_outstanding_skbs));
164 if (atomic_dec_return(&afs_outstanding_skbs) == -1) 163 rxrpc_kernel_data_consumed(call->rxcall, skb);
165 BUG();
166 rxrpc_kernel_data_delivered(skb);
167 } 164 }
168} 165}
169 166
@@ -489,9 +486,15 @@ static void afs_deliver_to_call(struct afs_call *call)
489 last = rxrpc_kernel_is_data_last(skb); 486 last = rxrpc_kernel_is_data_last(skb);
490 ret = call->type->deliver(call, skb, last); 487 ret = call->type->deliver(call, skb, last);
491 switch (ret) { 488 switch (ret) {
489 case -EAGAIN:
490 if (last) {
491 _debug("short data");
492 goto unmarshal_error;
493 }
494 break;
492 case 0: 495 case 0:
493 if (last && 496 ASSERT(last);
494 call->state == AFS_CALL_AWAIT_REPLY) 497 if (call->state == AFS_CALL_AWAIT_REPLY)
495 call->state = AFS_CALL_COMPLETE; 498 call->state = AFS_CALL_COMPLETE;
496 break; 499 break;
497 case -ENOTCONN: 500 case -ENOTCONN:
@@ -501,6 +504,7 @@ static void afs_deliver_to_call(struct afs_call *call)
501 abort_code = RX_INVALID_OPERATION; 504 abort_code = RX_INVALID_OPERATION;
502 goto do_abort; 505 goto do_abort;
503 default: 506 default:
507 unmarshal_error:
504 abort_code = RXGEN_CC_UNMARSHAL; 508 abort_code = RXGEN_CC_UNMARSHAL;
505 if (call->state != AFS_CALL_AWAIT_REPLY) 509 if (call->state != AFS_CALL_AWAIT_REPLY)
506 abort_code = RXGEN_SS_UNMARSHAL; 510 abort_code = RXGEN_SS_UNMARSHAL;
@@ -511,9 +515,7 @@ static void afs_deliver_to_call(struct afs_call *call)
511 call->state = AFS_CALL_ERROR; 515 call->state = AFS_CALL_ERROR;
512 break; 516 break;
513 } 517 }
514 afs_data_delivered(skb); 518 break;
515 skb = NULL;
516 continue;
517 case RXRPC_SKB_MARK_FINAL_ACK: 519 case RXRPC_SKB_MARK_FINAL_ACK:
518 _debug("Rcv ACK"); 520 _debug("Rcv ACK");
519 call->state = AFS_CALL_COMPLETE; 521 call->state = AFS_CALL_COMPLETE;
@@ -685,15 +687,35 @@ static void afs_process_async_call(struct afs_call *call)
685} 687}
686 688
687/* 689/*
688 * empty a socket buffer into a flat reply buffer 690 * Empty a socket buffer into a flat reply buffer.
689 */ 691 */
690void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb) 692int afs_transfer_reply(struct afs_call *call, struct sk_buff *skb, bool last)
691{ 693{
692 size_t len = skb->len; 694 size_t len = skb->len;
693 695
694 if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, len) < 0) 696 if (len > call->reply_max - call->reply_size) {
695 BUG(); 697 _leave(" = -EBADMSG [%zu > %u]",
696 call->reply_size += len; 698 len, call->reply_max - call->reply_size);
699 return -EBADMSG;
700 }
701
702 if (len > 0) {
703 if (skb_copy_bits(skb, 0, call->buffer + call->reply_size,
704 len) < 0)
705 BUG();
706 call->reply_size += len;
707 }
708
709 afs_data_consumed(call, skb);
710 if (!last)
711 return -EAGAIN;
712
713 if (call->reply_size != call->reply_max) {
714 _leave(" = -EBADMSG [%u != %u]",
715 call->reply_size, call->reply_max);
716 return -EBADMSG;
717 }
718 return 0;
697} 719}
698 720
699/* 721/*
@@ -745,7 +767,8 @@ static void afs_collect_incoming_call(struct work_struct *work)
745} 767}
746 768
747/* 769/*
748 * grab the operation ID from an incoming cache manager call 770 * Grab the operation ID from an incoming cache manager call. The socket
771 * buffer is discarded on error or if we don't yet have sufficient data.
749 */ 772 */
750static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb, 773static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
751 bool last) 774 bool last)
@@ -766,12 +789,9 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
766 call->offset += len; 789 call->offset += len;
767 790
768 if (call->offset < 4) { 791 if (call->offset < 4) {
769 if (last) { 792 afs_data_consumed(call, skb);
770 _leave(" = -EBADMSG [op ID short]"); 793 _leave(" = -EAGAIN");
771 return -EBADMSG; 794 return -EAGAIN;
772 }
773 _leave(" = 0 [incomplete]");
774 return 0;
775 } 795 }
776 796
777 call->state = AFS_CALL_AWAIT_REQUEST; 797 call->state = AFS_CALL_AWAIT_REQUEST;
@@ -855,7 +875,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
855} 875}
856 876
857/* 877/*
858 * extract a piece of data from the received data socket buffers 878 * Extract a piece of data from the received data socket buffers.
859 */ 879 */
860int afs_extract_data(struct afs_call *call, struct sk_buff *skb, 880int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
861 bool last, void *buf, size_t count) 881 bool last, void *buf, size_t count)
@@ -873,10 +893,7 @@ int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
873 call->offset += len; 893 call->offset += len;
874 894
875 if (call->offset < count) { 895 if (call->offset < count) {
876 if (last) { 896 afs_data_consumed(call, skb);
877 _leave(" = -EBADMSG [%d < %zu]", call->offset, count);
878 return -EBADMSG;
879 }
880 _leave(" = -EAGAIN"); 897 _leave(" = -EAGAIN");
881 return -EAGAIN; 898 return -EAGAIN;
882 } 899 }
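Taken together, the rxrpc.c hunks define the contract between deliver ops and afs_deliver_to_call(); summarised here as a C comment for reference (no behaviour beyond what the hunks above implement):

/*
 * Deliver-op return codes, as handled by afs_deliver_to_call():
 *
 *   0         all data received and unmarshalled; the skb was consumed
 *   -EAGAIN   skb consumed, more data expected; treated as a "short
 *             data" unmarshalling error if this was the last packet
 *   -ENOTCONN abort the call with RX_CALL_DEAD
 *   -ENOTSUPP abort the call with RX_INVALID_OPERATION
 *   other     unmarshalling error; abort with RXGEN_CC_UNMARSHAL or
 *             RXGEN_SS_UNMARSHAL depending on call->state
 */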
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index 340afd0cd182..f94d1abdc3eb 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -64,16 +64,13 @@ static int afs_deliver_vl_get_entry_by_xxx(struct afs_call *call,
64 struct afs_cache_vlocation *entry; 64 struct afs_cache_vlocation *entry;
65 __be32 *bp; 65 __be32 *bp;
66 u32 tmp; 66 u32 tmp;
67 int loop; 67 int loop, ret;
68 68
69 _enter(",,%u", last); 69 _enter(",,%u", last);
70 70
71 afs_transfer_reply(call, skb); 71 ret = afs_transfer_reply(call, skb, last);
72 if (!last) 72 if (ret < 0)
73 return 0; 73 return ret;
74
75 if (call->reply_size != call->reply_max)
76 return -EBADMSG;
77 74
78 /* unmarshall the reply once we've received all of it */ 75 /* unmarshall the reply once we've received all of it */
79 entry = call->reply; 76 entry = call->reply;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 076df5360ba5..3a788bf0affd 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3891,8 +3891,7 @@ void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
3891extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; 3891extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
3892void netdev_rss_key_fill(void *buffer, size_t len); 3892void netdev_rss_key_fill(void *buffer, size_t len);
3893 3893
3894int dev_get_nest_level(struct net_device *dev, 3894int dev_get_nest_level(struct net_device *dev);
3895 bool (*type_check)(const struct net_device *dev));
3896int skb_checksum_help(struct sk_buff *skb); 3895int skb_checksum_help(struct sk_buff *skb);
3897struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 3896struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3898 netdev_features_t features, bool tx_path); 3897 netdev_features_t features, bool tx_path);
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index b1e3c57c7117..d6c4177df7cb 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -70,8 +70,16 @@ struct qed_dbcx_pfc_params {
70 u8 max_tc; 70 u8 max_tc;
71}; 71};
72 72
73enum qed_dcbx_sf_ieee_type {
74 QED_DCBX_SF_IEEE_ETHTYPE,
75 QED_DCBX_SF_IEEE_TCP_PORT,
76 QED_DCBX_SF_IEEE_UDP_PORT,
77 QED_DCBX_SF_IEEE_TCP_UDP_PORT
78};
79
73struct qed_app_entry { 80struct qed_app_entry {
74 bool ethtype; 81 bool ethtype;
82 enum qed_dcbx_sf_ieee_type sf_ieee;
75 bool enabled; 83 bool enabled;
76 u8 prio; 84 u8 prio;
77 u16 proto_id; 85 u16 proto_id;
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index de1f64318fc4..fcb4c3646173 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -705,70 +705,6 @@ typedef struct sctp_auth_chunk {
 	sctp_authhdr_t auth_hdr;
 } __packed sctp_auth_chunk_t;
 
-struct sctp_info {
-	__u32	sctpi_tag;
-	__u32	sctpi_state;
-	__u32	sctpi_rwnd;
-	__u16	sctpi_unackdata;
-	__u16	sctpi_penddata;
-	__u16	sctpi_instrms;
-	__u16	sctpi_outstrms;
-	__u32	sctpi_fragmentation_point;
-	__u32	sctpi_inqueue;
-	__u32	sctpi_outqueue;
-	__u32	sctpi_overall_error;
-	__u32	sctpi_max_burst;
-	__u32	sctpi_maxseg;
-	__u32	sctpi_peer_rwnd;
-	__u32	sctpi_peer_tag;
-	__u8	sctpi_peer_capable;
-	__u8	sctpi_peer_sack;
-	__u16	__reserved1;
-
-	/* assoc status info */
-	__u64	sctpi_isacks;
-	__u64	sctpi_osacks;
-	__u64	sctpi_opackets;
-	__u64	sctpi_ipackets;
-	__u64	sctpi_rtxchunks;
-	__u64	sctpi_outofseqtsns;
-	__u64	sctpi_idupchunks;
-	__u64	sctpi_gapcnt;
-	__u64	sctpi_ouodchunks;
-	__u64	sctpi_iuodchunks;
-	__u64	sctpi_oodchunks;
-	__u64	sctpi_iodchunks;
-	__u64	sctpi_octrlchunks;
-	__u64	sctpi_ictrlchunks;
-
-	/* primary transport info */
-	struct sockaddr_storage	sctpi_p_address;
-	__s32	sctpi_p_state;
-	__u32	sctpi_p_cwnd;
-	__u32	sctpi_p_srtt;
-	__u32	sctpi_p_rto;
-	__u32	sctpi_p_hbinterval;
-	__u32	sctpi_p_pathmaxrxt;
-	__u32	sctpi_p_sackdelay;
-	__u32	sctpi_p_sackfreq;
-	__u32	sctpi_p_ssthresh;
-	__u32	sctpi_p_partial_bytes_acked;
-	__u32	sctpi_p_flight_size;
-	__u16	sctpi_p_error;
-	__u16	__reserved2;
-
-	/* sctp sock info */
-	__u32	sctpi_s_autoclose;
-	__u32	sctpi_s_adaptation_ind;
-	__u32	sctpi_s_pd_point;
-	__u8	sctpi_s_nodelay;
-	__u8	sctpi_s_disable_fragments;
-	__u8	sctpi_s_v4mapped;
-	__u8	sctpi_s_frag_interleave;
-	__u32	sctpi_s_type;
-	__u32	__reserved3;
-};
-
 struct sctp_infox {
 	struct sctp_info *sctpinfo;
 	struct sctp_association *asoc;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6f0b3e0adc73..0f665cb26b50 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2847,6 +2847,18 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
 	       __skb_linearize(skb) : 0;
 }
 
+static __always_inline void
+__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+		     unsigned int off)
+{
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_block_sub(skb->csum,
+					   csum_partial(start, len, 0), off);
+	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+		 skb_checksum_start_offset(skb) < 0)
+		skb->ip_summed = CHECKSUM_NONE;
+}
+
 /**
  *	skb_postpull_rcsum - update checksum for received skb after pull
  *	@skb: buffer to update
@@ -2857,36 +2869,38 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
  *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
  *	CHECKSUM_NONE so that it can be recomputed from scratch.
  */
-
 static inline void skb_postpull_rcsum(struct sk_buff *skb,
 				      const void *start, unsigned int len)
 {
-	if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
-	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
-		 skb_checksum_start_offset(skb) < 0)
-		skb->ip_summed = CHECKSUM_NONE;
+	__skb_postpull_rcsum(skb, start, len, 0);
 }
 
-unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+static __always_inline void
+__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+		     unsigned int off)
+{
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_block_add(skb->csum,
+					   csum_partial(start, len, 0), off);
+}
 
+/**
+ *	skb_postpush_rcsum - update checksum for received skb after push
+ *	@skb: buffer to update
+ *	@start: start of data after push
+ *	@len: length of data pushed
+ *
+ *	After doing a push on a received packet, you need to call this to
+ *	update the CHECKSUM_COMPLETE checksum.
+ */
 static inline void skb_postpush_rcsum(struct sk_buff *skb,
 				      const void *start, unsigned int len)
 {
-	/* For performing the reverse operation to skb_postpull_rcsum(),
-	 * we can instead of ...
-	 *
-	 *   skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
-	 *
-	 * ... just use this equivalent version here to save a few
-	 * instructions. Feeding csum of 0 in csum_partial() and later
-	 * on adding skb->csum is equivalent to feed skb->csum in the
-	 * first place.
-	 */
-	if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->csum = csum_partial(start, len, skb->csum);
+	__skb_postpush_rcsum(skb, start, len, 0);
 }
 
+unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+
 /**
  *	skb_push_rcsum - push skb and update receive checksum
  *	@skb: buffer to update
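The two __-prefixed helpers above thread an off argument through to csum_block_sub()/csum_block_add() because a block that starts at an odd offset contributes its bytes swapped within the 16-bit words of the one's-complement total. The following stand-alone sketch (plain user-space C; every identifier in it is invented for illustration, nothing here is kernel API) works through that arithmetic:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* one's-complement accumulate, honouring byte parity within 16-bit words */
static uint32_t sum_bytes(const uint8_t *p, size_t len, int odd_start)
{
	uint32_t sum = 0;
	for (size_t i = 0; i < len; i++)
		sum += ((i + odd_start) & 1) ? p[i] : (uint32_t)p[i] << 8;
	return sum;
}

static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t pkt[] = { 0x45, 0x00, 0x12, 0x34, 0xab, 0xcd, 0x01, 0x02 };
	size_t off = 3, len = 2;	/* drop 2 bytes at an odd offset */

	uint32_t full = sum_bytes(pkt, sizeof(pkt), 0);
	/* the block's own sum, computed with its in-packet byte parity */
	uint32_t blk = sum_bytes(pkt + off, len, off & 1);
	/* "postpull": one's-complement subtraction of the block */
	uint16_t updated = fold(full + 0xffffu - fold(blk));

	uint8_t rest[sizeof(pkt) - 2];
	memcpy(rest, pkt, off);
	memcpy(rest + off, pkt + off + len, sizeof(pkt) - off - len);

	printf("updated=%04x recomputed=%04x\n",
	       updated, fold(sum_bytes(rest, sizeof(rest), 0)));
	return 0;
}

Removing the two bytes at offset 3 and patching the folded sum yields the same value as recomputing from scratch, which is the invariant the offset-aware helpers have to preserve for CHECKSUM_COMPLETE.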
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 41e6a24a44b9..82f3c912a5b1 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -176,8 +176,8 @@ int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
 int tcf_unregister_action(struct tc_action_ops *a,
 			  struct pernet_operations *ops);
 int tcf_action_destroy(struct list_head *actions, int bind);
-int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
-		    struct tcf_result *res);
+int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
+		    int nr_actions, struct tcf_result *res);
 int tcf_action_init(struct net *net, struct nlattr *nla,
 		    struct nlattr *est, char *n, int ovr,
 		    int bind, struct list_head *);
@@ -189,30 +189,17 @@ int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
 
-#define tc_no_actions(_exts) \
-	(list_empty(&(_exts)->actions))
-
-#define tc_for_each_action(_a, _exts) \
-	list_for_each_entry(a, &(_exts)->actions, list)
-
-#define tc_single_action(_exts) \
-	(list_is_singular(&(_exts)->actions))
+#endif /* CONFIG_NET_CLS_ACT */
 
 static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
 					   u64 packets, u64 lastuse)
 {
+#ifdef CONFIG_NET_CLS_ACT
 	if (!a->ops->stats_update)
 		return;
 
 	a->ops->stats_update(a, bytes, packets, lastuse);
+#endif
 }
 
-#else /* CONFIG_NET_CLS_ACT */
-
-#define tc_no_actions(_exts) true
-#define tc_for_each_action(_a, _exts) while ((void)(_a), 0)
-#define tc_single_action(_exts) false
-#define tcf_action_stats_update(a, bytes, packets, lastuse)
-
-#endif /* CONFIG_NET_CLS_ACT */
 #endif
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index ac1bc3c49fbd..7b0f88699b25 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -40,12 +40,12 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
 					   unsigned long,
 					   gfp_t);
 int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t);
+void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
 void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
 void rxrpc_kernel_end_call(struct rxrpc_call *);
 bool rxrpc_kernel_is_data_last(struct sk_buff *);
 u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
 int rxrpc_kernel_get_error_number(struct sk_buff *);
-void rxrpc_kernel_data_delivered(struct sk_buff *);
 void rxrpc_kernel_free_skb(struct sk_buff *);
 struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long);
 int rxrpc_kernel_reject_call(struct socket *);
diff --git a/include/net/gre.h b/include/net/gre.h
index 7a54a31d1d4c..73ea256eb7d7 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -104,6 +104,7 @@ static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
 
 	skb_push(skb, hdr_len);
 
+	skb_set_inner_protocol(skb, proto);
 	skb_reset_transport_header(skb);
 	greh = (struct gre_base_hdr *)skb->data;
 	greh->flags = gre_tnl_flags_to_gre_flags(flags);
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 0dc0a51da38f..dce2d586d9ce 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -128,7 +128,8 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
 	to = from | htonl(INET_ECN_CE << 20);
 	*(__be32 *)iph = to;
 	if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->csum = csum_add(csum_sub(skb->csum, from), to);
+		skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
+				     (__force __wsum)to);
 	return 1;
 }
 
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index b4faadbb4e01..cca510a585c3 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -3620,7 +3620,8 @@ struct ieee80211_ops {
 
 	int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
 	void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-	u32 (*get_expected_throughput)(struct ieee80211_sta *sta);
+	u32 (*get_expected_throughput)(struct ieee80211_hw *hw,
+				       struct ieee80211_sta *sta);
 	int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			   int *dbm);
 
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 6f8d65342d3a..c99508d426cc 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -59,7 +59,8 @@ tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
 struct tcf_exts {
 #ifdef CONFIG_NET_CLS_ACT
 	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
-	struct list_head actions;
+	int nr_actions;
+	struct tc_action **actions;
 #endif
 	/* Map to export classifier specific extension TLV types to the
 	 * generic extensions API. Unsupported extensions must be set to 0.
@@ -72,7 +73,10 @@ static inline void tcf_exts_init(struct tcf_exts *exts, int action, int police)
 {
 #ifdef CONFIG_NET_CLS_ACT
 	exts->type = 0;
-	INIT_LIST_HEAD(&exts->actions);
+	exts->nr_actions = 0;
+	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
+				GFP_KERNEL);
+	WARN_ON(!exts->actions); /* TODO: propagate the error to callers */
 #endif
 	exts->action = action;
 	exts->police = police;
@@ -89,7 +93,7 @@ static inline int
 tcf_exts_is_predicative(struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	return !list_empty(&exts->actions);
+	return exts->nr_actions;
 #else
 	return 0;
 #endif
@@ -108,6 +112,20 @@ tcf_exts_is_available(struct tcf_exts *exts)
 	return tcf_exts_is_predicative(exts);
 }
 
+static inline void tcf_exts_to_list(const struct tcf_exts *exts,
+				    struct list_head *actions)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	int i;
+
+	for (i = 0; i < exts->nr_actions; i++) {
+		struct tc_action *a = exts->actions[i];
+
+		list_add(&a->list, actions);
+	}
+#endif
+}
+
 /**
  * tcf_exts_exec - execute tc filter extensions
  * @skb: socket buffer
@@ -124,12 +142,25 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
 	      struct tcf_result *res)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	if (!list_empty(&exts->actions))
-		return tcf_action_exec(skb, &exts->actions, res);
+	if (exts->nr_actions)
+		return tcf_action_exec(skb, exts->actions, exts->nr_actions,
+				       res);
 #endif
 	return 0;
 }
 
+#ifdef CONFIG_NET_CLS_ACT
+
+#define tc_no_actions(_exts)  ((_exts)->nr_actions == 0)
+#define tc_single_action(_exts) ((_exts)->nr_actions == 1)
+
+#else /* CONFIG_NET_CLS_ACT */
+
+#define tc_no_actions(_exts) true
+#define tc_single_action(_exts) false
+
+#endif /* CONFIG_NET_CLS_ACT */
+
 int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
 		      struct nlattr **tb, struct nlattr *rate_tlv,
 		      struct tcf_exts *exts, bool ovr);
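The conversion above turns tcf_exts's linked list of actions into a fixed array of TCA_ACT_MAX_PRIO pointers plus a count, with tcf_exts_to_list() bridging back to list-based callers. A stripped-down user-space sketch of the new shape (all names and the 32-slot size below are illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

#define ACT_MAX_PRIO 32		/* stands in for TCA_ACT_MAX_PRIO */

struct action { const char *name; };

struct exts {
	int nr_actions;
	struct action **actions;
};

static int exts_init(struct exts *e)
{
	e->nr_actions = 0;
	e->actions = calloc(ACT_MAX_PRIO, sizeof(*e->actions));
	return e->actions ? 0 : -1;	/* the kernel version WARNs here */
}

int main(void)
{
	struct exts e;
	struct action a1 = { "mirred" }, a2 = { "police" };

	if (exts_init(&e))
		return 1;
	e.actions[e.nr_actions++] = &a1;
	e.actions[e.nr_actions++] = &a2;

	/* execution order is now a simple bounded scan */
	for (int i = 0; i < e.nr_actions; i++)
		printf("exec %s\n", e.actions[i]->name);

	free(e.actions);
	return 0;
}

The bounded array makes tc_no_actions()/tc_single_action() O(1) count checks, at the cost of the kcalloc() in tcf_exts_init() whose failure the WARN_ON above still leaves to be propagated.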
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index da218fec6056..9e5fc168c8a3 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -339,7 +339,7 @@ enum bpf_func_id {
 	BPF_FUNC_skb_change_type,
 
 	/**
-	 * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb
+	 * bpf_skb_under_cgroup(skb, map, index) - Check cgroup2 membership of skb
 	 * @skb: pointer to skb
 	 * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
 	 * @index: index of the cgroup in the bpf_map
@@ -348,7 +348,7 @@ enum bpf_func_id {
 	 *   == 1 skb succeeded the cgroup2 descendant test
 	 *    < 0 error
 	 */
-	BPF_FUNC_skb_in_cgroup,
+	BPF_FUNC_skb_under_cgroup,
 
 	/**
 	 * bpf_get_hash_recalc(skb)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 01751faccaf8..c674ba2563b7 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -24,7 +24,7 @@ enum nft_registers {
 	__NFT_REG_MAX,
 
 	NFT_REG32_00	= 8,
-	MFT_REG32_01,
+	NFT_REG32_01,
 	NFT_REG32_02,
 	NFT_REG32_03,
 	NFT_REG32_04,
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index d304f4c9792c..a406adcc0793 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -944,4 +944,68 @@ struct sctp_default_prinfo {
 	__u16 pr_policy;
 };
 
+struct sctp_info {
+	__u32	sctpi_tag;
+	__u32	sctpi_state;
+	__u32	sctpi_rwnd;
+	__u16	sctpi_unackdata;
+	__u16	sctpi_penddata;
+	__u16	sctpi_instrms;
+	__u16	sctpi_outstrms;
+	__u32	sctpi_fragmentation_point;
+	__u32	sctpi_inqueue;
+	__u32	sctpi_outqueue;
+	__u32	sctpi_overall_error;
+	__u32	sctpi_max_burst;
+	__u32	sctpi_maxseg;
+	__u32	sctpi_peer_rwnd;
+	__u32	sctpi_peer_tag;
+	__u8	sctpi_peer_capable;
+	__u8	sctpi_peer_sack;
+	__u16	__reserved1;
+
+	/* assoc status info */
+	__u64	sctpi_isacks;
+	__u64	sctpi_osacks;
+	__u64	sctpi_opackets;
+	__u64	sctpi_ipackets;
+	__u64	sctpi_rtxchunks;
+	__u64	sctpi_outofseqtsns;
+	__u64	sctpi_idupchunks;
+	__u64	sctpi_gapcnt;
+	__u64	sctpi_ouodchunks;
+	__u64	sctpi_iuodchunks;
+	__u64	sctpi_oodchunks;
+	__u64	sctpi_iodchunks;
+	__u64	sctpi_octrlchunks;
+	__u64	sctpi_ictrlchunks;
+
+	/* primary transport info */
+	struct sockaddr_storage	sctpi_p_address;
+	__s32	sctpi_p_state;
+	__u32	sctpi_p_cwnd;
+	__u32	sctpi_p_srtt;
+	__u32	sctpi_p_rto;
+	__u32	sctpi_p_hbinterval;
+	__u32	sctpi_p_pathmaxrxt;
+	__u32	sctpi_p_sackdelay;
+	__u32	sctpi_p_sackfreq;
+	__u32	sctpi_p_ssthresh;
+	__u32	sctpi_p_partial_bytes_acked;
+	__u32	sctpi_p_flight_size;
+	__u16	sctpi_p_error;
+	__u16	__reserved2;
+
+	/* sctp sock info */
+	__u32	sctpi_s_autoclose;
+	__u32	sctpi_s_adaptation_ind;
+	__u32	sctpi_s_pd_point;
+	__u8	sctpi_s_nodelay;
+	__u8	sctpi_s_disable_fragments;
+	__u8	sctpi_s_v4mapped;
+	__u8	sctpi_s_frag_interleave;
+	__u32	sctpi_s_type;
+	__u32	__reserved3;
+};
+
 #endif /* _UAPI_SCTP_H */
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index fff3650d52fc..570eeca7bdfa 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -26,11 +26,18 @@ struct bpf_htab {
 	struct bucket *buckets;
 	void *elems;
 	struct pcpu_freelist freelist;
+	void __percpu *extra_elems;
 	atomic_t count;	/* number of elements in this hashtable */
 	u32 n_buckets;	/* number of hash buckets */
 	u32 elem_size;	/* size of each element in bytes */
 };
 
+enum extra_elem_state {
+	HTAB_NOT_AN_EXTRA_ELEM = 0,
+	HTAB_EXTRA_ELEM_FREE,
+	HTAB_EXTRA_ELEM_USED
+};
+
 /* each htab element is struct htab_elem + key + value */
 struct htab_elem {
 	union {
@@ -38,7 +45,10 @@ struct htab_elem {
 		struct bpf_htab *htab;
 		struct pcpu_freelist_node fnode;
 	};
-	struct rcu_head rcu;
+	union {
+		struct rcu_head rcu;
+		enum extra_elem_state state;
+	};
 	u32 hash;
 	char key[0] __aligned(8);
 };
@@ -113,6 +123,23 @@ free_elems:
 	return err;
 }
 
+static int alloc_extra_elems(struct bpf_htab *htab)
+{
+	void __percpu *pptr;
+	int cpu;
+
+	pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
+	if (!pptr)
+		return -ENOMEM;
+
+	for_each_possible_cpu(cpu) {
+		((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
+			HTAB_EXTRA_ELEM_FREE;
+	}
+	htab->extra_elems = pptr;
+	return 0;
+}
+
 /* Called from syscall */
 static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 {
@@ -185,6 +212,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (percpu)
 		cost += (u64) round_up(htab->map.value_size, 8) *
 			num_possible_cpus() * htab->map.max_entries;
+	else
+		cost += (u64) htab->elem_size * num_possible_cpus();
 
 	if (cost >= U32_MAX - PAGE_SIZE)
 		/* make sure page count doesn't overflow */
@@ -212,14 +241,22 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		raw_spin_lock_init(&htab->buckets[i].lock);
 	}
 
+	if (!percpu) {
+		err = alloc_extra_elems(htab);
+		if (err)
+			goto free_buckets;
+	}
+
 	if (!(attr->map_flags & BPF_F_NO_PREALLOC)) {
 		err = prealloc_elems_and_freelist(htab);
 		if (err)
-			goto free_buckets;
+			goto free_extra_elems;
 	}
 
 	return &htab->map;
 
+free_extra_elems:
+	free_percpu(htab->extra_elems);
 free_buckets:
 	kvfree(htab->buckets);
 free_htab:
@@ -349,7 +386,6 @@ static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
 	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
 		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
 	kfree(l);
-
 }
 
 static void htab_elem_free_rcu(struct rcu_head *head)
@@ -370,6 +406,11 @@ static void htab_elem_free_rcu(struct rcu_head *head)
 
 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 {
+	if (l->state == HTAB_EXTRA_ELEM_USED) {
+		l->state = HTAB_EXTRA_ELEM_FREE;
+		return;
+	}
+
 	if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
 		pcpu_freelist_push(&htab->freelist, &l->fnode);
 	} else {
@@ -381,25 +422,44 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 
 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 					 void *value, u32 key_size, u32 hash,
-					 bool percpu, bool onallcpus)
+					 bool percpu, bool onallcpus,
+					 bool old_elem_exists)
 {
 	u32 size = htab->map.value_size;
 	bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
 	struct htab_elem *l_new;
 	void __percpu *pptr;
+	int err = 0;
 
 	if (prealloc) {
 		l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
 		if (!l_new)
-			return ERR_PTR(-E2BIG);
+			err = -E2BIG;
 	} else {
 		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
 			atomic_dec(&htab->count);
-			return ERR_PTR(-E2BIG);
+			err = -E2BIG;
+		} else {
+			l_new = kmalloc(htab->elem_size,
+					GFP_ATOMIC | __GFP_NOWARN);
+			if (!l_new)
+				return ERR_PTR(-ENOMEM);
 		}
-		l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
-		if (!l_new)
-			return ERR_PTR(-ENOMEM);
+	}
+
+	if (err) {
+		if (!old_elem_exists)
+			return ERR_PTR(err);
+
+		/* if we're updating the existing element and the hash table
+		 * is full, use per-cpu extra elems
+		 */
+		l_new = this_cpu_ptr(htab->extra_elems);
+		if (l_new->state != HTAB_EXTRA_ELEM_FREE)
+			return ERR_PTR(-E2BIG);
+		l_new->state = HTAB_EXTRA_ELEM_USED;
+	} else {
+		l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
 	}
 
 	memcpy(l_new->key, key, key_size);
@@ -489,7 +549,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	if (ret)
 		goto err;
 
-	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false);
+	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
+				!!l_old);
 	if (IS_ERR(l_new)) {
 		/* all pre-allocated elements are in use or memory exhausted */
 		ret = PTR_ERR(l_new);
@@ -563,7 +624,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 		}
 	} else {
 		l_new = alloc_htab_elem(htab, key, value, key_size,
-					hash, true, onallcpus);
+					hash, true, onallcpus, false);
 		if (IS_ERR(l_new)) {
 			ret = PTR_ERR(l_new);
 			goto err;
@@ -652,6 +713,7 @@ static void htab_map_free(struct bpf_map *map)
 		htab_free_elems(htab);
 		pcpu_freelist_destroy(&htab->freelist);
 	}
+	free_percpu(htab->extra_elems);
 	kvfree(htab->buckets);
 	kfree(htab);
 }
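The per-cpu extra_elems above exist so that overwriting an existing key still succeeds when a non-preallocated hash map is already at max_entries. A single-threaded user-space analogue of the allocation decision (illustrative identifiers throughout; the real code is per-cpu and runs under a bucket lock):

#include <stdio.h>

enum spare_state { SPARE_FREE, SPARE_USED };

struct elem { int key; enum spare_state state; };

#define CAP 2
static struct elem slots[CAP];
static int used;
static struct elem spare = { .state = SPARE_FREE };

/* mirrors alloc_htab_elem(): a full table is only fatal for new keys */
static struct elem *alloc_elem(int key, int old_exists)
{
	struct elem *e = NULL;

	if (used < CAP) {
		e = &slots[used++];
	} else if (old_exists && spare.state == SPARE_FREE) {
		spare.state = SPARE_USED;	/* replace-in-place path */
		e = &spare;
	}
	if (e)
		e->key = key;
	return e;		/* NULL stands in for ERR_PTR(-E2BIG) */
}

int main(void)
{
	alloc_elem(1, 0);
	alloc_elem(2, 0);	/* table now full */
	printf("insert new key: %s\n", alloc_elem(3, 0) ? "ok" : "E2BIG");
	printf("update old key: %s\n", alloc_elem(1, 1) ? "ok" : "E2BIG");
	return 0;
}

free_htab_elem() returning the spare to the FREE state, rather than kfree()ing it, is what lets the next full-table update reuse it.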
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f72f23b8fdab..daea765d72e6 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -194,6 +194,7 @@ struct verifier_env {
 	struct verifier_state_list **explored_states; /* search pruning optimization */
 	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
 	u32 used_map_cnt;		/* number of used maps */
+	u32 id_gen;			/* used to generate unique reg IDs */
 	bool allow_ptr_leaks;
 };
 
@@ -1052,7 +1053,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
 			goto error;
 		break;
 	case BPF_MAP_TYPE_CGROUP_ARRAY:
-		if (func_id != BPF_FUNC_skb_in_cgroup)
+		if (func_id != BPF_FUNC_skb_under_cgroup)
 			goto error;
 		break;
 	default:
@@ -1074,7 +1075,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
 		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
 			goto error;
 		break;
-	case BPF_FUNC_skb_in_cgroup:
+	case BPF_FUNC_skb_under_cgroup:
 		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
 			goto error;
 		break;
@@ -1301,7 +1302,7 @@ add_imm:
 		/* dst_reg stays as pkt_ptr type and since some positive
 		 * integer value was added to the pointer, increment its 'id'
 		 */
-		dst_reg->id++;
+		dst_reg->id = ++env->id_gen;
 
 		/* something was added to pkt_ptr, set range and off to zero */
 		dst_reg->off = 0;
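The new id_gen counter matters because dst_reg->id++ mutates whichever copy of the register state happens to be at hand: two packet pointers derived independently from the same base could end up sharing an id, letting a bounds check proven for one wrongly cover the other. A toy illustration (plain C with invented names, not verifier code):

#include <stdio.h>

struct reg { int id; };

int main(void)
{
	struct reg base = { .id = 1 };
	struct reg a = base, b = base;	/* two derived pointers */
	int id_gen = 1;			/* per-verifier counter */

	a.id++;
	b.id++;				/* old scheme: both become 2 */
	printf("buggy: a=%d b=%d\n", a.id, b.id);

	a.id = ++id_gen;
	b.id = ++id_gen;		/* fixed: 2 and 3, always distinct */
	printf("fixed: a=%d b=%d\n", a.id, b.id);
	return 0;
}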
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 5d845ffd7982..5ba520b544d7 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -30,7 +30,7 @@
 
 #define HASH_DEFAULT_SIZE	64UL
 #define HASH_MIN_SIZE		4U
-#define BUCKET_LOCKS_PER_CPU	128UL
+#define BUCKET_LOCKS_PER_CPU	32UL
 
 static u32 head_hashfn(struct rhashtable *ht,
 		       const struct bucket_table *tbl,
@@ -70,7 +70,7 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
 	unsigned int nr_pcpus = num_possible_cpus();
 #endif
 
-	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
+	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
 	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
 
 	/* Never allocate more than 0.5 locks per bucket */
@@ -83,6 +83,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
 		tbl->locks = vmalloc(size * sizeof(spinlock_t));
 	else
 #endif
+	if (gfp != GFP_KERNEL)
+		gfp |= __GFP_NOWARN | __GFP_NORETRY;
+
 	tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
 				   gfp);
 	if (!tbl->locks)
@@ -321,12 +324,14 @@ static int rhashtable_expand(struct rhashtable *ht)
 static int rhashtable_shrink(struct rhashtable *ht)
 {
 	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
-	unsigned int size;
+	unsigned int nelems = atomic_read(&ht->nelems);
+	unsigned int size = 0;
 	int err;
 
 	ASSERT_RHT_MUTEX(ht);
 
-	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
+	if (nelems)
+		size = roundup_pow_of_two(nelems * 3 / 2);
 	if (size < ht->p.min_size)
 		size = ht->p.min_size;
 
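The shrink fix guards the sizing computation because roundup_pow_of_two() has no defined result for 0, which an empty table would otherwise pass in. A user-space sketch of the corrected computation (the helper below is a naive stand-in for the kernel macro; all names are illustrative):

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;	/* the kernel macro must never see n == 0 */
}

static unsigned int shrink_size(unsigned int nelems, unsigned int min_size)
{
	unsigned int size = 0;

	if (nelems)	/* the guard added by the fix */
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < min_size)
		size = min_size;
	return size;
}

int main(void)
{
	/* prints "4 8 256": empty tables clamp straight to min_size */
	printf("%u %u %u\n", shrink_size(0, 4), shrink_size(5, 4),
	       shrink_size(100, 4));
	return 0;
}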
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 297fdb5e74bd..64e899b63337 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -38,7 +38,7 @@ MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
38 38
39static int max_size = 0; 39static int max_size = 0;
40module_param(max_size, int, 0); 40module_param(max_size, int, 0);
41MODULE_PARM_DESC(runs, "Maximum table size (default: calculated)"); 41MODULE_PARM_DESC(max_size, "Maximum table size (default: calculated)");
42 42
43static bool shrinking = false; 43static bool shrinking = false;
44module_param(shrinking, bool, 0); 44module_param(shrinking, bool, 0);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 82a116ba590e..8de138d3306b 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -169,7 +169,7 @@ int register_vlan_dev(struct net_device *dev)
169 if (err < 0) 169 if (err < 0)
170 goto out_uninit_mvrp; 170 goto out_uninit_mvrp;
171 171
172 vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1; 172 vlan->nest_level = dev_get_nest_level(real_dev) + 1;
173 err = register_netdevice(dev); 173 err = register_netdevice(dev);
174 if (err < 0) 174 if (err < 0)
175 goto out_uninit_mvrp; 175 goto out_uninit_mvrp;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c18080ad4085..cd620fab41b0 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -267,7 +267,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
267 267
268 /* If old entry was unassociated with any port, then delete it. */ 268 /* If old entry was unassociated with any port, then delete it. */
269 f = __br_fdb_get(br, br->dev->dev_addr, 0); 269 f = __br_fdb_get(br, br->dev->dev_addr, 0);
270 if (f && f->is_local && !f->dst) 270 if (f && f->is_local && !f->dst && !f->added_by_user)
271 fdb_delete_local(br, NULL, f); 271 fdb_delete_local(br, NULL, f);
272 272
273 fdb_insert(br, NULL, newaddr, 0); 273 fdb_insert(br, NULL, newaddr, 0);
@@ -282,7 +282,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
282 if (!br_vlan_should_use(v)) 282 if (!br_vlan_should_use(v))
283 continue; 283 continue;
284 f = __br_fdb_get(br, br->dev->dev_addr, v->vid); 284 f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
285 if (f && f->is_local && !f->dst) 285 if (f && f->is_local && !f->dst && !f->added_by_user)
286 fdb_delete_local(br, NULL, f); 286 fdb_delete_local(br, NULL, f);
287 fdb_insert(br, NULL, newaddr, v->vid); 287 fdb_insert(br, NULL, newaddr, v->vid);
288 } 288 }
@@ -764,20 +764,25 @@ out:
764} 764}
765 765
766/* Update (create or replace) forwarding database entry */ 766/* Update (create or replace) forwarding database entry */
767static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, 767static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
768 __u16 state, __u16 flags, __u16 vid) 768 const __u8 *addr, __u16 state, __u16 flags, __u16 vid)
769{ 769{
770 struct net_bridge *br = source->br;
771 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; 770 struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
772 struct net_bridge_fdb_entry *fdb; 771 struct net_bridge_fdb_entry *fdb;
773 bool modified = false; 772 bool modified = false;
774 773
775 /* If the port cannot learn allow only local and static entries */ 774 /* If the port cannot learn allow only local and static entries */
776 if (!(state & NUD_PERMANENT) && !(state & NUD_NOARP) && 775 if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
777 !(source->state == BR_STATE_LEARNING || 776 !(source->state == BR_STATE_LEARNING ||
778 source->state == BR_STATE_FORWARDING)) 777 source->state == BR_STATE_FORWARDING))
779 return -EPERM; 778 return -EPERM;
780 779
780 if (!source && !(state & NUD_PERMANENT)) {
781 pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
782 br->dev->name);
783 return -EINVAL;
784 }
785
781 fdb = fdb_find(head, addr, vid); 786 fdb = fdb_find(head, addr, vid);
782 if (fdb == NULL) { 787 if (fdb == NULL) {
783 if (!(flags & NLM_F_CREATE)) 788 if (!(flags & NLM_F_CREATE))
@@ -832,22 +837,28 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
832 return 0; 837 return 0;
833} 838}
834 839
835static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p, 840static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
836 const unsigned char *addr, u16 nlh_flags, u16 vid) 841 struct net_bridge_port *p, const unsigned char *addr,
842 u16 nlh_flags, u16 vid)
837{ 843{
838 int err = 0; 844 int err = 0;
839 845
840 if (ndm->ndm_flags & NTF_USE) { 846 if (ndm->ndm_flags & NTF_USE) {
847 if (!p) {
848 pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
849 br->dev->name);
850 return -EINVAL;
851 }
841 local_bh_disable(); 852 local_bh_disable();
842 rcu_read_lock(); 853 rcu_read_lock();
843 br_fdb_update(p->br, p, addr, vid, true); 854 br_fdb_update(br, p, addr, vid, true);
844 rcu_read_unlock(); 855 rcu_read_unlock();
845 local_bh_enable(); 856 local_bh_enable();
846 } else { 857 } else {
847 spin_lock_bh(&p->br->hash_lock); 858 spin_lock_bh(&br->hash_lock);
848 err = fdb_add_entry(p, addr, ndm->ndm_state, 859 err = fdb_add_entry(br, p, addr, ndm->ndm_state,
849 nlh_flags, vid); 860 nlh_flags, vid);
850 spin_unlock_bh(&p->br->hash_lock); 861 spin_unlock_bh(&br->hash_lock);
851 } 862 }
852 863
853 return err; 864 return err;
@@ -884,6 +895,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
884 dev->name); 895 dev->name);
885 return -EINVAL; 896 return -EINVAL;
886 } 897 }
898 br = p->br;
887 vg = nbp_vlan_group(p); 899 vg = nbp_vlan_group(p);
888 } 900 }
889 901
@@ -895,15 +907,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
895 } 907 }
896 908
897 /* VID was specified, so use it. */ 909 /* VID was specified, so use it. */
898 if (dev->priv_flags & IFF_EBRIDGE) 910 err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid);
899 err = br_fdb_insert(br, NULL, addr, vid);
900 else
901 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
902 } else { 911 } else {
903 if (dev->priv_flags & IFF_EBRIDGE) 912 err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0);
904 err = br_fdb_insert(br, NULL, addr, 0);
905 else
906 err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
907 if (err || !vg || !vg->num_vlans) 913 if (err || !vg || !vg->num_vlans)
908 goto out; 914 goto out;
909 915
@@ -914,11 +920,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
914 list_for_each_entry(v, &vg->vlan_list, vlist) { 920 list_for_each_entry(v, &vg->vlan_list, vlist) {
915 if (!br_vlan_should_use(v)) 921 if (!br_vlan_should_use(v))
916 continue; 922 continue;
917 if (dev->priv_flags & IFF_EBRIDGE) 923 err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid);
918 err = br_fdb_insert(br, NULL, addr, v->vid);
919 else
920 err = __br_fdb_add(ndm, p, addr, nlh_flags,
921 v->vid);
922 if (err) 924 if (err)
923 goto out; 925 goto out;
924 } 926 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 4ce07dc25573..dd6ce598de89 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6045,8 +6045,7 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
 
-int dev_get_nest_level(struct net_device *dev,
-		       bool (*type_check)(const struct net_device *dev))
+int dev_get_nest_level(struct net_device *dev)
 {
 	struct net_device *lower = NULL;
 	struct list_head *iter;
@@ -6056,15 +6055,12 @@ int dev_get_nest_level(struct net_device *dev,
 	ASSERT_RTNL();
 
 	netdev_for_each_lower_dev(dev, lower, iter) {
-		nest = dev_get_nest_level(lower, type_check);
+		nest = dev_get_nest_level(lower);
 		if (max_nest < nest)
 			max_nest = nest;
 	}
 
-	if (type_check(dev))
-		max_nest++;
-
-	return max_nest;
+	return max_nest + 1;
 }
 EXPORT_SYMBOL(dev_get_nest_level);
 
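With the type_check callback dropped, dev_get_nest_level() reduces to pure recursion: one plus the deepest nest level among lower devices, whatever their type. A user-space rendering of that logic (the struct below is illustrative, not struct net_device):

#include <stdio.h>

struct dev {
	int n_lower;
	struct dev **lower;	/* lower devices in the stacking tree */
};

static int nest_level(const struct dev *d)
{
	int max_nest = 0;

	for (int i = 0; i < d->n_lower; i++) {
		int nest = nest_level(d->lower[i]);

		if (nest > max_nest)
			max_nest = nest;
	}
	return max_nest + 1;	/* every device counts, no type check */
}

int main(void)
{
	struct dev eth = { 0, NULL };
	struct dev *l1[] = { &eth };
	struct dev vlan = { 1, l1 };		/* vlan on top of eth */
	struct dev *l2[] = { &vlan };
	struct dev stacked = { 1, l2 };		/* e.g. macsec on vlan */

	/* prints "1 2 3" */
	printf("%d %d %d\n", nest_level(&eth), nest_level(&vlan),
	       nest_level(&stacked));
	return 0;
}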
diff --git a/net/core/filter.c b/net/core/filter.c
index 5708999f8a79..cb06aceb512a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1355,56 +1355,47 @@ static inline int bpf_try_make_writable(struct sk_buff *skb,
 {
 	int err;
 
-	if (!skb_cloned(skb))
-		return 0;
-	if (skb_clone_writable(skb, write_len))
-		return 0;
-	err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-	if (!err)
-		bpf_compute_data_end(skb);
+	err = skb_ensure_writable(skb, write_len);
+	bpf_compute_data_end(skb);
+
 	return err;
 }
 
+static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
+{
+	if (skb_at_tc_ingress(skb))
+		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+}
+
+static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
+{
+	if (skb_at_tc_ingress(skb))
+		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+}
+
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
-	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	int offset = (int) r2;
+	unsigned int offset = (unsigned int) r2;
 	void *from = (void *) (long) r3;
 	unsigned int len = (unsigned int) r4;
 	void *ptr;
 
 	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
 		return -EINVAL;
-
-	/* bpf verifier guarantees that:
-	 * 'from' pointer points to bpf program stack
-	 * 'len' bytes of it were initialized
-	 * 'len' > 0
-	 * 'skb' is a valid pointer to 'struct sk_buff'
-	 *
-	 * so check for invalid 'offset' and too large 'len'
-	 */
-	if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
+	if (unlikely(offset > 0xffff))
 		return -EFAULT;
 	if (unlikely(bpf_try_make_writable(skb, offset + len)))
 		return -EFAULT;
 
-	ptr = skb_header_pointer(skb, offset, len, sp->buff);
-	if (unlikely(!ptr))
-		return -EFAULT;
-
+	ptr = skb->data + offset;
 	if (flags & BPF_F_RECOMPUTE_CSUM)
-		skb_postpull_rcsum(skb, ptr, len);
+		__skb_postpull_rcsum(skb, ptr, len, offset);
 
 	memcpy(ptr, from, len);
 
-	if (ptr == sp->buff)
-		/* skb_store_bits cannot return -EFAULT here */
-		skb_store_bits(skb, offset, ptr, len);
-
 	if (flags & BPF_F_RECOMPUTE_CSUM)
-		skb_postpush_rcsum(skb, ptr, len);
+		__skb_postpush_rcsum(skb, ptr, len, offset);
 	if (flags & BPF_F_INVALIDATE_HASH)
 		skb_clear_hash(skb);
 
@@ -1425,12 +1416,12 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
 static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
 	const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
-	int offset = (int) r2;
+	unsigned int offset = (unsigned int) r2;
 	void *to = (void *)(unsigned long) r3;
 	unsigned int len = (unsigned int) r4;
 	void *ptr;
 
-	if (unlikely((u32) offset > 0xffff))
+	if (unlikely(offset > 0xffff))
 		goto err_clear;
 
 	ptr = skb_header_pointer(skb, offset, len, to);
@@ -1458,20 +1449,17 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
 static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	int offset = (int) r2;
-	__sum16 sum, *ptr;
+	unsigned int offset = (unsigned int) r2;
+	__sum16 *ptr;
 
 	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
 		return -EINVAL;
-	if (unlikely((u32) offset > 0xffff))
+	if (unlikely(offset > 0xffff || offset & 1))
 		return -EFAULT;
-	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
-		return -EFAULT;
-
-	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
-	if (unlikely(!ptr))
+	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
 		return -EFAULT;
 
+	ptr = (__sum16 *)(skb->data + offset);
 	switch (flags & BPF_F_HDR_FIELD_MASK) {
 	case 0:
 		if (unlikely(from != 0))
@@ -1489,10 +1477,6 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 		return -EINVAL;
 	}
 
-	if (ptr == &sum)
-		/* skb_store_bits guaranteed to not return -EFAULT here */
-		skb_store_bits(skb, offset, ptr, sizeof(sum));
-
 	return 0;
 }
 
@@ -1512,20 +1496,18 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
 	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
-	int offset = (int) r2;
-	__sum16 sum, *ptr;
+	unsigned int offset = (unsigned int) r2;
+	__sum16 *ptr;
 
 	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
 			       BPF_F_HDR_FIELD_MASK)))
 		return -EINVAL;
-	if (unlikely((u32) offset > 0xffff))
+	if (unlikely(offset > 0xffff || offset & 1))
 		return -EFAULT;
-	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
+	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
 		return -EFAULT;
 
-	ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
-	if (unlikely(!ptr))
-		return -EFAULT;
+	ptr = (__sum16 *)(skb->data + offset);
 	if (is_mmzero && !*ptr)
 		return 0;
 
@@ -1548,10 +1530,6 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 
 	if (is_mmzero && !*ptr)
 		*ptr = CSUM_MANGLED_0;
-	if (ptr == &sum)
-		/* skb_store_bits guaranteed to not return -EFAULT here */
-		skb_store_bits(skb, offset, ptr, sizeof(sum));
-
 	return 0;
 }
 
@@ -1607,9 +1585,6 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
 
 static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
 {
-	if (skb_at_tc_ingress(skb))
-		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
-
 	return dev_forward_skb(dev, skb);
 }
 
@@ -1648,6 +1623,8 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 	if (unlikely(!skb))
 		return -ENOMEM;
 
+	bpf_push_mac_rcsum(skb);
+
 	return flags & BPF_F_INGRESS ?
 	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
@@ -1693,6 +1670,8 @@ int skb_do_redirect(struct sk_buff *skb)
 		return -EINVAL;
 	}
 
+	bpf_push_mac_rcsum(skb);
+
 	return ri->flags & BPF_F_INGRESS ?
 	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
@@ -1756,7 +1735,10 @@ static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
 		     vlan_proto != htons(ETH_P_8021AD)))
 		vlan_proto = htons(ETH_P_8021Q);
 
+	bpf_push_mac_rcsum(skb);
 	ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
+	bpf_pull_mac_rcsum(skb);
+
 	bpf_compute_data_end(skb);
 	return ret;
 }
@@ -1776,7 +1758,10 @@ static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	int ret;
 
+	bpf_push_mac_rcsum(skb);
 	ret = skb_vlan_pop(skb);
+	bpf_pull_mac_rcsum(skb);
+
 	bpf_compute_data_end(skb);
 	return ret;
 }
@@ -2298,7 +2283,7 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
 }
 
 #ifdef CONFIG_SOCK_CGROUP_DATA
-static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
 	struct sk_buff *skb = (struct sk_buff *)(long)r1;
 	struct bpf_map *map = (struct bpf_map *)(long)r2;
@@ -2321,8 +2306,8 @@ static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp);
 }
 
-static const struct bpf_func_proto bpf_skb_in_cgroup_proto = {
-	.func		= bpf_skb_in_cgroup,
+static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
+	.func		= bpf_skb_under_cgroup,
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
@@ -2402,8 +2387,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_get_smp_processor_id:
 		return &bpf_get_smp_processor_id_proto;
 #ifdef CONFIG_SOCK_CGROUP_DATA
-	case BPF_FUNC_skb_in_cgroup:
-		return &bpf_skb_in_cgroup_proto;
+	case BPF_FUNC_skb_under_cgroup:
+		return &bpf_skb_under_cgroup_proto;
 #endif
 	default:
 		return sk_filter_func_proto(func_id);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index d07fc076bea0..febca0f1008c 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2452,9 +2452,7 @@ struct fib_route_iter {
 static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
 					    loff_t pos)
 {
-	struct fib_table *tb = iter->main_tb;
 	struct key_vector *l, **tp = &iter->tnode;
-	struct trie *t;
 	t_key key;
 
 	/* use cache location of next-to-find key */
@@ -2462,8 +2460,6 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
 		pos -= iter->pos;
 		key = iter->key;
 	} else {
-		t = (struct trie *)tb->tb_data;
-		iter->tnode = t->kv;
 		iter->pos = 0;
 		key = 0;
 	}
@@ -2504,12 +2500,12 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
 		return NULL;
 
 	iter->main_tb = tb;
+	t = (struct trie *)tb->tb_data;
+	iter->tnode = t->kv;
 
 	if (*pos != 0)
 		return fib_route_get_idx(iter, *pos);
 
-	t = (struct trie *)tb->tb_data;
-	iter->tnode = t->kv;
 	iter->pos = 0;
 	iter->key = 0;
 
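The fib_trie change hoists the iterator-cursor initialisation into fib_route_seq_start() so it runs before the resume-from-cached-position path; previously a resumed /proc/net/route read could walk from a stale node pointer. A much-compressed user-space analogue (an array stands in for the trie; every name below is invented for illustration):

#include <stdio.h>
#include <stddef.h>

struct iter {
	const int *base;	/* backing store (the trie root upstream) */
	size_t pos;		/* cached resume position */
};

static const int *iter_start(struct iter *it, const int *arr, size_t n,
			     size_t pos)
{
	it->base = arr;		/* always refresh the cursor first ... */
	if (pos) {		/* ... before any resume-from-cache path */
		it->pos = pos < n ? pos : n;
		return it->pos < n ? &it->base[it->pos] : NULL;
	}
	it->pos = 0;
	return n ? &it->base[0] : NULL;
}

int main(void)
{
	struct iter it;
	int routes[] = { 10, 20, 30 };
	const int *p = iter_start(&it, routes, 3, 2);	/* resume at 2 */

	printf("%d\n", p ? *p : -1);	/* 30: resumed from a fresh base */
	return 0;
}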
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5b1481be0282..113cc43df789 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -370,7 +370,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
370 tunnel->parms.o_flags, proto, tunnel->parms.o_key, 370 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
371 htonl(tunnel->o_seqno)); 371 htonl(tunnel->o_seqno));
372 372
373 skb_set_inner_protocol(skb, proto);
374 ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); 373 ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
375} 374}
376 375
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index a917903d5e97..cc701fa70b12 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -557,6 +557,33 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
 	.get_link_net	= ip_tunnel_get_link_net,
 };
 
+static bool is_vti_tunnel(const struct net_device *dev)
+{
+	return dev->netdev_ops == &vti_netdev_ops;
+}
+
+static int vti_device_event(struct notifier_block *unused,
+			    unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct ip_tunnel *tunnel = netdev_priv(dev);
+
+	if (!is_vti_tunnel(dev))
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_DOWN:
+		if (!net_eq(tunnel->net, dev_net(dev)))
+			xfrm_garbage_collect(tunnel->net);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block vti_notifier_block __read_mostly = {
+	.notifier_call = vti_device_event,
+};
+
 static int __init vti_init(void)
 {
 	const char *msg;
@@ -564,6 +591,8 @@ static int __init vti_init(void)
 
 	pr_info("IPv4 over IPsec tunneling driver\n");
 
+	register_netdevice_notifier(&vti_notifier_block);
+
 	msg = "tunnel device";
 	err = register_pernet_device(&vti_net_ops);
 	if (err < 0)
@@ -596,6 +625,7 @@ xfrm_proto_ah_failed:
 xfrm_proto_esp_failed:
 	unregister_pernet_device(&vti_net_ops);
 pernet_dev_failed:
+	unregister_netdevice_notifier(&vti_notifier_block);
 	pr_err("vti init: failed to register %s\n", msg);
 	return err;
 }
@@ -607,6 +637,7 @@ static void __exit vti_fini(void)
 	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
 	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
 	unregister_pernet_device(&vti_net_ops);
+	unregister_netdevice_notifier(&vti_notifier_block);
 }
 
 module_init(vti_init);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ab3e796596b1..df8425fcbc2c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3543,7 +3543,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 	/* combine the user config with event to determine if permanent
 	 * addresses are to be removed from address hash table
 	 */
-	keep_addr = !(how || _keep_addr <= 0);
+	keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6);
 
 	/* Step 2: clear hash table */
 	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
@@ -3599,7 +3599,7 @@ restart:
 	/* re-combine the user config with event to determine if permanent
 	 * addresses are to be removed from the interface list
 	 */
-	keep_addr = (!how && _keep_addr > 0);
+	keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6);
 
 	INIT_LIST_HEAD(&del_list);
 	list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index c53b92c617c5..37ac9de713c6 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -952,8 +952,10 @@ calipso_opt_insert(struct ipv6_opt_hdr *hop,
 	memcpy(new, hop, start);
 	ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def,
 				 secattr);
-	if (ret_val < 0)
+	if (ret_val < 0) {
+		kfree(new);
 		return ERR_PTR(ret_val);
+	}
 
 	buf_len = start + ret_val;
 	/* At this point buf_len aligns to 4n, so (buf_len & 4) pads to 8n */
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 776d145113e1..704274cbd495 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -519,8 +519,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
519 gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, 519 gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
520 protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno)); 520 protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
521 521
522 skb_set_inner_protocol(skb, protocol);
523
524 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, 522 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
525 NEXTHDR_GRE); 523 NEXTHDR_GRE);
526} 524}
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index fed40d1ec29b..0900352c924c 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -55,7 +55,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	struct icmp6hdr user_icmph;
 	int addr_type;
 	struct in6_addr *daddr;
-	int iif = 0;
+	int oif = 0;
 	struct flowi6 fl6;
 	int err;
 	struct dst_entry *dst;
@@ -78,25 +78,30 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		if (u->sin6_family != AF_INET6) {
 			return -EAFNOSUPPORT;
 		}
-		if (sk->sk_bound_dev_if &&
-		    sk->sk_bound_dev_if != u->sin6_scope_id) {
-			return -EINVAL;
-		}
 		daddr = &(u->sin6_addr);
-		iif = u->sin6_scope_id;
+		if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr)))
+			oif = u->sin6_scope_id;
 	} else {
 		if (sk->sk_state != TCP_ESTABLISHED)
 			return -EDESTADDRREQ;
 		daddr = &sk->sk_v6_daddr;
 	}
 
-	if (!iif)
-		iif = sk->sk_bound_dev_if;
+	if (!oif)
+		oif = sk->sk_bound_dev_if;
+
+	if (!oif)
+		oif = np->sticky_pktinfo.ipi6_ifindex;
+
+	if (!oif && ipv6_addr_is_multicast(daddr))
+		oif = np->mcast_oif;
+	else if (!oif)
+		oif = np->ucast_oif;
 
 	addr_type = ipv6_addr_type(daddr);
-	if (__ipv6_addr_needs_scope_id(addr_type) && !iif)
-		return -EINVAL;
-	if (addr_type & IPV6_ADDR_MAPPED)
-		return -EINVAL;
+	if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) ||
+	    (addr_type & IPV6_ADDR_MAPPED) ||
+	    (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
+		return -EINVAL;
 
 	/* TODO: use ip6_datagram_send_ctl to get options from cmsg */
@@ -106,16 +111,12 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	fl6.flowi6_proto = IPPROTO_ICMPV6;
 	fl6.saddr = np->saddr;
 	fl6.daddr = *daddr;
+	fl6.flowi6_oif = oif;
 	fl6.flowi6_mark = sk->sk_mark;
 	fl6.fl6_icmp_type = user_icmph.icmp6_type;
 	fl6.fl6_icmp_code = user_icmph.icmp6_code;
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
-		fl6.flowi6_oif = np->mcast_oif;
-	else if (!fl6.flowi6_oif)
-		fl6.flowi6_oif = np->ucast_oif;
-
 	ipc6.tclass = np->tclass;
 	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
 
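Note: the ping.c hunks replace the single iif with an oif chosen by a strict precedence chain before the flow is built. A userspace sketch of that selection order; pick_oif() is a hypothetical helper, not the kernel function:

/* Hypothetical sketch of the oif precedence the patched
 * ping_v6_sendmsg() applies: explicit scope id first, then the socket
 * binding, then sticky pktinfo, then the multicast/unicast defaults. */
#include <stdio.h>
#include <stdbool.h>

static int pick_oif(int scope_id, int bound_dev_if, int sticky_ifindex,
		    bool dst_is_mcast, int mcast_oif, int ucast_oif)
{
	int oif = scope_id;		/* explicit sin6_scope_id wins */

	if (!oif)
		oif = bound_dev_if;	/* then SO_BINDTODEVICE */
	if (!oif)
		oif = sticky_ifindex;	/* then sticky IPV6_PKTINFO */
	if (!oif)
		oif = dst_is_mcast ? mcast_oif : ucast_oif;
	return oif;
}

int main(void)
{
	/* a scope id of 3 overrides every fallback */
	printf("%d\n", pick_oif(3, 2, 1, false, 0, 0));	/* -> 3 */
	/* nothing explicit, multicast destination -> mcast_oif */
	printf("%d\n", pick_oif(0, 0, 0, true, 7, 9));	/* -> 7 */
	return 0;
}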
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 4a7ae32afa09..1138eaf5c682 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -185,8 +185,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
 
 	self->magic = IAS_MAGIC;
 	self->mode = mode;
-	if (mode == IAS_CLIENT)
-		iriap_register_lsap(self, slsap_sel, mode);
+	if (mode == IAS_CLIENT) {
+		if (iriap_register_lsap(self, slsap_sel, mode)) {
+			kfree(self);
+			return NULL;
+		}
+	}
 
 	self->confirm = callback;
 	self->priv = priv;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 47e99ab8d97a..543b1d4fc33d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -869,7 +869,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
 
 	/* free all potentially still buffered bcast frames */
 	local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
-	skb_queue_purge(&sdata->u.ap.ps.bc_buf);
+	ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf);
 
 	mutex_lock(&local->mtx);
 	ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 184473c257eb..ba5fc1f01e53 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1094,7 +1094,7 @@ static inline u32 drv_get_expected_throughput(struct ieee80211_local *local,
 
 	trace_drv_get_expected_throughput(sta);
 	if (local->ops->get_expected_throughput)
-		ret = local->ops->get_expected_throughput(sta);
+		ret = local->ops->get_expected_throughput(&local->hw, sta);
 	trace_drv_return_u32(local, ret);
 
 	return ret;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index c66411df9863..42120d965263 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -881,20 +881,22 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 
 	netif_carrier_off(sdata->dev);
 
+	/* flush STAs and mpaths on this iface */
+	sta_info_flush(sdata);
+	mesh_path_flush_by_iface(sdata);
+
 	/* stop the beacon */
 	ifmsh->mesh_id_len = 0;
 	sdata->vif.bss_conf.enable_beacon = false;
 	clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
+
+	/* remove beacon */
 	bcn = rcu_dereference_protected(ifmsh->beacon,
 					lockdep_is_held(&sdata->wdev.mtx));
 	RCU_INIT_POINTER(ifmsh->beacon, NULL);
 	kfree_rcu(bcn, rcu_head);
 
-	/* flush STAs and mpaths on this iface */
-	sta_info_flush(sdata);
-	mesh_path_flush_by_iface(sdata);
-
 	/* free all potentially still buffered group-addressed frames */
 	local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf);
 	skb_queue_purge(&ifmsh->ps.bc_buf);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 2e8a9024625a..9dce3b157908 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1268,7 +1268,7 @@ static void sta_ps_start(struct sta_info *sta)
 	for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
 		struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
 
-		if (!txqi->tin.backlog_packets)
+		if (txqi->tin.backlog_packets)
 			set_bit(tid, &sta->txq_buffered_tids);
 		else
 			clear_bit(tid, &sta->txq_buffered_tids);
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index c6d5c724e032..a2a68269675d 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -771,6 +771,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 		clear_sta_flag(sta, WLAN_STA_SP);
 
 		acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
+
+		/* mesh Peer Service Period support */
+		if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
+		    ieee80211_is_data_qos(fc))
+			ieee80211_mpsp_trigger_process(
+				ieee80211_get_qos_ctl(hdr), sta, true, acked);
+
 		if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
 			/*
 			 * The STA is in power save mode, so assume
@@ -781,13 +788,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 			return;
 		}
 
-		/* mesh Peer Service Period support */
-		if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
-		    ieee80211_is_data_qos(fc))
-			ieee80211_mpsp_trigger_process(
-				ieee80211_get_qos_ctl(hdr),
-				sta, true, acked);
-
 		if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
 		    (ieee80211_is_data(hdr->frame_control)) &&
 		    (rates_idx != -1))
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 91461c415525..502396694f47 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -368,7 +368,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
 		skb = skb_dequeue(&ps->bc_buf);
 		if (skb) {
 			purged++;
-			dev_kfree_skb(skb);
+			ieee80211_free_txskb(&local->hw, skb);
 		}
 		total += skb_queue_len(&ps->bc_buf);
 	}
@@ -451,7 +451,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
 	if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
 		ps_dbg(tx->sdata,
 		       "BC TX buffer full - dropping the oldest frame\n");
-		dev_kfree_skb(skb_dequeue(&ps->bc_buf));
+		ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
 	} else
 		tx->local->total_ps_buffered++;
 
@@ -4275,7 +4275,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
 		sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
 		if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
 			break;
-		dev_kfree_skb_any(skb);
+		ieee80211_free_txskb(hw, skb);
 	}
 
 	info = IEEE80211_SKB_CB(skb);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 9e3693128313..f8dbacf66795 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -574,7 +574,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
 	helper = rcu_dereference(nfct_help(expect->master)->helper);
 	if (helper) {
 		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
-		if (helper->expect_policy[expect->class].name)
+		if (helper->expect_policy[expect->class].name[0])
 			seq_printf(s, "/%s",
 				   helper->expect_policy[expect->class].name);
 	}
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index bb77a97961bf..5c0db5c64734 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -1473,7 +1473,8 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
1473 "timeout to %u seconds for", 1473 "timeout to %u seconds for",
1474 info->timeout); 1474 info->timeout);
1475 nf_ct_dump_tuple(&exp->tuple); 1475 nf_ct_dump_tuple(&exp->tuple);
1476 mod_timer(&exp->timeout, jiffies + info->timeout * HZ); 1476 mod_timer_pending(&exp->timeout,
1477 jiffies + info->timeout * HZ);
1477 } 1478 }
1478 spin_unlock_bh(&nf_conntrack_expect_lock); 1479 spin_unlock_bh(&nf_conntrack_expect_lock);
1479 } 1480 }
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 050bb3420a6b..fdfc71f416b7 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1894,6 +1894,8 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
 
 			if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
 				return -EINVAL;
+			if (otuple.dst.protonum != rtuple.dst.protonum)
+				return -EINVAL;
 
 			ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
 							&rtuple, u3);
@@ -2362,12 +2364,8 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
 		return PTR_ERR(exp);
 
 	err = nf_ct_expect_related_report(exp, portid, report);
-	if (err < 0) {
-		nf_ct_expect_put(exp);
-		return err;
-	}
-
-	return 0;
+	nf_ct_expect_put(exp);
+	return err;
 }
 
 static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 8d9db9d4702b..7d77217de6a3 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1383,7 +1383,7 @@ static int process_sip_response(struct sk_buff *skb, unsigned int protoff,
 		return NF_DROP;
 	}
 	cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
-	if (!cseq) {
+	if (!cseq && *(*dptr + matchoff) != '0') {
 		nf_ct_helper_log(skb, ct, "cannot get cseq");
 		return NF_DROP;
 	}
@@ -1446,7 +1446,7 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
 		return NF_DROP;
 	}
 	cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
-	if (!cseq) {
+	if (!cseq && *(*dptr + matchoff) != '0') {
 		nf_ct_helper_log(skb, ct, "cannot get cseq");
 		return NF_DROP;
 	}
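Note: the SIP helper hunks stop rejecting a CSeq field that is a literal "0". simple_strtoul() returns 0 both for garbage and for a genuine zero, so the fix peeks at the first character. A userspace analogue with strtoul(); parse_cseq() is an illustrative name, not a kernel symbol:

/* Illustrative only: strtoul() returns 0 both when parsing fails and
 * when the field really is "0"; the added first-character check
 * disambiguates the two cases. */
#include <stdio.h>
#include <stdlib.h>

static int parse_cseq(const char *p, unsigned long *out)
{
	unsigned long cseq = strtoul(p, NULL, 10);

	if (!cseq && *p != '0')
		return -1;	/* genuinely unparseable */
	*out = cseq;
	return 0;
}

int main(void)
{
	unsigned long v;

	printf("\"0\"   -> %d\n", parse_cseq("0", &v));	/* ok, v == 0 */
	printf("\"42\"  -> %d\n", parse_cseq("42", &v));	/* ok, v == 42 */
	printf("\"abc\" -> %d\n", parse_cseq("abc", &v));	/* rejected */
	return 0;
}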
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 5d36a0926b4a..f49f45081acb 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1145,10 +1145,8 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
 	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 	int err;
 
-	queue = instance_lookup(q, queue_num);
-	if (!queue)
-		queue = verdict_instance_lookup(q, queue_num,
-						NETLINK_CB(skb).portid);
+	queue = verdict_instance_lookup(q, queue_num,
+					NETLINK_CB(skb).portid);
 	if (IS_ERR(queue))
 		return PTR_ERR(queue);
 
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index ba7aed13e174..82c264e40278 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -59,6 +59,7 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
 			   const struct nlattr * const tb[])
 {
 	struct nft_exthdr *priv = nft_expr_priv(expr);
+	u32 offset, len;
 
 	if (tb[NFTA_EXTHDR_DREG] == NULL ||
 	    tb[NFTA_EXTHDR_TYPE] == NULL ||
@@ -66,9 +67,15 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
 	    tb[NFTA_EXTHDR_LEN] == NULL)
 		return -EINVAL;
 
+	offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
+	len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
+
+	if (offset > U8_MAX || len > U8_MAX)
+		return -ERANGE;
+
 	priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
-	priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
-	priv->len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
+	priv->offset = offset;
+	priv->len = len;
 	priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
 
 	return nft_validate_register_store(ctx, priv->dreg, NULL,
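Note: the nft_exthdr hunk range-checks the 32-bit netlink attributes before they are narrowed into u8 fields; without the U8_MAX check an offset such as 256 silently truncates. A standalone sketch of the truncation, with invented names (exthdr_priv, exthdr_init):

/* Illustrative only: storing a 32-bit attribute into a u8 field drops
 * the high bits, so range-check before narrowing, as the patch does. */
#include <stdint.h>
#include <stdio.h>

struct exthdr_priv {
	uint8_t offset;
	uint8_t len;
};

static int exthdr_init(struct exthdr_priv *priv, uint32_t offset, uint32_t len)
{
	if (offset > UINT8_MAX || len > UINT8_MAX)
		return -1;	/* the kernel patch returns -ERANGE here */
	priv->offset = (uint8_t)offset;
	priv->len = (uint8_t)len;
	return 0;
}

int main(void)
{
	struct exthdr_priv priv;

	/* without the check, 256 would have become offset 0 */
	printf("256 -> %d\n", exthdr_init(&priv, 256, 4));	/* rejected */
	printf("12  -> %d\n", exthdr_init(&priv, 12, 4));	/* accepted */
	return 0;
}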
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index 6473936d05c6..ffe9ae062d23 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -70,7 +70,6 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
 		} else if (d > 0)
 			parent = parent->rb_right;
 		else {
-found:
 			if (!nft_set_elem_active(&rbe->ext, genmask)) {
 				parent = parent->rb_left;
 				continue;
@@ -84,9 +83,12 @@ found:
 		}
 	}
 
-	if (set->flags & NFT_SET_INTERVAL && interval != NULL) {
-		rbe = interval;
-		goto found;
+	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
+	    nft_set_elem_active(&interval->ext, genmask) &&
+	    !nft_rbtree_interval_end(interval)) {
+		spin_unlock_bh(&nft_rbtree_lock);
+		*ext = &interval->ext;
+		return true;
 	}
 out:
 	spin_unlock_bh(&nft_rbtree_lock);
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index c644c78ed485..e054a748ff25 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -433,7 +433,6 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
 	struct nf_conntrack_l4proto *l4proto;
 	struct nf_conntrack_tuple tuple;
 	struct nf_conntrack_tuple_hash *h;
-	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct;
 	unsigned int dataoff;
 	u8 protonum;
@@ -458,13 +457,8 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
 
 	ct = nf_ct_tuplehash_to_ctrack(h);
 
-	ctinfo = ovs_ct_get_info(h);
-	if (ctinfo == IP_CT_NEW) {
-		/* This should not happen. */
-		WARN_ONCE(1, "ovs_ct_find_existing: new packet for %p\n", ct);
-	}
 	skb->nfct = &ct->ct_general;
-	skb->nfctinfo = ctinfo;
+	skb->nfctinfo = ovs_ct_get_info(h);
 	return ct;
 }
 
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 1a1fcec88695..5aaf3babfc3f 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -93,7 +93,14 @@ static struct vport *geneve_tnl_create(const struct vport_parms *parms)
 		return ERR_CAST(dev);
 	}
 
-	dev_change_flags(dev, dev->flags | IFF_UP);
+	err = dev_change_flags(dev, dev->flags | IFF_UP);
+	if (err < 0) {
+		rtnl_delete_link(dev);
+		rtnl_unlock();
+		ovs_vport_free(vport);
+		goto error;
+	}
+
 	rtnl_unlock();
 	return vport;
 error:
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 7f8897f33a67..0e72d95b0e8f 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -54,6 +54,7 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms)
 	struct net *net = ovs_dp_get_net(parms->dp);
 	struct net_device *dev;
 	struct vport *vport;
+	int err;
 
 	vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
 	if (IS_ERR(vport))
@@ -67,9 +68,15 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms)
 		return ERR_CAST(dev);
 	}
 
-	dev_change_flags(dev, dev->flags | IFF_UP);
-	rtnl_unlock();
+	err = dev_change_flags(dev, dev->flags | IFF_UP);
+	if (err < 0) {
+		rtnl_delete_link(dev);
+		rtnl_unlock();
+		ovs_vport_free(vport);
+		return ERR_PTR(err);
+	}
 
+	rtnl_unlock();
 	return vport;
 }
 
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 434e04c3a189..95c36147a6e1 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -140,7 +140,7 @@ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 
 static void internal_set_rx_headroom(struct net_device *dev, int new_hr)
 {
-	dev->needed_headroom = new_hr;
+	dev->needed_headroom = new_hr < 0 ? 0 : new_hr;
 }
 
 static const struct net_device_ops internal_dev_netdev_ops = {
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 5eb7694348b5..7eb955e453e6 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -130,7 +130,14 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
 		return ERR_CAST(dev);
 	}
 
-	dev_change_flags(dev, dev->flags | IFF_UP);
+	err = dev_change_flags(dev, dev->flags | IFF_UP);
+	if (err < 0) {
+		rtnl_delete_link(dev);
+		rtnl_unlock();
+		ovs_vport_free(vport);
+		goto error;
+	}
+
 	rtnl_unlock();
 	return vport;
 error:
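Note: the three vport hunks (geneve, gre, vxlan) all gain the same shape: check the return of dev_change_flags() and unwind everything created so far before propagating the error. A userspace sketch of that reverse-order unwind; all names below are invented stand-ins for the kernel calls noted in the comments:

/* Illustrative only: resources are acquired in order and released in
 * reverse on failure, the shape the vport fixes add around
 * dev_change_flags(). */
#include <stdio.h>

static int bring_up(int fail)	/* stands in for dev_change_flags() */
{
	return fail ? -1 : 0;
}

static int create_port(int fail)
{
	printf("alloc vport\n");	/* ovs_vport_alloc() */
	printf("create device\n");	/* rtnl_create_link() */

	if (bring_up(fail) < 0) {
		/* unwind in reverse order of acquisition */
		printf("delete device\n");	/* rtnl_delete_link() */
		printf("free vport\n");		/* ovs_vport_free() */
		return -1;
	}
	printf("port ready\n");
	return 0;
}

int main(void)
{
	create_port(1);	/* exercises the new error path */
	create_port(0);	/* normal path */
	return 0;
}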
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 1bb9e7ac9e14..ff83fb1ddd47 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -425,6 +425,7 @@ struct rxrpc_call {
 	spinlock_t		lock;
 	rwlock_t		state_lock;	/* lock for state transition */
 	atomic_t		usage;
+	atomic_t		skb_count;	/* Outstanding packets on this call */
 	atomic_t		sequence;	/* Tx data packet sequence counter */
 	u32			local_abort;	/* local abort code */
 	u32			remote_abort;	/* remote abort code */
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 0b2832141bd0..9bae21e66d65 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -130,6 +130,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
 	call->state = RXRPC_CALL_SERVER_ACCEPTING;
 	list_add_tail(&call->accept_link, &rx->acceptq);
 	rxrpc_get_call(call);
+	atomic_inc(&call->skb_count);
 	nsp = rxrpc_skb(notification);
 	nsp->call = call;
 
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index fc32aa5764a2..e60cf65c2232 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -460,6 +460,7 @@ static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
 	ASSERTCMP(sp->call, ==, NULL);
 	sp->call = call;
 	rxrpc_get_call(call);
+	atomic_inc(&call->skb_count);
 
 	/* insert into the buffer in sequence order */
 	spin_lock_bh(&call->lock);
@@ -734,6 +735,7 @@ all_acked:
 	skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
 	sp->call = call;
 	rxrpc_get_call(call);
+	atomic_inc(&call->skb_count);
 	spin_lock_bh(&call->lock);
 	if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
 		BUG();
@@ -793,6 +795,7 @@ static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
 	sp->error = error;
 	sp->call = call;
 	rxrpc_get_call(call);
+	atomic_inc(&call->skb_count);
 
 	spin_lock_bh(&call->lock);
 	ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
@@ -834,6 +837,9 @@ void rxrpc_process_call(struct work_struct *work)
 		return;
 	}
 
+	if (!call->conn)
+		goto skip_msg_init;
+
 	/* there's a good chance we're going to have to send a message, so set
 	 * one up in advance */
 	msg.msg_name = &call->conn->params.peer->srx.transport;
@@ -856,6 +862,7 @@ void rxrpc_process_call(struct work_struct *work)
 	memset(iov, 0, sizeof(iov));
 	iov[0].iov_base = &whdr;
 	iov[0].iov_len = sizeof(whdr);
+skip_msg_init:
 
 	/* deal with events of a final nature */
 	if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 91287c9d01bb..ae057e0740f3 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -275,6 +275,7 @@ error:
 	list_del_init(&call->link);
 	write_unlock_bh(&rxrpc_call_lock);
 
+	set_bit(RXRPC_CALL_RELEASED, &call->flags);
 	call->state = RXRPC_CALL_DEAD;
 	rxrpc_put_call(call);
 	_leave(" = %d", ret);
@@ -287,6 +288,7 @@ error:
  */
 found_user_ID_now_present:
 	write_unlock(&rx->call_lock);
+	set_bit(RXRPC_CALL_RELEASED, &call->flags);
 	call->state = RXRPC_CALL_DEAD;
 	rxrpc_put_call(call);
 	_leave(" = -EEXIST [%p]", call);
@@ -491,15 +493,9 @@ void rxrpc_release_call(struct rxrpc_call *call)
 	spin_lock_bh(&call->lock);
 	while ((skb = skb_dequeue(&call->rx_queue)) ||
 	       (skb = skb_dequeue(&call->rx_oos_queue))) {
-		sp = rxrpc_skb(skb);
-		if (sp->call) {
-			ASSERTCMP(sp->call, ==, call);
-			rxrpc_put_call(call);
-			sp->call = NULL;
-		}
-		skb->destructor = NULL;
 		spin_unlock_bh(&call->lock);
 
+		sp = rxrpc_skb(skb);
 		_debug("- zap %s %%%u #%u",
 		       rxrpc_pkts[sp->hdr.type],
 		       sp->hdr.serial, sp->hdr.seq);
@@ -605,6 +601,7 @@ void __rxrpc_put_call(struct rxrpc_call *call)
 
 	if (atomic_dec_and_test(&call->usage)) {
 		_debug("call %d dead", call->debug_id);
+		WARN_ON(atomic_read(&call->skb_count) != 0);
 		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
 		rxrpc_queue_work(&call->destroyer);
 	}
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 991a20d25093..70bb77818dea 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -55,9 +55,6 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
 	if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
 		_debug("already terminated");
 		ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
-		skb->destructor = NULL;
-		sp->call = NULL;
-		rxrpc_put_call(call);
 		rxrpc_free_skb(skb);
 		return 0;
 	}
@@ -111,13 +108,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
 	ret = 0;
 
 out:
-	/* release the socket buffer */
-	if (skb) {
-		skb->destructor = NULL;
-		sp->call = NULL;
-		rxrpc_put_call(call);
-		rxrpc_free_skb(skb);
-	}
+	rxrpc_free_skb(skb);
 
 	_leave(" = %d", ret);
 	return ret;
@@ -133,11 +124,15 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
 	struct rxrpc_skb_priv *sp;
 	bool terminal;
 	int ret, ackbit, ack;
+	u32 serial;
+	u8 flags;
 
 	_enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);
 
 	sp = rxrpc_skb(skb);
 	ASSERTCMP(sp->call, ==, NULL);
+	flags = sp->hdr.flags;
+	serial = sp->hdr.serial;
 
 	spin_lock(&call->lock);
 
@@ -200,8 +195,9 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
 
 	sp->call = call;
 	rxrpc_get_call(call);
-	terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
-		    !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
+	atomic_inc(&call->skb_count);
+	terminal = ((flags & RXRPC_LAST_PACKET) &&
+		    !(flags & RXRPC_CLIENT_INITIATED));
 	ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
 	if (ret < 0) {
 		if (ret == -ENOMEM || ret == -ENOBUFS) {
@@ -213,12 +209,13 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
 	}
 
 	skb = NULL;
+	sp = NULL;
 
 	_debug("post #%u", seq);
 	ASSERTCMP(call->rx_data_post, ==, seq);
 	call->rx_data_post++;
 
-	if (sp->hdr.flags & RXRPC_LAST_PACKET)
+	if (flags & RXRPC_LAST_PACKET)
 		set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);
 
 	/* if we've reached an out of sequence packet then we need to drain
@@ -234,7 +231,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call,
 
 	spin_unlock(&call->lock);
 	atomic_inc(&call->ackr_not_idle);
-	rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false);
+	rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false);
 	_leave(" = 0 [posted]");
 	return 0;
 
@@ -247,7 +244,7 @@ out:
 
 discard_and_ack:
 	_debug("discard and ACK packet %p", skb);
-	__rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
+	__rxrpc_propose_ACK(call, ack, serial, true);
 discard:
 	spin_unlock(&call->lock);
 	rxrpc_free_skb(skb);
@@ -255,7 +252,7 @@ discard:
 	return 0;
 
 enqueue_and_ack:
-	__rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
+	__rxrpc_propose_ACK(call, ack, serial, true);
 enqueue_packet:
 	_net("defer skb %p", skb);
 	spin_unlock(&call->lock);
@@ -575,13 +572,13 @@ done:
  * post connection-level events to the connection
  * - this includes challenges, responses and some aborts
  */
-static bool rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
+static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
 				      struct sk_buff *skb)
 {
 	_enter("%p,%p", conn, skb);
 
 	skb_queue_tail(&conn->rx_queue, skb);
-	return rxrpc_queue_conn(conn);
+	rxrpc_queue_conn(conn);
 }
 
 /*
@@ -702,7 +699,6 @@ void rxrpc_data_ready(struct sock *sk)
 
 	rcu_read_lock();
 
-retry_find_conn:
 	conn = rxrpc_find_connection_rcu(local, skb);
 	if (!conn)
 		goto cant_route_call;
@@ -710,8 +706,7 @@ retry_find_conn:
 	if (sp->hdr.callNumber == 0) {
 		/* Connection-level packet */
 		_debug("CONN %p {%d}", conn, conn->debug_id);
-		if (!rxrpc_post_packet_to_conn(conn, skb))
-			goto retry_find_conn;
+		rxrpc_post_packet_to_conn(conn, skb);
 	} else {
 		/* Call-bound packets are routed by connection channel. */
 		unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK;
@@ -749,6 +744,8 @@ cant_route_call:
 	if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
 		_debug("reject type %d",sp->hdr.type);
 		rxrpc_reject_packet(local, skb);
+	} else {
+		rxrpc_free_skb(skb);
 	}
 	_leave(" [no call]");
 	return;
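Note: the rxrpc_fast_process_data() hunks copy sp->hdr.flags and sp->hdr.serial into locals before the skb is queued, because once queued the buffer can be consumed and freed elsewhere. A minimal userspace sketch of reading needed fields before handing a buffer off; struct pkt and enqueue() are invented for illustration:

/* Illustrative only: once a buffer is handed off, the producer must
 * not touch it again; copy what you need first, as the rxrpc hunks do
 * with sp->hdr.flags and sp->hdr.serial. */
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	unsigned int serial;
	unsigned int flags;
};

/* Consumes ownership: after this returns, p must not be dereferenced. */
static void enqueue(struct pkt *p)
{
	free(p);	/* the consumer may free it immediately */
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	p->serial = 42;
	p->flags = 0x1;

	/* copy the fields needed later *before* the handoff */
	unsigned int serial = p->serial;
	unsigned int flags = p->flags;

	enqueue(p);
	p = NULL;	/* ownership gone */

	printf("acking serial %u, flags %#x\n", serial, flags);
	return 0;
}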
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index a3fa2ed85d63..9ed66d533002 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -203,6 +203,9 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	}
 
 	/* we transferred the whole data packet */
+	if (!(flags & MSG_PEEK))
+		rxrpc_kernel_data_consumed(call, skb);
+
 	if (sp->hdr.flags & RXRPC_LAST_PACKET) {
 		_debug("last");
 		if (rxrpc_conn_is_client(call->conn)) {
@@ -360,28 +363,6 @@ wait_error:
 }
 
 /**
- * rxrpc_kernel_data_delivered - Record delivery of data message
- * @skb: Message holding data
- *
- * Record the delivery of a data message.  This permits RxRPC to keep its
- * tracking correct.  The socket buffer will be deleted.
- */
-void rxrpc_kernel_data_delivered(struct sk_buff *skb)
-{
-	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-	struct rxrpc_call *call = sp->call;
-
-	ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
-	ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
-	call->rx_data_recv = sp->hdr.seq;
-
-	ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
-	rxrpc_free_skb(skb);
-}
-
-EXPORT_SYMBOL(rxrpc_kernel_data_delivered);
-
-/**
  * rxrpc_kernel_is_data_last - Determine if data message is last one
  * @skb: Message holding data
  *
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index eee0cfd9ac8c..06c51d4b622d 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -98,11 +98,39 @@ static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
 	spin_unlock_bh(&call->lock);
 }
 
+/**
+ * rxrpc_kernel_data_consumed - Record consumption of data message
+ * @call: The call to which the message pertains.
+ * @skb: Message holding data
+ *
+ * Record the consumption of a data message and generate an ACK if appropriate.
+ * The call state is shifted if this was the final packet.  The caller must be
+ * in process context with no spinlocks held.
+ *
+ * TODO: Actually generate the ACK here rather than punting this to the
+ * workqueue.
+ */
+void rxrpc_kernel_data_consumed(struct rxrpc_call *call, struct sk_buff *skb)
+{
+	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+	_enter("%d,%p{%u}", call->debug_id, skb, sp->hdr.seq);
+
+	ASSERTCMP(sp->call, ==, call);
+	ASSERTCMP(sp->hdr.type, ==, RXRPC_PACKET_TYPE_DATA);
+
+	/* TODO: Fix the sequence number tracking */
+	ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
+	ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
+	ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
+
+	call->rx_data_recv = sp->hdr.seq;
+	rxrpc_hard_ACK_data(call, sp);
+}
+EXPORT_SYMBOL(rxrpc_kernel_data_consumed);
+
 /*
- * destroy a packet that has an RxRPC control buffer
- * - advance the hard-ACK state of the parent call (done here in case something
- *   in the kernel bypasses recvmsg() and steals the packet directly off of the
- *   socket receive queue)
+ * Destroy a packet that has an RxRPC control buffer
  */
 void rxrpc_packet_destructor(struct sk_buff *skb)
 {
@@ -112,9 +140,8 @@ void rxrpc_packet_destructor(struct sk_buff *skb)
 	_enter("%p{%p}", skb, call);
 
 	if (call) {
-		/* send the final ACK on a client call */
-		if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
-			rxrpc_hard_ACK_data(call, sp);
+		if (atomic_dec_return(&call->skb_count) < 0)
+			BUG();
 		rxrpc_put_call(call);
 		sp->call = NULL;
 	}
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index e4a5f2607ffa..d09d0687594b 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -64,7 +64,6 @@ int __tcf_hash_release(struct tc_action *p, bool bind, bool strict)
 	if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
 		if (p->ops->cleanup)
 			p->ops->cleanup(p, bind);
-		list_del(&p->list);
 		tcf_hash_destroy(p->hinfo, p);
 		ret = ACT_P_DELETED;
 	}
@@ -421,18 +420,19 @@ static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
 	return res;
 }
 
-int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
-		    struct tcf_result *res)
+int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
+		    int nr_actions, struct tcf_result *res)
 {
-	const struct tc_action *a;
-	int ret = -1;
+	int ret = -1, i;
 
 	if (skb->tc_verd & TC_NCLS) {
 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
 		ret = TC_ACT_OK;
 		goto exec_done;
 	}
-	list_for_each_entry(a, actions, list) {
+	for (i = 0; i < nr_actions; i++) {
+		const struct tc_action *a = actions[i];
+
 repeat:
 		ret = a->ops->act(skb, a, res);
 		if (ret == TC_ACT_REPEAT)
@@ -754,16 +754,6 @@ err_out:
 	return ERR_PTR(err);
 }
 
-static void cleanup_a(struct list_head *actions)
-{
-	struct tc_action *a, *tmp;
-
-	list_for_each_entry_safe(a, tmp, actions, list) {
-		list_del(&a->list);
-		kfree(a);
-	}
-}
-
 static int tca_action_flush(struct net *net, struct nlattr *nla,
 			    struct nlmsghdr *n, u32 portid)
 {
@@ -905,7 +895,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
 		return ret;
 	}
 err:
-	cleanup_a(&actions);
+	tcf_action_destroy(&actions, 0);
 	return ret;
 }
 
@@ -942,15 +932,9 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
 
 	ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
 	if (ret)
-		goto done;
+		return ret;
 
-	/* dump then free all the actions after update; inserted policy
-	 * stays intact
-	 */
-	ret = tcf_add_notify(net, n, &actions, portid);
-	cleanup_a(&actions);
-done:
-	return ret;
+	return tcf_add_notify(net, n, &actions, portid);
 }
 
 static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
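Note: the act_api hunks convert the per-filter action set from a linked list to a counted pointer array, so the hot path in tcf_action_exec() becomes an indexed loop. A compact sketch of that iteration shape; struct action, exec_all() and the rest are invented for illustration:

/* Illustrative only: iterating a fixed count over a pointer array,
 * the shape tcf_action_exec() takes after the list-to-array change. */
#include <stdio.h>

struct action {
	const char *name;
	int (*act)(const struct action *a);
};

static int do_print(const struct action *a)
{
	printf("exec %s\n", a->name);
	return 0;
}

static int exec_all(struct action **actions, int nr_actions)
{
	int ret = -1, i;

	for (i = 0; i < nr_actions; i++) {
		const struct action *a = actions[i];

		ret = a->act(a);
		if (ret)
			break;
	}
	return ret;
}

int main(void)
{
	struct action a1 = { "police", do_print };
	struct action a2 = { "mirred", do_print };
	struct action *list[] = { &a1, &a2 };

	return exec_all(list, 2);
}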
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index b3c7e975fc9e..8a3be1d99775 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -63,49 +63,8 @@ static int tcf_act_police_walker(struct net *net, struct sk_buff *skb,
 			       const struct tc_action_ops *ops)
 {
 	struct tc_action_net *tn = net_generic(net, police_net_id);
-	struct tcf_hashinfo *hinfo = tn->hinfo;
-	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
-	struct nlattr *nest;
-
-	spin_lock_bh(&hinfo->lock);
-
-	s_i = cb->args[0];
-
-	for (i = 0; i < (POL_TAB_MASK + 1); i++) {
-		struct hlist_head *head;
-		struct tc_action *p;
-
-		head = &hinfo->htab[tcf_hash(i, POL_TAB_MASK)];
-
-		hlist_for_each_entry_rcu(p, head, tcfa_head) {
-			index++;
-			if (index < s_i)
-				continue;
-			nest = nla_nest_start(skb, index);
-			if (nest == NULL)
-				goto nla_put_failure;
-			if (type == RTM_DELACTION)
-				err = tcf_action_dump_1(skb, p, 0, 1);
-			else
-				err = tcf_action_dump_1(skb, p, 0, 0);
-			if (err < 0) {
-				index--;
-				nla_nest_cancel(skb, nest);
-				goto done;
-			}
-			nla_nest_end(skb, nest);
-			n_i++;
-		}
-	}
-done:
-	spin_unlock_bh(&hinfo->lock);
-	if (n_i)
-		cb->args[0] += n_i;
-	return n_i;
 
-nla_put_failure:
-	nla_nest_cancel(skb, nest);
-	goto done;
+	return tcf_generic_walker(tn, skb, cb, type, ops);
 }
 
 static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
@@ -125,6 +84,7 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
 	struct tcf_police *police;
 	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
 	struct tc_action_net *tn = net_generic(net, police_net_id);
+	bool exists = false;
 	int size;
 
 	if (nla == NULL)
@@ -139,24 +99,24 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
 	size = nla_len(tb[TCA_POLICE_TBF]);
 	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
 		return -EINVAL;
+
 	parm = nla_data(tb[TCA_POLICE_TBF]);
+	exists = tcf_hash_check(tn, parm->index, a, bind);
+	if (exists && bind)
+		return 0;
 
-	if (parm->index) {
-		if (tcf_hash_check(tn, parm->index, a, bind)) {
-			if (ovr)
-				goto override;
-			/* not replacing */
-			return -EEXIST;
-		}
-	} else {
+	if (!exists) {
 		ret = tcf_hash_create(tn, parm->index, NULL, a,
 				      &act_police_ops, bind, false);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
+	} else {
+		tcf_hash_release(*a, bind);
+		if (!ovr)
+			return -EEXIST;
 	}
 
-override:
 	police = to_police(*a);
 	if (parm->rate.rate) {
 		err = -ENOMEM;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 843a716a4303..a7c5645373af 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -541,8 +541,12 @@ out:
 void tcf_exts_destroy(struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	tcf_action_destroy(&exts->actions, TCA_ACT_UNBIND);
-	INIT_LIST_HEAD(&exts->actions);
+	LIST_HEAD(actions);
+
+	tcf_exts_to_list(exts, &actions);
+	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
+	kfree(exts->actions);
+	exts->nr_actions = 0;
 #endif
 }
 EXPORT_SYMBOL(tcf_exts_destroy);
@@ -554,7 +558,6 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
 	{
 		struct tc_action *act;
 
-		INIT_LIST_HEAD(&exts->actions);
 		if (exts->police && tb[exts->police]) {
 			act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
 						"police", ovr,
@@ -563,14 +566,20 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
 				return PTR_ERR(act);
 
 			act->type = exts->type = TCA_OLD_COMPAT;
-			list_add(&act->list, &exts->actions);
+			exts->actions[0] = act;
+			exts->nr_actions = 1;
 		} else if (exts->action && tb[exts->action]) {
-			int err;
+			LIST_HEAD(actions);
+			int err, i = 0;
+
 			err = tcf_action_init(net, tb[exts->action], rate_tlv,
 					      NULL, ovr,
-					      TCA_ACT_BIND, &exts->actions);
+					      TCA_ACT_BIND, &actions);
 			if (err)
 				return err;
+			list_for_each_entry(act, &actions, list)
+				exts->actions[i++] = act;
+			exts->nr_actions = i;
 		}
 	}
 #else
@@ -587,37 +596,49 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
 		     struct tcf_exts *src)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	LIST_HEAD(tmp);
+	struct tcf_exts old = *dst;
+
 	tcf_tree_lock(tp);
-	list_splice_init(&dst->actions, &tmp);
-	list_splice(&src->actions, &dst->actions);
+	dst->nr_actions = src->nr_actions;
+	dst->actions = src->actions;
 	dst->type = src->type;
 	tcf_tree_unlock(tp);
-	tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
+
+	tcf_exts_destroy(&old);
 #endif
 }
 EXPORT_SYMBOL(tcf_exts_change);
 
-#define tcf_exts_first_act(ext) \
-	list_first_entry_or_null(&(exts)->actions, \
-				 struct tc_action, list)
+#ifdef CONFIG_NET_CLS_ACT
+static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
+{
+	if (exts->nr_actions == 0)
+		return NULL;
+	else
+		return exts->actions[0];
+}
+#endif
 
 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
 	struct nlattr *nest;
 
-	if (exts->action && !list_empty(&exts->actions)) {
+	if (exts->action && exts->nr_actions) {
 		/*
 		 * again for backward compatible mode - we want
 		 * to work with both old and new modes of entering
 		 * tc data even if iproute2 was newer - jhs
 		 */
 		if (exts->type != TCA_OLD_COMPAT) {
+			LIST_HEAD(actions);
+
 			nest = nla_nest_start(skb, exts->action);
 			if (nest == NULL)
 				goto nla_put_failure;
-			if (tcf_action_dump(skb, &exts->actions, 0, 0) < 0)
+
+			tcf_exts_to_list(exts, &actions);
+			if (tcf_action_dump(skb, &actions, 0, 0) < 0)
 				goto nla_put_failure;
 			nla_nest_end(skb, nest);
 		} else if (exts->police) {
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 4cb5aedfe3ee..ef8ba77a5bea 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -293,6 +293,7 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
 		return ERR_PTR(err);
 	}
 
+	iter->start_fail = 0;
 	return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
 }
 
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index f69edcf219e5..bb691538adc8 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -13,6 +13,7 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
 {
 	union sctp_addr laddr, paddr;
 	struct dst_entry *dst;
+	struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;
 
 	laddr = list_entry(asoc->base.bind_addr.address_list.next,
 			   struct sctp_sockaddr_entry, list)->a;
@@ -40,10 +41,15 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r,
 	}
 
 	r->idiag_state = asoc->state;
-	r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
-	r->idiag_retrans = asoc->rtx_data_chunks;
-	r->idiag_expires = jiffies_to_msecs(
-		asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] - jiffies);
+	if (timer_pending(t3_rtx)) {
+		r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX;
+		r->idiag_retrans = asoc->rtx_data_chunks;
+		r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies);
+	} else {
+		r->idiag_timer = 0;
+		r->idiag_retrans = 0;
+		r->idiag_expires = 0;
+	}
 }
 
 static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
@@ -350,7 +356,7 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
 	if (cb->args[4] < cb->args[1])
 		goto next;
 
-	if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs))
+	if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs))
 		goto next;
 
 	if (r->sdiag_family != AF_UNSPEC &&
@@ -465,7 +471,7 @@ skip:
 	 * 3 : to mark if we have dumped the ep info of the current asoc
 	 * 4 : to work as a temporary variable to traversal list
 	 */
-	if (!(idiag_states & ~TCPF_LISTEN))
+	if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
 		goto done;
 	sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp);
 done:
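Note: the sctp_diag hunk reports T3-RTX expiry data only while the timer is actually pending; a disarmed timer's expires field is stale. A sketch of gating report fields on an armed flag; struct simple_timer and report_remaining() are invented for illustration:

/* Illustrative only: report a timer's remaining time only while it is
 * armed; a disarmed timer's deadline field is stale, which is what the
 * timer_pending() check in the fix guards against. */
#include <stdio.h>
#include <stdbool.h>

struct simple_timer {
	bool pending;
	long expires;	/* stale once the timer has fired or was stopped */
};

static long report_remaining(const struct simple_timer *t, long now)
{
	if (!t->pending)
		return 0;	/* the fix reports zeros instead of garbage */
	return t->expires - now;
}

int main(void)
{
	struct simple_timer armed = { .pending = true, .expires = 150 };
	struct simple_timer idle = { .pending = false, .expires = 30 };

	printf("armed: %ld\n", report_remaining(&armed, 100));	/* 50 */
	printf("idle:  %ld\n", report_remaining(&idle, 100));	/* 0, not -70 */
	return 0;
}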
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 1bc4f71aaba8..d85b803da11d 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -702,14 +702,14 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
 	 */
 	sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff));
 
-	sctp_ulpevent_receive_data(event, asoc);
-
 	/* And hold the chunk as we need it for getting the IP headers
 	 * later in recvmsg
 	 */
 	sctp_chunk_hold(chunk);
 	event->chunk = chunk;
 
+	sctp_ulpevent_receive_data(event, asoc);
+
 	event->stream = ntohs(chunk->subh.data_hdr->stream);
 	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
 	event->ppid = chunk->subh.data_hdr->ppid;
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index b62caa1c770c..ed97a5876ebe 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -728,12 +728,13 @@ int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
 			       u32 bearer_id, u32 *prev_node)
 {
 	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
-	struct tipc_peer *peer = mon->self;
+	struct tipc_peer *peer;
 
 	if (!mon)
 		return -EINVAL;
 
 	read_lock_bh(&mon->lock);
+	peer = mon->self;
 	do {
 		if (*prev_node) {
 			if (peer->addr == *prev_node)
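Note: the tipc_nl_add_monitor_peer() hunk moves the mon->self read below the !mon check; initialising a local from a pointer that is about to be NULL-tested defeats the test. A compact sketch; struct monitor and dump_peer() are invented names:

/* Illustrative only: initialising `peer` from `mon` before checking
 * `mon` for NULL dereferences a possibly-NULL pointer; the fix delays
 * the read until after the check (and until the lock is held). */
#include <stdio.h>
#include <stddef.h>

struct peer { int addr; };
struct monitor { struct peer self; };

static int dump_peer(struct monitor *mon)
{
	struct peer *peer;	/* not: `= &mon->self;` before the check */

	if (!mon)
		return -1;

	peer = &mon->self;	/* safe: mon is known non-NULL here */
	printf("peer addr %d\n", peer->addr);
	return 0;
}

int main(void)
{
	struct monitor m = { .self = { .addr = 7 } };

	dump_peer(&m);
	dump_peer(NULL);	/* rejected instead of crashing */
	return 0;
}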
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index c49b8df438cb..f9f5f3c3dab5 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2180,7 +2180,8 @@ restart:
 				      TIPC_CONN_MSG, SHORT_H_SIZE,
 				      0, dnode, onode, dport, oport,
 				      TIPC_CONN_SHUTDOWN);
-		tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
+		if (skb)
+			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
 	}
 	tsk->connected = 0;
 	sock->state = SS_DISCONNECTING;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index b0e11b6dc994..0f506220a3bd 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -513,6 +513,7 @@ static bool cfg80211_chandef_dfs_available(struct wiphy *wiphy,
 		r = cfg80211_get_chans_dfs_available(wiphy,
 						     chandef->center_freq2,
 						     width);
+		break;
 	default:
 		WARN_ON(chandef->center_freq2);
 		break;
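Note: the chan.c hunk adds the break that was missing after the second-segment check, so control no longer falls through into the default arm and fires its warning. A tiny sketch of the fallthrough bug; classify() is an invented name:

/* Illustrative only: without the break, the first arm falls through
 * into the default arm and triggers its warning, the shape of the
 * cfg80211_chandef_dfs_available() fix. */
#include <stdio.h>

static int classify(int width)
{
	int r = 0;

	switch (width) {
	case 80:
		r = 2;
		break;	/* the fix: without this we fall into default */
	default:
		fprintf(stderr, "warn: unexpected width %d\n", width);
		break;
	}
	return r;
}

int main(void)
{
	printf("width 80 -> %d\n", classify(80));	/* no warning */
	printf("width 20 -> %d\n", classify(20));	/* warns */
	return 0;
}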
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 46417f9cce68..f02653a08993 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5380,6 +5380,7 @@ static int nl80211_parse_mesh_config(struct genl_info *info,
 {
        struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1];
        u32 mask = 0;
+       u16 ht_opmode;
 
 #define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \
 do { \
@@ -5471,9 +5472,36 @@ do { \
        FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0,
                                  mask, NL80211_MESHCONF_RSSI_THRESHOLD,
                                  nl80211_check_s32);
-       FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16,
-                                 mask, NL80211_MESHCONF_HT_OPMODE,
-                                 nl80211_check_u16);
+       /*
+        * Check HT operation mode based on
+        * IEEE 802.11 2012 8.4.2.59 HT Operation element.
+        */
+       if (tb[NL80211_MESHCONF_HT_OPMODE]) {
+               ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]);
+
+               if (ht_opmode & ~(IEEE80211_HT_OP_MODE_PROTECTION |
+                                 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT |
+                                 IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
+                       return -EINVAL;
+
+               if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
+                   (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
+                       return -EINVAL;
+
+               switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) {
+               case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+               case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+                       if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)
+                               return -EINVAL;
+                       break;
+               case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+               case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+                       if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
+                               return -EINVAL;
+                       break;
+               }
+               cfg->ht_opmode = ht_opmode;
+       }
        FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
                                  1, 65535, mask,
                                  NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
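
The replaced FILL_IN_MESH_PARAM_IF_SET() line only range-checked ht_opmode as 0..16, which rejects legal encodings such as non-HT-mixed protection plus the non-HT-STAs-present bit (0x13) and accepts inconsistent ones such as that bit alone (0x10). The new code validates the field per IEEE 802.11-2012 8.4.2.59. Below, the same predicate as a user-space boolean; the HT_* constants are assumed to mirror the IEEE80211_HT_OP_MODE_* values in include/linux/ieee80211.h (protection mask 0x0003, non-GF 0x0004, non-HT present 0x0010):

    #include <assert.h>
    #include <stdint.h>

    /* Assumed to match IEEE80211_HT_OP_MODE_* in include/linux/ieee80211.h. */
    #define HT_PROT_MASK        0x0003
    #define HT_PROT_NONE        0
    #define HT_PROT_NONMEMBER   1
    #define HT_PROT_20MHZ       2
    #define HT_PROT_NONHT_MIXED 3
    #define HT_NON_GF_PRESENT   0x0004
    #define HT_NON_HT_PRESENT   0x0010

    /* The new nl80211 check, rendered as a boolean predicate. */
    static int ht_opmode_valid(uint16_t m)
    {
            if (m & ~(HT_PROT_MASK | HT_NON_GF_PRESENT | HT_NON_HT_PRESENT))
                    return 0;       /* reserved bits must be clear        */
            if ((m & HT_NON_GF_PRESENT) && (m & HT_NON_HT_PRESENT))
                    return 0;       /* combination rejected by the check  */
            if ((m & HT_PROT_MASK) == HT_PROT_NONE ||
                (m & HT_PROT_MASK) == HT_PROT_20MHZ)
                    return !(m & HT_NON_HT_PRESENT);
            return !!(m & HT_NON_HT_PRESENT); /* nonmember / non-HT mixed */
    }

    int main(void)
    {
            assert(ht_opmode_valid(HT_PROT_NONHT_MIXED | HT_NON_HT_PRESENT));
            assert(!ht_opmode_valid(HT_NON_HT_PRESENT)); /* 0x10: old range check allowed it */
            assert(!ht_opmode_valid(HT_PROT_NONE | 0x0020)); /* reserved bit set */
            return 0;
    }
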
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 217c8d507f2e..7927a090fa0d 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -72,8 +72,8 @@ static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flag
        (void *) BPF_FUNC_l3_csum_replace;
 static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
        (void *) BPF_FUNC_l4_csum_replace;
-static int (*bpf_skb_in_cgroup)(void *ctx, void *map, int index) =
-       (void *) BPF_FUNC_skb_in_cgroup;
+static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
+       (void *) BPF_FUNC_skb_under_cgroup;
 
 #if defined(__x86_64__)
 
diff --git a/samples/bpf/test_cgrp2_tc_kern.c b/samples/bpf/test_cgrp2_tc_kern.c
index 2732c37c8d5b..10ff73404e3a 100644
--- a/samples/bpf/test_cgrp2_tc_kern.c
+++ b/samples/bpf/test_cgrp2_tc_kern.c
@@ -57,7 +57,7 @@ int handle_egress(struct __sk_buff *skb)
                bpf_trace_printk(dont_care_msg, sizeof(dont_care_msg),
                                 eth->h_proto, ip6h->nexthdr);
                return TC_ACT_OK;
-       } else if (bpf_skb_in_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) {
+       } else if (bpf_skb_under_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) {
                bpf_trace_printk(pass_msg, sizeof(pass_msg));
                return TC_ACT_OK;
        } else {
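
The rename in these two sample files tracks the kernel's renaming of BPF_FUNC_skb_in_cgroup to BPF_FUNC_skb_under_cgroup before the helper shipped in a release: the helper reportedly tests whether the skb's cgroup lies anywhere beneath the cgroup stored in the map, i.e. subtree membership rather than exact identity, and "under" says that more precisely. A toy model of such an ancestry test; struct cgroup and cgroup_under() are illustrative only:

    #include <assert.h>
    #include <stddef.h>

    struct cgroup { struct cgroup *parent; };

    /* True if c is anc itself or any descendant of anc. */
    static int cgroup_under(const struct cgroup *c, const struct cgroup *anc)
    {
            for (; c; c = c->parent)
                    if (c == anc)
                            return 1;
            return 0;
    }

    int main(void)
    {
            struct cgroup root = { NULL }, mid = { &root }, leaf = { &mid };

            assert(cgroup_under(&leaf, &root));   /* descendant counts */
            assert(!cgroup_under(&root, &leaf));  /* ancestor does not */
            return 0;
    }
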
diff --git a/samples/bpf/test_maps.c b/samples/bpf/test_maps.c
index 47bf0858f9e4..cce2b59751eb 100644
--- a/samples/bpf/test_maps.c
+++ b/samples/bpf/test_maps.c
@@ -68,7 +68,16 @@ static void test_hashmap_sanity(int i, void *data)
        assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 &&
               errno == E2BIG);
 
+       /* update existing element, though the map is full */
+       key = 1;
+       assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0);
+       key = 2;
+       assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0);
+       key = 1;
+       assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0);
+
        /* check that key = 0 doesn't exist */
+       key = 0;
        assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT);
 
        /* iterate over two elements */
@@ -413,10 +422,12 @@ static void do_work(int fn, void *data)
 
        for (i = fn; i < MAP_SIZE; i += TASKS) {
                key = value = i;
-               if (do_update)
+               if (do_update) {
                        assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0);
-               else
+                       assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0);
+               } else {
                        assert(bpf_delete_elem(map_fd, &key) == 0);
+               }
        }
 }
 
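
The new asserts in test_maps.c pin down the update-flag contract: on a full map, creating a fresh key (BPF_NOEXIST, or BPF_ANY with an unseen key) fails with E2BIG, while updating a key that already exists (BPF_EXIST, or BPF_ANY on a present key) must still succeed because it consumes no extra slot; the do_work() change exercises the same pair under contention. A miniature user-space model of that contract; update(), find(), and the two-slot arrays are illustrative stand-ins, not the bpf() syscall:

    #include <assert.h>
    #include <errno.h>

    #define CAP 2
    enum { ANY, NOEXIST, EXIST }; /* stand-ins for BPF_ANY/NOEXIST/EXIST */

    static int keys[CAP], vals[CAP], used;

    static int find(int k)
    {
            for (int i = 0; i < used; i++)
                    if (keys[i] == k)
                            return i;
            return -1;
    }

    /* Minimal model of the map-update flag handling. */
    static int update(int k, int v, int flag)
    {
            int i = find(k);

            if (i >= 0) {                   /* key exists: in-place update */
                    if (flag == NOEXIST)
                            return -EEXIST;
                    vals[i] = v;
                    return 0;
            }
            if (flag == EXIST)
                    return -ENOENT;
            if (used == CAP)
                    return -E2BIG;          /* only creation needs a slot  */
            keys[used] = k;
            vals[used++] = v;
            return 0;
    }

    int main(void)
    {
            assert(update(1, 1, ANY) == 0 && update(2, 2, ANY) == 0);
            assert(update(3, 3, NOEXIST) == -E2BIG); /* full: no new key */
            assert(update(1, 9, EXIST) == 0);        /* full: update OK  */
            assert(update(2, 9, ANY) == 0);          /* full: update OK  */
            return 0;
    }
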