aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-10-05 13:11:24 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-10-05 13:11:24 -0400
commit687ee0ad4e897e29f4b41f7a20c866d74c5e0660 (patch)
treeb31a2af35c24a54823674cdd126993b80daeac67 /include
parent3ddf40e8c31964b744ff10abb48c8e36a83ec6e7 (diff)
parent03a1eabc3f54469abd4f1784182851b2e29630cc (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller: 1) BBR TCP congestion control, from Neal Cardwell, Yuchung Cheng and co. at Google. https://lwn.net/Articles/701165/ 2) Do TCP Small Queues for retransmits, from Eric Dumazet. 3) Support collect_md mode for all IPV4 and IPV6 tunnels, from Alexei Starovoitov. 4) Allow cls_flower to classify packets in ip tunnels, from Amir Vadai. 5) Support DSA tagging in older mv88e6xxx switches, from Andrew Lunn. 6) Support GMAC protocol in iwlwifi mvm, from Ayala Beker. 7) Support ndo_poll_controller in mlx5, from Calvin Owens. 8) Move VRF processing to an output hook and allow l3mdev to be loopback, from David Ahern. 9) Support SOCK_DESTROY for UDP sockets. Also from David Ahern. 10) Congestion control in RXRPC, from David Howells. 11) Support geneve RX offload in ixgbe, from Emil Tantilov. 12) When hitting pressure for new incoming TCP data SKBs, perform a partial rather than a full purge of the OFO queue (which could be huge). From Eric Dumazet. 13) Convert XFRM state and policy lookups to RCU, from Florian Westphal. 14) Support RX network flow classification to igb, from Gangfeng Huang. 15) Hardware offloading of eBPF in nfp driver, from Jakub Kicinski. 16) New skbmod packet action, from Jamal Hadi Salim. 17) Remove some inefficiencies in snmp proc output, from Jia He. 18) Add FIB notifications to properly propagate route changes to hardware which is doing forwarding offloading. From Jiri Pirko. 19) New dsa driver for qca8xxx chips, from John Crispin. 20) Implement RFC7559 ipv6 router solicitation backoff, from Maciej Żenczykowski. 21) Add L3 mode to ipvlan, from Mahesh Bandewar. 22) Support 802.1ad in mlx4, from Moshe Shemesh. 23) Support hardware LRO in mediatek driver, from Nelson Chang. 24) Add TC offloading to mlx5, from Or Gerlitz. 25) Convert various drivers to ethtool ksettings interfaces, from Philippe Reynes. 26) TX max rate limiting for cxgb4, from Rahul Lakkireddy. 27) NAPI support for ath10k, from Rajkumar Manoharan. 
28) Support XDP in mlx5, from Rana Shahout and Saeed Mahameed. 29) UDP replicast support in TIPC, from Richard Alpe. 30) Per-queue statistics for qed driver, from Sudarsana Reddy Kalluru. 31) Support BQL in thunderx driver, from Sunil Goutham. 32) TSO support in alx driver, from Tobias Regnery. 33) Add stream parser engine and use it in kcm. 34) Support async DHCP replies in ipconfig module, from Uwe Kleine-König. 35) DSA port fast aging for mv88e6xxx driver, from Vivien Didelot. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1715 commits) mlxsw: switchx2: Fix misuse of hard_header_len mlxsw: spectrum: Fix misuse of hard_header_len net/faraday: Stop NCSI device on shutdown net/ncsi: Introduce ncsi_stop_dev() net/ncsi: Rework the channel monitoring net/ncsi: Allow to extend NCSI request properties net/ncsi: Rework request index allocation net/ncsi: Don't probe on the reserved channel ID (0x1f) net/ncsi: Introduce NCSI_RESERVED_CHANNEL net/ncsi: Avoid unused-value build warning from ia64-linux-gcc net: Add netdev all_adj_list refcnt propagation to fix panic net: phy: Add Edge-rate driver for Microsemi PHYs. vmxnet3: Wake queue from reset work i40e: avoid NULL pointer dereference and recursive errors on early PCI error qed: Add RoCE ll2 & GSI support qed: Add support for memory registeration verbs qed: Add support for QP verbs qed: PD,PKEY and CQ verb support qed: Add support for RoCE hw init qede: Add qedr framework ...
Diffstat (limited to 'include')
-rw-r--r--include/dt-bindings/net/mscc-phy-vsc8531.h21
-rw-r--r--include/linux/bcma/bcma.h3
-rw-r--r--include/linux/bcma/bcma_regs.h1
-rw-r--r--include/linux/bitfield.h93
-rw-r--r--include/linux/bpf.h15
-rw-r--r--include/linux/bpf_verifier.h102
-rw-r--r--include/linux/bug.h3
-rw-r--r--include/linux/cgroup.h23
-rw-r--r--include/linux/filter.h64
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/if_bridge.h1
-rw-r--r--include/linux/if_link.h1
-rw-r--r--include/linux/if_vlan.h34
-rw-r--r--include/linux/inet_diag.h4
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/ktime.h5
-rw-r--r--include/linux/mlx4/cmd.h3
-rw-r--r--include/linux/mlx4/device.h3
-rw-r--r--include/linux/mlx4/qp.h2
-rw-r--r--include/linux/mlx5/cq.h6
-rw-r--r--include/linux/mlx5/device.h441
-rw-r--r--include/linux/mlx5/driver.h35
-rw-r--r--include/linux/mlx5/fs.h6
-rw-r--r--include/linux/mlx5/mlx5_ifc.h297
-rw-r--r--include/linux/mlx5/port.h40
-rw-r--r--include/linux/mlx5/qp.h128
-rw-r--r--include/linux/mlx5/vport.h2
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/net.h6
-rw-r--r--include/linux/netdevice.h35
-rw-r--r--include/linux/netfilter.h63
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h64
-rw-r--r--include/linux/netfilter_ingress.h18
-rw-r--r--include/linux/perf_event.h9
-rw-r--r--include/linux/phy.h3
-rw-r--r--include/linux/ptp_clock_kernel.h5
-rw-r--r--include/linux/qed/common_hsi.h359
-rw-r--r--include/linux/qed/eth_common.h155
-rw-r--r--include/linux/qed/iscsi_common.h28
-rw-r--r--include/linux/qed/qed_chain.h13
-rw-r--r--include/linux/qed/qed_eth_if.h3
-rw-r--r--include/linux/qed/qed_if.h36
-rw-r--r--include/linux/qed/qed_ll2_if.h139
-rw-r--r--include/linux/qed/qed_roce_if.h604
-rw-r--r--include/linux/qed/qede_roce.h88
-rw-r--r--include/linux/qed/rdma_common.h1
-rw-r--r--include/linux/qed/tcp_common.h16
-rw-r--r--include/linux/rhashtable.h543
-rw-r--r--include/linux/rtnetlink.h2
-rw-r--r--include/linux/skbuff.h73
-rw-r--r--include/linux/sysctl.h4
-rw-r--r--include/linux/tcp.h21
-rw-r--r--include/linux/win_minmax.h37
-rw-r--r--include/net/addrconf.h3
-rw-r--r--include/net/af_rxrpc.h53
-rw-r--r--include/net/bluetooth/bluetooth.h4
-rw-r--r--include/net/bluetooth/hci.h7
-rw-r--r--include/net/bluetooth/hci_core.h11
-rw-r--r--include/net/bluetooth/hci_mon.h4
-rw-r--r--include/net/bluetooth/mgmt.h24
-rw-r--r--include/net/cfg80211.h259
-rw-r--r--include/net/devlink.h1
-rw-r--r--include/net/dsa.h53
-rw-r--r--include/net/dst_metadata.h52
-rw-r--r--include/net/flow.h3
-rw-r--r--include/net/flow_dissector.h14
-rw-r--r--include/net/fq.h3
-rw-r--r--include/net/fq_impl.h7
-rw-r--r--include/net/gre.h10
-rw-r--r--include/net/ieee80211_radiotap.h21
-rw-r--r--include/net/if_inet6.h1
-rw-r--r--include/net/inet_connection_sock.h4
-rw-r--r--include/net/ip.h23
-rw-r--r--include/net/ip6_route.h3
-rw-r--r--include/net/ip6_tunnel.h1
-rw-r--r--include/net/ip_fib.h49
-rw-r--r--include/net/ip_tunnels.h21
-rw-r--r--include/net/kcm.h37
-rw-r--r--include/net/l3mdev.h153
-rw-r--r--include/net/lwtunnel.h44
-rw-r--r--include/net/mac80211.h108
-rw-r--r--include/net/mpls.h15
-rw-r--r--include/net/ncsi.h5
-rw-r--r--include/net/netfilter/br_netfilter.h6
-rw-r--r--include/net/netfilter/nf_conntrack.h56
-rw-r--r--include/net/netfilter/nf_conntrack_core.h3
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h17
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h4
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h8
-rw-r--r--include/net/netfilter/nf_log.h14
-rw-r--r--include/net/netfilter/nf_queue.h69
-rw-r--r--include/net/netfilter/nf_tables.h22
-rw-r--r--include/net/netfilter/nf_tables_bridge.h7
-rw-r--r--include/net/netfilter/nf_tables_core.h3
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h43
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h53
-rw-r--r--include/net/netns/conntrack.h8
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/netfilter.h2
-rw-r--r--include/net/netns/xfrm.h12
-rw-r--r--include/net/pkt_cls.h24
-rw-r--r--include/net/pkt_sched.h4
-rw-r--r--include/net/pptp.h23
-rw-r--r--include/net/route.h10
-rw-r--r--include/net/sch_generic.h76
-rw-r--r--include/net/sctp/sctp.h10
-rw-r--r--include/net/sctp/sm.h94
-rw-r--r--include/net/sctp/structs.h5
-rw-r--r--include/net/sock.h13
-rw-r--r--include/net/strparser.h142
-rw-r--r--include/net/switchdev.h52
-rw-r--r--include/net/tc_act/tc_ife.h2
-rw-r--r--include/net/tc_act/tc_skbmod.h30
-rw-r--r--include/net/tc_act/tc_tunnel_key.h30
-rw-r--r--include/net/tc_act/tc_vlan.h26
-rw-r--r--include/net/tcp.h63
-rw-r--r--include/net/udp.h1
-rw-r--r--include/net/vxlan.h18
-rw-r--r--include/net/xfrm.h2
-rw-r--r--include/rxrpc/packet.h17
-rw-r--r--include/trace/events/rxrpc.h625
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/batman_adv.h94
-rw-r--r--include/uapi/linux/bpf.h51
-rw-r--r--include/uapi/linux/bpf_perf_event.h18
-rw-r--r--include/uapi/linux/ethtool.h11
-rw-r--r--include/uapi/linux/if_bridge.h2
-rw-r--r--include/uapi/linux/if_link.h30
-rw-r--r--include/uapi/linux/if_tunnel.h17
-rw-r--r--include/uapi/linux/inet_diag.h20
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--include/uapi/linux/mii.h1
-rw-r--r--include/uapi/linux/netfilter/nf_log.h12
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h106
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h8
-rw-r--r--include/uapi/linux/netfilter/xt_hashlimit.h23
-rw-r--r--include/uapi/linux/nl80211.h270
-rw-r--r--include/uapi/linux/openvswitch.h17
-rw-r--r--include/uapi/linux/pkt_cls.h19
-rw-r--r--include/uapi/linux/pkt_sched.h4
-rw-r--r--include/uapi/linux/snmp.h1
-rw-r--r--include/uapi/linux/tc_act/tc_ife.h3
-rw-r--r--include/uapi/linux/tc_act/tc_skbmod.h39
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h41
-rw-r--r--include/uapi/linux/tc_act/tc_vlan.h2
-rw-r--r--include/uapi/linux/tcp.h3
-rw-r--r--include/uapi/linux/tipc_netlink.h4
-rw-r--r--include/uapi/linux/xfrm.h2
149 files changed, 5539 insertions, 1520 deletions
diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h
new file mode 100644
index 000000000000..2383dd20ff43
--- /dev/null
+++ b/include/dt-bindings/net/mscc-phy-vsc8531.h
@@ -0,0 +1,21 @@
1/*
2 * Device Tree constants for Microsemi VSC8531 PHY
3 *
4 * Author: Nagaraju Lakkaraju
5 *
6 * License: Dual MIT/GPL
7 * Copyright (c) 2016 Microsemi Corporation
8 */
9
10#ifndef _DT_BINDINGS_MSCC_VSC8531_H
11#define _DT_BINDINGS_MSCC_VSC8531_H
12
13/* MAC interface Edge rate control VDDMAC in milli Volts */
14#define MSCC_VDDMAC_3300 3300
15#define MSCC_VDDMAC_2500 2500
16#define MSCC_VDDMAC_1800 1800
17#define MSCC_VDDMAC_1500 1500
18#define MSCC_VDDMAC_MAX 4
19#define MSCC_SLOWDOWN_MAX 8
20
21#endif
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 3db25df396cb..8eeedb2db924 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -205,6 +205,9 @@ struct bcma_host_ops {
205#define BCMA_PKG_ID_BCM4709 0 205#define BCMA_PKG_ID_BCM4709 0
206#define BCMA_CHIP_ID_BCM47094 53030 206#define BCMA_CHIP_ID_BCM47094 53030
207#define BCMA_CHIP_ID_BCM53018 53018 207#define BCMA_CHIP_ID_BCM53018 53018
208#define BCMA_CHIP_ID_BCM53573 53573
209#define BCMA_PKG_ID_BCM53573 0
210#define BCMA_PKG_ID_BCM47189 1
208 211
209/* Board types (on PCI usually equals to the subsystem dev id) */ 212/* Board types (on PCI usually equals to the subsystem dev id) */
210/* BCM4313 */ 213/* BCM4313 */
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index 4901fb358b07..9986f8288d01 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -24,6 +24,7 @@
24#define BCMA_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */ 24#define BCMA_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */
25 25
26/* Agent registers (common for every core) */ 26/* Agent registers (common for every core) */
27#define BCMA_OOB_SEL_OUT_A30 0x0100
27#define BCMA_IOCTL 0x0408 /* IO control */ 28#define BCMA_IOCTL 0x0408 /* IO control */
28#define BCMA_IOCTL_CLK 0x0001 29#define BCMA_IOCTL_CLK 0x0001
29#define BCMA_IOCTL_FGC 0x0002 30#define BCMA_IOCTL_FGC 0x0002
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
new file mode 100644
index 000000000000..f6505d83069d
--- /dev/null
+++ b/include/linux/bitfield.h
@@ -0,0 +1,93 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
3 * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _LINUX_BITFIELD_H
16#define _LINUX_BITFIELD_H
17
18#include <linux/bug.h>
19
20/*
21 * Bitfield access macros
22 *
23 * FIELD_{GET,PREP} macros take as first parameter shifted mask
24 * from which they extract the base mask and shift amount.
25 * Mask must be a compilation time constant.
26 *
27 * Example:
28 *
29 * #define REG_FIELD_A GENMASK(6, 0)
30 * #define REG_FIELD_B BIT(7)
31 * #define REG_FIELD_C GENMASK(15, 8)
32 * #define REG_FIELD_D GENMASK(31, 16)
33 *
34 * Get:
35 * a = FIELD_GET(REG_FIELD_A, reg);
36 * b = FIELD_GET(REG_FIELD_B, reg);
37 *
38 * Set:
39 * reg = FIELD_PREP(REG_FIELD_A, 1) |
40 * FIELD_PREP(REG_FIELD_B, 0) |
41 * FIELD_PREP(REG_FIELD_C, c) |
42 * FIELD_PREP(REG_FIELD_D, 0x40);
43 *
44 * Modify:
45 * reg &= ~REG_FIELD_C;
46 * reg |= FIELD_PREP(REG_FIELD_C, c);
47 */
48
49#define __bf_shf(x) (__builtin_ffsll(x) - 1)
50
51#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
52 ({ \
53 BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
54 _pfx "mask is not constant"); \
55 BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \
56 BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
57 ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
58 _pfx "value too large for the field"); \
59 BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \
60 _pfx "type of reg too small for mask"); \
61 __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
62 (1ULL << __bf_shf(_mask))); \
63 })
64
65/**
66 * FIELD_PREP() - prepare a bitfield element
67 * @_mask: shifted mask defining the field's length and position
68 * @_val: value to put in the field
69 *
70 * FIELD_PREP() masks and shifts up the value. The result should
71 * be combined with other fields of the bitfield using logical OR.
72 */
73#define FIELD_PREP(_mask, _val) \
74 ({ \
75 __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
76 ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
77 })
78
79/**
80 * FIELD_GET() - extract a bitfield element
81 * @_mask: shifted mask defining the field's length and position
82 * @_reg: 32bit value of entire bitfield
83 *
84 * FIELD_GET() extracts the field specified by @_mask from the
85 * bitfield passed in as @_reg by masking and shifting it down.
86 */
87#define FIELD_GET(_mask, _reg) \
88 ({ \
89 __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \
90 (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
91 })
92
93#endif
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 11134238417d..c201017b5730 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -96,6 +96,7 @@ enum bpf_return_type {
96struct bpf_func_proto { 96struct bpf_func_proto {
97 u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 97 u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
98 bool gpl_only; 98 bool gpl_only;
99 bool pkt_access;
99 enum bpf_return_type ret_type; 100 enum bpf_return_type ret_type;
100 enum bpf_arg_type arg1_type; 101 enum bpf_arg_type arg1_type;
101 enum bpf_arg_type arg2_type; 102 enum bpf_arg_type arg2_type;
@@ -138,6 +139,13 @@ enum bpf_reg_type {
138 */ 139 */
139 PTR_TO_PACKET, 140 PTR_TO_PACKET,
140 PTR_TO_PACKET_END, /* skb->data + headlen */ 141 PTR_TO_PACKET_END, /* skb->data + headlen */
142
143 /* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map
144 * elem value. We only allow this if we can statically verify that
145 * access from this register are going to fall within the size of the
146 * map element.
147 */
148 PTR_TO_MAP_VALUE_ADJ,
141}; 149};
142 150
143struct bpf_prog; 151struct bpf_prog;
@@ -151,7 +159,8 @@ struct bpf_verifier_ops {
151 */ 159 */
152 bool (*is_valid_access)(int off, int size, enum bpf_access_type type, 160 bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
153 enum bpf_reg_type *reg_type); 161 enum bpf_reg_type *reg_type);
154 162 int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
163 const struct bpf_prog *prog);
155 u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, 164 u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
156 int src_reg, int ctx_off, 165 int src_reg, int ctx_off,
157 struct bpf_insn *insn, struct bpf_prog *prog); 166 struct bpf_insn *insn, struct bpf_prog *prog);
@@ -297,6 +306,10 @@ static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
297static inline void bpf_prog_put(struct bpf_prog *prog) 306static inline void bpf_prog_put(struct bpf_prog *prog)
298{ 307{
299} 308}
309static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
310{
311 return ERR_PTR(-EOPNOTSUPP);
312}
300#endif /* CONFIG_BPF_SYSCALL */ 313#endif /* CONFIG_BPF_SYSCALL */
301 314
302/* verifier prototypes for helper functions called from eBPF programs */ 315/* verifier prototypes for helper functions called from eBPF programs */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
new file mode 100644
index 000000000000..7035b997aaa5
--- /dev/null
+++ b/include/linux/bpf_verifier.h
@@ -0,0 +1,102 @@
1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#ifndef _LINUX_BPF_VERIFIER_H
8#define _LINUX_BPF_VERIFIER_H 1
9
10#include <linux/bpf.h> /* for enum bpf_reg_type */
11#include <linux/filter.h> /* for MAX_BPF_STACK */
12
13 /* Just some arbitrary values so we can safely do math without overflowing and
14 * are obviously wrong for any sort of memory access.
15 */
16#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
17#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024)
18
19struct bpf_reg_state {
20 enum bpf_reg_type type;
21 /*
22 * Used to determine if any memory access using this register will
23 * result in a bad access.
24 */
25 u64 min_value, max_value;
26 union {
27 /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
28 s64 imm;
29
30 /* valid when type == PTR_TO_PACKET* */
31 struct {
32 u32 id;
33 u16 off;
34 u16 range;
35 };
36
37 /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
38 * PTR_TO_MAP_VALUE_OR_NULL
39 */
40 struct bpf_map *map_ptr;
41 };
42};
43
44enum bpf_stack_slot_type {
45 STACK_INVALID, /* nothing was stored in this stack slot */
46 STACK_SPILL, /* register spilled into stack */
47 STACK_MISC /* BPF program wrote some data into this slot */
48};
49
50#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
51
52/* state of the program:
53 * type of all registers and stack info
54 */
55struct bpf_verifier_state {
56 struct bpf_reg_state regs[MAX_BPF_REG];
57 u8 stack_slot_type[MAX_BPF_STACK];
58 struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
59};
60
61/* linked list of verifier states used to prune search */
62struct bpf_verifier_state_list {
63 struct bpf_verifier_state state;
64 struct bpf_verifier_state_list *next;
65};
66
67struct bpf_insn_aux_data {
68 enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
69};
70
71#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
72
73struct bpf_verifier_env;
74struct bpf_ext_analyzer_ops {
75 int (*insn_hook)(struct bpf_verifier_env *env,
76 int insn_idx, int prev_insn_idx);
77};
78
79/* single container for all structs
80 * one verifier_env per bpf_check() call
81 */
82struct bpf_verifier_env {
83 struct bpf_prog *prog; /* eBPF program being verified */
84 struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
85 int stack_size; /* number of states to be processed */
86 struct bpf_verifier_state cur_state; /* current verifier state */
87 struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
88 const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
89 void *analyzer_priv; /* pointer to external analyzer's private data */
90 struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
91 u32 used_map_cnt; /* number of used maps */
92 u32 id_gen; /* used to generate unique reg IDs */
93 bool allow_ptr_leaks;
94 bool seen_direct_write;
95 bool varlen_map_value_access;
96 struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
97};
98
99int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
100 void *priv);
101
102#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/bug.h b/include/linux/bug.h
index e51b0709e78d..292d6a10b0c2 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -13,6 +13,7 @@ enum bug_trap_type {
13struct pt_regs; 13struct pt_regs;
14 14
15#ifdef __CHECKER__ 15#ifdef __CHECKER__
16#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
16#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) 17#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
17#define BUILD_BUG_ON_ZERO(e) (0) 18#define BUILD_BUG_ON_ZERO(e) (0)
18#define BUILD_BUG_ON_NULL(e) ((void*)0) 19#define BUILD_BUG_ON_NULL(e) ((void*)0)
@@ -24,6 +25,8 @@ struct pt_regs;
24#else /* __CHECKER__ */ 25#else /* __CHECKER__ */
25 26
26/* Force a compilation error if a constant expression is not a power of 2 */ 27/* Force a compilation error if a constant expression is not a power of 2 */
28#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \
29 BUILD_BUG_ON(((n) & ((n) - 1)) != 0)
27#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ 30#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
28 BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) 31 BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
29 32
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 984f73b719a9..a4414a11eea7 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -497,6 +497,23 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
497 return cgrp->ancestor_ids[ancestor->level] == ancestor->id; 497 return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
498} 498}
499 499
500/**
501 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
502 * @task: the task to be tested
503 * @ancestor: possible ancestor of @task's cgroup
504 *
505 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
506 * It follows all the same rules as cgroup_is_descendant, and only applies
507 * to the default hierarchy.
508 */
509static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
510 struct cgroup *ancestor)
511{
512 struct css_set *cset = task_css_set(task);
513
514 return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
515}
516
500/* no synchronization, the result can only be used as a hint */ 517/* no synchronization, the result can only be used as a hint */
501static inline bool cgroup_is_populated(struct cgroup *cgrp) 518static inline bool cgroup_is_populated(struct cgroup *cgrp)
502{ 519{
@@ -557,6 +574,7 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
557#else /* !CONFIG_CGROUPS */ 574#else /* !CONFIG_CGROUPS */
558 575
559struct cgroup_subsys_state; 576struct cgroup_subsys_state;
577struct cgroup;
560 578
561static inline void css_put(struct cgroup_subsys_state *css) {} 579static inline void css_put(struct cgroup_subsys_state *css) {}
562static inline int cgroup_attach_task_all(struct task_struct *from, 580static inline int cgroup_attach_task_all(struct task_struct *from,
@@ -574,6 +592,11 @@ static inline void cgroup_free(struct task_struct *p) {}
574static inline int cgroup_init_early(void) { return 0; } 592static inline int cgroup_init_early(void) { return 0; }
575static inline int cgroup_init(void) { return 0; } 593static inline int cgroup_init(void) { return 0; }
576 594
595static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
596 struct cgroup *ancestor)
597{
598 return true;
599}
577#endif /* !CONFIG_CGROUPS */ 600#endif /* !CONFIG_CGROUPS */
578 601
579/* 602/*
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a16439b99fd9..1f09c521adfe 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -314,6 +314,70 @@ struct bpf_prog_aux;
314 bpf_size; \ 314 bpf_size; \
315}) 315})
316 316
317#define BPF_SIZEOF(type) \
318 ({ \
319 const int __size = bytes_to_bpf_size(sizeof(type)); \
320 BUILD_BUG_ON(__size < 0); \
321 __size; \
322 })
323
324#define BPF_FIELD_SIZEOF(type, field) \
325 ({ \
326 const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
327 BUILD_BUG_ON(__size < 0); \
328 __size; \
329 })
330
331#define __BPF_MAP_0(m, v, ...) v
332#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
333#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
334#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
335#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
336#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)
337
338#define __BPF_REG_0(...) __BPF_PAD(5)
339#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
340#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
341#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
342#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
343#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)
344
345#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
346#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)
347
348#define __BPF_CAST(t, a) \
349 (__force t) \
350 (__force \
351 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \
352 (unsigned long)0, (t)0))) a
353#define __BPF_V void
354#define __BPF_N
355
356#define __BPF_DECL_ARGS(t, a) t a
357#define __BPF_DECL_REGS(t, a) u64 a
358
359#define __BPF_PAD(n) \
360 __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
361 u64, __ur_3, u64, __ur_4, u64, __ur_5)
362
363#define BPF_CALL_x(x, name, ...) \
364 static __always_inline \
365 u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
366 u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
367 u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
368 { \
369 return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
370 } \
371 static __always_inline \
372 u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
373
374#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__)
375#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__)
376#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__)
377#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__)
378#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
379#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
380
317#ifdef CONFIG_COMPAT 381#ifdef CONFIG_COMPAT
318/* A struct sock_filter is architecture independent. */ 382/* A struct sock_filter is architecture independent. */
319struct compat_sock_fprog { 383struct compat_sock_fprog {
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index cd184bdca58f..6824556d37ed 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1169,6 +1169,13 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
1169 const char *mod_name); 1169 const char *mod_name);
1170void vmbus_driver_unregister(struct hv_driver *hv_driver); 1170void vmbus_driver_unregister(struct hv_driver *hv_driver);
1171 1171
1172static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
1173{
1174 const struct kobject *kobj = &device_obj->device.kobj;
1175
1176 return kobj->name;
1177}
1178
1172void vmbus_hvsock_device_unregister(struct vmbus_channel *channel); 1179void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
1173 1180
1174int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, 1181int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index dcb89e3515db..c6587c01d951 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -45,6 +45,7 @@ struct br_ip_list {
45#define BR_PROXYARP BIT(8) 45#define BR_PROXYARP BIT(8)
46#define BR_LEARNING_SYNC BIT(9) 46#define BR_LEARNING_SYNC BIT(9)
47#define BR_PROXYARP_WIFI BIT(10) 47#define BR_PROXYARP_WIFI BIT(10)
48#define BR_MCAST_FLOOD BIT(11)
48 49
49#define BR_DEFAULT_AGEING_TIME (300 * HZ) 50#define BR_DEFAULT_AGEING_TIME (300 * HZ)
50 51
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index f923d15b432c..0b17c585b5cd 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -25,5 +25,6 @@ struct ifla_vf_info {
25 __u32 max_tx_rate; 25 __u32 max_tx_rate;
26 __u32 rss_query_en; 26 __u32 rss_query_en;
27 __u32 trusted; 27 __u32 trusted;
28 __be16 vlan_proto;
28}; 29};
29#endif /* _LINUX_IF_LINK_H */ 30#endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index a5f6ce6b578c..3319d97d789d 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -81,6 +81,7 @@ static inline bool is_vlan_dev(const struct net_device *dev)
81#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) 81#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
82#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) 82#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
83#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) 83#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
84#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK)
84 85
85/** 86/**
86 * struct vlan_pcpu_stats - VLAN percpu rx/tx stats 87 * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
@@ -271,6 +272,23 @@ static inline int vlan_get_encap_level(struct net_device *dev)
271} 272}
272#endif 273#endif
273 274
275/**
276 * eth_type_vlan - check for valid vlan ether type.
277 * @ethertype: ether type to check
278 *
279 * Returns true if the ether type is a vlan ether type.
280 */
281static inline bool eth_type_vlan(__be16 ethertype)
282{
283 switch (ethertype) {
284 case htons(ETH_P_8021Q):
285 case htons(ETH_P_8021AD):
286 return true;
287 default:
288 return false;
289 }
290}
291
274static inline bool vlan_hw_offload_capable(netdev_features_t features, 292static inline bool vlan_hw_offload_capable(netdev_features_t features,
275 __be16 proto) 293 __be16 proto)
276{ 294{
@@ -424,8 +442,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
424{ 442{
425 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; 443 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
426 444
427 if (veth->h_vlan_proto != htons(ETH_P_8021Q) && 445 if (!eth_type_vlan(veth->h_vlan_proto))
428 veth->h_vlan_proto != htons(ETH_P_8021AD))
429 return -EINVAL; 446 return -EINVAL;
430 447
431 *vlan_tci = ntohs(veth->h_vlan_TCI); 448 *vlan_tci = ntohs(veth->h_vlan_TCI);
@@ -487,7 +504,7 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
487 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at 504 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
488 * ETH_HLEN otherwise 505 * ETH_HLEN otherwise
489 */ 506 */
490 if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { 507 if (eth_type_vlan(type)) {
491 if (vlan_depth) { 508 if (vlan_depth) {
492 if (WARN_ON(vlan_depth < VLAN_HLEN)) 509 if (WARN_ON(vlan_depth < VLAN_HLEN))
493 return 0; 510 return 0;
@@ -505,8 +522,7 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
505 vh = (struct vlan_hdr *)(skb->data + vlan_depth); 522 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
506 type = vh->h_vlan_encapsulated_proto; 523 type = vh->h_vlan_encapsulated_proto;
507 vlan_depth += VLAN_HLEN; 524 vlan_depth += VLAN_HLEN;
508 } while (type == htons(ETH_P_8021Q) || 525 } while (eth_type_vlan(type));
509 type == htons(ETH_P_8021AD));
510 } 526 }
511 527
512 if (depth) 528 if (depth)
@@ -571,8 +587,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
571static inline bool skb_vlan_tagged(const struct sk_buff *skb) 587static inline bool skb_vlan_tagged(const struct sk_buff *skb)
572{ 588{
573 if (!skb_vlan_tag_present(skb) && 589 if (!skb_vlan_tag_present(skb) &&
574 likely(skb->protocol != htons(ETH_P_8021Q) && 590 likely(!eth_type_vlan(skb->protocol)))
575 skb->protocol != htons(ETH_P_8021AD)))
576 return false; 591 return false;
577 592
578 return true; 593 return true;
@@ -592,15 +607,14 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
592 if (!skb_vlan_tag_present(skb)) { 607 if (!skb_vlan_tag_present(skb)) {
593 struct vlan_ethhdr *veh; 608 struct vlan_ethhdr *veh;
594 609
595 if (likely(protocol != htons(ETH_P_8021Q) && 610 if (likely(!eth_type_vlan(protocol)))
596 protocol != htons(ETH_P_8021AD)))
597 return false; 611 return false;
598 612
599 veh = (struct vlan_ethhdr *)skb->data; 613 veh = (struct vlan_ethhdr *)skb->data;
600 protocol = veh->h_vlan_encapsulated_proto; 614 protocol = veh->h_vlan_encapsulated_proto;
601 } 615 }
602 616
603 if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) 617 if (!eth_type_vlan(protocol))
604 return false; 618 return false;
605 619
606 return true; 620 return true;
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index feb04ea20f11..65da430e260f 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -37,7 +37,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
37 struct sk_buff *skb, const struct inet_diag_req_v2 *req, 37 struct sk_buff *skb, const struct inet_diag_req_v2 *req,
38 struct user_namespace *user_ns, 38 struct user_namespace *user_ns,
39 u32 pid, u32 seq, u16 nlmsg_flags, 39 u32 pid, u32 seq, u16 nlmsg_flags,
40 const struct nlmsghdr *unlh); 40 const struct nlmsghdr *unlh, bool net_admin);
41void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, 41void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
42 struct netlink_callback *cb, 42 struct netlink_callback *cb,
43 const struct inet_diag_req_v2 *r, 43 const struct inet_diag_req_v2 *r,
@@ -56,7 +56,7 @@ void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
56 56
57int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, 57int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
58 struct inet_diag_msg *r, int ext, 58 struct inet_diag_msg *r, int ext,
59 struct user_namespace *user_ns); 59 struct user_namespace *user_ns, bool net_admin);
60 60
61extern int inet_diag_register(const struct inet_diag_handler *handler); 61extern int inet_diag_register(const struct inet_diag_handler *handler);
62extern void inet_diag_unregister(const struct inet_diag_handler *handler); 62extern void inet_diag_unregister(const struct inet_diag_handler *handler);
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index c6dbcd84a2c7..7e9a789be5e0 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -18,6 +18,7 @@ struct ipv6_devconf {
18 __s32 dad_transmits; 18 __s32 dad_transmits;
19 __s32 rtr_solicits; 19 __s32 rtr_solicits;
20 __s32 rtr_solicit_interval; 20 __s32 rtr_solicit_interval;
21 __s32 rtr_solicit_max_interval;
21 __s32 rtr_solicit_delay; 22 __s32 rtr_solicit_delay;
22 __s32 force_mld_version; 23 __s32 force_mld_version;
23 __s32 mldv1_unsolicited_report_interval; 24 __s32 mldv1_unsolicited_report_interval;
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 3ffc69ebe967..0fb7ffb1775f 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -238,6 +238,11 @@ static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
238 return ktime_sub_ns(kt, usec * NSEC_PER_USEC); 238 return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
239} 239}
240 240
241static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
242{
243 return ktime_sub_ns(kt, msec * NSEC_PER_MSEC);
244}
245
241extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); 246extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
242 247
243/** 248/**
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 116b284bc4ce..1f3568694a57 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -309,7 +309,8 @@ int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
309 struct ifla_vf_stats *vf_stats); 309 struct ifla_vf_stats *vf_stats);
310u32 mlx4_comm_get_version(void); 310u32 mlx4_comm_get_version(void);
311int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); 311int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
312int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); 312int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan,
313 u8 qos, __be16 proto);
313int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate, 314int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
314 int max_tx_rate); 315 int max_tx_rate);
315int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); 316int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 42da3552f7cb..59b50d3eedb4 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -221,6 +221,7 @@ enum {
221 MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33, 221 MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33,
222 MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34, 222 MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34,
223 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35, 223 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35,
224 MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36,
224}; 225};
225 226
226enum { 227enum {
@@ -1371,6 +1372,8 @@ int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
1371int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable); 1372int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
1372int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val); 1373int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
1373int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv); 1374int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
1375int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
1376 bool *vlan_offload_disabled);
1374int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx); 1377int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
1375int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); 1378int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
1376int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 1379int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index deaa2217214d..b4ee8f62ce8d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -160,6 +160,7 @@ struct mlx4_qp_path {
160 160
161enum { /* fl */ 161enum { /* fl */
162 MLX4_FL_CV = 1 << 6, 162 MLX4_FL_CV = 1 << 6,
163 MLX4_FL_SV = 1 << 5,
163 MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2, 164 MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2,
164 MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1, 165 MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1,
165 MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0, 166 MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0,
@@ -267,6 +268,7 @@ enum {
267 MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32, 268 MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32,
268 MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32, 269 MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32,
269 MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32, 270 MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32,
271 MLX4_UPD_QP_PATH_MASK_SV = 22 + 32,
270}; 272};
271 273
272enum { /* param3 */ 274enum { /* param3 */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 2566f6d6444f..7c3c0d3aca37 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -170,12 +170,12 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
170int mlx5_init_cq_table(struct mlx5_core_dev *dev); 170int mlx5_init_cq_table(struct mlx5_core_dev *dev);
171void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev); 171void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
172int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 172int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
173 struct mlx5_create_cq_mbox_in *in, int inlen); 173 u32 *in, int inlen);
174int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); 174int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
175int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 175int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
176 struct mlx5_query_cq_mbox_out *out); 176 u32 *out, int outlen);
177int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 177int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
178 struct mlx5_modify_cq_mbox_in *in, int in_sz); 178 u32 *in, int inlen);
179int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, 179int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
180 struct mlx5_core_cq *cq, u16 cq_period, 180 struct mlx5_core_cq *cq, u16 cq_period,
181 u16 cq_max_count); 181 u16 cq_max_count);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0b6d15cddb2f..77c141797152 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -198,19 +198,6 @@ enum {
198}; 198};
199 199
200enum { 200enum {
201 MLX5_ACCESS_MODE_PA = 0,
202 MLX5_ACCESS_MODE_MTT = 1,
203 MLX5_ACCESS_MODE_KLM = 2
204};
205
206enum {
207 MLX5_MKEY_REMOTE_INVAL = 1 << 24,
208 MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
209 MLX5_MKEY_BSF_EN = 1 << 30,
210 MLX5_MKEY_LEN64 = 1 << 31,
211};
212
213enum {
214 MLX5_EN_RD = (u64)1, 201 MLX5_EN_RD = (u64)1,
215 MLX5_EN_WR = (u64)2 202 MLX5_EN_WR = (u64)2
216}; 203};
@@ -411,33 +398,6 @@ enum {
411 MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16 398 MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
412}; 399};
413 400
414struct mlx5_inbox_hdr {
415 __be16 opcode;
416 u8 rsvd[4];
417 __be16 opmod;
418};
419
420struct mlx5_outbox_hdr {
421 u8 status;
422 u8 rsvd[3];
423 __be32 syndrome;
424};
425
426struct mlx5_cmd_query_adapter_mbox_in {
427 struct mlx5_inbox_hdr hdr;
428 u8 rsvd[8];
429};
430
431struct mlx5_cmd_query_adapter_mbox_out {
432 struct mlx5_outbox_hdr hdr;
433 u8 rsvd0[24];
434 u8 intapin;
435 u8 rsvd1[13];
436 __be16 vsd_vendor_id;
437 u8 vsd[208];
438 u8 vsd_psid[16];
439};
440
441enum mlx5_odp_transport_cap_bits { 401enum mlx5_odp_transport_cap_bits {
442 MLX5_ODP_SUPPORT_SEND = 1 << 31, 402 MLX5_ODP_SUPPORT_SEND = 1 << 31,
443 MLX5_ODP_SUPPORT_RECV = 1 << 30, 403 MLX5_ODP_SUPPORT_RECV = 1 << 30,
@@ -455,30 +415,6 @@ struct mlx5_odp_caps {
455 char reserved2[0xe4]; 415 char reserved2[0xe4];
456}; 416};
457 417
458struct mlx5_cmd_init_hca_mbox_in {
459 struct mlx5_inbox_hdr hdr;
460 u8 rsvd0[2];
461 __be16 profile;
462 u8 rsvd1[4];
463};
464
465struct mlx5_cmd_init_hca_mbox_out {
466 struct mlx5_outbox_hdr hdr;
467 u8 rsvd[8];
468};
469
470struct mlx5_cmd_teardown_hca_mbox_in {
471 struct mlx5_inbox_hdr hdr;
472 u8 rsvd0[2];
473 __be16 profile;
474 u8 rsvd1[4];
475};
476
477struct mlx5_cmd_teardown_hca_mbox_out {
478 struct mlx5_outbox_hdr hdr;
479 u8 rsvd[8];
480};
481
482struct mlx5_cmd_layout { 418struct mlx5_cmd_layout {
483 u8 type; 419 u8 type;
484 u8 rsvd0[3]; 420 u8 rsvd0[3];
@@ -494,7 +430,6 @@ struct mlx5_cmd_layout {
494 u8 status_own; 430 u8 status_own;
495}; 431};
496 432
497
498struct health_buffer { 433struct health_buffer {
499 __be32 assert_var[5]; 434 __be32 assert_var[5];
500 __be32 rsvd0[3]; 435 __be32 rsvd0[3];
@@ -856,245 +791,15 @@ struct mlx5_cqe128 {
856 struct mlx5_cqe64 cqe64; 791 struct mlx5_cqe64 cqe64;
857}; 792};
858 793
859struct mlx5_srq_ctx { 794enum {
860 u8 state_log_sz; 795 MLX5_MKEY_STATUS_FREE = 1 << 6,
861 u8 rsvd0[3];
862 __be32 flags_xrcd;
863 __be32 pgoff_cqn;
864 u8 rsvd1[4];
865 u8 log_pg_sz;
866 u8 rsvd2[7];
867 __be32 pd;
868 __be16 lwm;
869 __be16 wqe_cnt;
870 u8 rsvd3[8];
871 __be64 db_record;
872};
873
874struct mlx5_create_srq_mbox_in {
875 struct mlx5_inbox_hdr hdr;
876 __be32 input_srqn;
877 u8 rsvd0[4];
878 struct mlx5_srq_ctx ctx;
879 u8 rsvd1[208];
880 __be64 pas[0];
881};
882
883struct mlx5_create_srq_mbox_out {
884 struct mlx5_outbox_hdr hdr;
885 __be32 srqn;
886 u8 rsvd[4];
887};
888
889struct mlx5_destroy_srq_mbox_in {
890 struct mlx5_inbox_hdr hdr;
891 __be32 srqn;
892 u8 rsvd[4];
893};
894
895struct mlx5_destroy_srq_mbox_out {
896 struct mlx5_outbox_hdr hdr;
897 u8 rsvd[8];
898};
899
900struct mlx5_query_srq_mbox_in {
901 struct mlx5_inbox_hdr hdr;
902 __be32 srqn;
903 u8 rsvd0[4];
904};
905
906struct mlx5_query_srq_mbox_out {
907 struct mlx5_outbox_hdr hdr;
908 u8 rsvd0[8];
909 struct mlx5_srq_ctx ctx;
910 u8 rsvd1[32];
911 __be64 pas[0];
912};
913
914struct mlx5_arm_srq_mbox_in {
915 struct mlx5_inbox_hdr hdr;
916 __be32 srqn;
917 __be16 rsvd;
918 __be16 lwm;
919};
920
921struct mlx5_arm_srq_mbox_out {
922 struct mlx5_outbox_hdr hdr;
923 u8 rsvd[8];
924};
925
926struct mlx5_cq_context {
927 u8 status;
928 u8 cqe_sz_flags;
929 u8 st;
930 u8 rsvd3;
931 u8 rsvd4[6];
932 __be16 page_offset;
933 __be32 log_sz_usr_page;
934 __be16 cq_period;
935 __be16 cq_max_count;
936 __be16 rsvd20;
937 __be16 c_eqn;
938 u8 log_pg_sz;
939 u8 rsvd25[7];
940 __be32 last_notified_index;
941 __be32 solicit_producer_index;
942 __be32 consumer_counter;
943 __be32 producer_counter;
944 u8 rsvd48[8];
945 __be64 db_record_addr;
946};
947
948struct mlx5_create_cq_mbox_in {
949 struct mlx5_inbox_hdr hdr;
950 __be32 input_cqn;
951 u8 rsvdx[4];
952 struct mlx5_cq_context ctx;
953 u8 rsvd6[192];
954 __be64 pas[0];
955};
956
957struct mlx5_create_cq_mbox_out {
958 struct mlx5_outbox_hdr hdr;
959 __be32 cqn;
960 u8 rsvd0[4];
961};
962
963struct mlx5_destroy_cq_mbox_in {
964 struct mlx5_inbox_hdr hdr;
965 __be32 cqn;
966 u8 rsvd0[4];
967};
968
969struct mlx5_destroy_cq_mbox_out {
970 struct mlx5_outbox_hdr hdr;
971 u8 rsvd0[8];
972};
973
974struct mlx5_query_cq_mbox_in {
975 struct mlx5_inbox_hdr hdr;
976 __be32 cqn;
977 u8 rsvd0[4];
978};
979
980struct mlx5_query_cq_mbox_out {
981 struct mlx5_outbox_hdr hdr;
982 u8 rsvd0[8];
983 struct mlx5_cq_context ctx;
984 u8 rsvd6[16];
985 __be64 pas[0];
986};
987
988struct mlx5_modify_cq_mbox_in {
989 struct mlx5_inbox_hdr hdr;
990 __be32 cqn;
991 __be32 field_select;
992 struct mlx5_cq_context ctx;
993 u8 rsvd[192];
994 __be64 pas[0];
995};
996
997struct mlx5_modify_cq_mbox_out {
998 struct mlx5_outbox_hdr hdr;
999 u8 rsvd[8];
1000};
1001
1002struct mlx5_enable_hca_mbox_in {
1003 struct mlx5_inbox_hdr hdr;
1004 u8 rsvd[8];
1005};
1006
1007struct mlx5_enable_hca_mbox_out {
1008 struct mlx5_outbox_hdr hdr;
1009 u8 rsvd[8];
1010};
1011
1012struct mlx5_disable_hca_mbox_in {
1013 struct mlx5_inbox_hdr hdr;
1014 u8 rsvd[8];
1015};
1016
1017struct mlx5_disable_hca_mbox_out {
1018 struct mlx5_outbox_hdr hdr;
1019 u8 rsvd[8];
1020};
1021
1022struct mlx5_eq_context {
1023 u8 status;
1024 u8 ec_oi;
1025 u8 st;
1026 u8 rsvd2[7];
1027 __be16 page_pffset;
1028 __be32 log_sz_usr_page;
1029 u8 rsvd3[7];
1030 u8 intr;
1031 u8 log_page_size;
1032 u8 rsvd4[15];
1033 __be32 consumer_counter;
1034 __be32 produser_counter;
1035 u8 rsvd5[16];
1036};
1037
1038struct mlx5_create_eq_mbox_in {
1039 struct mlx5_inbox_hdr hdr;
1040 u8 rsvd0[3];
1041 u8 input_eqn;
1042 u8 rsvd1[4];
1043 struct mlx5_eq_context ctx;
1044 u8 rsvd2[8];
1045 __be64 events_mask;
1046 u8 rsvd3[176];
1047 __be64 pas[0];
1048};
1049
1050struct mlx5_create_eq_mbox_out {
1051 struct mlx5_outbox_hdr hdr;
1052 u8 rsvd0[3];
1053 u8 eq_number;
1054 u8 rsvd1[4];
1055};
1056
1057struct mlx5_destroy_eq_mbox_in {
1058 struct mlx5_inbox_hdr hdr;
1059 u8 rsvd0[3];
1060 u8 eqn;
1061 u8 rsvd1[4];
1062};
1063
1064struct mlx5_destroy_eq_mbox_out {
1065 struct mlx5_outbox_hdr hdr;
1066 u8 rsvd[8];
1067};
1068
1069struct mlx5_map_eq_mbox_in {
1070 struct mlx5_inbox_hdr hdr;
1071 __be64 mask;
1072 u8 mu;
1073 u8 rsvd0[2];
1074 u8 eqn;
1075 u8 rsvd1[24];
1076};
1077
1078struct mlx5_map_eq_mbox_out {
1079 struct mlx5_outbox_hdr hdr;
1080 u8 rsvd[8];
1081};
1082
1083struct mlx5_query_eq_mbox_in {
1084 struct mlx5_inbox_hdr hdr;
1085 u8 rsvd0[3];
1086 u8 eqn;
1087 u8 rsvd1[4];
1088};
1089
1090struct mlx5_query_eq_mbox_out {
1091 struct mlx5_outbox_hdr hdr;
1092 u8 rsvd[8];
1093 struct mlx5_eq_context ctx;
1094}; 796};
1095 797
1096enum { 798enum {
1097 MLX5_MKEY_STATUS_FREE = 1 << 6, 799 MLX5_MKEY_REMOTE_INVAL = 1 << 24,
800 MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
801 MLX5_MKEY_BSF_EN = 1 << 30,
802 MLX5_MKEY_LEN64 = 1 << 31,
1098}; 803};
1099 804
1100struct mlx5_mkey_seg { 805struct mlx5_mkey_seg {
@@ -1119,134 +824,12 @@ struct mlx5_mkey_seg {
1119 u8 rsvd4[4]; 824 u8 rsvd4[4];
1120}; 825};
1121 826
1122struct mlx5_query_special_ctxs_mbox_in {
1123 struct mlx5_inbox_hdr hdr;
1124 u8 rsvd[8];
1125};
1126
1127struct mlx5_query_special_ctxs_mbox_out {
1128 struct mlx5_outbox_hdr hdr;
1129 __be32 dump_fill_mkey;
1130 __be32 reserved_lkey;
1131};
1132
1133struct mlx5_create_mkey_mbox_in {
1134 struct mlx5_inbox_hdr hdr;
1135 __be32 input_mkey_index;
1136 __be32 flags;
1137 struct mlx5_mkey_seg seg;
1138 u8 rsvd1[16];
1139 __be32 xlat_oct_act_size;
1140 __be32 rsvd2;
1141 u8 rsvd3[168];
1142 __be64 pas[0];
1143};
1144
1145struct mlx5_create_mkey_mbox_out {
1146 struct mlx5_outbox_hdr hdr;
1147 __be32 mkey;
1148 u8 rsvd[4];
1149};
1150
1151struct mlx5_destroy_mkey_mbox_in {
1152 struct mlx5_inbox_hdr hdr;
1153 __be32 mkey;
1154 u8 rsvd[4];
1155};
1156
1157struct mlx5_destroy_mkey_mbox_out {
1158 struct mlx5_outbox_hdr hdr;
1159 u8 rsvd[8];
1160};
1161
1162struct mlx5_query_mkey_mbox_in {
1163 struct mlx5_inbox_hdr hdr;
1164 __be32 mkey;
1165};
1166
1167struct mlx5_query_mkey_mbox_out {
1168 struct mlx5_outbox_hdr hdr;
1169 __be64 pas[0];
1170};
1171
1172struct mlx5_modify_mkey_mbox_in {
1173 struct mlx5_inbox_hdr hdr;
1174 __be32 mkey;
1175 __be64 pas[0];
1176};
1177
1178struct mlx5_modify_mkey_mbox_out {
1179 struct mlx5_outbox_hdr hdr;
1180 u8 rsvd[8];
1181};
1182
1183struct mlx5_dump_mkey_mbox_in {
1184 struct mlx5_inbox_hdr hdr;
1185};
1186
1187struct mlx5_dump_mkey_mbox_out {
1188 struct mlx5_outbox_hdr hdr;
1189 __be32 mkey;
1190};
1191
1192struct mlx5_mad_ifc_mbox_in {
1193 struct mlx5_inbox_hdr hdr;
1194 __be16 remote_lid;
1195 u8 rsvd0;
1196 u8 port;
1197 u8 rsvd1[4];
1198 u8 data[256];
1199};
1200
1201struct mlx5_mad_ifc_mbox_out {
1202 struct mlx5_outbox_hdr hdr;
1203 u8 rsvd[8];
1204 u8 data[256];
1205};
1206
1207struct mlx5_access_reg_mbox_in {
1208 struct mlx5_inbox_hdr hdr;
1209 u8 rsvd0[2];
1210 __be16 register_id;
1211 __be32 arg;
1212 __be32 data[0];
1213};
1214
1215struct mlx5_access_reg_mbox_out {
1216 struct mlx5_outbox_hdr hdr;
1217 u8 rsvd[8];
1218 __be32 data[0];
1219};
1220
1221#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) 827#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
1222 828
1223enum { 829enum {
1224 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0 830 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
1225}; 831};
1226 832
1227struct mlx5_allocate_psv_in {
1228 struct mlx5_inbox_hdr hdr;
1229 __be32 npsv_pd;
1230 __be32 rsvd_psv0;
1231};
1232
1233struct mlx5_allocate_psv_out {
1234 struct mlx5_outbox_hdr hdr;
1235 u8 rsvd[8];
1236 __be32 psv_idx[4];
1237};
1238
1239struct mlx5_destroy_psv_in {
1240 struct mlx5_inbox_hdr hdr;
1241 __be32 psv_number;
1242 u8 rsvd[4];
1243};
1244
1245struct mlx5_destroy_psv_out {
1246 struct mlx5_outbox_hdr hdr;
1247 u8 rsvd[8];
1248};
1249
1250enum { 833enum {
1251 VPORT_STATE_DOWN = 0x0, 834 VPORT_STATE_DOWN = 0x0,
1252 VPORT_STATE_UP = 0x1, 835 VPORT_STATE_UP = 0x1,
@@ -1381,6 +964,18 @@ enum mlx5_cap_type {
1381#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \ 964#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
1382 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap) 965 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
1383 966
967#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
968 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
969
970#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
971 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
972
973#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
974 MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
975
976#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
977 MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
978
1384#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ 979#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
1385 MLX5_GET(flow_table_eswitch_cap, \ 980 MLX5_GET(flow_table_eswitch_cap, \
1386 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) 981 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ccea6fb16482..85c4786427e4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -49,10 +49,6 @@
49#include <linux/mlx5/srq.h> 49#include <linux/mlx5/srq.h>
50 50
51enum { 51enum {
52 MLX5_RQ_BITMASK_VSD = 1 << 1,
53};
54
55enum {
56 MLX5_BOARD_ID_LEN = 64, 52 MLX5_BOARD_ID_LEN = 64,
57 MLX5_MAX_NAME_LEN = 16, 53 MLX5_MAX_NAME_LEN = 16,
58}; 54};
@@ -481,6 +477,7 @@ struct mlx5_fc_stats {
481}; 477};
482 478
483struct mlx5_eswitch; 479struct mlx5_eswitch;
480struct mlx5_lag;
484 481
485struct mlx5_rl_entry { 482struct mlx5_rl_entry {
486 u32 rate; 483 u32 rate;
@@ -554,6 +551,7 @@ struct mlx5_priv {
554 struct mlx5_flow_steering *steering; 551 struct mlx5_flow_steering *steering;
555 struct mlx5_eswitch *eswitch; 552 struct mlx5_eswitch *eswitch;
556 struct mlx5_core_sriov sriov; 553 struct mlx5_core_sriov sriov;
554 struct mlx5_lag *lag;
557 unsigned long pci_dev_data; 555 unsigned long pci_dev_data;
558 struct mlx5_fc_stats fc_stats; 556 struct mlx5_fc_stats fc_stats;
559 struct mlx5_rl_table rl_table; 557 struct mlx5_rl_table rl_table;
@@ -771,14 +769,15 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev);
771void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); 769void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
772void mlx5_cmd_use_events(struct mlx5_core_dev *dev); 770void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
773void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); 771void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
774int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); 772
775int mlx5_cmd_status_to_err_v2(void *ptr);
776int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
777int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 773int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
778 int out_size); 774 int out_size);
779int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, 775int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
780 void *out, int out_size, mlx5_cmd_cbk_t callback, 776 void *out, int out_size, mlx5_cmd_cbk_t callback,
781 void *context); 777 void *context);
778void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
779
780int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
782int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); 781int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
783int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); 782int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
784int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); 783int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
@@ -807,15 +806,18 @@ int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
807 u16 lwm, int is_srq); 806 u16 lwm, int is_srq);
808void mlx5_init_mkey_table(struct mlx5_core_dev *dev); 807void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
809void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev); 808void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
809int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
810 struct mlx5_core_mkey *mkey,
811 u32 *in, int inlen,
812 u32 *out, int outlen,
813 mlx5_cmd_cbk_t callback, void *context);
810int mlx5_core_create_mkey(struct mlx5_core_dev *dev, 814int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
811 struct mlx5_core_mkey *mkey, 815 struct mlx5_core_mkey *mkey,
812 struct mlx5_create_mkey_mbox_in *in, int inlen, 816 u32 *in, int inlen);
813 mlx5_cmd_cbk_t callback, void *context,
814 struct mlx5_create_mkey_mbox_out *out);
815int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, 817int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
816 struct mlx5_core_mkey *mkey); 818 struct mlx5_core_mkey *mkey);
817int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, 819int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
818 struct mlx5_query_mkey_mbox_out *out, int outlen); 820 u32 *out, int outlen);
819int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey, 821int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
820 u32 *mkey); 822 u32 *mkey);
821int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); 823int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
@@ -826,8 +828,6 @@ void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
826void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); 828void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
827int mlx5_pagealloc_start(struct mlx5_core_dev *dev); 829int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
828void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); 830void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
829int mlx5_sriov_init(struct mlx5_core_dev *dev);
830int mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
831void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, 831void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
832 s32 npages); 832 s32 npages);
833int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); 833int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
@@ -865,7 +865,7 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
865int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 865int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
866void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 866void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
867int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, 867int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
868 struct mlx5_query_eq_mbox_out *out, int outlen); 868 u32 *out, int outlen);
869int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); 869int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
870void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); 870void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
871int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); 871int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
@@ -930,6 +930,8 @@ enum {
930struct mlx5_interface { 930struct mlx5_interface {
931 void * (*add)(struct mlx5_core_dev *dev); 931 void * (*add)(struct mlx5_core_dev *dev);
932 void (*remove)(struct mlx5_core_dev *dev, void *context); 932 void (*remove)(struct mlx5_core_dev *dev, void *context);
933 int (*attach)(struct mlx5_core_dev *dev, void *context);
934 void (*detach)(struct mlx5_core_dev *dev, void *context);
933 void (*event)(struct mlx5_core_dev *dev, void *context, 935 void (*event)(struct mlx5_core_dev *dev, void *context,
934 enum mlx5_dev_event event, unsigned long param); 936 enum mlx5_dev_event event, unsigned long param);
935 void * (*get_dev)(void *context); 937 void * (*get_dev)(void *context);
@@ -942,6 +944,11 @@ int mlx5_register_interface(struct mlx5_interface *intf);
942void mlx5_unregister_interface(struct mlx5_interface *intf); 944void mlx5_unregister_interface(struct mlx5_interface *intf);
943int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); 945int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
944 946
947int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
948int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
949bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
950struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
951
945struct mlx5_profile { 952struct mlx5_profile {
946 u64 mask; 953 u64 mask;
947 u8 log_max_qp; 954 u8 log_max_qp;
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index e036d6030867..93ebc5e21334 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -54,6 +54,7 @@ static inline void build_leftovers_ft_param(int *priority,
54 54
55enum mlx5_flow_namespace_type { 55enum mlx5_flow_namespace_type {
56 MLX5_FLOW_NAMESPACE_BYPASS, 56 MLX5_FLOW_NAMESPACE_BYPASS,
57 MLX5_FLOW_NAMESPACE_LAG,
57 MLX5_FLOW_NAMESPACE_OFFLOADS, 58 MLX5_FLOW_NAMESPACE_OFFLOADS,
58 MLX5_FLOW_NAMESPACE_ETHTOOL, 59 MLX5_FLOW_NAMESPACE_ETHTOOL,
59 MLX5_FLOW_NAMESPACE_KERNEL, 60 MLX5_FLOW_NAMESPACE_KERNEL,
@@ -62,6 +63,8 @@ enum mlx5_flow_namespace_type {
62 MLX5_FLOW_NAMESPACE_FDB, 63 MLX5_FLOW_NAMESPACE_FDB,
63 MLX5_FLOW_NAMESPACE_ESW_EGRESS, 64 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
64 MLX5_FLOW_NAMESPACE_ESW_INGRESS, 65 MLX5_FLOW_NAMESPACE_ESW_INGRESS,
66 MLX5_FLOW_NAMESPACE_SNIFFER_RX,
67 MLX5_FLOW_NAMESPACE_SNIFFER_TX,
65}; 68};
66 69
67struct mlx5_flow_table; 70struct mlx5_flow_table;
@@ -106,6 +109,9 @@ mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
106 int prio, 109 int prio,
107 int num_flow_table_entries, 110 int num_flow_table_entries,
108 u32 level, u16 vport); 111 u32 level, u16 vport);
112struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
113 struct mlx5_flow_namespace *ns,
114 int prio, u32 level);
109int mlx5_destroy_flow_table(struct mlx5_flow_table *ft); 115int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
110 116
111/* inbox should be set with the following values: 117/* inbox should be set with the following values:
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index d1f9a581aca8..6045d4d58065 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -152,7 +152,7 @@ enum {
152 MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, 152 MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
153 MLX5_CMD_OP_ACCESS_REG = 0x805, 153 MLX5_CMD_OP_ACCESS_REG = 0x805,
154 MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, 154 MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
155 MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807, 155 MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
156 MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, 156 MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
157 MLX5_CMD_OP_MAD_IFC = 0x50d, 157 MLX5_CMD_OP_MAD_IFC = 0x50d,
158 MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, 158 MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
@@ -174,6 +174,12 @@ enum {
174 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b, 174 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
175 MLX5_CMD_OP_SET_WOL_ROL = 0x830, 175 MLX5_CMD_OP_SET_WOL_ROL = 0x830,
176 MLX5_CMD_OP_QUERY_WOL_ROL = 0x831, 176 MLX5_CMD_OP_QUERY_WOL_ROL = 0x831,
177 MLX5_CMD_OP_CREATE_LAG = 0x840,
178 MLX5_CMD_OP_MODIFY_LAG = 0x841,
179 MLX5_CMD_OP_QUERY_LAG = 0x842,
180 MLX5_CMD_OP_DESTROY_LAG = 0x843,
181 MLX5_CMD_OP_CREATE_VPORT_LAG = 0x844,
182 MLX5_CMD_OP_DESTROY_VPORT_LAG = 0x845,
177 MLX5_CMD_OP_CREATE_TIR = 0x900, 183 MLX5_CMD_OP_CREATE_TIR = 0x900,
178 MLX5_CMD_OP_MODIFY_TIR = 0x901, 184 MLX5_CMD_OP_MODIFY_TIR = 0x901,
179 MLX5_CMD_OP_DESTROY_TIR = 0x902, 185 MLX5_CMD_OP_DESTROY_TIR = 0x902,
@@ -212,6 +218,8 @@ enum {
212 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, 218 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
213 MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, 219 MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
214 MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, 220 MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
221 MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d,
222 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
215 MLX5_CMD_OP_MAX 223 MLX5_CMD_OP_MAX
216}; 224};
217 225
@@ -281,7 +289,9 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
281 u8 modify_root[0x1]; 289 u8 modify_root[0x1];
282 u8 identified_miss_table_mode[0x1]; 290 u8 identified_miss_table_mode[0x1];
283 u8 flow_table_modify[0x1]; 291 u8 flow_table_modify[0x1];
284 u8 reserved_at_7[0x19]; 292 u8 encap[0x1];
293 u8 decap[0x1];
294 u8 reserved_at_9[0x17];
285 295
286 u8 reserved_at_20[0x2]; 296 u8 reserved_at_20[0x2];
287 u8 log_max_ft_size[0x6]; 297 u8 log_max_ft_size[0x6];
@@ -473,7 +483,9 @@ struct mlx5_ifc_ads_bits {
473 483
474struct mlx5_ifc_flow_table_nic_cap_bits { 484struct mlx5_ifc_flow_table_nic_cap_bits {
475 u8 nic_rx_multi_path_tirs[0x1]; 485 u8 nic_rx_multi_path_tirs[0x1];
476 u8 reserved_at_1[0x1ff]; 486 u8 nic_rx_multi_path_tirs_fts[0x1];
487 u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
488 u8 reserved_at_3[0x1fd];
477 489
478 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; 490 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
479 491
@@ -512,7 +524,15 @@ struct mlx5_ifc_e_switch_cap_bits {
512 u8 nic_vport_node_guid_modify[0x1]; 524 u8 nic_vport_node_guid_modify[0x1];
513 u8 nic_vport_port_guid_modify[0x1]; 525 u8 nic_vport_port_guid_modify[0x1];
514 526
515 u8 reserved_at_20[0x7e0]; 527 u8 vxlan_encap_decap[0x1];
528 u8 nvgre_encap_decap[0x1];
529 u8 reserved_at_22[0x9];
530 u8 log_max_encap_headers[0x5];
531 u8 reserved_2b[0x6];
532 u8 max_encap_header_size[0xa];
533
534 u8 reserved_40[0x7c0];
535
516}; 536};
517 537
518struct mlx5_ifc_qos_cap_bits { 538struct mlx5_ifc_qos_cap_bits {
@@ -767,7 +787,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
767 u8 out_of_seq_cnt[0x1]; 787 u8 out_of_seq_cnt[0x1];
768 u8 vport_counters[0x1]; 788 u8 vport_counters[0x1];
769 u8 retransmission_q_counters[0x1]; 789 u8 retransmission_q_counters[0x1];
770 u8 reserved_at_183[0x3]; 790 u8 reserved_at_183[0x1];
791 u8 modify_rq_counter_set_id[0x1];
792 u8 reserved_at_185[0x1];
771 u8 max_qp_cnt[0xa]; 793 u8 max_qp_cnt[0xa];
772 u8 pkey_table_size[0x10]; 794 u8 pkey_table_size[0x10];
773 795
@@ -870,7 +892,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
870 u8 pad_tx_eth_packet[0x1]; 892 u8 pad_tx_eth_packet[0x1];
871 u8 reserved_at_263[0x8]; 893 u8 reserved_at_263[0x8];
872 u8 log_bf_reg_size[0x5]; 894 u8 log_bf_reg_size[0x5];
873 u8 reserved_at_270[0x10]; 895
896 u8 reserved_at_270[0xb];
897 u8 lag_master[0x1];
898 u8 num_lag_ports[0x4];
874 899
875 u8 reserved_at_280[0x10]; 900 u8 reserved_at_280[0x10];
876 u8 max_wqe_sz_sq[0x10]; 901 u8 max_wqe_sz_sq[0x10];
@@ -1904,7 +1929,7 @@ enum {
1904 1929
1905struct mlx5_ifc_qpc_bits { 1930struct mlx5_ifc_qpc_bits {
1906 u8 state[0x4]; 1931 u8 state[0x4];
1907 u8 reserved_at_4[0x4]; 1932 u8 lag_tx_port_affinity[0x4];
1908 u8 st[0x8]; 1933 u8 st[0x8];
1909 u8 reserved_at_10[0x3]; 1934 u8 reserved_at_10[0x3];
1910 u8 pm_state[0x2]; 1935 u8 pm_state[0x2];
@@ -1966,7 +1991,10 @@ struct mlx5_ifc_qpc_bits {
1966 u8 reserved_at_3e0[0x8]; 1991 u8 reserved_at_3e0[0x8];
1967 u8 cqn_snd[0x18]; 1992 u8 cqn_snd[0x18];
1968 1993
1969 u8 reserved_at_400[0x40]; 1994 u8 reserved_at_400[0x8];
1995 u8 deth_sqpn[0x18];
1996
1997 u8 reserved_at_420[0x20];
1970 1998
1971 u8 reserved_at_440[0x8]; 1999 u8 reserved_at_440[0x8];
1972 u8 last_acked_psn[0x18]; 2000 u8 last_acked_psn[0x18];
@@ -2064,6 +2092,8 @@ enum {
2064 MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, 2092 MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
2065 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, 2093 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
2066 MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, 2094 MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
2095 MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
2096 MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
2067}; 2097};
2068 2098
2069struct mlx5_ifc_flow_context_bits { 2099struct mlx5_ifc_flow_context_bits {
@@ -2083,7 +2113,9 @@ struct mlx5_ifc_flow_context_bits {
2083 u8 reserved_at_a0[0x8]; 2113 u8 reserved_at_a0[0x8];
2084 u8 flow_counter_list_size[0x18]; 2114 u8 flow_counter_list_size[0x18];
2085 2115
2086 u8 reserved_at_c0[0x140]; 2116 u8 encap_id[0x20];
2117
2118 u8 reserved_at_e0[0x120];
2087 2119
2088 struct mlx5_ifc_fte_match_param_bits match_value; 2120 struct mlx5_ifc_fte_match_param_bits match_value;
2089 2121
@@ -2146,7 +2178,11 @@ struct mlx5_ifc_traffic_counter_bits {
2146}; 2178};
2147 2179
2148struct mlx5_ifc_tisc_bits { 2180struct mlx5_ifc_tisc_bits {
2149 u8 reserved_at_0[0xc]; 2181 u8 strict_lag_tx_port_affinity[0x1];
2182 u8 reserved_at_1[0x3];
2183 u8 lag_tx_port_affinity[0x04];
2184
2185 u8 reserved_at_8[0x4];
2150 u8 prio[0x4]; 2186 u8 prio[0x4];
2151 u8 reserved_at_10[0x10]; 2187 u8 reserved_at_10[0x10];
2152 2188
@@ -2808,7 +2844,7 @@ struct mlx5_ifc_xrqc_bits {
2808 2844
2809 struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context; 2845 struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
2810 2846
2811 u8 reserved_at_180[0x180]; 2847 u8 reserved_at_180[0x200];
2812 2848
2813 struct mlx5_ifc_wq_bits wq; 2849 struct mlx5_ifc_wq_bits wq;
2814}; 2850};
@@ -3489,7 +3525,7 @@ struct mlx5_ifc_query_special_contexts_out_bits {
3489 3525
3490 u8 syndrome[0x20]; 3526 u8 syndrome[0x20];
3491 3527
3492 u8 reserved_at_40[0x20]; 3528 u8 dump_fill_mkey[0x20];
3493 3529
3494 u8 resd_lkey[0x20]; 3530 u8 resd_lkey[0x20];
3495}; 3531};
@@ -4213,6 +4249,85 @@ struct mlx5_ifc_query_eq_in_bits {
4213 u8 reserved_at_60[0x20]; 4249 u8 reserved_at_60[0x20];
4214}; 4250};
4215 4251
4252struct mlx5_ifc_encap_header_in_bits {
4253 u8 reserved_at_0[0x5];
4254 u8 header_type[0x3];
4255 u8 reserved_at_8[0xe];
4256 u8 encap_header_size[0xa];
4257
4258 u8 reserved_at_20[0x10];
4259 u8 encap_header[2][0x8];
4260
4261 u8 more_encap_header[0][0x8];
4262};
4263
4264struct mlx5_ifc_query_encap_header_out_bits {
4265 u8 status[0x8];
4266 u8 reserved_at_8[0x18];
4267
4268 u8 syndrome[0x20];
4269
4270 u8 reserved_at_40[0xa0];
4271
4272 struct mlx5_ifc_encap_header_in_bits encap_header[0];
4273};
4274
4275struct mlx5_ifc_query_encap_header_in_bits {
4276 u8 opcode[0x10];
4277 u8 reserved_at_10[0x10];
4278
4279 u8 reserved_at_20[0x10];
4280 u8 op_mod[0x10];
4281
4282 u8 encap_id[0x20];
4283
4284 u8 reserved_at_60[0xa0];
4285};
4286
4287struct mlx5_ifc_alloc_encap_header_out_bits {
4288 u8 status[0x8];
4289 u8 reserved_at_8[0x18];
4290
4291 u8 syndrome[0x20];
4292
4293 u8 encap_id[0x20];
4294
4295 u8 reserved_at_60[0x20];
4296};
4297
4298struct mlx5_ifc_alloc_encap_header_in_bits {
4299 u8 opcode[0x10];
4300 u8 reserved_at_10[0x10];
4301
4302 u8 reserved_at_20[0x10];
4303 u8 op_mod[0x10];
4304
4305 u8 reserved_at_40[0xa0];
4306
4307 struct mlx5_ifc_encap_header_in_bits encap_header;
4308};
4309
4310struct mlx5_ifc_dealloc_encap_header_out_bits {
4311 u8 status[0x8];
4312 u8 reserved_at_8[0x18];
4313
4314 u8 syndrome[0x20];
4315
4316 u8 reserved_at_40[0x40];
4317};
4318
4319struct mlx5_ifc_dealloc_encap_header_in_bits {
4320 u8 opcode[0x10];
4321 u8 reserved_at_10[0x10];
4322
4323 u8 reserved_20[0x10];
4324 u8 op_mod[0x10];
4325
4326 u8 encap_id[0x20];
4327
4328 u8 reserved_60[0x20];
4329};
4330
4216struct mlx5_ifc_query_dct_out_bits { 4331struct mlx5_ifc_query_dct_out_bits {
4217 u8 status[0x8]; 4332 u8 status[0x8];
4218 u8 reserved_at_8[0x18]; 4333 u8 reserved_at_8[0x18];
@@ -4517,7 +4632,9 @@ struct mlx5_ifc_modify_tis_out_bits {
4517struct mlx5_ifc_modify_tis_bitmask_bits { 4632struct mlx5_ifc_modify_tis_bitmask_bits {
4518 u8 reserved_at_0[0x20]; 4633 u8 reserved_at_0[0x20];
4519 4634
4520 u8 reserved_at_20[0x1f]; 4635 u8 reserved_at_20[0x1d];
4636 u8 lag_tx_port_affinity[0x1];
4637 u8 strict_lag_tx_port_affinity[0x1];
4521 u8 prio[0x1]; 4638 u8 prio[0x1];
4522}; 4639};
4523 4640
@@ -4652,6 +4769,11 @@ struct mlx5_ifc_modify_rq_out_bits {
4652 u8 reserved_at_40[0x40]; 4769 u8 reserved_at_40[0x40];
4653}; 4770};
4654 4771
4772enum {
4773 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
4774 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3,
4775};
4776
4655struct mlx5_ifc_modify_rq_in_bits { 4777struct mlx5_ifc_modify_rq_in_bits {
4656 u8 opcode[0x10]; 4778 u8 opcode[0x10];
4657 u8 reserved_at_10[0x10]; 4779 u8 reserved_at_10[0x10];
@@ -4721,7 +4843,7 @@ struct mlx5_ifc_modify_nic_vport_field_select_bits {
4721 u8 reserved_at_0[0x16]; 4843 u8 reserved_at_0[0x16];
4722 u8 node_guid[0x1]; 4844 u8 node_guid[0x1];
4723 u8 port_guid[0x1]; 4845 u8 port_guid[0x1];
4724 u8 reserved_at_18[0x1]; 4846 u8 min_inline[0x1];
4725 u8 mtu[0x1]; 4847 u8 mtu[0x1];
4726 u8 change_event[0x1]; 4848 u8 change_event[0x1];
4727 u8 promisc[0x1]; 4849 u8 promisc[0x1];
@@ -6099,7 +6221,9 @@ struct mlx5_ifc_create_flow_table_in_bits {
6099 6221
6100 u8 reserved_at_a0[0x20]; 6222 u8 reserved_at_a0[0x20];
6101 6223
6102 u8 reserved_at_c0[0x4]; 6224 u8 encap_en[0x1];
6225 u8 decap_en[0x1];
6226 u8 reserved_at_c2[0x2];
6103 u8 table_miss_mode[0x4]; 6227 u8 table_miss_mode[0x4];
6104 u8 level[0x8]; 6228 u8 level[0x8];
6105 u8 reserved_at_d0[0x8]; 6229 u8 reserved_at_d0[0x8];
@@ -6108,7 +6232,10 @@ struct mlx5_ifc_create_flow_table_in_bits {
6108 u8 reserved_at_e0[0x8]; 6232 u8 reserved_at_e0[0x8];
6109 u8 table_miss_id[0x18]; 6233 u8 table_miss_id[0x18];
6110 6234
6111 u8 reserved_at_100[0x100]; 6235 u8 reserved_at_100[0x8];
6236 u8 lag_master_next_table_id[0x18];
6237
6238 u8 reserved_at_120[0x80];
6112}; 6239};
6113 6240
6114struct mlx5_ifc_create_flow_group_out_bits { 6241struct mlx5_ifc_create_flow_group_out_bits {
@@ -7563,7 +7690,8 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
7563}; 7690};
7564 7691
7565enum { 7692enum {
7566 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = 0x1, 7693 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = (1UL << 0),
7694 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID = (1UL << 15),
7567}; 7695};
7568 7696
7569struct mlx5_ifc_modify_flow_table_out_bits { 7697struct mlx5_ifc_modify_flow_table_out_bits {
@@ -7602,7 +7730,10 @@ struct mlx5_ifc_modify_flow_table_in_bits {
7602 u8 reserved_at_e0[0x8]; 7730 u8 reserved_at_e0[0x8];
7603 u8 table_miss_id[0x18]; 7731 u8 table_miss_id[0x18];
7604 7732
7605 u8 reserved_at_100[0x100]; 7733 u8 reserved_at_100[0x8];
7734 u8 lag_master_next_table_id[0x18];
7735
7736 u8 reserved_at_120[0x80];
7606}; 7737};
7607 7738
7608struct mlx5_ifc_ets_tcn_config_reg_bits { 7739struct mlx5_ifc_ets_tcn_config_reg_bits {
@@ -7710,4 +7841,134 @@ struct mlx5_ifc_dcbx_param_bits {
7710 u8 error[0x8]; 7841 u8 error[0x8];
7711 u8 reserved_at_a0[0x160]; 7842 u8 reserved_at_a0[0x160];
7712}; 7843};
7844
7845struct mlx5_ifc_lagc_bits {
7846 u8 reserved_at_0[0x1d];
7847 u8 lag_state[0x3];
7848
7849 u8 reserved_at_20[0x14];
7850 u8 tx_remap_affinity_2[0x4];
7851 u8 reserved_at_38[0x4];
7852 u8 tx_remap_affinity_1[0x4];
7853};
7854
7855struct mlx5_ifc_create_lag_out_bits {
7856 u8 status[0x8];
7857 u8 reserved_at_8[0x18];
7858
7859 u8 syndrome[0x20];
7860
7861 u8 reserved_at_40[0x40];
7862};
7863
7864struct mlx5_ifc_create_lag_in_bits {
7865 u8 opcode[0x10];
7866 u8 reserved_at_10[0x10];
7867
7868 u8 reserved_at_20[0x10];
7869 u8 op_mod[0x10];
7870
7871 struct mlx5_ifc_lagc_bits ctx;
7872};
7873
7874struct mlx5_ifc_modify_lag_out_bits {
7875 u8 status[0x8];
7876 u8 reserved_at_8[0x18];
7877
7878 u8 syndrome[0x20];
7879
7880 u8 reserved_at_40[0x40];
7881};
7882
7883struct mlx5_ifc_modify_lag_in_bits {
7884 u8 opcode[0x10];
7885 u8 reserved_at_10[0x10];
7886
7887 u8 reserved_at_20[0x10];
7888 u8 op_mod[0x10];
7889
7890 u8 reserved_at_40[0x20];
7891 u8 field_select[0x20];
7892
7893 struct mlx5_ifc_lagc_bits ctx;
7894};
7895
7896struct mlx5_ifc_query_lag_out_bits {
7897 u8 status[0x8];
7898 u8 reserved_at_8[0x18];
7899
7900 u8 syndrome[0x20];
7901
7902 u8 reserved_at_40[0x40];
7903
7904 struct mlx5_ifc_lagc_bits ctx;
7905};
7906
7907struct mlx5_ifc_query_lag_in_bits {
7908 u8 opcode[0x10];
7909 u8 reserved_at_10[0x10];
7910
7911 u8 reserved_at_20[0x10];
7912 u8 op_mod[0x10];
7913
7914 u8 reserved_at_40[0x40];
7915};
7916
7917struct mlx5_ifc_destroy_lag_out_bits {
7918 u8 status[0x8];
7919 u8 reserved_at_8[0x18];
7920
7921 u8 syndrome[0x20];
7922
7923 u8 reserved_at_40[0x40];
7924};
7925
7926struct mlx5_ifc_destroy_lag_in_bits {
7927 u8 opcode[0x10];
7928 u8 reserved_at_10[0x10];
7929
7930 u8 reserved_at_20[0x10];
7931 u8 op_mod[0x10];
7932
7933 u8 reserved_at_40[0x40];
7934};
7935
7936struct mlx5_ifc_create_vport_lag_out_bits {
7937 u8 status[0x8];
7938 u8 reserved_at_8[0x18];
7939
7940 u8 syndrome[0x20];
7941
7942 u8 reserved_at_40[0x40];
7943};
7944
7945struct mlx5_ifc_create_vport_lag_in_bits {
7946 u8 opcode[0x10];
7947 u8 reserved_at_10[0x10];
7948
7949 u8 reserved_at_20[0x10];
7950 u8 op_mod[0x10];
7951
7952 u8 reserved_at_40[0x40];
7953};
7954
7955struct mlx5_ifc_destroy_vport_lag_out_bits {
7956 u8 status[0x8];
7957 u8 reserved_at_8[0x18];
7958
7959 u8 syndrome[0x20];
7960
7961 u8 reserved_at_40[0x40];
7962};
7963
7964struct mlx5_ifc_destroy_vport_lag_in_bits {
7965 u8 opcode[0x10];
7966 u8 reserved_at_10[0x10];
7967
7968 u8 reserved_at_20[0x10];
7969 u8 op_mod[0x10];
7970
7971 u8 reserved_at_40[0x40];
7972};
7973
7713#endif /* MLX5_IFC_H */ 7974#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index e3012cc64b8a..b3065acd20b4 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -61,6 +61,39 @@ enum mlx5_an_status {
61#define MLX5_I2C_ADDR_HIGH 0x51 61#define MLX5_I2C_ADDR_HIGH 0x51
62#define MLX5_EEPROM_PAGE_LENGTH 256 62#define MLX5_EEPROM_PAGE_LENGTH 256
63 63
64enum mlx5e_link_mode {
65 MLX5E_1000BASE_CX_SGMII = 0,
66 MLX5E_1000BASE_KX = 1,
67 MLX5E_10GBASE_CX4 = 2,
68 MLX5E_10GBASE_KX4 = 3,
69 MLX5E_10GBASE_KR = 4,
70 MLX5E_20GBASE_KR2 = 5,
71 MLX5E_40GBASE_CR4 = 6,
72 MLX5E_40GBASE_KR4 = 7,
73 MLX5E_56GBASE_R4 = 8,
74 MLX5E_10GBASE_CR = 12,
75 MLX5E_10GBASE_SR = 13,
76 MLX5E_10GBASE_ER = 14,
77 MLX5E_40GBASE_SR4 = 15,
78 MLX5E_40GBASE_LR4 = 16,
79 MLX5E_50GBASE_SR2 = 18,
80 MLX5E_100GBASE_CR4 = 20,
81 MLX5E_100GBASE_SR4 = 21,
82 MLX5E_100GBASE_KR4 = 22,
83 MLX5E_100GBASE_LR4 = 23,
84 MLX5E_100BASE_TX = 24,
85 MLX5E_1000BASE_T = 25,
86 MLX5E_10GBASE_T = 26,
87 MLX5E_25GBASE_CR = 27,
88 MLX5E_25GBASE_KR = 28,
89 MLX5E_25GBASE_SR = 29,
90 MLX5E_50GBASE_CR2 = 30,
91 MLX5E_50GBASE_KR2 = 31,
92 MLX5E_LINK_MODES_NUMBER,
93};
94
95#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
96
64int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); 97int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
65int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, 98int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
66 int ptys_size, int proto_mask, u8 local_port); 99 int ptys_size, int proto_mask, u8 local_port);
@@ -70,9 +103,10 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
70 u32 *proto_admin, int proto_mask); 103 u32 *proto_admin, int proto_mask);
71int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, 104int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
72 u8 *link_width_oper, u8 local_port); 105 u8 *link_width_oper, u8 local_port);
73int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, 106int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
74 u8 *proto_oper, int proto_mask, 107 u8 *proto_oper, u8 local_port);
75 u8 local_port); 108int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
109 u32 *proto_oper, u8 local_port);
76int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, 110int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
77 u32 proto_admin, int proto_mask); 111 u32 proto_admin, int proto_mask);
78void mlx5_toggle_port_link(struct mlx5_core_dev *dev); 112void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 7879bf411891..0aacb2a7480d 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -123,12 +123,13 @@ enum {
123}; 123};
124 124
125enum { 125enum {
126 MLX5_NON_ZERO_RQ = 0 << 24, 126 MLX5_NON_ZERO_RQ = 0x0,
127 MLX5_SRQ_RQ = 1 << 24, 127 MLX5_SRQ_RQ = 0x1,
128 MLX5_CRQ_RQ = 2 << 24, 128 MLX5_CRQ_RQ = 0x2,
129 MLX5_ZERO_LEN_RQ = 3 << 24 129 MLX5_ZERO_LEN_RQ = 0x3
130}; 130};
131 131
132/* TODO REM */
132enum { 133enum {
133 /* params1 */ 134 /* params1 */
134 MLX5_QP_BIT_SRE = 1 << 15, 135 MLX5_QP_BIT_SRE = 1 << 15,
@@ -178,12 +179,6 @@ enum {
178}; 179};
179 180
180enum { 181enum {
181 MLX5_QP_LAT_SENSITIVE = 1 << 28,
182 MLX5_QP_BLOCK_MCAST = 1 << 30,
183 MLX5_QP_ENABLE_SIG = 1 << 31,
184};
185
186enum {
187 MLX5_RCV_DBR = 0, 182 MLX5_RCV_DBR = 0,
188 MLX5_SND_DBR = 1, 183 MLX5_SND_DBR = 1,
189}; 184};
@@ -484,6 +479,7 @@ struct mlx5_qp_path {
484 u8 rmac[6]; 479 u8 rmac[6];
485}; 480};
486 481
482/* FIXME: use mlx5_ifc.h qpc */
487struct mlx5_qp_context { 483struct mlx5_qp_context {
488 __be32 flags; 484 __be32 flags;
489 __be32 flags_pd; 485 __be32 flags_pd;
@@ -525,99 +521,6 @@ struct mlx5_qp_context {
525 u8 rsvd1[24]; 521 u8 rsvd1[24];
526}; 522};
527 523
528struct mlx5_create_qp_mbox_in {
529 struct mlx5_inbox_hdr hdr;
530 __be32 input_qpn;
531 u8 rsvd0[4];
532 __be32 opt_param_mask;
533 u8 rsvd1[4];
534 struct mlx5_qp_context ctx;
535 u8 rsvd3[16];
536 __be64 pas[0];
537};
538
539struct mlx5_create_qp_mbox_out {
540 struct mlx5_outbox_hdr hdr;
541 __be32 qpn;
542 u8 rsvd0[4];
543};
544
545struct mlx5_destroy_qp_mbox_in {
546 struct mlx5_inbox_hdr hdr;
547 __be32 qpn;
548 u8 rsvd0[4];
549};
550
551struct mlx5_destroy_qp_mbox_out {
552 struct mlx5_outbox_hdr hdr;
553 u8 rsvd0[8];
554};
555
556struct mlx5_modify_qp_mbox_in {
557 struct mlx5_inbox_hdr hdr;
558 __be32 qpn;
559 u8 rsvd0[4];
560 __be32 optparam;
561 u8 rsvd1[4];
562 struct mlx5_qp_context ctx;
563 u8 rsvd2[16];
564};
565
566struct mlx5_modify_qp_mbox_out {
567 struct mlx5_outbox_hdr hdr;
568 u8 rsvd0[8];
569};
570
571struct mlx5_query_qp_mbox_in {
572 struct mlx5_inbox_hdr hdr;
573 __be32 qpn;
574 u8 rsvd[4];
575};
576
577struct mlx5_query_qp_mbox_out {
578 struct mlx5_outbox_hdr hdr;
579 u8 rsvd1[8];
580 __be32 optparam;
581 u8 rsvd0[4];
582 struct mlx5_qp_context ctx;
583 u8 rsvd2[16];
584 __be64 pas[0];
585};
586
587struct mlx5_conf_sqp_mbox_in {
588 struct mlx5_inbox_hdr hdr;
589 __be32 qpn;
590 u8 rsvd[3];
591 u8 type;
592};
593
594struct mlx5_conf_sqp_mbox_out {
595 struct mlx5_outbox_hdr hdr;
596 u8 rsvd[8];
597};
598
599struct mlx5_alloc_xrcd_mbox_in {
600 struct mlx5_inbox_hdr hdr;
601 u8 rsvd[8];
602};
603
604struct mlx5_alloc_xrcd_mbox_out {
605 struct mlx5_outbox_hdr hdr;
606 __be32 xrcdn;
607 u8 rsvd[4];
608};
609
610struct mlx5_dealloc_xrcd_mbox_in {
611 struct mlx5_inbox_hdr hdr;
612 __be32 xrcdn;
613 u8 rsvd[4];
614};
615
616struct mlx5_dealloc_xrcd_mbox_out {
617 struct mlx5_outbox_hdr hdr;
618 u8 rsvd[8];
619};
620
621static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) 524static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
622{ 525{
623 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); 526 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
@@ -628,28 +531,17 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
628 return radix_tree_lookup(&dev->priv.mkey_table.tree, key); 531 return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
629} 532}
630 533
631struct mlx5_page_fault_resume_mbox_in {
632 struct mlx5_inbox_hdr hdr;
633 __be32 flags_qpn;
634 u8 reserved[4];
635};
636
637struct mlx5_page_fault_resume_mbox_out {
638 struct mlx5_outbox_hdr hdr;
639 u8 rsvd[8];
640};
641
642int mlx5_core_create_qp(struct mlx5_core_dev *dev, 534int mlx5_core_create_qp(struct mlx5_core_dev *dev,
643 struct mlx5_core_qp *qp, 535 struct mlx5_core_qp *qp,
644 struct mlx5_create_qp_mbox_in *in, 536 u32 *in,
645 int inlen); 537 int inlen);
646int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation, 538int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
647 struct mlx5_modify_qp_mbox_in *in, int sqd_event, 539 u32 opt_param_mask, void *qpc,
648 struct mlx5_core_qp *qp); 540 struct mlx5_core_qp *qp);
649int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, 541int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
650 struct mlx5_core_qp *qp); 542 struct mlx5_core_qp *qp);
651int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, 543int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
652 struct mlx5_query_qp_mbox_out *out, int outlen); 544 u32 *out, int outlen);
653 545
654int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); 546int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
655int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); 547int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index e087b7d047ac..451b0bde9083 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
45 u16 vport, u8 *addr); 45 u16 vport, u8 *addr);
46void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, 46void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
47 u8 *min_inline); 47 u8 *min_inline);
48int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
49 u16 vport, u8 min_inline);
48int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, 50int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
49 u16 vport, u8 *addr); 51 u16 vport, u8 *addr);
50int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); 52int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 0d126aeb3ec0..d43ef96bf075 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -32,6 +32,7 @@
32#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c 32#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
33#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d 33#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
34#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 34#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
35#define SDIO_DEVICE_ID_BROADCOM_4339 0x4339
35#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 36#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
36#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 37#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
37#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 38#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
diff --git a/include/linux/net.h b/include/linux/net.h
index b9f0ff4d489c..cd0c8bd0a1de 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -25,6 +25,7 @@
25#include <linux/kmemcheck.h> 25#include <linux/kmemcheck.h>
26#include <linux/rcupdate.h> 26#include <linux/rcupdate.h>
27#include <linux/once.h> 27#include <linux/once.h>
28#include <linux/fs.h>
28 29
29#include <uapi/linux/net.h> 30#include <uapi/linux/net.h>
30 31
@@ -128,6 +129,9 @@ struct page;
128struct sockaddr; 129struct sockaddr;
129struct msghdr; 130struct msghdr;
130struct module; 131struct module;
132struct sk_buff;
133typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
134 unsigned int, size_t);
131 135
132struct proto_ops { 136struct proto_ops {
133 int family; 137 int family;
@@ -186,6 +190,8 @@ struct proto_ops {
186 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 190 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
187 int (*set_peek_off)(struct sock *sk, int val); 191 int (*set_peek_off)(struct sock *sk, int val);
188 int (*peek_len)(struct socket *sock); 192 int (*peek_len)(struct socket *sock);
193 int (*read_sock)(struct sock *sk, read_descriptor_t *desc,
194 sk_read_actor_t recv_actor);
189}; 195};
190 196
191#define DECLARE_SOCKADDR(type, dst, src) \ 197#define DECLARE_SOCKADDR(type, dst, src) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e8d79d4ebcfe..136ae6bbe81e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -52,6 +52,7 @@
52#include <uapi/linux/netdevice.h> 52#include <uapi/linux/netdevice.h>
53#include <uapi/linux/if_bonding.h> 53#include <uapi/linux/if_bonding.h>
54#include <uapi/linux/pkt_cls.h> 54#include <uapi/linux/pkt_cls.h>
55#include <linux/hashtable.h>
55 56
56struct netpoll_info; 57struct netpoll_info;
57struct device; 58struct device;
@@ -788,6 +789,7 @@ enum {
788 TC_SETUP_CLSU32, 789 TC_SETUP_CLSU32,
789 TC_SETUP_CLSFLOWER, 790 TC_SETUP_CLSFLOWER,
790 TC_SETUP_MATCHALL, 791 TC_SETUP_MATCHALL,
792 TC_SETUP_CLSBPF,
791}; 793};
792 794
793struct tc_cls_u32_offload; 795struct tc_cls_u32_offload;
@@ -799,6 +801,7 @@ struct tc_to_netdev {
799 struct tc_cls_u32_offload *cls_u32; 801 struct tc_cls_u32_offload *cls_u32;
800 struct tc_cls_flower_offload *cls_flower; 802 struct tc_cls_flower_offload *cls_flower;
801 struct tc_cls_matchall_offload *cls_mall; 803 struct tc_cls_matchall_offload *cls_mall;
804 struct tc_cls_bpf_offload *cls_bpf;
802 }; 805 };
803}; 806};
804 807
@@ -923,6 +926,14 @@ struct netdev_xdp {
923 * 3. Update dev->stats asynchronously and atomically, and define 926 * 3. Update dev->stats asynchronously and atomically, and define
924 * neither operation. 927 * neither operation.
925 * 928 *
929 * bool (*ndo_has_offload_stats)(int attr_id)
930 * Return true if this device supports offload stats of this attr_id.
931 *
932 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
933 * void *attr_data)
934 * Get statistics for offload operations by attr_id. Write it into the
935 * attr_data pointer.
936 *
926 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); 937 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
927 * If device supports VLAN filtering this function is called when a 938 * If device supports VLAN filtering this function is called when a
928 * VLAN id is registered. 939 * VLAN id is registered.
@@ -935,7 +946,8 @@ struct netdev_xdp {
935 * 946 *
936 * SR-IOV management functions. 947 * SR-IOV management functions.
937 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); 948 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
938 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); 949 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
950 * u8 qos, __be16 proto);
939 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, 951 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
940 * int max_tx_rate); 952 * int max_tx_rate);
941 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); 953 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
@@ -1030,7 +1042,7 @@ struct netdev_xdp {
1030 * Deletes the FDB entry from dev coresponding to addr. 1042 * Deletes the FDB entry from dev coresponding to addr.
1031 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, 1043 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
1032 * struct net_device *dev, struct net_device *filter_dev, 1044 * struct net_device *dev, struct net_device *filter_dev,
1033 * int idx) 1045 * int *idx)
1034 * Used to add FDB entries to dump requests. Implementers should add 1046 * Used to add FDB entries to dump requests. Implementers should add
1035 * entries to skb and update idx with the number of entries. 1047 * entries to skb and update idx with the number of entries.
1036 * 1048 *
@@ -1154,6 +1166,10 @@ struct net_device_ops {
1154 1166
1155 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, 1167 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
1156 struct rtnl_link_stats64 *storage); 1168 struct rtnl_link_stats64 *storage);
1169 bool (*ndo_has_offload_stats)(int attr_id);
1170 int (*ndo_get_offload_stats)(int attr_id,
1171 const struct net_device *dev,
1172 void *attr_data);
1157 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 1173 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1158 1174
1159 int (*ndo_vlan_rx_add_vid)(struct net_device *dev, 1175 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
@@ -1172,7 +1188,8 @@ struct net_device_ops {
1172 int (*ndo_set_vf_mac)(struct net_device *dev, 1188 int (*ndo_set_vf_mac)(struct net_device *dev,
1173 int queue, u8 *mac); 1189 int queue, u8 *mac);
1174 int (*ndo_set_vf_vlan)(struct net_device *dev, 1190 int (*ndo_set_vf_vlan)(struct net_device *dev,
1175 int queue, u16 vlan, u8 qos); 1191 int queue, u16 vlan,
1192 u8 qos, __be16 proto);
1176 int (*ndo_set_vf_rate)(struct net_device *dev, 1193 int (*ndo_set_vf_rate)(struct net_device *dev,
1177 int vf, int min_tx_rate, 1194 int vf, int min_tx_rate,
1178 int max_tx_rate); 1195 int max_tx_rate);
@@ -1262,7 +1279,7 @@ struct net_device_ops {
1262 struct netlink_callback *cb, 1279 struct netlink_callback *cb,
1263 struct net_device *dev, 1280 struct net_device *dev,
1264 struct net_device *filter_dev, 1281 struct net_device *filter_dev,
1265 int idx); 1282 int *idx);
1266 1283
1267 int (*ndo_bridge_setlink)(struct net_device *dev, 1284 int (*ndo_bridge_setlink)(struct net_device *dev,
1268 struct nlmsghdr *nlh, 1285 struct nlmsghdr *nlh,
@@ -1561,8 +1578,6 @@ enum netdev_priv_flags {
1561 * 1578 *
1562 * @xps_maps: XXX: need comments on this one 1579 * @xps_maps: XXX: need comments on this one
1563 * 1580 *
1564 * @offload_fwd_mark: Offload device fwding mark
1565 *
1566 * @watchdog_timeo: Represents the timeout that is used by 1581 * @watchdog_timeo: Represents the timeout that is used by
1567 * the watchdog (see dev_watchdog()) 1582 * the watchdog (see dev_watchdog())
1568 * @watchdog_timer: List of timers 1583 * @watchdog_timer: List of timers
@@ -1784,7 +1799,7 @@ struct net_device {
1784#endif 1799#endif
1785 struct netdev_queue __rcu *ingress_queue; 1800 struct netdev_queue __rcu *ingress_queue;
1786#ifdef CONFIG_NETFILTER_INGRESS 1801#ifdef CONFIG_NETFILTER_INGRESS
1787 struct list_head nf_hooks_ingress; 1802 struct nf_hook_entry __rcu *nf_hooks_ingress;
1788#endif 1803#endif
1789 1804
1790 unsigned char broadcast[MAX_ADDR_LEN]; 1805 unsigned char broadcast[MAX_ADDR_LEN];
@@ -1800,6 +1815,9 @@ struct net_device {
1800 unsigned int num_tx_queues; 1815 unsigned int num_tx_queues;
1801 unsigned int real_num_tx_queues; 1816 unsigned int real_num_tx_queues;
1802 struct Qdisc *qdisc; 1817 struct Qdisc *qdisc;
1818#ifdef CONFIG_NET_SCHED
1819 DECLARE_HASHTABLE (qdisc_hash, 4);
1820#endif
1803 unsigned long tx_queue_len; 1821 unsigned long tx_queue_len;
1804 spinlock_t tx_global_lock; 1822 spinlock_t tx_global_lock;
1805 int watchdog_timeo; 1823 int watchdog_timeo;
@@ -1810,9 +1828,6 @@ struct net_device {
1810#ifdef CONFIG_NET_CLS_ACT 1828#ifdef CONFIG_NET_CLS_ACT
1811 struct tcf_proto __rcu *egress_cl_list; 1829 struct tcf_proto __rcu *egress_cl_list;
1812#endif 1830#endif
1813#ifdef CONFIG_NET_SWITCHDEV
1814 u32 offload_fwd_mark;
1815#endif
1816 1831
1817 /* These may be needed for future network-power-down code. */ 1832 /* These may be needed for future network-power-down code. */
1818 struct timer_list watchdog_timer; 1833 struct timer_list watchdog_timer;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 9230f9aee896..abc7fdcb9eb1 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -55,12 +55,34 @@ struct nf_hook_state {
55 struct net_device *out; 55 struct net_device *out;
56 struct sock *sk; 56 struct sock *sk;
57 struct net *net; 57 struct net *net;
58 struct list_head *hook_list; 58 struct nf_hook_entry __rcu *hook_entries;
59 int (*okfn)(struct net *, struct sock *, struct sk_buff *); 59 int (*okfn)(struct net *, struct sock *, struct sk_buff *);
60}; 60};
61 61
62typedef unsigned int nf_hookfn(void *priv,
63 struct sk_buff *skb,
64 const struct nf_hook_state *state);
65struct nf_hook_ops {
66 struct list_head list;
67
68 /* User fills in from here down. */
69 nf_hookfn *hook;
70 struct net_device *dev;
71 void *priv;
72 u_int8_t pf;
73 unsigned int hooknum;
74 /* Hooks are ordered in ascending priority. */
75 int priority;
76};
77
78struct nf_hook_entry {
79 struct nf_hook_entry __rcu *next;
80 struct nf_hook_ops ops;
81 const struct nf_hook_ops *orig_ops;
82};
83
62static inline void nf_hook_state_init(struct nf_hook_state *p, 84static inline void nf_hook_state_init(struct nf_hook_state *p,
63 struct list_head *hook_list, 85 struct nf_hook_entry *hook_entry,
64 unsigned int hook, 86 unsigned int hook,
65 int thresh, u_int8_t pf, 87 int thresh, u_int8_t pf,
66 struct net_device *indev, 88 struct net_device *indev,
@@ -76,26 +98,11 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
76 p->out = outdev; 98 p->out = outdev;
77 p->sk = sk; 99 p->sk = sk;
78 p->net = net; 100 p->net = net;
79 p->hook_list = hook_list; 101 RCU_INIT_POINTER(p->hook_entries, hook_entry);
80 p->okfn = okfn; 102 p->okfn = okfn;
81} 103}
82 104
83typedef unsigned int nf_hookfn(void *priv,
84 struct sk_buff *skb,
85 const struct nf_hook_state *state);
86
87struct nf_hook_ops {
88 struct list_head list;
89 105
90 /* User fills in from here down. */
91 nf_hookfn *hook;
92 struct net_device *dev;
93 void *priv;
94 u_int8_t pf;
95 unsigned int hooknum;
96 /* Hooks are ordered in ascending priority. */
97 int priority;
98};
99 106
100struct nf_sockopt_ops { 107struct nf_sockopt_ops {
101 struct list_head list; 108 struct list_head list;
@@ -133,6 +140,8 @@ int nf_register_hook(struct nf_hook_ops *reg);
133void nf_unregister_hook(struct nf_hook_ops *reg); 140void nf_unregister_hook(struct nf_hook_ops *reg);
134int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n); 141int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
135void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n); 142void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
143int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
144void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
136 145
137/* Functions to register get/setsockopt ranges (non-inclusive). You 146/* Functions to register get/setsockopt ranges (non-inclusive). You
138 need to check permissions yourself! */ 147 need to check permissions yourself! */
@@ -161,7 +170,8 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
161 int (*okfn)(struct net *, struct sock *, struct sk_buff *), 170 int (*okfn)(struct net *, struct sock *, struct sk_buff *),
162 int thresh) 171 int thresh)
163{ 172{
164 struct list_head *hook_list; 173 struct nf_hook_entry *hook_head;
174 int ret = 1;
165 175
166#ifdef HAVE_JUMP_LABEL 176#ifdef HAVE_JUMP_LABEL
167 if (__builtin_constant_p(pf) && 177 if (__builtin_constant_p(pf) &&
@@ -170,16 +180,19 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
170 return 1; 180 return 1;
171#endif 181#endif
172 182
173 hook_list = &net->nf.hooks[pf][hook]; 183 rcu_read_lock();
174 184 hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
175 if (!list_empty(hook_list)) { 185 if (hook_head) {
176 struct nf_hook_state state; 186 struct nf_hook_state state;
177 187
178 nf_hook_state_init(&state, hook_list, hook, thresh, 188 nf_hook_state_init(&state, hook_head, hook, thresh,
179 pf, indev, outdev, sk, net, okfn); 189 pf, indev, outdev, sk, net, okfn);
180 return nf_hook_slow(skb, &state); 190
191 ret = nf_hook_slow(skb, &state);
181 } 192 }
182 return 1; 193 rcu_read_unlock();
194
195 return ret;
183} 196}
184 197
185static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, 198static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 275505792664..1d1ef4e20512 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -4,13 +4,9 @@
4#include <uapi/linux/netfilter/nf_conntrack_common.h> 4#include <uapi/linux/netfilter/nf_conntrack_common.h>
5 5
6struct ip_conntrack_stat { 6struct ip_conntrack_stat {
7 unsigned int searched;
8 unsigned int found; 7 unsigned int found;
9 unsigned int new;
10 unsigned int invalid; 8 unsigned int invalid;
11 unsigned int ignore; 9 unsigned int ignore;
12 unsigned int delete;
13 unsigned int delete_list;
14 unsigned int insert; 10 unsigned int insert;
15 unsigned int insert_failed; 11 unsigned int insert_failed;
16 unsigned int drop; 12 unsigned int drop;
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index df78dc2b5524..dee0acd0dd31 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -1,68 +1,8 @@
1#ifndef _CONNTRACK_PROTO_GRE_H 1#ifndef _CONNTRACK_PROTO_GRE_H
2#define _CONNTRACK_PROTO_GRE_H 2#define _CONNTRACK_PROTO_GRE_H
3#include <asm/byteorder.h> 3#include <asm/byteorder.h>
4 4#include <net/gre.h>
5/* GRE PROTOCOL HEADER */ 5#include <net/pptp.h>
6
7/* GRE Version field */
8#define GRE_VERSION_1701 0x0
9#define GRE_VERSION_PPTP 0x1
10
11/* GRE Protocol field */
12#define GRE_PROTOCOL_PPTP 0x880B
13
14/* GRE Flags */
15#define GRE_FLAG_C 0x80
16#define GRE_FLAG_R 0x40
17#define GRE_FLAG_K 0x20
18#define GRE_FLAG_S 0x10
19#define GRE_FLAG_A 0x80
20
21#define GRE_IS_C(f) ((f)&GRE_FLAG_C)
22#define GRE_IS_R(f) ((f)&GRE_FLAG_R)
23#define GRE_IS_K(f) ((f)&GRE_FLAG_K)
24#define GRE_IS_S(f) ((f)&GRE_FLAG_S)
25#define GRE_IS_A(f) ((f)&GRE_FLAG_A)
26
27/* GRE is a mess: Four different standards */
28struct gre_hdr {
29#if defined(__LITTLE_ENDIAN_BITFIELD)
30 __u16 rec:3,
31 srr:1,
32 seq:1,
33 key:1,
34 routing:1,
35 csum:1,
36 version:3,
37 reserved:4,
38 ack:1;
39#elif defined(__BIG_ENDIAN_BITFIELD)
40 __u16 csum:1,
41 routing:1,
42 key:1,
43 seq:1,
44 srr:1,
45 rec:3,
46 ack:1,
47 reserved:4,
48 version:3;
49#else
50#error "Adjust your <asm/byteorder.h> defines"
51#endif
52 __be16 protocol;
53};
54
55/* modified GRE header for PPTP */
56struct gre_hdr_pptp {
57 __u8 flags; /* bitfield */
58 __u8 version; /* should be GRE_VERSION_PPTP */
59 __be16 protocol; /* should be GRE_PROTOCOL_PPTP */
60 __be16 payload_len; /* size of ppp payload, not inc. gre header */
61 __be16 call_id; /* peer's call_id for this session */
62 __be32 seq; /* sequence number. Present if S==1 */
63 __be32 ack; /* seq number of highest packet received by */
64 /* sender in this session */
65};
66 6
67struct nf_ct_gre { 7struct nf_ct_gre {
68 unsigned int stream_timeout; 8 unsigned int stream_timeout;
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 5fcd375ef175..33e37fb41d5d 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -11,22 +11,30 @@ static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
11 if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) 11 if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
12 return false; 12 return false;
13#endif 13#endif
14 return !list_empty(&skb->dev->nf_hooks_ingress); 14 return rcu_access_pointer(skb->dev->nf_hooks_ingress);
15} 15}
16 16
17/* caller must hold rcu_read_lock */
17static inline int nf_hook_ingress(struct sk_buff *skb) 18static inline int nf_hook_ingress(struct sk_buff *skb)
18{ 19{
20 struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress);
19 struct nf_hook_state state; 21 struct nf_hook_state state;
20 22
21 nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress, 23 /* Must recheck the ingress hook head, in the event it became NULL
22 NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, 24 * after the check in nf_hook_ingress_active evaluated to true.
23 skb->dev, NULL, NULL, dev_net(skb->dev), NULL); 25 */
26 if (unlikely(!e))
27 return 0;
28
29 nf_hook_state_init(&state, e, NF_NETDEV_INGRESS, INT_MIN,
30 NFPROTO_NETDEV, skb->dev, NULL, NULL,
31 dev_net(skb->dev), NULL);
24 return nf_hook_slow(skb, &state); 32 return nf_hook_slow(skb, &state);
25} 33}
26 34
27static inline void nf_hook_ingress_init(struct net_device *dev) 35static inline void nf_hook_ingress_init(struct net_device *dev)
28{ 36{
29 INIT_LIST_HEAD(&dev->nf_hooks_ingress); 37 RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
30} 38}
31#else /* CONFIG_NETFILTER_INGRESS */ 39#else /* CONFIG_NETFILTER_INGRESS */
32static inline int nf_hook_ingress_active(struct sk_buff *skb) 40static inline int nf_hook_ingress_active(struct sk_buff *skb)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5c5362584aba..060d0ede88df 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -690,6 +690,10 @@ struct perf_event {
690 u64 (*clock)(void); 690 u64 (*clock)(void);
691 perf_overflow_handler_t overflow_handler; 691 perf_overflow_handler_t overflow_handler;
692 void *overflow_handler_context; 692 void *overflow_handler_context;
693#ifdef CONFIG_BPF_SYSCALL
694 perf_overflow_handler_t orig_overflow_handler;
695 struct bpf_prog *prog;
696#endif
693 697
694#ifdef CONFIG_EVENT_TRACING 698#ifdef CONFIG_EVENT_TRACING
695 struct trace_event_call *tp_event; 699 struct trace_event_call *tp_event;
@@ -802,6 +806,11 @@ struct perf_output_handle {
802 int page; 806 int page;
803}; 807};
804 808
809struct bpf_perf_event_data_kern {
810 struct pt_regs *regs;
811 struct perf_sample_data *data;
812};
813
805#ifdef CONFIG_CGROUP_PERF 814#ifdef CONFIG_CGROUP_PERF
806 815
807/* 816/*
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 2d24b283aa2d..e25f1830fbcf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -80,6 +80,7 @@ typedef enum {
80 PHY_INTERFACE_MODE_XGMII, 80 PHY_INTERFACE_MODE_XGMII,
81 PHY_INTERFACE_MODE_MOCA, 81 PHY_INTERFACE_MODE_MOCA,
82 PHY_INTERFACE_MODE_QSGMII, 82 PHY_INTERFACE_MODE_QSGMII,
83 PHY_INTERFACE_MODE_TRGMII,
83 PHY_INTERFACE_MODE_MAX, 84 PHY_INTERFACE_MODE_MAX,
84} phy_interface_t; 85} phy_interface_t;
85 86
@@ -123,6 +124,8 @@ static inline const char *phy_modes(phy_interface_t interface)
123 return "moca"; 124 return "moca";
124 case PHY_INTERFACE_MODE_QSGMII: 125 case PHY_INTERFACE_MODE_QSGMII:
125 return "qsgmii"; 126 return "qsgmii";
127 case PHY_INTERFACE_MODE_TRGMII:
128 return "trgmii";
126 default: 129 default:
127 return "unknown"; 130 return "unknown";
128 } 131 }
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 6b15e168148a..5ad54fc66cf0 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -127,6 +127,11 @@ struct ptp_clock;
127 * 127 *
128 * @info: Structure describing the new clock. 128 * @info: Structure describing the new clock.
129 * @parent: Pointer to the parent device of the new clock. 129 * @parent: Pointer to the parent device of the new clock.
130 *
131 * Returns a valid pointer on success or PTR_ERR on failure. If PHC
132 * support is missing at the configuration level, this function
133 * returns NULL, and drivers are expected to gracefully handle that
134 * case separately.
130 */ 135 */
131 136
132extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, 137extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 40c0ada01806..734deb094618 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -5,28 +5,77 @@
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree. 6 * this source tree.
7 */ 7 */
8#ifndef _COMMON_HSI_H
9#define _COMMON_HSI_H
10#include <linux/types.h>
11#include <asm/byteorder.h>
12#include <linux/bitops.h>
13#include <linux/slab.h>
14
15/* dma_addr_t manip */
16#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
17#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
18#define DMA_REGPAIR_LE(x, val) do { \
19 (x).hi = DMA_HI_LE((val)); \
20 (x).lo = DMA_LO_LE((val)); \
21 } while (0)
22
23#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
24#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
25#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
26#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
8 27
9#ifndef __COMMON_HSI__ 28#ifndef __COMMON_HSI__
10#define __COMMON_HSI__ 29#define __COMMON_HSI__
11 30
12#define CORE_SPQE_PAGE_SIZE_BYTES 4096
13 31
14#define X_FINAL_CLEANUP_AGG_INT 1 32#define X_FINAL_CLEANUP_AGG_INT 1
33
34#define EVENT_RING_PAGE_SIZE_BYTES 4096
35
15#define NUM_OF_GLOBAL_QUEUES 128 36#define NUM_OF_GLOBAL_QUEUES 128
37#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
38
39#define ISCSI_CDU_TASK_SEG_TYPE 0
40#define RDMA_CDU_TASK_SEG_TYPE 1
41
42#define FW_ASSERT_GENERAL_ATTN_IDX 32
43
44#define MAX_PINNED_CCFC 32
16 45
17/* Queue Zone sizes in bytes */ 46/* Queue Zone sizes in bytes */
18#define TSTORM_QZONE_SIZE 8 47#define TSTORM_QZONE_SIZE 8
19#define MSTORM_QZONE_SIZE 0 48#define MSTORM_QZONE_SIZE 16
20#define USTORM_QZONE_SIZE 8 49#define USTORM_QZONE_SIZE 8
21#define XSTORM_QZONE_SIZE 8 50#define XSTORM_QZONE_SIZE 8
22#define YSTORM_QZONE_SIZE 0 51#define YSTORM_QZONE_SIZE 0
23#define PSTORM_QZONE_SIZE 0 52#define PSTORM_QZONE_SIZE 0
24 53
25#define ETH_MAX_NUM_RX_QUEUES_PER_VF 16 54#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
55#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
56#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
57#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
58
59/********************************/
60/* CORE (LIGHT L2) FW CONSTANTS */
61/********************************/
62
63#define CORE_LL2_MAX_RAMROD_PER_CON 8
64#define CORE_LL2_TX_BD_PAGE_SIZE_BYTES 4096
65#define CORE_LL2_RX_BD_PAGE_SIZE_BYTES 4096
66#define CORE_LL2_RX_CQE_PAGE_SIZE_BYTES 4096
67#define CORE_LL2_RX_NUM_NEXT_PAGE_BDS 1
68
69#define CORE_LL2_TX_MAX_BDS_PER_PACKET 12
70
71#define CORE_SPQE_PAGE_SIZE_BYTES 4096
72
73#define MAX_NUM_LL2_RX_QUEUES 32
74#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
26 75
27#define FW_MAJOR_VERSION 8 76#define FW_MAJOR_VERSION 8
28#define FW_MINOR_VERSION 10 77#define FW_MINOR_VERSION 10
29#define FW_REVISION_VERSION 5 78#define FW_REVISION_VERSION 10
30#define FW_ENGINEERING_VERSION 0 79#define FW_ENGINEERING_VERSION 0
31 80
32/***********************/ 81/***********************/
@@ -83,6 +132,20 @@
83#define NUM_OF_LCIDS (320) 132#define NUM_OF_LCIDS (320)
84#define NUM_OF_LTIDS (320) 133#define NUM_OF_LTIDS (320)
85 134
135/* Clock values */
136#define MASTER_CLK_FREQ_E4 (375e6)
137#define STORM_CLK_FREQ_E4 (1000e6)
138#define CLK25M_CLK_FREQ_E4 (25e6)
139
140/* Global PXP windows (GTT) */
141#define NUM_OF_GTT 19
142#define GTT_DWORD_SIZE_BITS 10
143#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
144#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
145
146/* Tools Version */
147#define TOOLS_VERSION 10
148
86/*****************/ 149/*****************/
87/* CDU CONSTANTS */ 150/* CDU CONSTANTS */
88/*****************/ 151/*****************/
@@ -90,6 +153,8 @@
90#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) 153#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
91#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) 154#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
92 155
156#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
157#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
93/*****************/ 158/*****************/
94/* DQ CONSTANTS */ 159/* DQ CONSTANTS */
95/*****************/ 160/*****************/
@@ -115,6 +180,11 @@
115#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 180#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
116#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 181#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
117#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 182#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
183#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
184#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
185#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
186#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
187#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
118 188
119/* UCM agg val selection (HW) */ 189/* UCM agg val selection (HW) */
120#define DQ_UCM_AGG_VAL_SEL_WORD0 0 190#define DQ_UCM_AGG_VAL_SEL_WORD0 0
@@ -159,13 +229,16 @@
159#define DQ_XCM_AGG_FLG_SHIFT_CF23 7 229#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
160 230
161/* XCM agg counter flag selection */ 231/* XCM agg counter flag selection */
162#define DQ_XCM_CORE_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18) 232#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
163#define DQ_XCM_CORE_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19) 233#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
164#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22) 234#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
165#define DQ_XCM_ETH_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18) 235#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
166#define DQ_XCM_ETH_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19) 236#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
167#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22) 237#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
168#define DQ_XCM_ETH_TPH_EN_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23) 238#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
239#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
240#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
241#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
169 242
170/* UCM agg counter flag selection (HW) */ 243/* UCM agg counter flag selection (HW) */
171#define DQ_UCM_AGG_FLG_SHIFT_CF0 0 244#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
@@ -178,9 +251,45 @@
178#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7 251#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7
179 252
180/* UCM agg counter flag selection (FW) */ 253/* UCM agg counter flag selection (FW) */
181#define DQ_UCM_ETH_PMD_TX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4) 254#define DQ_UCM_ETH_PMD_TX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
182#define DQ_UCM_ETH_PMD_RX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5) 255#define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
183 256#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
257#define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
258
259/* TCM agg counter flag selection (HW) */
260#define DQ_TCM_AGG_FLG_SHIFT_CF0 0
261#define DQ_TCM_AGG_FLG_SHIFT_CF1 1
262#define DQ_TCM_AGG_FLG_SHIFT_CF2 2
263#define DQ_TCM_AGG_FLG_SHIFT_CF3 3
264#define DQ_TCM_AGG_FLG_SHIFT_CF4 4
265#define DQ_TCM_AGG_FLG_SHIFT_CF5 5
266#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
267#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
268/* TCM agg counter flag selection (FW) */
269#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
270#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
271
272/* PWM address mapping */
273#define DQ_PWM_OFFSET_DPM_BASE 0x0
274#define DQ_PWM_OFFSET_DPM_END 0x27
275#define DQ_PWM_OFFSET_XCM16_BASE 0x40
276#define DQ_PWM_OFFSET_XCM32_BASE 0x44
277#define DQ_PWM_OFFSET_UCM16_BASE 0x48
278#define DQ_PWM_OFFSET_UCM32_BASE 0x4C
279#define DQ_PWM_OFFSET_UCM16_4 0x50
280#define DQ_PWM_OFFSET_TCM16_BASE 0x58
281#define DQ_PWM_OFFSET_TCM32_BASE 0x5C
282#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
283#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
284#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
285
286#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2)
287#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE)
288#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_16BIT (DQ_PWM_OFFSET_UCM16_4)
289#define DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT (DQ_PWM_OFFSET_UCM16_BASE + 2)
290#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS)
291#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
292#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
184#define DQ_REGION_SHIFT (12) 293#define DQ_REGION_SHIFT (12)
185 294
186/* DPM */ 295/* DPM */
@@ -214,15 +323,17 @@
214 */ 323 */
215#define CM_TX_PQ_BASE 0x200 324#define CM_TX_PQ_BASE 0x200
216 325
326/* number of global Vport/QCN rate limiters */
327#define MAX_QM_GLOBAL_RLS 256
217/* QM registers data */ 328/* QM registers data */
218#define QM_LINE_CRD_REG_WIDTH 16 329#define QM_LINE_CRD_REG_WIDTH 16
219#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1)) 330#define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1))
220#define QM_BYTE_CRD_REG_WIDTH 24 331#define QM_BYTE_CRD_REG_WIDTH 24
221#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1)) 332#define QM_BYTE_CRD_REG_SIGN_BIT BIT((QM_BYTE_CRD_REG_WIDTH - 1))
222#define QM_WFQ_CRD_REG_WIDTH 32 333#define QM_WFQ_CRD_REG_WIDTH 32
223#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1)) 334#define QM_WFQ_CRD_REG_SIGN_BIT BIT((QM_WFQ_CRD_REG_WIDTH - 1))
224#define QM_RL_CRD_REG_WIDTH 32 335#define QM_RL_CRD_REG_WIDTH 32
225#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1)) 336#define QM_RL_CRD_REG_SIGN_BIT BIT((QM_RL_CRD_REG_WIDTH - 1))
226 337
227/*****************/ 338/*****************/
228/* CAU CONSTANTS */ 339/* CAU CONSTANTS */
@@ -287,6 +398,17 @@
287/* PXP CONSTANTS */ 398/* PXP CONSTANTS */
288/*****************/ 399/*****************/
289 400
401/* Bars for Blocks */
402#define PXP_BAR_GRC 0
403#define PXP_BAR_TSDM 0
404#define PXP_BAR_USDM 0
405#define PXP_BAR_XSDM 0
406#define PXP_BAR_MSDM 0
407#define PXP_BAR_YSDM 0
408#define PXP_BAR_PSDM 0
409#define PXP_BAR_IGU 0
410#define PXP_BAR_DQ 1
411
290/* PTT and GTT */ 412/* PTT and GTT */
291#define PXP_NUM_PF_WINDOWS 12 413#define PXP_NUM_PF_WINDOWS 12
292#define PXP_PER_PF_ENTRY_SIZE 8 414#define PXP_PER_PF_ENTRY_SIZE 8
@@ -334,6 +456,52 @@
334 (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \ 456 (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
335 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) 457 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
336 458
459/* PF BAR */
460#define PXP_BAR0_START_GRC 0x0000
461#define PXP_BAR0_GRC_LENGTH 0x1C00000
462#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \
463 PXP_BAR0_GRC_LENGTH - 1)
464
465#define PXP_BAR0_START_IGU 0x1C00000
466#define PXP_BAR0_IGU_LENGTH 0x10000
467#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \
468 PXP_BAR0_IGU_LENGTH - 1)
469
470#define PXP_BAR0_START_TSDM 0x1C80000
471#define PXP_BAR0_SDM_LENGTH 0x40000
472#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000
473#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \
474 PXP_BAR0_SDM_LENGTH - 1)
475
476#define PXP_BAR0_START_MSDM 0x1D00000
477#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \
478 PXP_BAR0_SDM_LENGTH - 1)
479
480#define PXP_BAR0_START_USDM 0x1D80000
481#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \
482 PXP_BAR0_SDM_LENGTH - 1)
483
484#define PXP_BAR0_START_XSDM 0x1E00000
485#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \
486 PXP_BAR0_SDM_LENGTH - 1)
487
488#define PXP_BAR0_START_YSDM 0x1E80000
489#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \
490 PXP_BAR0_SDM_LENGTH - 1)
491
492#define PXP_BAR0_START_PSDM 0x1F00000
493#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \
494 PXP_BAR0_SDM_LENGTH - 1)
495
496#define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1)
497
498/* VF BAR */
499#define PXP_VF_BAR0 0
500
501#define PXP_VF_BAR0_START_GRC 0x3E00
502#define PXP_VF_BAR0_GRC_LENGTH 0x200
503#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
504 PXP_VF_BAR0_GRC_LENGTH - 1)
337 505
338#define PXP_VF_BAR0_START_IGU 0 506#define PXP_VF_BAR0_START_IGU 0
339#define PXP_VF_BAR0_IGU_LENGTH 0x3000 507#define PXP_VF_BAR0_IGU_LENGTH 0x3000
@@ -399,6 +567,20 @@
399#define PXP_NUM_ILT_RECORDS_BB 7600 567#define PXP_NUM_ILT_RECORDS_BB 7600
400#define PXP_NUM_ILT_RECORDS_K2 11000 568#define PXP_NUM_ILT_RECORDS_K2 11000
401#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) 569#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
570#define PXP_QUEUES_ZONE_MAX_NUM 320
571/*****************/
572/* PRM CONSTANTS */
573/*****************/
574#define PRM_DMA_PAD_BYTES_NUM 2
575/******************/
576/* SDMs CONSTANTS */
577/******************/
578#define SDM_OP_GEN_TRIG_NONE 0
579#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
580#define SDM_OP_GEN_TRIG_AGG_INT 2
581#define SDM_OP_GEN_TRIG_LOADER 4
582#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
583#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7
402 584
403#define SDM_COMP_TYPE_NONE 0 585#define SDM_COMP_TYPE_NONE 0
404#define SDM_COMP_TYPE_WAKE_THREAD 1 586#define SDM_COMP_TYPE_WAKE_THREAD 1
@@ -424,6 +606,8 @@
424/* PRS CONSTANTS */ 606/* PRS CONSTANTS */
425/*****************/ 607/*****************/
426 608
609#define PRS_GFT_CAM_LINES_NO_MATCH 31
610
427/* Async data KCQ CQE */ 611/* Async data KCQ CQE */
428struct async_data { 612struct async_data {
429 __le32 cid; 613 __le32 cid;
@@ -440,20 +624,6 @@ struct coalescing_timeset {
440#define COALESCING_TIMESET_VALID_SHIFT 7 624#define COALESCING_TIMESET_VALID_SHIFT 7
441}; 625};
442 626
443struct common_prs_pf_msg_info {
444 __le32 value;
445#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_MASK 0x1
446#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_SHIFT 0
447#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_MASK 0x1
448#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_SHIFT 1
449#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_MASK 0x1
450#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_SHIFT 2
451#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_MASK 0x1
452#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_SHIFT 3
453#define COMMON_PRS_PF_MSG_INFO_RESERVED_MASK 0xFFFFFFF
454#define COMMON_PRS_PF_MSG_INFO_RESERVED_SHIFT 4
455};
456
457struct common_queue_zone { 627struct common_queue_zone {
458 __le16 ring_drv_data_consumer; 628 __le16 ring_drv_data_consumer;
459 __le16 reserved; 629 __le16 reserved;
@@ -473,6 +643,19 @@ struct vf_pf_channel_eqe_data {
473 struct regpair msg_addr; 643 struct regpair msg_addr;
474}; 644};
475 645
646struct iscsi_eqe_data {
647 __le32 cid;
648 __le16 conn_id;
649 u8 error_code;
650 u8 error_pdu_opcode_reserved;
651#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
652#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0
653#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1
654#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6
655#define ISCSI_EQE_DATA_RESERVED0_MASK 0x1
656#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
657};
658
476struct malicious_vf_eqe_data { 659struct malicious_vf_eqe_data {
477 u8 vf_id; 660 u8 vf_id;
478 u8 err_id; 661 u8 err_id;
@@ -488,8 +671,10 @@ struct initial_cleanup_eqe_data {
488union event_ring_data { 671union event_ring_data {
489 u8 bytes[8]; 672 u8 bytes[8];
490 struct vf_pf_channel_eqe_data vf_pf_channel; 673 struct vf_pf_channel_eqe_data vf_pf_channel;
674 struct iscsi_eqe_data iscsi_info;
491 struct malicious_vf_eqe_data malicious_vf; 675 struct malicious_vf_eqe_data malicious_vf;
492 struct initial_cleanup_eqe_data vf_init_cleanup; 676 struct initial_cleanup_eqe_data vf_init_cleanup;
677 struct regpair roce_handle;
493}; 678};
494 679
495/* Event Ring Entry */ 680/* Event Ring Entry */
@@ -616,6 +801,52 @@ enum db_dest {
616 MAX_DB_DEST 801 MAX_DB_DEST
617}; 802};
618 803
804/* Enum of doorbell DPM types */
805enum db_dpm_type {
806 DPM_LEGACY,
807 DPM_ROCE,
808 DPM_L2_INLINE,
809 DPM_L2_BD,
810 MAX_DB_DPM_TYPE
811};
812
813/* Structure for doorbell data, in L2 DPM mode, for 1st db in a DPM burst */
814struct db_l2_dpm_data {
815 __le16 icid;
816 __le16 bd_prod;
817 __le32 params;
818#define DB_L2_DPM_DATA_SIZE_MASK 0x3F
819#define DB_L2_DPM_DATA_SIZE_SHIFT 0
820#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3
821#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6
822#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF
823#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8
824#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF
825#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16
826#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1
827#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
828#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
829#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
830#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
831#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
832};
833
834/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */
835struct db_l2_dpm_sge {
836 struct regpair addr;
837 __le16 nbytes;
838 __le16 bitfields;
839#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
840#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
841#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
842#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
843#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
844#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
845#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
846#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
847 __le32 reserved2;
848};
849
619/* Structure for doorbell address, in legacy mode */ 850/* Structure for doorbell address, in legacy mode */
620struct db_legacy_addr { 851struct db_legacy_addr {
621 __le32 addr; 852 __le32 addr;
@@ -627,6 +858,49 @@ struct db_legacy_addr {
627#define DB_LEGACY_ADDR_ICID_SHIFT 5 858#define DB_LEGACY_ADDR_ICID_SHIFT 5
628}; 859};
629 860
861/* Structure for doorbell address, in PWM mode */
862struct db_pwm_addr {
863 __le32 addr;
864#define DB_PWM_ADDR_RESERVED0_MASK 0x7
865#define DB_PWM_ADDR_RESERVED0_SHIFT 0
866#define DB_PWM_ADDR_OFFSET_MASK 0x7F
867#define DB_PWM_ADDR_OFFSET_SHIFT 3
868#define DB_PWM_ADDR_WID_MASK 0x3
869#define DB_PWM_ADDR_WID_SHIFT 10
870#define DB_PWM_ADDR_DPI_MASK 0xFFFF
871#define DB_PWM_ADDR_DPI_SHIFT 12
872#define DB_PWM_ADDR_RESERVED1_MASK 0xF
873#define DB_PWM_ADDR_RESERVED1_SHIFT 28
874};
875
876/* Parameters to RoCE firmware, passed in EDPM doorbell */
877struct db_roce_dpm_params {
878 __le32 params;
879#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F
880#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0
881#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3
882#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6
883#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF
884#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8
885#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
886#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16
887#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1
888#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27
889#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
890#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
891#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1
892#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29
893#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3
894#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30
895};
896
897/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
898struct db_roce_dpm_data {
899 __le16 icid;
900 __le16 prod_val;
901 struct db_roce_dpm_params params;
902};
903
630/* Igu interrupt command */ 904/* Igu interrupt command */
631enum igu_int_cmd { 905enum igu_int_cmd {
632 IGU_INT_ENABLE = 0, 906 IGU_INT_ENABLE = 0,
@@ -764,6 +1038,19 @@ struct pxp_ptt_entry {
764 struct pxp_pretend_cmd pretend; 1038 struct pxp_pretend_cmd pretend;
765}; 1039};
766 1040
1041/* VF Zone A Permission Register. */
1042struct pxp_vf_zone_a_permission {
1043 __le32 control;
1044#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
1045#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
1046#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
1047#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
1048#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
1049#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
1050#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
1051#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
1052};
1053
767/* RSS hash type */ 1054/* RSS hash type */
768struct rdif_task_context { 1055struct rdif_task_context {
769 __le32 initial_ref_tag; 1056 __le32 initial_ref_tag;
@@ -831,6 +1118,7 @@ struct rdif_task_context {
831 __le32 reserved2; 1118 __le32 reserved2;
832}; 1119};
833 1120
1121/* RSS hash type */
834enum rss_hash_type { 1122enum rss_hash_type {
835 RSS_HASH_TYPE_DEFAULT = 0, 1123 RSS_HASH_TYPE_DEFAULT = 0,
836 RSS_HASH_TYPE_IPV4 = 1, 1124 RSS_HASH_TYPE_IPV4 = 1,
@@ -942,7 +1230,7 @@ struct tdif_task_context {
942}; 1230};
943 1231
944struct timers_context { 1232struct timers_context {
945 __le32 logical_client0; 1233 __le32 logical_client_0;
946#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF 1234#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF
947#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 1235#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
948#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 1236#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
@@ -951,7 +1239,7 @@ struct timers_context {
951#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 1239#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
952#define TIMERS_CONTEXT_RESERVED0_MASK 0x3 1240#define TIMERS_CONTEXT_RESERVED0_MASK 0x3
953#define TIMERS_CONTEXT_RESERVED0_SHIFT 30 1241#define TIMERS_CONTEXT_RESERVED0_SHIFT 30
954 __le32 logical_client1; 1242 __le32 logical_client_1;
955#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF 1243#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF
956#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 1244#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
957#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 1245#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
@@ -960,7 +1248,7 @@ struct timers_context {
960#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 1248#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
961#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 1249#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
962#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 1250#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
963 __le32 logical_client2; 1251 __le32 logical_client_2;
964#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF 1252#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF
965#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 1253#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
966#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 1254#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
@@ -978,3 +1266,4 @@ struct timers_context {
978#define TIMERS_CONTEXT_RESERVED3_SHIFT 29 1266#define TIMERS_CONTEXT_RESERVED3_SHIFT 29
979}; 1267};
980#endif /* __COMMON_HSI__ */ 1268#endif /* __COMMON_HSI__ */
1269#endif
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index b5ebc697d05f..1aa0727c4136 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -13,9 +13,12 @@
13/* ETH FW CONSTANTS */ 13/* ETH FW CONSTANTS */
14/********************/ 14/********************/
15#define ETH_HSI_VER_MAJOR 3 15#define ETH_HSI_VER_MAJOR 3
16#define ETH_HSI_VER_MINOR 0 16#define ETH_HSI_VER_MINOR 10
17#define ETH_CACHE_LINE_SIZE 64 17
18#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
18 19
20#define ETH_CACHE_LINE_SIZE 64
21#define ETH_RX_CQE_GAP 32
19#define ETH_MAX_RAMROD_PER_CON 8 22#define ETH_MAX_RAMROD_PER_CON 8
20#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 23#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
21#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 24#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
@@ -24,15 +27,25 @@
24 27
25#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 28#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
26#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 29#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
30#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
27#define ETH_TX_MAX_LSO_HDR_NBD 4 31#define ETH_TX_MAX_LSO_HDR_NBD 4
28#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 32#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
29#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 33#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
30#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 34#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
31#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 35#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
32#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8)) 36#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
33#define ETH_TX_MAX_LSO_HDR_BYTES 510 37#define ETH_TX_MAX_LSO_HDR_BYTES 510
38#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
39#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
40#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
41#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
42#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
34 43
35#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS 44#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
45#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
46 (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
47#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
48 (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
36 49
37/* Maximum number of buffers, used for RX packet placement */ 50/* Maximum number of buffers, used for RX packet placement */
38#define ETH_RX_MAX_BUFF_PER_PKT 5 51#define ETH_RX_MAX_BUFF_PER_PKT 5
@@ -59,6 +72,8 @@
59#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 72#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
60#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 73#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
61 74
75/* Control frame check constants */
76#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
62 77
63struct eth_tx_1st_bd_flags { 78struct eth_tx_1st_bd_flags {
64 u8 bitfields; 79 u8 bitfields;
@@ -82,10 +97,10 @@ struct eth_tx_1st_bd_flags {
82 97
83/* The parsing information data fo rthe first tx bd of a given packet. */ 98/* The parsing information data fo rthe first tx bd of a given packet. */
84struct eth_tx_data_1st_bd { 99struct eth_tx_data_1st_bd {
85 __le16 vlan; 100 __le16 vlan;
86 u8 nbds; 101 u8 nbds;
87 struct eth_tx_1st_bd_flags bd_flags; 102 struct eth_tx_1st_bd_flags bd_flags;
88 __le16 bitfields; 103 __le16 bitfields;
89#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 104#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
90#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 105#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
91#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 106#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
@@ -96,7 +111,7 @@ struct eth_tx_data_1st_bd {
96 111
97/* The parsing information data for the second tx bd of a given packet. */ 112/* The parsing information data for the second tx bd of a given packet. */
98struct eth_tx_data_2nd_bd { 113struct eth_tx_data_2nd_bd {
99 __le16 tunn_ip_size; 114 __le16 tunn_ip_size;
100 __le16 bitfields1; 115 __le16 bitfields1;
101#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF 116#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
102#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 117#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
@@ -125,9 +140,14 @@ struct eth_tx_data_2nd_bd {
125#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 140#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
126}; 141};
127 142
143/* Firmware data for L2-EDPM packet. */
144struct eth_edpm_fw_data {
145 struct eth_tx_data_1st_bd data_1st_bd;
146 struct eth_tx_data_2nd_bd data_2nd_bd;
147 __le32 reserved;
148};
149
128struct eth_fast_path_cqe_fw_debug { 150struct eth_fast_path_cqe_fw_debug {
129 u8 reserved0;
130 u8 reserved1;
131 __le16 reserved2; 151 __le16 reserved2;
132}; 152};
133 153
@@ -148,6 +168,17 @@ struct eth_tunnel_parsing_flags {
148#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 168#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
149}; 169};
150 170
171/* PMD flow control bits */
172struct eth_pmd_flow_flags {
173 u8 flags;
174#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1
175#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
176#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
177#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
178#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
179#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
180};
181
151/* Regular ETH Rx FP CQE. */ 182/* Regular ETH Rx FP CQE. */
152struct eth_fast_path_rx_reg_cqe { 183struct eth_fast_path_rx_reg_cqe {
153 u8 type; 184 u8 type;
@@ -166,64 +197,63 @@ struct eth_fast_path_rx_reg_cqe {
166 u8 placement_offset; 197 u8 placement_offset;
167 struct eth_tunnel_parsing_flags tunnel_pars_flags; 198 struct eth_tunnel_parsing_flags tunnel_pars_flags;
168 u8 bd_num; 199 u8 bd_num;
169 u8 reserved[7]; 200 u8 reserved[9];
170 struct eth_fast_path_cqe_fw_debug fw_debug; 201 struct eth_fast_path_cqe_fw_debug fw_debug;
171 u8 reserved1[3]; 202 u8 reserved1[3];
172 u8 flags; 203 struct eth_pmd_flow_flags pmd_flags;
173#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1
174#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0
175#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1
176#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1
177#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F
178#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2
179}; 204};
180 205
181/* TPA-continue ETH Rx FP CQE. */ 206/* TPA-continue ETH Rx FP CQE. */
182struct eth_fast_path_rx_tpa_cont_cqe { 207struct eth_fast_path_rx_tpa_cont_cqe {
183 u8 type; 208 u8 type;
184 u8 tpa_agg_index; 209 u8 tpa_agg_index;
185 __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; 210 __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
186 u8 reserved[5]; 211 u8 reserved;
187 u8 reserved1; 212 u8 reserved1;
188 __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; 213 __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
214 u8 reserved3[3];
215 struct eth_pmd_flow_flags pmd_flags;
189}; 216};
190 217
191/* TPA-end ETH Rx FP CQE. */ 218/* TPA-end ETH Rx FP CQE. */
192struct eth_fast_path_rx_tpa_end_cqe { 219struct eth_fast_path_rx_tpa_end_cqe {
193 u8 type; 220 u8 type;
194 u8 tpa_agg_index; 221 u8 tpa_agg_index;
195 __le16 total_packet_len; 222 __le16 total_packet_len;
196 u8 num_of_bds; 223 u8 num_of_bds;
197 u8 end_reason; 224 u8 end_reason;
198 __le16 num_of_coalesced_segs; 225 __le16 num_of_coalesced_segs;
199 __le32 ts_delta; 226 __le32 ts_delta;
200 __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]; 227 __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
201 u8 reserved1[3]; 228 __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
202 u8 reserved2; 229 __le16 reserved1;
203 __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE]; 230 u8 reserved2;
231 struct eth_pmd_flow_flags pmd_flags;
204}; 232};
205 233
206/* TPA-start ETH Rx FP CQE. */ 234/* TPA-start ETH Rx FP CQE. */
207struct eth_fast_path_rx_tpa_start_cqe { 235struct eth_fast_path_rx_tpa_start_cqe {
208 u8 type; 236 u8 type;
209 u8 bitfields; 237 u8 bitfields;
210#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 238#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
211#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 239#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
212#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF 240#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
213#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 241#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
214#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 242#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
215#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 243#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
216 __le16 seg_len; 244 __le16 seg_len;
217 struct parsing_and_err_flags pars_flags; 245 struct parsing_and_err_flags pars_flags;
218 __le16 vlan_tag; 246 __le16 vlan_tag;
219 __le32 rss_hash; 247 __le32 rss_hash;
220 __le16 len_on_first_bd; 248 __le16 len_on_first_bd;
221 u8 placement_offset; 249 u8 placement_offset;
222 struct eth_tunnel_parsing_flags tunnel_pars_flags; 250 struct eth_tunnel_parsing_flags tunnel_pars_flags;
223 u8 tpa_agg_index; 251 u8 tpa_agg_index;
224 u8 header_len; 252 u8 header_len;
225 __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; 253 __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
226 struct eth_fast_path_cqe_fw_debug fw_debug; 254 struct eth_fast_path_cqe_fw_debug fw_debug;
255 u8 reserved;
256 struct eth_pmd_flow_flags pmd_flags;
227}; 257};
228 258
229/* The L4 pseudo checksum mode for Ethernet */ 259/* The L4 pseudo checksum mode for Ethernet */
@@ -245,15 +275,7 @@ struct eth_slow_path_rx_cqe {
245 u8 reserved[25]; 275 u8 reserved[25];
246 __le16 echo; 276 __le16 echo;
247 u8 reserved1; 277 u8 reserved1;
248 u8 flags; 278 struct eth_pmd_flow_flags pmd_flags;
249/* for PMD mode - valid indication */
250#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1
251#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0
252/* for PMD mode - valid toggle indication */
253#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1
254#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
255#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F
256#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2
257}; 279};
258 280
259/* union for all ETH Rx CQE types */ 281/* union for all ETH Rx CQE types */
@@ -276,6 +298,11 @@ enum eth_rx_cqe_type {
276 MAX_ETH_RX_CQE_TYPE 298 MAX_ETH_RX_CQE_TYPE
277}; 299};
278 300
301struct eth_rx_pmd_cqe {
302 union eth_rx_cqe cqe;
303 u8 reserved[ETH_RX_CQE_GAP];
304};
305
279enum eth_rx_tunn_type { 306enum eth_rx_tunn_type {
280 ETH_RX_NO_TUNN, 307 ETH_RX_NO_TUNN,
281 ETH_RX_TUNN_GENEVE, 308 ETH_RX_TUNN_GENEVE,
@@ -313,8 +340,8 @@ struct eth_tx_2nd_bd {
313 340
314/* The parsing information data for the third tx bd of a given packet. */ 341/* The parsing information data for the third tx bd of a given packet. */
315struct eth_tx_data_3rd_bd { 342struct eth_tx_data_3rd_bd {
316 __le16 lso_mss; 343 __le16 lso_mss;
317 __le16 bitfields; 344 __le16 bitfields;
318#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF 345#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
319#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 346#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
320#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF 347#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
@@ -323,8 +350,8 @@ struct eth_tx_data_3rd_bd {
323#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 350#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
324#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F 351#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
325#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 352#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
326 u8 tunn_l4_hdr_start_offset_w; 353 u8 tunn_l4_hdr_start_offset_w;
327 u8 tunn_hdr_size_w; 354 u8 tunn_hdr_size_w;
328}; 355};
329 356
330/* The third tx bd of a given packet */ 357/* The third tx bd of a given packet */
@@ -355,10 +382,10 @@ struct eth_tx_bd {
355}; 382};
356 383
357union eth_tx_bd_types { 384union eth_tx_bd_types {
358 struct eth_tx_1st_bd first_bd; 385 struct eth_tx_1st_bd first_bd;
359 struct eth_tx_2nd_bd second_bd; 386 struct eth_tx_2nd_bd second_bd;
360 struct eth_tx_3rd_bd third_bd; 387 struct eth_tx_3rd_bd third_bd;
361 struct eth_tx_bd reg_bd; 388 struct eth_tx_bd reg_bd;
362}; 389};
363 390
364/* Mstorm Queue Zone */ 391/* Mstorm Queue Zone */
@@ -389,8 +416,8 @@ struct eth_db_data {
389#define ETH_DB_DATA_RESERVED_SHIFT 5 416#define ETH_DB_DATA_RESERVED_SHIFT 5
390#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 417#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
391#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 418#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
392 u8 agg_flags; 419 u8 agg_flags;
393 __le16 bd_prod; 420 __le16 bd_prod;
394}; 421};
395 422
396#endif /* __ETH_COMMON__ */ 423#endif /* __ETH_COMMON__ */
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index b3c0feb15ae9..8f64b1223c2f 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -311,7 +311,7 @@ struct iscsi_login_req_hdr {
311#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 311#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
312#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF 312#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
313#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 313#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
314 __le32 isid_TABC; 314 __le32 isid_tabc;
315 __le16 tsih; 315 __le16 tsih;
316 __le16 isid_d; 316 __le16 isid_d;
317 __le32 itt; 317 __le32 itt;
@@ -464,7 +464,7 @@ struct iscsi_login_response_hdr {
464#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 464#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
465#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF 465#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
466#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 466#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
467 __le32 isid_TABC; 467 __le32 isid_tabc;
468 __le16 tsih; 468 __le16 tsih;
469 __le16 isid_d; 469 __le16 isid_d;
470 __le32 itt; 470 __le32 itt;
@@ -688,8 +688,7 @@ union iscsi_cqe {
688enum iscsi_cqes_type { 688enum iscsi_cqes_type {
689 ISCSI_CQE_TYPE_SOLICITED = 1, 689 ISCSI_CQE_TYPE_SOLICITED = 1,
690 ISCSI_CQE_TYPE_UNSOLICITED, 690 ISCSI_CQE_TYPE_UNSOLICITED,
691 ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE 691 ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE,
692 ,
693 ISCSI_CQE_TYPE_TASK_CLEANUP, 692 ISCSI_CQE_TYPE_TASK_CLEANUP,
694 ISCSI_CQE_TYPE_DUMMY, 693 ISCSI_CQE_TYPE_DUMMY,
695 MAX_ISCSI_CQES_TYPE 694 MAX_ISCSI_CQES_TYPE
@@ -769,9 +768,9 @@ enum iscsi_eqe_opcode {
769 ISCSI_EVENT_TYPE_UPDATE_CONN, 768 ISCSI_EVENT_TYPE_UPDATE_CONN,
770 ISCSI_EVENT_TYPE_CLEAR_SQ, 769 ISCSI_EVENT_TYPE_CLEAR_SQ,
771 ISCSI_EVENT_TYPE_TERMINATE_CONN, 770 ISCSI_EVENT_TYPE_TERMINATE_CONN,
771 ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
772 ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, 772 ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
773 ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, 773 ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
774 RESERVED8,
775 RESERVED9, 774 RESERVED9,
776 ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, 775 ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
777 ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, 776 ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
@@ -867,6 +866,7 @@ enum iscsi_ramrod_cmd_id {
867 ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4, 866 ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4,
868 ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, 867 ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
869 ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, 868 ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
869 ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
870 MAX_ISCSI_RAMROD_CMD_ID 870 MAX_ISCSI_RAMROD_CMD_ID
871}; 871};
872 872
@@ -883,6 +883,16 @@ union iscsi_seq_num {
883 __le16 r2t_sn; 883 __le16 r2t_sn;
884}; 884};
885 885
886struct iscsi_spe_conn_mac_update {
887 struct iscsi_slow_path_hdr hdr;
888 __le16 conn_id;
889 __le32 fw_cid;
890 __le16 remote_mac_addr_lo;
891 __le16 remote_mac_addr_mid;
892 __le16 remote_mac_addr_hi;
893 u8 reserved0[2];
894};
895
886struct iscsi_spe_conn_offload { 896struct iscsi_spe_conn_offload {
887 struct iscsi_slow_path_hdr hdr; 897 struct iscsi_slow_path_hdr hdr;
888 __le16 conn_id; 898 __le16 conn_id;
@@ -1302,14 +1312,6 @@ struct mstorm_iscsi_stats_drv {
1302 struct regpair iscsi_rx_dropped_pdus_task_not_valid; 1312 struct regpair iscsi_rx_dropped_pdus_task_not_valid;
1303}; 1313};
1304 1314
1305struct ooo_opaque {
1306 __le32 cid;
1307 u8 drop_isle;
1308 u8 drop_size;
1309 u8 ooo_opcode;
1310 u8 ooo_isle;
1311};
1312
1313struct pstorm_iscsi_stats_drv { 1315struct pstorm_iscsi_stats_drv {
1314 struct regpair iscsi_tx_bytes_cnt; 1316 struct regpair iscsi_tx_bytes_cnt;
1315 struct regpair iscsi_tx_packet_cnt; 1317 struct regpair iscsi_tx_packet_cnt;
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 7e441bdeabdc..72d88cf3ca25 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -16,19 +16,6 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/qed/common_hsi.h> 17#include <linux/qed/common_hsi.h>
18 18
19/* dma_addr_t manip */
20#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
21#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
22#define DMA_REGPAIR_LE(x, val) do { \
23 (x).hi = DMA_HI_LE((val)); \
24 (x).lo = DMA_LO_LE((val)); \
25 } while (0)
26
27#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
28#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
29#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
30#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
31
32enum qed_chain_mode { 19enum qed_chain_mode {
33 /* Each Page contains a next pointer at its end */ 20 /* Each Page contains a next pointer at its end */
34 QED_CHAIN_MODE_NEXT_PTR, 21 QED_CHAIN_MODE_NEXT_PTR,
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 4475a9d8ae15..33c24ebc9b7f 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -23,6 +23,9 @@ struct qed_dev_eth_info {
23 23
24 u8 port_mac[ETH_ALEN]; 24 u8 port_mac[ETH_ALEN];
25 u8 num_vlan_filters; 25 u8 num_vlan_filters;
26
27 /* Legacy VF - this affects the datapath, so qede has to know */
28 bool is_legacy;
26}; 29};
27 30
28struct qed_update_vport_rss_params { 31struct qed_update_vport_rss_params {
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index d6c4177df7cb..f9ae903bbb84 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -34,6 +34,8 @@ enum dcbx_protocol_type {
34 DCBX_MAX_PROTOCOL_TYPE 34 DCBX_MAX_PROTOCOL_TYPE
35}; 35};
36 36
37#define QED_ROCE_PROTOCOL_INDEX (3)
38
37#ifdef CONFIG_DCB 39#ifdef CONFIG_DCB
38#define QED_LLDP_CHASSIS_ID_STAT_LEN 4 40#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
39#define QED_LLDP_PORT_ID_STAT_LEN 4 41#define QED_LLDP_PORT_ID_STAT_LEN 4
@@ -260,15 +262,15 @@ struct qed_dev_info {
260 /* MFW version */ 262 /* MFW version */
261 u32 mfw_rev; 263 u32 mfw_rev;
262 264
263 bool rdma_supported;
264
265 u32 flash_size; 265 u32 flash_size;
266 u8 mf_mode; 266 u8 mf_mode;
267 bool tx_switching; 267 bool tx_switching;
268 bool rdma_supported;
268}; 269};
269 270
270enum qed_sb_type { 271enum qed_sb_type {
271 QED_SB_TYPE_L2_QUEUE, 272 QED_SB_TYPE_L2_QUEUE,
273 QED_SB_TYPE_CNQ,
272}; 274};
273 275
274enum qed_protocol { 276enum qed_protocol {
@@ -276,6 +278,21 @@ enum qed_protocol {
276 QED_PROTOCOL_ISCSI, 278 QED_PROTOCOL_ISCSI,
277}; 279};
278 280
281enum qed_link_mode_bits {
282 QED_LM_FIBRE_BIT = BIT(0),
283 QED_LM_Autoneg_BIT = BIT(1),
284 QED_LM_Asym_Pause_BIT = BIT(2),
285 QED_LM_Pause_BIT = BIT(3),
286 QED_LM_1000baseT_Half_BIT = BIT(4),
287 QED_LM_1000baseT_Full_BIT = BIT(5),
288 QED_LM_10000baseKR_Full_BIT = BIT(6),
289 QED_LM_25000baseKR_Full_BIT = BIT(7),
290 QED_LM_40000baseLR4_Full_BIT = BIT(8),
291 QED_LM_50000baseKR2_Full_BIT = BIT(9),
292 QED_LM_100000baseKR4_Full_BIT = BIT(10),
293 QED_LM_COUNT = 11
294};
295
279struct qed_link_params { 296struct qed_link_params {
280 bool link_up; 297 bool link_up;
281 298
@@ -303,9 +320,11 @@ struct qed_link_params {
303struct qed_link_output { 320struct qed_link_output {
304 bool link_up; 321 bool link_up;
305 322
306 u32 supported_caps; /* In SUPPORTED defs */ 323 /* In QED_LM_* defs */
307 u32 advertised_caps; /* In ADVERTISED defs */ 324 u32 supported_caps;
308 u32 lp_caps; /* In ADVERTISED defs */ 325 u32 advertised_caps;
326 u32 lp_caps;
327
309 u32 speed; /* In Mb/s */ 328 u32 speed; /* In Mb/s */
310 u8 duplex; /* In DUPLEX defs */ 329 u8 duplex; /* In DUPLEX defs */
311 u8 port; /* In PORT defs */ 330 u8 port; /* In PORT defs */
@@ -438,6 +457,10 @@ struct qed_common_ops {
438 void (*simd_handler_clean)(struct qed_dev *cdev, 457 void (*simd_handler_clean)(struct qed_dev *cdev,
439 int index); 458 int index);
440 459
460 int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);
461
462 int (*dbg_all_data_size) (struct qed_dev *cdev);
463
441/** 464/**
442 * @brief can_link_change - can the instance change the link or not 465 * @brief can_link_change - can the instance change the link or not
443 * 466 *
@@ -606,8 +629,9 @@ enum DP_MODULE {
606 QED_MSG_SP = 0x100000, 629 QED_MSG_SP = 0x100000,
607 QED_MSG_STORAGE = 0x200000, 630 QED_MSG_STORAGE = 0x200000,
608 QED_MSG_CXT = 0x800000, 631 QED_MSG_CXT = 0x800000,
632 QED_MSG_LL2 = 0x1000000,
609 QED_MSG_ILT = 0x2000000, 633 QED_MSG_ILT = 0x2000000,
610 QED_MSG_ROCE = 0x4000000, 634 QED_MSG_RDMA = 0x4000000,
611 QED_MSG_DEBUG = 0x8000000, 635 QED_MSG_DEBUG = 0x8000000,
612 /* to be added...up to 0x8000000 */ 636 /* to be added...up to 0x8000000 */
613}; 637};
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
new file mode 100644
index 000000000000..fd75c265dba3
--- /dev/null
+++ b/include/linux/qed/qed_ll2_if.h
@@ -0,0 +1,139 @@
1/* QLogic qed NIC Driver
2 *
3 * Copyright (c) 2015 QLogic Corporation
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#ifndef _QED_LL2_IF_H
11#define _QED_LL2_IF_H
12
13#include <linux/types.h>
14#include <linux/interrupt.h>
15#include <linux/netdevice.h>
16#include <linux/pci.h>
17#include <linux/skbuff.h>
18#include <linux/version.h>
19#include <linux/kernel.h>
20#include <linux/slab.h>
21#include <linux/qed/qed_if.h>
22
23struct qed_ll2_stats {
24 u64 gsi_invalid_hdr;
25 u64 gsi_invalid_pkt_length;
26 u64 gsi_unsupported_pkt_typ;
27 u64 gsi_crcchksm_error;
28
29 u64 packet_too_big_discard;
30 u64 no_buff_discard;
31
32 u64 rcv_ucast_bytes;
33 u64 rcv_mcast_bytes;
34 u64 rcv_bcast_bytes;
35 u64 rcv_ucast_pkts;
36 u64 rcv_mcast_pkts;
37 u64 rcv_bcast_pkts;
38
39 u64 sent_ucast_bytes;
40 u64 sent_mcast_bytes;
41 u64 sent_bcast_bytes;
42 u64 sent_ucast_pkts;
43 u64 sent_mcast_pkts;
44 u64 sent_bcast_pkts;
45};
46
47#define QED_LL2_UNUSED_HANDLE (0xff)
48
49struct qed_ll2_cb_ops {
50 int (*rx_cb)(void *, struct sk_buff *, u32, u32);
51 int (*tx_cb)(void *, struct sk_buff *, bool);
52};
53
54struct qed_ll2_params {
55 u16 mtu;
56 bool drop_ttl0_packets;
57 bool rx_vlan_stripping;
58 u8 tx_tc;
59 bool frags_mapped;
60 u8 ll2_mac_address[ETH_ALEN];
61};
62
63struct qed_ll2_ops {
64/**
65 * @brief start - initializes ll2
66 *
67 * @param cdev
68 * @param params - protocol driver configuration for the ll2.
69 *
70 * @return 0 on success, otherwise error value.
71 */
72 int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
73
74/**
75 * @brief stop - stops the ll2
76 *
77 * @param cdev
78 *
79 * @return 0 on success, otherwise error value.
80 */
81 int (*stop)(struct qed_dev *cdev);
82
83/**
84 * @brief start_xmit - transmits an skb over the ll2 interface
85 *
86 * @param cdev
87 * @param skb
88 *
89 * @return 0 on success, otherwise error value.
90 */
91 int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb);
92
93/**
94 * @brief register_cb_ops - protocol driver register the callback for Rx/Tx
95 * packets. Should be called before `start'.
96 *
97 * @param cdev
98 * @param cookie - to be passed to the callback functions.
99 * @param ops - the callback functions to register for Rx / Tx.
100 *
101 * @return 0 on success, otherwise error value.
102 */
103 void (*register_cb_ops)(struct qed_dev *cdev,
104 const struct qed_ll2_cb_ops *ops,
105 void *cookie);
106
107/**
108 * @brief get LL2 related statistics
109 *
110 * @param cdev
111 * @param stats - pointer to struct that would be filled with stats
112 *
113 * @return 0 on success, error otherwise.
114 */
115 int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
116};
117
118#ifdef CONFIG_QED_LL2
119int qed_ll2_alloc_if(struct qed_dev *);
120void qed_ll2_dealloc_if(struct qed_dev *);
121#else
122static const struct qed_ll2_ops qed_ll2_ops_pass = {
123 .start = NULL,
124 .stop = NULL,
125 .start_xmit = NULL,
126 .register_cb_ops = NULL,
127 .get_stats = NULL,
128};
129
130static inline int qed_ll2_alloc_if(struct qed_dev *cdev)
131{
132 return 0;
133}
134
135static inline void qed_ll2_dealloc_if(struct qed_dev *cdev)
136{
137}
138#endif
139#endif
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h
new file mode 100644
index 000000000000..53047d3fa678
--- /dev/null
+++ b/include/linux/qed/qed_roce_if.h
@@ -0,0 +1,604 @@
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _QED_ROCE_IF_H
33#define _QED_ROCE_IF_H
34#include <linux/types.h>
35#include <linux/delay.h>
36#include <linux/list.h>
37#include <linux/mutex.h>
38#include <linux/pci.h>
39#include <linux/slab.h>
40#include <linux/qed/qed_if.h>
41#include <linux/qed/qed_ll2_if.h>
42#include <linux/qed/rdma_common.h>
43
44enum qed_roce_ll2_tx_dest {
45 /* Light L2 TX Destination to the Network */
46 QED_ROCE_LL2_TX_DEST_NW,
47
48 /* Light L2 TX Destination to the Loopback */
49 QED_ROCE_LL2_TX_DEST_LB,
50 QED_ROCE_LL2_TX_DEST_MAX
51};
52
53#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
54
55/* rdma interface */
56
57enum qed_roce_qp_state {
58 QED_ROCE_QP_STATE_RESET,
59 QED_ROCE_QP_STATE_INIT,
60 QED_ROCE_QP_STATE_RTR,
61 QED_ROCE_QP_STATE_RTS,
62 QED_ROCE_QP_STATE_SQD,
63 QED_ROCE_QP_STATE_ERR,
64 QED_ROCE_QP_STATE_SQE
65};
66
67enum qed_rdma_tid_type {
68 QED_RDMA_TID_REGISTERED_MR,
69 QED_RDMA_TID_FMR,
70 QED_RDMA_TID_MW_TYPE1,
71 QED_RDMA_TID_MW_TYPE2A
72};
73
74struct qed_rdma_events {
75 void *context;
76 void (*affiliated_event)(void *context, u8 fw_event_code,
77 void *fw_handle);
78 void (*unaffiliated_event)(void *context, u8 event_code);
79};
80
81struct qed_rdma_device {
82 u32 vendor_id;
83 u32 vendor_part_id;
84 u32 hw_ver;
85 u64 fw_ver;
86
87 u64 node_guid;
88 u64 sys_image_guid;
89
90 u8 max_cnq;
91 u8 max_sge;
92 u8 max_srq_sge;
93 u16 max_inline;
94 u32 max_wqe;
95 u32 max_srq_wqe;
96 u8 max_qp_resp_rd_atomic_resc;
97 u8 max_qp_req_rd_atomic_resc;
98 u64 max_dev_resp_rd_atomic_resc;
99 u32 max_cq;
100 u32 max_qp;
101 u32 max_srq;
102 u32 max_mr;
103 u64 max_mr_size;
104 u32 max_cqe;
105 u32 max_mw;
106 u32 max_fmr;
107 u32 max_mr_mw_fmr_pbl;
108 u64 max_mr_mw_fmr_size;
109 u32 max_pd;
110 u32 max_ah;
111 u8 max_pkey;
112 u16 max_srq_wr;
113 u8 max_stats_queues;
114 u32 dev_caps;
115
116 /* Abilty to support RNR-NAK generation */
117
118#define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1
119#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0
120 /* Abilty to support shutdown port */
121#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1
122#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1
123 /* Abilty to support port active event */
124#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1
125#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2
126 /* Abilty to support port change event */
127#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1
128#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3
129 /* Abilty to support system image GUID */
130#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1
131#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4
132 /* Abilty to support bad P_Key counter support */
133#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1
134#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5
135 /* Abilty to support atomic operations */
136#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1
137#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6
138#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1
139#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7
140 /* Abilty to support modifying the maximum number of
141 * outstanding work requests per QP
142 */
143#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1
144#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8
145 /* Abilty to support automatic path migration */
146#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1
147#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9
148 /* Abilty to support the base memory management extensions */
149#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1
150#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10
151#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1
152#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11
153 /* Abilty to support multipile page sizes per memory region */
154#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1
155#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12
156 /* Abilty to support block list physical buffer list */
157#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1
158#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13
159 /* Abilty to support zero based virtual addresses */
160#define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1
161#define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14
162 /* Abilty to support local invalidate fencing */
163#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1
164#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15
165 /* Abilty to support Loopback on QP */
166#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1
167#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16
168 u64 page_size_caps;
169 u8 dev_ack_delay;
170 u32 reserved_lkey;
171 u32 bad_pkey_counter;
172 struct qed_rdma_events events;
173};
174
175enum qed_port_state {
176 QED_RDMA_PORT_UP,
177 QED_RDMA_PORT_DOWN,
178};
179
180enum qed_roce_capability {
181 QED_ROCE_V1 = 1 << 0,
182 QED_ROCE_V2 = 1 << 1,
183};
184
185struct qed_rdma_port {
186 enum qed_port_state port_state;
187 int link_speed;
188 u64 max_msg_size;
189 u8 source_gid_table_len;
190 void *source_gid_table_ptr;
191 u8 pkey_table_len;
192 void *pkey_table_ptr;
193 u32 pkey_bad_counter;
194 enum qed_roce_capability capability;
195};
196
197struct qed_rdma_cnq_params {
198 u8 num_pbl_pages;
199 u64 pbl_ptr;
200};
201
202/* The CQ Mode affects the CQ doorbell transaction size.
203 * 64/32 bit machines should configure to 32/16 bits respectively.
204 */
205enum qed_rdma_cq_mode {
206 QED_RDMA_CQ_MODE_16_BITS,
207 QED_RDMA_CQ_MODE_32_BITS,
208};
209
210struct qed_roce_dcqcn_params {
211 u8 notification_point;
212 u8 reaction_point;
213
214 /* fields for notification point */
215 u32 cnp_send_timeout;
216
217 /* fields for reaction point */
218 u32 rl_bc_rate;
219 u16 rl_max_rate;
220 u16 rl_r_ai;
221 u16 rl_r_hai;
222 u16 dcqcn_g;
223 u32 dcqcn_k_us;
224 u32 dcqcn_timeout_us;
225};
226
227struct qed_rdma_start_in_params {
228 struct qed_rdma_events *events;
229 struct qed_rdma_cnq_params cnq_pbl_list[128];
230 u8 desired_cnq;
231 enum qed_rdma_cq_mode cq_mode;
232 struct qed_roce_dcqcn_params dcqcn_params;
233 u16 max_mtu;
234 u8 mac_addr[ETH_ALEN];
235 u8 iwarp_flags;
236};
237
238struct qed_rdma_add_user_out_params {
239 u16 dpi;
240 u64 dpi_addr;
241 u64 dpi_phys_addr;
242 u32 dpi_size;
243};
244
245enum roce_mode {
246 ROCE_V1,
247 ROCE_V2_IPV4,
248 ROCE_V2_IPV6,
249 MAX_ROCE_MODE
250};
251
252union qed_gid {
253 u8 bytes[16];
254 u16 words[8];
255 u32 dwords[4];
256 u64 qwords[2];
257 u32 ipv4_addr;
258};
259
260struct qed_rdma_register_tid_in_params {
261 u32 itid;
262 enum qed_rdma_tid_type tid_type;
263 u8 key;
264 u16 pd;
265 bool local_read;
266 bool local_write;
267 bool remote_read;
268 bool remote_write;
269 bool remote_atomic;
270 bool mw_bind;
271 u64 pbl_ptr;
272 bool pbl_two_level;
273 u8 pbl_page_size_log;
274 u8 page_size_log;
275 u32 fbo;
276 u64 length;
277 u64 vaddr;
278 bool zbva;
279 bool phy_mr;
280 bool dma_mr;
281
282 bool dif_enabled;
283 u64 dif_error_addr;
284 u64 dif_runt_addr;
285};
286
287struct qed_rdma_create_cq_in_params {
288 u32 cq_handle_lo;
289 u32 cq_handle_hi;
290 u32 cq_size;
291 u16 dpi;
292 bool pbl_two_level;
293 u64 pbl_ptr;
294 u16 pbl_num_pages;
295 u8 pbl_page_size_log;
296 u8 cnq_id;
297 u16 int_timeout;
298};
299
300struct qed_rdma_create_srq_in_params {
301 u64 pbl_base_addr;
302 u64 prod_pair_addr;
303 u16 num_pages;
304 u16 pd_id;
305 u16 page_size;
306};
307
308struct qed_rdma_destroy_cq_in_params {
309 u16 icid;
310};
311
312struct qed_rdma_destroy_cq_out_params {
313 u16 num_cq_notif;
314};
315
316struct qed_rdma_create_qp_in_params {
317 u32 qp_handle_lo;
318 u32 qp_handle_hi;
319 u32 qp_handle_async_lo;
320 u32 qp_handle_async_hi;
321 bool use_srq;
322 bool signal_all;
323 bool fmr_and_reserved_lkey;
324 u16 pd;
325 u16 dpi;
326 u16 sq_cq_id;
327 u16 sq_num_pages;
328 u64 sq_pbl_ptr;
329 u8 max_sq_sges;
330 u16 rq_cq_id;
331 u16 rq_num_pages;
332 u64 rq_pbl_ptr;
333 u16 srq_id;
334 u8 stats_queue;
335};
336
337struct qed_rdma_create_qp_out_params {
338 u32 qp_id;
339 u16 icid;
340 void *rq_pbl_virt;
341 dma_addr_t rq_pbl_phys;
342 void *sq_pbl_virt;
343 dma_addr_t sq_pbl_phys;
344};
345
346struct qed_rdma_modify_qp_in_params {
347 u32 modify_flags;
348#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1
349#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0
350#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1
351#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1
352#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1
353#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2
354#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1
355#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3
356#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1
357#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4
358#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1
359#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5
360#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1
361#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6
362#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1
363#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7
364#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1
365#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8
366#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1
367#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9
368#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1
369#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10
370#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1
371#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11
372#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1
373#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12
374#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1
375#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13
376#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1
377#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14
378
379 enum qed_roce_qp_state new_state;
380 u16 pkey;
381 bool incoming_rdma_read_en;
382 bool incoming_rdma_write_en;
383 bool incoming_atomic_en;
384 bool e2e_flow_control_en;
385 u32 dest_qp;
386 bool lb_indication;
387 u16 mtu;
388 u8 traffic_class_tos;
389 u8 hop_limit_ttl;
390 u32 flow_label;
391 union qed_gid sgid;
392 union qed_gid dgid;
393 u16 udp_src_port;
394
395 u16 vlan_id;
396
397 u32 rq_psn;
398 u32 sq_psn;
399 u8 max_rd_atomic_resp;
400 u8 max_rd_atomic_req;
401 u32 ack_timeout;
402 u8 retry_cnt;
403 u8 rnr_retry_cnt;
404 u8 min_rnr_nak_timer;
405 bool sqd_async;
406 u8 remote_mac_addr[6];
407 u8 local_mac_addr[6];
408 bool use_local_mac;
409 enum roce_mode roce_mode;
410};
411
412struct qed_rdma_query_qp_out_params {
413 enum qed_roce_qp_state state;
414 u32 rq_psn;
415 u32 sq_psn;
416 bool draining;
417 u16 mtu;
418 u32 dest_qp;
419 bool incoming_rdma_read_en;
420 bool incoming_rdma_write_en;
421 bool incoming_atomic_en;
422 bool e2e_flow_control_en;
423 union qed_gid sgid;
424 union qed_gid dgid;
425 u32 flow_label;
426 u8 hop_limit_ttl;
427 u8 traffic_class_tos;
428 u32 timeout;
429 u8 rnr_retry;
430 u8 retry_cnt;
431 u8 min_rnr_nak_timer;
432 u16 pkey_index;
433 u8 max_rd_atomic;
434 u8 max_dest_rd_atomic;
435 bool sqd_async;
436};
437
438struct qed_rdma_create_srq_out_params {
439 u16 srq_id;
440};
441
442struct qed_rdma_destroy_srq_in_params {
443 u16 srq_id;
444};
445
446struct qed_rdma_modify_srq_in_params {
447 u32 wqe_limit;
448 u16 srq_id;
449};
450
451struct qed_rdma_stats_out_params {
452 u64 sent_bytes;
453 u64 sent_pkts;
454 u64 rcv_bytes;
455 u64 rcv_pkts;
456};
457
458struct qed_rdma_counters_out_params {
459 u64 pd_count;
460 u64 max_pd;
461 u64 dpi_count;
462 u64 max_dpi;
463 u64 cq_count;
464 u64 max_cq;
465 u64 qp_count;
466 u64 max_qp;
467 u64 tid_count;
468 u64 max_tid;
469};
470
471#define QED_ROCE_TX_HEAD_FAILURE (1)
472#define QED_ROCE_TX_FRAG_FAILURE (2)
473
474struct qed_roce_ll2_header {
475 void *vaddr;
476 dma_addr_t baddr;
477 size_t len;
478};
479
480struct qed_roce_ll2_buffer {
481 dma_addr_t baddr;
482 size_t len;
483};
484
485struct qed_roce_ll2_packet {
486 struct qed_roce_ll2_header header;
487 int n_seg;
488 struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
489 int roce_mode;
490 enum qed_roce_ll2_tx_dest tx_dest;
491};
492
493struct qed_roce_ll2_tx_params {
494 int reserved;
495};
496
497struct qed_roce_ll2_rx_params {
498 u16 vlan_id;
499 u8 smac[ETH_ALEN];
500 int rc;
501};
502
503struct qed_roce_ll2_cbs {
504 void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt);
505
506 void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt,
507 struct qed_roce_ll2_rx_params *params);
508};
509
510struct qed_roce_ll2_params {
511 u16 max_rx_buffers;
512 u16 max_tx_buffers;
513 u16 mtu;
514 u8 mac_address[ETH_ALEN];
515 struct qed_roce_ll2_cbs cbs;
516 void *cb_cookie;
517};
518
519struct qed_roce_ll2_info {
520 u8 handle;
521 struct qed_roce_ll2_cbs cbs;
522 u8 mac_address[ETH_ALEN];
523 void *cb_cookie;
524
525 /* Lock to protect ll2 */
526 struct mutex lock;
527};
528
529enum qed_rdma_type {
530 QED_RDMA_TYPE_ROCE,
531};
532
533struct qed_dev_rdma_info {
534 struct qed_dev_info common;
535 enum qed_rdma_type rdma_type;
536};
537
538struct qed_rdma_ops {
539 const struct qed_common_ops *common;
540
541 int (*fill_dev_info)(struct qed_dev *cdev,
542 struct qed_dev_rdma_info *info);
543 void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);
544
545 int (*rdma_init)(struct qed_dev *dev,
546 struct qed_rdma_start_in_params *iparams);
547
548 int (*rdma_add_user)(void *rdma_cxt,
549 struct qed_rdma_add_user_out_params *oparams);
550
551 void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
552 int (*rdma_stop)(void *rdma_cxt);
553 struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt);
554 struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt);
555 int (*rdma_get_start_sb)(struct qed_dev *cdev);
556 int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
557 void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
558 int (*rdma_get_rdma_int)(struct qed_dev *cdev,
559 struct qed_int_info *info);
560 int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
561 int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
562 void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
563 int (*rdma_create_cq)(void *rdma_cxt,
564 struct qed_rdma_create_cq_in_params *params,
565 u16 *icid);
566 int (*rdma_destroy_cq)(void *rdma_cxt,
567 struct qed_rdma_destroy_cq_in_params *iparams,
568 struct qed_rdma_destroy_cq_out_params *oparams);
569 struct qed_rdma_qp *
570 (*rdma_create_qp)(void *rdma_cxt,
571 struct qed_rdma_create_qp_in_params *iparams,
572 struct qed_rdma_create_qp_out_params *oparams);
573
574 int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
575 struct qed_rdma_modify_qp_in_params *iparams);
576
577 int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
578 struct qed_rdma_query_qp_out_params *oparams);
579 int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
580 int
581 (*rdma_register_tid)(void *rdma_cxt,
582 struct qed_rdma_register_tid_in_params *iparams);
583 int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
584 int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
585 void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
586 int (*roce_ll2_start)(struct qed_dev *cdev,
587 struct qed_roce_ll2_params *params);
588 int (*roce_ll2_stop)(struct qed_dev *cdev);
589 int (*roce_ll2_tx)(struct qed_dev *cdev,
590 struct qed_roce_ll2_packet *packet,
591 struct qed_roce_ll2_tx_params *params);
592 int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev,
593 struct qed_roce_ll2_buffer *buf,
594 u64 cookie, u8 notify_fw);
595 int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev,
596 u8 *old_mac_address,
597 u8 *new_mac_address);
598 int (*roce_ll2_stats)(struct qed_dev *cdev,
599 struct qed_ll2_stats *stats);
600};
601
602const struct qed_rdma_ops *qed_get_rdma_ops(void);
603
604#endif
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
new file mode 100644
index 000000000000..99fbe6d55acb
--- /dev/null
+++ b/include/linux/qed/qede_roce.h
@@ -0,0 +1,88 @@
1/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef QEDE_ROCE_H
33#define QEDE_ROCE_H
34
35struct qedr_dev;
36struct qed_dev;
37struct qede_dev;
38
39enum qede_roce_event {
40 QEDE_UP,
41 QEDE_DOWN,
42 QEDE_CHANGE_ADDR,
43 QEDE_CLOSE
44};
45
46struct qede_roce_event_work {
47 struct list_head list;
48 struct work_struct work;
49 void *ptr;
50 enum qede_roce_event event;
51};
52
53struct qedr_driver {
54 unsigned char name[32];
55
56 struct qedr_dev* (*add)(struct qed_dev *, struct pci_dev *,
57 struct net_device *);
58
59 void (*remove)(struct qedr_dev *);
60 void (*notify)(struct qedr_dev *, enum qede_roce_event);
61};
62
63/* APIs for RoCE driver to register callback handlers,
64 * which will be invoked when device is added, removed, ifup, ifdown
65 */
66int qede_roce_register_driver(struct qedr_driver *drv);
67void qede_roce_unregister_driver(struct qedr_driver *drv);
68
69bool qede_roce_supported(struct qede_dev *dev);
70
71#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
72int qede_roce_dev_add(struct qede_dev *dev);
73void qede_roce_dev_event_open(struct qede_dev *dev);
74void qede_roce_dev_event_close(struct qede_dev *dev);
75void qede_roce_dev_remove(struct qede_dev *dev);
76void qede_roce_event_changeaddr(struct qede_dev *qedr);
77#else
78static inline int qede_roce_dev_add(struct qede_dev *dev)
79{
80 return 0;
81}
82
83static inline void qede_roce_dev_event_open(struct qede_dev *dev) {}
84static inline void qede_roce_dev_event_close(struct qede_dev *dev) {}
85static inline void qede_roce_dev_remove(struct qede_dev *dev) {}
86static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {}
87#endif
88#endif
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index 187991c1f439..7663725faa94 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -28,6 +28,7 @@
28#define RDMA_MAX_PDS (64 * 1024) 28#define RDMA_MAX_PDS (64 * 1024)
29 29
30#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS 30#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
31#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
31 32
32#define RDMA_TASK_TYPE (PROTOCOLID_ROCE) 33#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
33 34
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index accba0e6b704..dc3889d1bbe6 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -11,6 +11,14 @@
11 11
12#define TCP_INVALID_TIMEOUT_VAL -1 12#define TCP_INVALID_TIMEOUT_VAL -1
13 13
14struct ooo_opaque {
15 __le32 cid;
16 u8 drop_isle;
17 u8 drop_size;
18 u8 ooo_opcode;
19 u8 ooo_isle;
20};
21
14enum tcp_connect_mode { 22enum tcp_connect_mode {
15 TCP_CONNECT_ACTIVE, 23 TCP_CONNECT_ACTIVE,
16 TCP_CONNECT_PASSIVE, 24 TCP_CONNECT_PASSIVE,
@@ -18,14 +26,10 @@ enum tcp_connect_mode {
18}; 26};
19 27
20struct tcp_init_params { 28struct tcp_init_params {
21 __le32 max_cwnd; 29 __le32 two_msl_timer;
22 __le16 dup_ack_threshold;
23 __le16 tx_sws_timer; 30 __le16 tx_sws_timer;
24 __le16 min_rto;
25 __le16 min_rto_rt;
26 __le16 max_rto;
27 u8 maxfinrt; 31 u8 maxfinrt;
28 u8 reserved[1]; 32 u8 reserved[9];
29}; 33};
30 34
31enum tcp_ip_version { 35enum tcp_ip_version {
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 3eef0802a0cd..5c132d3188be 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Resizable, Scalable, Concurrent Hash Table 2 * Resizable, Scalable, Concurrent Hash Table
3 * 3 *
4 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> 4 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
5 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> 5 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
6 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> 6 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
7 * 7 *
@@ -53,6 +53,11 @@ struct rhash_head {
53 struct rhash_head __rcu *next; 53 struct rhash_head __rcu *next;
54}; 54};
55 55
56struct rhlist_head {
57 struct rhash_head rhead;
58 struct rhlist_head __rcu *next;
59};
60
56/** 61/**
57 * struct bucket_table - Table of hash buckets 62 * struct bucket_table - Table of hash buckets
58 * @size: Number of hash buckets 63 * @size: Number of hash buckets
@@ -137,6 +142,7 @@ struct rhashtable_params {
137 * @key_len: Key length for hashfn 142 * @key_len: Key length for hashfn
138 * @elasticity: Maximum chain length before rehash 143 * @elasticity: Maximum chain length before rehash
139 * @p: Configuration parameters 144 * @p: Configuration parameters
145 * @rhlist: True if this is an rhltable
140 * @run_work: Deferred worker to expand/shrink asynchronously 146 * @run_work: Deferred worker to expand/shrink asynchronously
141 * @mutex: Mutex to protect current/future table swapping 147 * @mutex: Mutex to protect current/future table swapping
142 * @lock: Spin lock to protect walker list 148 * @lock: Spin lock to protect walker list
@@ -147,12 +153,21 @@ struct rhashtable {
147 unsigned int key_len; 153 unsigned int key_len;
148 unsigned int elasticity; 154 unsigned int elasticity;
149 struct rhashtable_params p; 155 struct rhashtable_params p;
156 bool rhlist;
150 struct work_struct run_work; 157 struct work_struct run_work;
151 struct mutex mutex; 158 struct mutex mutex;
152 spinlock_t lock; 159 spinlock_t lock;
153}; 160};
154 161
155/** 162/**
163 * struct rhltable - Hash table with duplicate objects in a list
164 * @ht: Underlying rhtable
165 */
166struct rhltable {
167 struct rhashtable ht;
168};
169
170/**
156 * struct rhashtable_walker - Hash table walker 171 * struct rhashtable_walker - Hash table walker
157 * @list: List entry on list of walkers 172 * @list: List entry on list of walkers
158 * @tbl: The table that we were walking over 173 * @tbl: The table that we were walking over
@@ -163,9 +178,10 @@ struct rhashtable_walker {
163}; 178};
164 179
165/** 180/**
166 * struct rhashtable_iter - Hash table iterator, fits into netlink cb 181 * struct rhashtable_iter - Hash table iterator
167 * @ht: Table to iterate through 182 * @ht: Table to iterate through
168 * @p: Current pointer 183 * @p: Current pointer
184 * @list: Current hash list pointer
169 * @walker: Associated rhashtable walker 185 * @walker: Associated rhashtable walker
170 * @slot: Current slot 186 * @slot: Current slot
171 * @skip: Number of entries to skip in slot 187 * @skip: Number of entries to skip in slot
@@ -173,7 +189,8 @@ struct rhashtable_walker {
173struct rhashtable_iter { 189struct rhashtable_iter {
174 struct rhashtable *ht; 190 struct rhashtable *ht;
175 struct rhash_head *p; 191 struct rhash_head *p;
176 struct rhashtable_walker *walker; 192 struct rhlist_head *list;
193 struct rhashtable_walker walker;
177 unsigned int slot; 194 unsigned int slot;
178 unsigned int skip; 195 unsigned int skip;
179}; 196};
@@ -339,15 +356,14 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
339 356
340int rhashtable_init(struct rhashtable *ht, 357int rhashtable_init(struct rhashtable *ht,
341 const struct rhashtable_params *params); 358 const struct rhashtable_params *params);
359int rhltable_init(struct rhltable *hlt,
360 const struct rhashtable_params *params);
342 361
343struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, 362void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
344 const void *key, 363 struct rhash_head *obj);
345 struct rhash_head *obj,
346 struct bucket_table *old_tbl);
347int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
348 364
349int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter, 365void rhashtable_walk_enter(struct rhashtable *ht,
350 gfp_t gfp); 366 struct rhashtable_iter *iter);
351void rhashtable_walk_exit(struct rhashtable_iter *iter); 367void rhashtable_walk_exit(struct rhashtable_iter *iter);
352int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); 368int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
353void *rhashtable_walk_next(struct rhashtable_iter *iter); 369void *rhashtable_walk_next(struct rhashtable_iter *iter);
@@ -506,6 +522,31 @@ void rhashtable_destroy(struct rhashtable *ht);
506 rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\ 522 rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
507 tbl, hash, member) 523 tbl, hash, member)
508 524
525/**
526 * rhl_for_each_rcu - iterate over rcu hash table list
527 * @pos: the &struct rlist_head to use as a loop cursor.
528 * @list: the head of the list
529 *
530 * This hash chain list-traversal primitive should be used on the
531 * list returned by rhltable_lookup.
532 */
533#define rhl_for_each_rcu(pos, list) \
534 for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
535
536/**
537 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
538 * @tpos: the type * to use as a loop cursor.
539 * @pos: the &struct rlist_head to use as a loop cursor.
540 * @list: the head of the list
541 * @member: name of the &struct rlist_head within the hashable struct.
542 *
543 * This hash chain list-traversal primitive should be used on the
544 * list returned by rhltable_lookup.
545 */
546#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
547 for (pos = list; pos && rht_entry(tpos, pos, member); \
548 pos = rcu_dereference_raw(pos->next))
549
509static inline int rhashtable_compare(struct rhashtable_compare_arg *arg, 550static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
510 const void *obj) 551 const void *obj)
511{ 552{
@@ -515,18 +556,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
515 return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len); 556 return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
516} 557}
517 558
518/** 559/* Internal function, do not use. */
519 * rhashtable_lookup_fast - search hash table, inlined version 560static inline struct rhash_head *__rhashtable_lookup(
520 * @ht: hash table
521 * @key: the pointer to the key
522 * @params: hash table parameters
523 *
524 * Computes the hash value for the key and traverses the bucket chain looking
525 * for a entry with an identical key. The first matching entry is returned.
526 *
527 * Returns the first entry on which the compare function returned true.
528 */
529static inline void *rhashtable_lookup_fast(
530 struct rhashtable *ht, const void *key, 561 struct rhashtable *ht, const void *key,
531 const struct rhashtable_params params) 562 const struct rhashtable_params params)
532{ 563{
@@ -538,8 +569,6 @@ static inline void *rhashtable_lookup_fast(
538 struct rhash_head *he; 569 struct rhash_head *he;
539 unsigned int hash; 570 unsigned int hash;
540 571
541 rcu_read_lock();
542
543 tbl = rht_dereference_rcu(ht->tbl, ht); 572 tbl = rht_dereference_rcu(ht->tbl, ht);
544restart: 573restart:
545 hash = rht_key_hashfn(ht, tbl, key, params); 574 hash = rht_key_hashfn(ht, tbl, key, params);
@@ -548,8 +577,7 @@ restart:
548 params.obj_cmpfn(&arg, rht_obj(ht, he)) : 577 params.obj_cmpfn(&arg, rht_obj(ht, he)) :
549 rhashtable_compare(&arg, rht_obj(ht, he))) 578 rhashtable_compare(&arg, rht_obj(ht, he)))
550 continue; 579 continue;
551 rcu_read_unlock(); 580 return he;
552 return rht_obj(ht, he);
553 } 581 }
554 582
555 /* Ensure we see any new tables. */ 583 /* Ensure we see any new tables. */
@@ -558,89 +586,165 @@ restart:
558 tbl = rht_dereference_rcu(tbl->future_tbl, ht); 586 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
559 if (unlikely(tbl)) 587 if (unlikely(tbl))
560 goto restart; 588 goto restart;
561 rcu_read_unlock();
562 589
563 return NULL; 590 return NULL;
564} 591}
565 592
566/* Internal function, please use rhashtable_insert_fast() instead */ 593/**
567static inline int __rhashtable_insert_fast( 594 * rhashtable_lookup - search hash table
568 struct rhashtable *ht, const void *key, struct rhash_head *obj, 595 * @ht: hash table
596 * @key: the pointer to the key
597 * @params: hash table parameters
598 *
599 * Computes the hash value for the key and traverses the bucket chain looking
600 * for a entry with an identical key. The first matching entry is returned.
601 *
602 * This must only be called under the RCU read lock.
603 *
604 * Returns the first entry on which the compare function returned true.
605 */
606static inline void *rhashtable_lookup(
607 struct rhashtable *ht, const void *key,
569 const struct rhashtable_params params) 608 const struct rhashtable_params params)
570{ 609{
610 struct rhash_head *he = __rhashtable_lookup(ht, key, params);
611
612 return he ? rht_obj(ht, he) : NULL;
613}
614
615/**
616 * rhashtable_lookup_fast - search hash table, without RCU read lock
617 * @ht: hash table
618 * @key: the pointer to the key
619 * @params: hash table parameters
620 *
621 * Computes the hash value for the key and traverses the bucket chain looking
622 * for a entry with an identical key. The first matching entry is returned.
623 *
624 * Only use this function when you have other mechanisms guaranteeing
625 * that the object won't go away after the RCU read lock is released.
626 *
627 * Returns the first entry on which the compare function returned true.
628 */
629static inline void *rhashtable_lookup_fast(
630 struct rhashtable *ht, const void *key,
631 const struct rhashtable_params params)
632{
633 void *obj;
634
635 rcu_read_lock();
636 obj = rhashtable_lookup(ht, key, params);
637 rcu_read_unlock();
638
639 return obj;
640}
641
642/**
643 * rhltable_lookup - search hash list table
644 * @hlt: hash table
645 * @key: the pointer to the key
646 * @params: hash table parameters
647 *
648 * Computes the hash value for the key and traverses the bucket chain looking
649 * for a entry with an identical key. All matching entries are returned
650 * in a list.
651 *
652 * This must only be called under the RCU read lock.
653 *
654 * Returns the list of entries that match the given key.
655 */
656static inline struct rhlist_head *rhltable_lookup(
657 struct rhltable *hlt, const void *key,
658 const struct rhashtable_params params)
659{
660 struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
661
662 return he ? container_of(he, struct rhlist_head, rhead) : NULL;
663}
664
665/* Internal function, please use rhashtable_insert_fast() instead. This
666 * function returns the existing element already in hashes in there is a clash,
667 * otherwise it returns an error via ERR_PTR().
668 */
669static inline void *__rhashtable_insert_fast(
670 struct rhashtable *ht, const void *key, struct rhash_head *obj,
671 const struct rhashtable_params params, bool rhlist)
672{
571 struct rhashtable_compare_arg arg = { 673 struct rhashtable_compare_arg arg = {
572 .ht = ht, 674 .ht = ht,
573 .key = key, 675 .key = key,
574 }; 676 };
575 struct bucket_table *tbl, *new_tbl; 677 struct rhash_head __rcu **pprev;
678 struct bucket_table *tbl;
576 struct rhash_head *head; 679 struct rhash_head *head;
577 spinlock_t *lock; 680 spinlock_t *lock;
578 unsigned int elasticity;
579 unsigned int hash; 681 unsigned int hash;
580 int err; 682 int elasticity;
683 void *data;
581 684
582restart:
583 rcu_read_lock(); 685 rcu_read_lock();
584 686
585 tbl = rht_dereference_rcu(ht->tbl, ht); 687 tbl = rht_dereference_rcu(ht->tbl, ht);
688 hash = rht_head_hashfn(ht, tbl, obj, params);
689 lock = rht_bucket_lock(tbl, hash);
690 spin_lock_bh(lock);
586 691
587 /* All insertions must grab the oldest table containing 692 if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
588 * the hashed bucket that is yet to be rehashed.
589 */
590 for (;;) {
591 hash = rht_head_hashfn(ht, tbl, obj, params);
592 lock = rht_bucket_lock(tbl, hash);
593 spin_lock_bh(lock);
594
595 if (tbl->rehash <= hash)
596 break;
597
598 spin_unlock_bh(lock);
599 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
600 }
601
602 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
603 if (unlikely(new_tbl)) {
604 tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
605 if (!IS_ERR_OR_NULL(tbl))
606 goto slow_path;
607
608 err = PTR_ERR(tbl);
609 goto out;
610 }
611
612 err = -E2BIG;
613 if (unlikely(rht_grow_above_max(ht, tbl)))
614 goto out;
615
616 if (unlikely(rht_grow_above_100(ht, tbl))) {
617slow_path: 693slow_path:
618 spin_unlock_bh(lock); 694 spin_unlock_bh(lock);
619 err = rhashtable_insert_rehash(ht, tbl);
620 rcu_read_unlock(); 695 rcu_read_unlock();
621 if (err) 696 return rhashtable_insert_slow(ht, key, obj);
622 return err;
623
624 goto restart;
625 } 697 }
626 698
627 err = -EEXIST;
628 elasticity = ht->elasticity; 699 elasticity = ht->elasticity;
700 pprev = &tbl->buckets[hash];
629 rht_for_each(head, tbl, hash) { 701 rht_for_each(head, tbl, hash) {
630 if (key && 702 struct rhlist_head *plist;
631 unlikely(!(params.obj_cmpfn ? 703 struct rhlist_head *list;
632 params.obj_cmpfn(&arg, rht_obj(ht, head)) : 704
633 rhashtable_compare(&arg, rht_obj(ht, head))))) 705 elasticity--;
706 if (!key ||
707 (params.obj_cmpfn ?
708 params.obj_cmpfn(&arg, rht_obj(ht, head)) :
709 rhashtable_compare(&arg, rht_obj(ht, head))))
710 continue;
711
712 data = rht_obj(ht, head);
713
714 if (!rhlist)
634 goto out; 715 goto out;
635 if (!--elasticity) 716
636 goto slow_path; 717
718 list = container_of(obj, struct rhlist_head, rhead);
719 plist = container_of(head, struct rhlist_head, rhead);
720
721 RCU_INIT_POINTER(list->next, plist);
722 head = rht_dereference_bucket(head->next, tbl, hash);
723 RCU_INIT_POINTER(list->rhead.next, head);
724 rcu_assign_pointer(*pprev, obj);
725
726 goto good;
637 } 727 }
638 728
639 err = 0; 729 if (elasticity <= 0)
730 goto slow_path;
731
732 data = ERR_PTR(-E2BIG);
733 if (unlikely(rht_grow_above_max(ht, tbl)))
734 goto out;
735
736 if (unlikely(rht_grow_above_100(ht, tbl)))
737 goto slow_path;
640 738
641 head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); 739 head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
642 740
643 RCU_INIT_POINTER(obj->next, head); 741 RCU_INIT_POINTER(obj->next, head);
742 if (rhlist) {
743 struct rhlist_head *list;
744
745 list = container_of(obj, struct rhlist_head, rhead);
746 RCU_INIT_POINTER(list->next, NULL);
747 }
644 748
645 rcu_assign_pointer(tbl->buckets[hash], obj); 749 rcu_assign_pointer(tbl->buckets[hash], obj);
646 750
@@ -648,11 +752,14 @@ slow_path:
648 if (rht_grow_above_75(ht, tbl)) 752 if (rht_grow_above_75(ht, tbl))
649 schedule_work(&ht->run_work); 753 schedule_work(&ht->run_work);
650 754
755good:
756 data = NULL;
757
651out: 758out:
652 spin_unlock_bh(lock); 759 spin_unlock_bh(lock);
653 rcu_read_unlock(); 760 rcu_read_unlock();
654 761
655 return err; 762 return data;
656} 763}
657 764
658/** 765/**
@@ -675,7 +782,65 @@ static inline int rhashtable_insert_fast(
675 struct rhashtable *ht, struct rhash_head *obj, 782 struct rhashtable *ht, struct rhash_head *obj,
676 const struct rhashtable_params params) 783 const struct rhashtable_params params)
677{ 784{
678 return __rhashtable_insert_fast(ht, NULL, obj, params); 785 void *ret;
786
787 ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
788 if (IS_ERR(ret))
789 return PTR_ERR(ret);
790
791 return ret == NULL ? 0 : -EEXIST;
792}
793
794/**
795 * rhltable_insert_key - insert object into hash list table
796 * @hlt: hash list table
797 * @key: the pointer to the key
798 * @list: pointer to hash list head inside object
799 * @params: hash table parameters
800 *
801 * Will take a per bucket spinlock to protect against mutual mutations
802 * on the same bucket. Multiple insertions may occur in parallel unless
803 * they map to the same bucket lock.
804 *
805 * It is safe to call this function from atomic context.
806 *
807 * Will trigger an automatic deferred table resizing if the size grows
808 * beyond the watermark indicated by grow_decision() which can be passed
809 * to rhashtable_init().
810 */
811static inline int rhltable_insert_key(
812 struct rhltable *hlt, const void *key, struct rhlist_head *list,
813 const struct rhashtable_params params)
814{
815 return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
816 params, true));
817}
818
819/**
820 * rhltable_insert - insert object into hash list table
821 * @hlt: hash list table
822 * @list: pointer to hash list head inside object
823 * @params: hash table parameters
824 *
825 * Will take a per bucket spinlock to protect against mutual mutations
826 * on the same bucket. Multiple insertions may occur in parallel unless
827 * they map to the same bucket lock.
828 *
829 * It is safe to call this function from atomic context.
830 *
831 * Will trigger an automatic deferred table resizing if the size grows
832 * beyond the watermark indicated by grow_decision() which can be passed
833 * to rhashtable_init().
834 */
835static inline int rhltable_insert(
836 struct rhltable *hlt, struct rhlist_head *list,
837 const struct rhashtable_params params)
838{
839 const char *key = rht_obj(&hlt->ht, &list->rhead);
840
841 key += params.key_offset;
842
843 return rhltable_insert_key(hlt, key, list, params);
679} 844}
680 845
681/** 846/**
@@ -704,11 +869,16 @@ static inline int rhashtable_lookup_insert_fast(
704 const struct rhashtable_params params) 869 const struct rhashtable_params params)
705{ 870{
706 const char *key = rht_obj(ht, obj); 871 const char *key = rht_obj(ht, obj);
872 void *ret;
707 873
708 BUG_ON(ht->p.obj_hashfn); 874 BUG_ON(ht->p.obj_hashfn);
709 875
710 return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, 876 ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
711 params); 877 false);
878 if (IS_ERR(ret))
879 return PTR_ERR(ret);
880
881 return ret == NULL ? 0 : -EEXIST;
712} 882}
713 883
714/** 884/**
@@ -737,15 +907,42 @@ static inline int rhashtable_lookup_insert_key(
737 struct rhashtable *ht, const void *key, struct rhash_head *obj, 907 struct rhashtable *ht, const void *key, struct rhash_head *obj,
738 const struct rhashtable_params params) 908 const struct rhashtable_params params)
739{ 909{
910 void *ret;
911
912 BUG_ON(!ht->p.obj_hashfn || !key);
913
914 ret = __rhashtable_insert_fast(ht, key, obj, params, false);
915 if (IS_ERR(ret))
916 return PTR_ERR(ret);
917
918 return ret == NULL ? 0 : -EEXIST;
919}
920
921/**
922 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
923 * @ht: hash table
924 * @obj: pointer to hash head inside object
925 * @params: hash table parameters
926 * @data: pointer to element data already in hashes
927 *
928 * Just like rhashtable_lookup_insert_key(), but this function returns the
929 * object if it exists, NULL if it does not and the insertion was successful,
930 * and an ERR_PTR otherwise.
931 */
932static inline void *rhashtable_lookup_get_insert_key(
933 struct rhashtable *ht, const void *key, struct rhash_head *obj,
934 const struct rhashtable_params params)
935{
740 BUG_ON(!ht->p.obj_hashfn || !key); 936 BUG_ON(!ht->p.obj_hashfn || !key);
741 937
742 return __rhashtable_insert_fast(ht, key, obj, params); 938 return __rhashtable_insert_fast(ht, key, obj, params, false);
743} 939}
744 940
745/* Internal function, please use rhashtable_remove_fast() instead */ 941/* Internal function, please use rhashtable_remove_fast() instead */
746static inline int __rhashtable_remove_fast( 942static inline int __rhashtable_remove_fast_one(
747 struct rhashtable *ht, struct bucket_table *tbl, 943 struct rhashtable *ht, struct bucket_table *tbl,
748 struct rhash_head *obj, const struct rhashtable_params params) 944 struct rhash_head *obj, const struct rhashtable_params params,
945 bool rhlist)
749{ 946{
750 struct rhash_head __rcu **pprev; 947 struct rhash_head __rcu **pprev;
751 struct rhash_head *he; 948 struct rhash_head *he;
@@ -760,39 +957,66 @@ static inline int __rhashtable_remove_fast(
760 957
761 pprev = &tbl->buckets[hash]; 958 pprev = &tbl->buckets[hash];
762 rht_for_each(he, tbl, hash) { 959 rht_for_each(he, tbl, hash) {
960 struct rhlist_head *list;
961
962 list = container_of(he, struct rhlist_head, rhead);
963
763 if (he != obj) { 964 if (he != obj) {
965 struct rhlist_head __rcu **lpprev;
966
764 pprev = &he->next; 967 pprev = &he->next;
765 continue; 968
969 if (!rhlist)
970 continue;
971
972 do {
973 lpprev = &list->next;
974 list = rht_dereference_bucket(list->next,
975 tbl, hash);
976 } while (list && obj != &list->rhead);
977
978 if (!list)
979 continue;
980
981 list = rht_dereference_bucket(list->next, tbl, hash);
982 RCU_INIT_POINTER(*lpprev, list);
983 err = 0;
984 break;
766 } 985 }
767 986
768 rcu_assign_pointer(*pprev, obj->next); 987 obj = rht_dereference_bucket(obj->next, tbl, hash);
769 err = 0; 988 err = 1;
989
990 if (rhlist) {
991 list = rht_dereference_bucket(list->next, tbl, hash);
992 if (list) {
993 RCU_INIT_POINTER(list->rhead.next, obj);
994 obj = &list->rhead;
995 err = 0;
996 }
997 }
998
999 rcu_assign_pointer(*pprev, obj);
770 break; 1000 break;
771 } 1001 }
772 1002
773 spin_unlock_bh(lock); 1003 spin_unlock_bh(lock);
774 1004
1005 if (err > 0) {
1006 atomic_dec(&ht->nelems);
1007 if (unlikely(ht->p.automatic_shrinking &&
1008 rht_shrink_below_30(ht, tbl)))
1009 schedule_work(&ht->run_work);
1010 err = 0;
1011 }
1012
775 return err; 1013 return err;
776} 1014}
777 1015
778/** 1016/* Internal function, please use rhashtable_remove_fast() instead */
779 * rhashtable_remove_fast - remove object from hash table 1017static inline int __rhashtable_remove_fast(
780 * @ht: hash table
781 * @obj: pointer to hash head inside object
782 * @params: hash table parameters
783 *
784 * Since the hash chain is single linked, the removal operation needs to
785 * walk the bucket chain upon removal. The removal operation is thus
786 * considerable slow if the hash table is not correctly sized.
787 *
788 * Will automatically shrink the table via rhashtable_expand() if the
789 * shrink_decision function specified at rhashtable_init() returns true.
790 *
791 * Returns zero on success, -ENOENT if the entry could not be found.
792 */
793static inline int rhashtable_remove_fast(
794 struct rhashtable *ht, struct rhash_head *obj, 1018 struct rhashtable *ht, struct rhash_head *obj,
795 const struct rhashtable_params params) 1019 const struct rhashtable_params params, bool rhlist)
796{ 1020{
797 struct bucket_table *tbl; 1021 struct bucket_table *tbl;
798 int err; 1022 int err;
@@ -806,24 +1030,60 @@ static inline int rhashtable_remove_fast(
806 * visible then that guarantees the entry to still be in 1030 * visible then that guarantees the entry to still be in
807 * the old tbl if it exists. 1031 * the old tbl if it exists.
808 */ 1032 */
809 while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) && 1033 while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
1034 rhlist)) &&
810 (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) 1035 (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
811 ; 1036 ;
812 1037
813 if (err)
814 goto out;
815
816 atomic_dec(&ht->nelems);
817 if (unlikely(ht->p.automatic_shrinking &&
818 rht_shrink_below_30(ht, tbl)))
819 schedule_work(&ht->run_work);
820
821out:
822 rcu_read_unlock(); 1038 rcu_read_unlock();
823 1039
824 return err; 1040 return err;
825} 1041}
826 1042
1043/**
1044 * rhashtable_remove_fast - remove object from hash table
1045 * @ht: hash table
1046 * @obj: pointer to hash head inside object
1047 * @params: hash table parameters
1048 *
1049 * Since the hash chain is single linked, the removal operation needs to
1050 * walk the bucket chain upon removal. The removal operation is thus
1051 * considerable slow if the hash table is not correctly sized.
1052 *
1053 * Will automatically shrink the table via rhashtable_expand() if the
1054 * shrink_decision function specified at rhashtable_init() returns true.
1055 *
1056 * Returns zero on success, -ENOENT if the entry could not be found.
1057 */
1058static inline int rhashtable_remove_fast(
1059 struct rhashtable *ht, struct rhash_head *obj,
1060 const struct rhashtable_params params)
1061{
1062 return __rhashtable_remove_fast(ht, obj, params, false);
1063}
1064
1065/**
1066 * rhltable_remove - remove object from hash list table
1067 * @hlt: hash list table
1068 * @list: pointer to hash list head inside object
1069 * @params: hash table parameters
1070 *
1071 * Since the hash chain is single linked, the removal operation needs to
1072 * walk the bucket chain upon removal. The removal operation is thus
1073 * considerable slow if the hash table is not correctly sized.
1074 *
1075 * Will automatically shrink the table via rhashtable_expand() if the
1076 * shrink_decision function specified at rhashtable_init() returns true.
1077 *
1078 * Returns zero on success, -ENOENT if the entry could not be found.
1079 */
1080static inline int rhltable_remove(
1081 struct rhltable *hlt, struct rhlist_head *list,
1082 const struct rhashtable_params params)
1083{
1084 return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
1085}
1086
827/* Internal function, please use rhashtable_replace_fast() instead */ 1087/* Internal function, please use rhashtable_replace_fast() instead */
828static inline int __rhashtable_replace_fast( 1088static inline int __rhashtable_replace_fast(
829 struct rhashtable *ht, struct bucket_table *tbl, 1089 struct rhashtable *ht, struct bucket_table *tbl,
@@ -906,4 +1166,59 @@ static inline int rhashtable_replace_fast(
906 return err; 1166 return err;
907} 1167}
908 1168
1169/* Obsolete function, do not use in new code. */
1170static inline int rhashtable_walk_init(struct rhashtable *ht,
1171 struct rhashtable_iter *iter, gfp_t gfp)
1172{
1173 rhashtable_walk_enter(ht, iter);
1174 return 0;
1175}
1176
1177/**
1178 * rhltable_walk_enter - Initialise an iterator
1179 * @hlt: Table to walk over
1180 * @iter: Hash table Iterator
1181 *
1182 * This function prepares a hash table walk.
1183 *
1184 * Note that if you restart a walk after rhashtable_walk_stop you
1185 * may see the same object twice. Also, you may miss objects if
1186 * there are removals in between rhashtable_walk_stop and the next
1187 * call to rhashtable_walk_start.
1188 *
1189 * For a completely stable walk you should construct your own data
1190 * structure outside the hash table.
1191 *
1192 * This function may sleep so you must not call it from interrupt
1193 * context or with spin locks held.
1194 *
1195 * You must call rhashtable_walk_exit after this function returns.
1196 */
1197static inline void rhltable_walk_enter(struct rhltable *hlt,
1198 struct rhashtable_iter *iter)
1199{
1200 return rhashtable_walk_enter(&hlt->ht, iter);
1201}
1202
1203/**
1204 * rhltable_free_and_destroy - free elements and destroy hash list table
1205 * @hlt: the hash list table to destroy
1206 * @free_fn: callback to release resources of element
1207 * @arg: pointer passed to free_fn
1208 *
1209 * See documentation for rhashtable_free_and_destroy.
1210 */
1211static inline void rhltable_free_and_destroy(struct rhltable *hlt,
1212 void (*free_fn)(void *ptr,
1213 void *arg),
1214 void *arg)
1215{
1216 return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
1217}
1218
1219static inline void rhltable_destroy(struct rhltable *hlt)
1220{
1221 return rhltable_free_and_destroy(hlt, NULL, NULL);
1222}
1223
909#endif /* _LINUX_RHASHTABLE_H */ 1224#endif /* _LINUX_RHASHTABLE_H */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2daece8979f7..57e54847b0b9 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -105,7 +105,7 @@ extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
105 struct netlink_callback *cb, 105 struct netlink_callback *cb,
106 struct net_device *dev, 106 struct net_device *dev,
107 struct net_device *filter_dev, 107 struct net_device *filter_dev,
108 int idx); 108 int *idx);
109extern int ndo_dflt_fdb_add(struct ndmsg *ndm, 109extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
110 struct nlattr *tb[], 110 struct nlattr *tb[],
111 struct net_device *dev, 111 struct net_device *dev,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0f665cb26b50..9bf60b556bd2 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -612,7 +612,6 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
612 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS 612 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
613 * @napi_id: id of the NAPI struct this skb came from 613 * @napi_id: id of the NAPI struct this skb came from
614 * @secmark: security marking 614 * @secmark: security marking
615 * @offload_fwd_mark: fwding offload mark
616 * @mark: Generic packet mark 615 * @mark: Generic packet mark
617 * @vlan_proto: vlan encapsulation protocol 616 * @vlan_proto: vlan encapsulation protocol
618 * @vlan_tci: vlan tag control information 617 * @vlan_tci: vlan tag control information
@@ -677,13 +676,23 @@ struct sk_buff {
677 */ 676 */
678 kmemcheck_bitfield_begin(flags1); 677 kmemcheck_bitfield_begin(flags1);
679 __u16 queue_mapping; 678 __u16 queue_mapping;
679
680/* if you move cloned around you also must adapt those constants */
681#ifdef __BIG_ENDIAN_BITFIELD
682#define CLONED_MASK (1 << 7)
683#else
684#define CLONED_MASK 1
685#endif
686#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
687
688 __u8 __cloned_offset[0];
680 __u8 cloned:1, 689 __u8 cloned:1,
681 nohdr:1, 690 nohdr:1,
682 fclone:2, 691 fclone:2,
683 peeked:1, 692 peeked:1,
684 head_frag:1, 693 head_frag:1,
685 xmit_more:1; 694 xmit_more:1,
686 /* one bit hole */ 695 __unused:1; /* one bit hole */
687 kmemcheck_bitfield_end(flags1); 696 kmemcheck_bitfield_end(flags1);
688 697
689 /* fields enclosed in headers_start/headers_end are copied 698 /* fields enclosed in headers_start/headers_end are copied
@@ -730,7 +739,10 @@ struct sk_buff {
730 __u8 ipvs_property:1; 739 __u8 ipvs_property:1;
731 __u8 inner_protocol_type:1; 740 __u8 inner_protocol_type:1;
732 __u8 remcsum_offload:1; 741 __u8 remcsum_offload:1;
733 /* 3 or 5 bit hole */ 742#ifdef CONFIG_NET_SWITCHDEV
743 __u8 offload_fwd_mark:1;
744#endif
745 /* 2, 4 or 5 bit hole */
734 746
735#ifdef CONFIG_NET_SCHED 747#ifdef CONFIG_NET_SCHED
736 __u16 tc_index; /* traffic control index */ 748 __u16 tc_index; /* traffic control index */
@@ -757,14 +769,9 @@ struct sk_buff {
757 unsigned int sender_cpu; 769 unsigned int sender_cpu;
758 }; 770 };
759#endif 771#endif
760 union {
761#ifdef CONFIG_NETWORK_SECMARK 772#ifdef CONFIG_NETWORK_SECMARK
762 __u32 secmark; 773 __u32 secmark;
763#endif
764#ifdef CONFIG_NET_SWITCHDEV
765 __u32 offload_fwd_mark;
766#endif 774#endif
767 };
768 775
769 union { 776 union {
770 __u32 mark; 777 __u32 mark;
@@ -2295,7 +2302,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
2295 2302
2296int ___pskb_trim(struct sk_buff *skb, unsigned int len); 2303int ___pskb_trim(struct sk_buff *skb, unsigned int len);
2297 2304
2298static inline void __skb_trim(struct sk_buff *skb, unsigned int len) 2305static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
2299{ 2306{
2300 if (unlikely(skb_is_nonlinear(skb))) { 2307 if (unlikely(skb_is_nonlinear(skb))) {
2301 WARN_ON(1); 2308 WARN_ON(1);
@@ -2305,6 +2312,11 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2305 skb_set_tail_pointer(skb, len); 2312 skb_set_tail_pointer(skb, len);
2306} 2313}
2307 2314
2315static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2316{
2317 __skb_set_length(skb, len);
2318}
2319
2308void skb_trim(struct sk_buff *skb, unsigned int len); 2320void skb_trim(struct sk_buff *skb, unsigned int len);
2309 2321
2310static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) 2322static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
@@ -2335,6 +2347,20 @@ static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
2335 BUG_ON(err); 2347 BUG_ON(err);
2336} 2348}
2337 2349
2350static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
2351{
2352 unsigned int diff = len - skb->len;
2353
2354 if (skb_tailroom(skb) < diff) {
2355 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
2356 GFP_ATOMIC);
2357 if (ret)
2358 return ret;
2359 }
2360 __skb_set_length(skb, len);
2361 return 0;
2362}
2363
2338/** 2364/**
2339 * skb_orphan - orphan a buffer 2365 * skb_orphan - orphan a buffer
2340 * @skb: buffer to orphan 2366 * @skb: buffer to orphan
@@ -2386,6 +2412,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
2386 kfree_skb(skb); 2412 kfree_skb(skb);
2387} 2413}
2388 2414
2415void skb_rbtree_purge(struct rb_root *root);
2416
2389void *netdev_alloc_frag(unsigned int fragsz); 2417void *netdev_alloc_frag(unsigned int fragsz);
2390 2418
2391struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, 2419struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2938,6 +2966,21 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2938 return __pskb_trim(skb, len); 2966 return __pskb_trim(skb, len);
2939} 2967}
2940 2968
2969static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2970{
2971 if (skb->ip_summed == CHECKSUM_COMPLETE)
2972 skb->ip_summed = CHECKSUM_NONE;
2973 __skb_trim(skb, len);
2974 return 0;
2975}
2976
2977static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
2978{
2979 if (skb->ip_summed == CHECKSUM_COMPLETE)
2980 skb->ip_summed = CHECKSUM_NONE;
2981 return __skb_grow(skb, len);
2982}
2983
2941#define skb_queue_walk(queue, skb) \ 2984#define skb_queue_walk(queue, skb) \
2942 for (skb = (queue)->next; \ 2985 for (skb = (queue)->next; \
2943 skb != (struct sk_buff *)(queue); \ 2986 skb != (struct sk_buff *)(queue); \
@@ -3042,6 +3085,7 @@ bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
3042struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); 3085struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3043struct sk_buff *skb_vlan_untag(struct sk_buff *skb); 3086struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3044int skb_ensure_writable(struct sk_buff *skb, int write_len); 3087int skb_ensure_writable(struct sk_buff *skb, int write_len);
3088int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3045int skb_vlan_pop(struct sk_buff *skb); 3089int skb_vlan_pop(struct sk_buff *skb);
3046int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); 3090int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3047struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, 3091struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
@@ -3726,6 +3770,13 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb)
3726 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; 3770 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
3727} 3771}
3728 3772
3773static inline void skb_gso_reset(struct sk_buff *skb)
3774{
3775 skb_shinfo(skb)->gso_size = 0;
3776 skb_shinfo(skb)->gso_segs = 0;
3777 skb_shinfo(skb)->gso_type = 0;
3778}
3779
3729void __skb_warn_lro_forwarding(const struct sk_buff *skb); 3780void __skb_warn_lro_forwarding(const struct sk_buff *skb);
3730 3781
3731static inline bool skb_warn_if_lro(const struct sk_buff *skb) 3782static inline bool skb_warn_if_lro(const struct sk_buff *skb)
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index a4f7203a9017..ecc3e07c6e63 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -25,6 +25,7 @@
25#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/wait.h> 26#include <linux/wait.h>
27#include <linux/rbtree.h> 27#include <linux/rbtree.h>
28#include <linux/uidgid.h>
28#include <uapi/linux/sysctl.h> 29#include <uapi/linux/sysctl.h>
29 30
30/* For the /proc/sys support */ 31/* For the /proc/sys support */
@@ -159,6 +160,9 @@ struct ctl_table_root {
159 struct ctl_table_set default_set; 160 struct ctl_table_set default_set;
160 struct ctl_table_set *(*lookup)(struct ctl_table_root *root, 161 struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
161 struct nsproxy *namespaces); 162 struct nsproxy *namespaces);
163 void (*set_ownership)(struct ctl_table_header *head,
164 struct ctl_table *table,
165 kuid_t *uid, kgid_t *gid);
162 int (*permissions)(struct ctl_table_header *head, struct ctl_table *table); 166 int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
163}; 167};
164 168
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 7be9b1242354..a17ae7b85218 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -19,6 +19,7 @@
19 19
20 20
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/win_minmax.h>
22#include <net/sock.h> 23#include <net/sock.h>
23#include <net/inet_connection_sock.h> 24#include <net/inet_connection_sock.h>
24#include <net/inet_timewait_sock.h> 25#include <net/inet_timewait_sock.h>
@@ -212,7 +213,8 @@ struct tcp_sock {
212 u8 reord; /* reordering detected */ 213 u8 reord; /* reordering detected */
213 } rack; 214 } rack;
214 u16 advmss; /* Advertised MSS */ 215 u16 advmss; /* Advertised MSS */
215 u8 unused; 216 u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
217 unused:7;
216 u8 nonagle : 4,/* Disable Nagle algorithm? */ 218 u8 nonagle : 4,/* Disable Nagle algorithm? */
217 thin_lto : 1,/* Use linear timeouts for thin streams */ 219 thin_lto : 1,/* Use linear timeouts for thin streams */
218 thin_dupack : 1,/* Fast retransmit on first dupack */ 220 thin_dupack : 1,/* Fast retransmit on first dupack */
@@ -234,9 +236,7 @@ struct tcp_sock {
234 u32 mdev_max_us; /* maximal mdev for the last rtt period */ 236 u32 mdev_max_us; /* maximal mdev for the last rtt period */
235 u32 rttvar_us; /* smoothed mdev_max */ 237 u32 rttvar_us; /* smoothed mdev_max */
236 u32 rtt_seq; /* sequence number to update rttvar */ 238 u32 rtt_seq; /* sequence number to update rttvar */
237 struct rtt_meas { 239 struct minmax rtt_min;
238 u32 rtt, ts; /* RTT in usec and sampling time in jiffies. */
239 } rtt_min[3];
240 240
241 u32 packets_out; /* Packets which are "in flight" */ 241 u32 packets_out; /* Packets which are "in flight" */
242 u32 retrans_out; /* Retransmitted packets out */ 242 u32 retrans_out; /* Retransmitted packets out */
@@ -268,6 +268,12 @@ struct tcp_sock {
268 * receiver in Recovery. */ 268 * receiver in Recovery. */
269 u32 prr_out; /* Total number of pkts sent during Recovery. */ 269 u32 prr_out; /* Total number of pkts sent during Recovery. */
270 u32 delivered; /* Total data packets delivered incl. rexmits */ 270 u32 delivered; /* Total data packets delivered incl. rexmits */
271 u32 lost; /* Total data packets lost incl. rexmits */
272 u32 app_limited; /* limited until "delivered" reaches this val */
273 struct skb_mstamp first_tx_mstamp; /* start of window send phase */
274 struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */
275 u32 rate_delivered; /* saved rate sample: packets delivered */
276 u32 rate_interval_us; /* saved rate sample: time elapsed */
271 277
272 u32 rcv_wnd; /* Current receiver window */ 278 u32 rcv_wnd; /* Current receiver window */
273 u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ 279 u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
@@ -281,10 +287,9 @@ struct tcp_sock {
281 struct sk_buff* lost_skb_hint; 287 struct sk_buff* lost_skb_hint;
282 struct sk_buff *retransmit_skb_hint; 288 struct sk_buff *retransmit_skb_hint;
283 289
284 /* OOO segments go in this list. Note that socket lock must be held, 290 /* OOO segments go in this rbtree. Socket lock must be held. */
285 * as we do not use sk_buff_head lock. 291 struct rb_root out_of_order_queue;
286 */ 292 struct sk_buff *ooo_last_skb; /* cache rb_last(out_of_order_queue) */
287 struct sk_buff_head out_of_order_queue;
288 293
289 /* SACKs data, these 2 need to be together (see tcp_options_write) */ 294 /* SACKs data, these 2 need to be together (see tcp_options_write) */
290 struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */ 295 struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
diff --git a/include/linux/win_minmax.h b/include/linux/win_minmax.h
new file mode 100644
index 000000000000..56569604278f
--- /dev/null
+++ b/include/linux/win_minmax.h
@@ -0,0 +1,37 @@
1/**
2 * lib/minmax.c: windowed min/max tracker by Kathleen Nichols.
3 *
4 */
5#ifndef MINMAX_H
6#define MINMAX_H
7
8#include <linux/types.h>
9
10/* A single data point for our parameterized min-max tracker */
11struct minmax_sample {
12 u32 t; /* time measurement was taken */
13 u32 v; /* value measured */
14};
15
16/* State for the parameterized min-max tracker */
17struct minmax {
18 struct minmax_sample s[3];
19};
20
21static inline u32 minmax_get(const struct minmax *m)
22{
23 return m->s[0].v;
24}
25
26static inline u32 minmax_reset(struct minmax *m, u32 t, u32 meas)
27{
28 struct minmax_sample val = { .t = t, .v = meas };
29
30 m->s[2] = m->s[1] = m->s[0] = val;
31 return m->s[0].v;
32}
33
34u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas);
35u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas);
36
37#endif
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 9826d3a9464c..f2d072787947 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -1,8 +1,9 @@
1#ifndef _ADDRCONF_H 1#ifndef _ADDRCONF_H
2#define _ADDRCONF_H 2#define _ADDRCONF_H
3 3
4#define MAX_RTR_SOLICITATIONS 3 4#define MAX_RTR_SOLICITATIONS -1 /* unlimited */
5#define RTR_SOLICITATION_INTERVAL (4*HZ) 5#define RTR_SOLICITATION_INTERVAL (4*HZ)
6#define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */
6 7
7#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */ 8#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
8 9
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 7b0f88699b25..1061a472a3e3 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -12,42 +12,39 @@
12#ifndef _NET_RXRPC_H 12#ifndef _NET_RXRPC_H
13#define _NET_RXRPC_H 13#define _NET_RXRPC_H
14 14
15#include <linux/skbuff.h>
16#include <linux/rxrpc.h> 15#include <linux/rxrpc.h>
17 16
17struct key;
18struct sock;
19struct socket;
18struct rxrpc_call; 20struct rxrpc_call;
19 21
20/* 22typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *,
21 * the mark applied to socket buffers that may be intercepted 23 unsigned long);
22 */ 24typedef void (*rxrpc_notify_new_call_t)(struct sock *, struct rxrpc_call *,
23enum rxrpc_skb_mark { 25 unsigned long);
24 RXRPC_SKB_MARK_DATA, /* data message */ 26typedef void (*rxrpc_discard_new_call_t)(struct rxrpc_call *, unsigned long);
25 RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */ 27typedef void (*rxrpc_user_attach_call_t)(struct rxrpc_call *, unsigned long);
26 RXRPC_SKB_MARK_BUSY, /* server busy message */
27 RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
28 RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */
29 RXRPC_SKB_MARK_NET_ERROR, /* network error message */
30 RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
31 RXRPC_SKB_MARK_NEW_CALL, /* local error message */
32};
33 28
34typedef void (*rxrpc_interceptor_t)(struct sock *, unsigned long, 29void rxrpc_kernel_new_call_notification(struct socket *,
35 struct sk_buff *); 30 rxrpc_notify_new_call_t,
36void rxrpc_kernel_intercept_rx_messages(struct socket *, rxrpc_interceptor_t); 31 rxrpc_discard_new_call_t);
37struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *, 32struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
38 struct sockaddr_rxrpc *, 33 struct sockaddr_rxrpc *,
39 struct key *, 34 struct key *,
40 unsigned long, 35 unsigned long,
41 gfp_t); 36 gfp_t,
42int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t); 37 rxrpc_notify_rx_t);
43void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *); 38int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
44void rxrpc_kernel_abort_call(struct rxrpc_call *, u32); 39 struct msghdr *, size_t);
45void rxrpc_kernel_end_call(struct rxrpc_call *); 40int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
46bool rxrpc_kernel_is_data_last(struct sk_buff *); 41 void *, size_t, size_t *, bool, u32 *);
47u32 rxrpc_kernel_get_abort_code(struct sk_buff *); 42void rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
48int rxrpc_kernel_get_error_number(struct sk_buff *); 43 u32, int, const char *);
49void rxrpc_kernel_free_skb(struct sk_buff *); 44void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
50struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long); 45void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
51int rxrpc_kernel_reject_call(struct socket *); 46 struct sockaddr_rxrpc *);
47int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
48 rxrpc_user_attach_call_t, unsigned long, gfp_t);
52 49
53#endif /* _NET_RXRPC_H */ 50#endif /* _NET_RXRPC_H */
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index bfd1590821d6..0a1e21d7bce1 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -29,7 +29,8 @@
29#include <net/sock.h> 29#include <net/sock.h>
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31 31
32#define BT_SUBSYS_VERSION "2.21" 32#define BT_SUBSYS_VERSION 2
33#define BT_SUBSYS_REVISION 22
33 34
34#ifndef AF_BLUETOOTH 35#ifndef AF_BLUETOOTH
35#define AF_BLUETOOTH 31 36#define AF_BLUETOOTH 31
@@ -371,6 +372,7 @@ void hci_sock_set_flag(struct sock *sk, int nr);
371void hci_sock_clear_flag(struct sock *sk, int nr); 372void hci_sock_clear_flag(struct sock *sk, int nr);
372int hci_sock_test_flag(struct sock *sk, int nr); 373int hci_sock_test_flag(struct sock *sk, int nr);
373unsigned short hci_sock_get_channel(struct sock *sk); 374unsigned short hci_sock_get_channel(struct sock *sk);
375u32 hci_sock_get_cookie(struct sock *sk);
374 376
375int hci_sock_init(void); 377int hci_sock_init(void);
376void hci_sock_cleanup(void); 378void hci_sock_cleanup(void);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 003b25283407..99aa5e5e3100 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -63,6 +63,7 @@
63#define HCI_SDIO 6 63#define HCI_SDIO 6
64#define HCI_SPI 7 64#define HCI_SPI 7
65#define HCI_I2C 8 65#define HCI_I2C 8
66#define HCI_SMD 9
66 67
67/* HCI controller types */ 68/* HCI controller types */
68#define HCI_PRIMARY 0x00 69#define HCI_PRIMARY 0x00
@@ -207,7 +208,11 @@ enum {
207 HCI_MGMT_INDEX_EVENTS, 208 HCI_MGMT_INDEX_EVENTS,
208 HCI_MGMT_UNCONF_INDEX_EVENTS, 209 HCI_MGMT_UNCONF_INDEX_EVENTS,
209 HCI_MGMT_EXT_INDEX_EVENTS, 210 HCI_MGMT_EXT_INDEX_EVENTS,
210 HCI_MGMT_GENERIC_EVENTS, 211 HCI_MGMT_EXT_INFO_EVENTS,
212 HCI_MGMT_OPTION_EVENTS,
213 HCI_MGMT_SETTING_EVENTS,
214 HCI_MGMT_DEV_CLASS_EVENTS,
215 HCI_MGMT_LOCAL_NAME_EVENTS,
211 HCI_MGMT_OOB_DATA_EVENTS, 216 HCI_MGMT_OOB_DATA_EVENTS,
212}; 217};
213 218
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ee7fc47680a1..f00bf667ec33 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -211,6 +211,7 @@ struct hci_dev {
211 __u8 dev_name[HCI_MAX_NAME_LENGTH]; 211 __u8 dev_name[HCI_MAX_NAME_LENGTH];
212 __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH]; 212 __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
213 __u8 eir[HCI_MAX_EIR_LENGTH]; 213 __u8 eir[HCI_MAX_EIR_LENGTH];
214 __u16 appearance;
214 __u8 dev_class[3]; 215 __u8 dev_class[3];
215 __u8 major_class; 216 __u8 major_class;
216 __u8 minor_class; 217 __u8 minor_class;
@@ -399,7 +400,9 @@ struct hci_dev {
399 struct delayed_work rpa_expired; 400 struct delayed_work rpa_expired;
400 bdaddr_t rpa; 401 bdaddr_t rpa;
401 402
403#if IS_ENABLED(CONFIG_BT_LEDS)
402 struct led_trigger *power_led; 404 struct led_trigger *power_led;
405#endif
403 406
404 int (*open)(struct hci_dev *hdev); 407 int (*open)(struct hci_dev *hdev);
405 int (*close)(struct hci_dev *hdev); 408 int (*close)(struct hci_dev *hdev);
@@ -1026,8 +1029,8 @@ int hci_resume_dev(struct hci_dev *hdev);
1026int hci_reset_dev(struct hci_dev *hdev); 1029int hci_reset_dev(struct hci_dev *hdev);
1027int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); 1030int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
1028int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb); 1031int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
1029void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...); 1032__printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
1030void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...); 1033__printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
1031int hci_dev_open(__u16 dev); 1034int hci_dev_open(__u16 dev);
1032int hci_dev_close(__u16 dev); 1035int hci_dev_close(__u16 dev);
1033int hci_dev_do_close(struct hci_dev *hdev); 1036int hci_dev_do_close(struct hci_dev *hdev);
@@ -1404,6 +1407,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
1404void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, 1407void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
1405 int flag, struct sock *skip_sk); 1408 int flag, struct sock *skip_sk);
1406void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb); 1409void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
1410void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
1411 void *data, u16 data_len, ktime_t tstamp,
1412 int flag, struct sock *skip_sk);
1407 1413
1408void hci_sock_dev_event(struct hci_dev *hdev, int event); 1414void hci_sock_dev_event(struct hci_dev *hdev, int event);
1409 1415
@@ -1449,6 +1455,7 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
1449#define DISCOV_BREDR_INQUIRY_LEN 0x08 1455#define DISCOV_BREDR_INQUIRY_LEN 0x08
1450#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */ 1456#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
1451 1457
1458void mgmt_fill_version_info(void *ver);
1452int mgmt_new_settings(struct hci_dev *hdev); 1459int mgmt_new_settings(struct hci_dev *hdev);
1453void mgmt_index_added(struct hci_dev *hdev); 1460void mgmt_index_added(struct hci_dev *hdev);
1454void mgmt_index_removed(struct hci_dev *hdev); 1461void mgmt_index_removed(struct hci_dev *hdev);
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 587d0131b349..240786b04a46 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -45,6 +45,10 @@ struct hci_mon_hdr {
45#define HCI_MON_VENDOR_DIAG 11 45#define HCI_MON_VENDOR_DIAG 11
46#define HCI_MON_SYSTEM_NOTE 12 46#define HCI_MON_SYSTEM_NOTE 12
47#define HCI_MON_USER_LOGGING 13 47#define HCI_MON_USER_LOGGING 13
48#define HCI_MON_CTRL_OPEN 14
49#define HCI_MON_CTRL_CLOSE 15
50#define HCI_MON_CTRL_COMMAND 16
51#define HCI_MON_CTRL_EVENT 17
48 52
49struct hci_mon_new_index { 53struct hci_mon_new_index {
50 __u8 type; 54 __u8 type;
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 7647964b1efa..72a456bbbcd5 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -586,6 +586,24 @@ struct mgmt_rp_get_adv_size_info {
586 586
587#define MGMT_OP_START_LIMITED_DISCOVERY 0x0041 587#define MGMT_OP_START_LIMITED_DISCOVERY 0x0041
588 588
589#define MGMT_OP_READ_EXT_INFO 0x0042
590#define MGMT_READ_EXT_INFO_SIZE 0
591struct mgmt_rp_read_ext_info {
592 bdaddr_t bdaddr;
593 __u8 version;
594 __le16 manufacturer;
595 __le32 supported_settings;
596 __le32 current_settings;
597 __le16 eir_len;
598 __u8 eir[0];
599} __packed;
600
601#define MGMT_OP_SET_APPEARANCE 0x0043
602struct mgmt_cp_set_appearance {
603 __u16 appearance;
604} __packed;
605#define MGMT_SET_APPEARANCE_SIZE 2
606
589#define MGMT_EV_CMD_COMPLETE 0x0001 607#define MGMT_EV_CMD_COMPLETE 0x0001
590struct mgmt_ev_cmd_complete { 608struct mgmt_ev_cmd_complete {
591 __le16 opcode; 609 __le16 opcode;
@@ -800,3 +818,9 @@ struct mgmt_ev_advertising_added {
800struct mgmt_ev_advertising_removed { 818struct mgmt_ev_advertising_removed {
801 __u8 instance; 819 __u8 instance;
802} __packed; 820} __packed;
821
822#define MGMT_EV_EXT_INFO_CHANGED 0x0025
823struct mgmt_ev_ext_info_changed {
824 __le16 eir_len;
825 __u8 eir[0];
826} __packed;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index beb7610d64e9..fe78f02a242e 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> 6 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2013-2014 Intel Mobile Communications GmbH 7 * Copyright 2013-2014 Intel Mobile Communications GmbH
8 * Copyright 2015 Intel Deutschland GmbH 8 * Copyright 2015-2016 Intel Deutschland GmbH
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
@@ -593,6 +593,8 @@ struct survey_info {
593 s8 noise; 593 s8 noise;
594}; 594};
595 595
596#define CFG80211_MAX_WEP_KEYS 4
597
596/** 598/**
597 * struct cfg80211_crypto_settings - Crypto settings 599 * struct cfg80211_crypto_settings - Crypto settings
598 * @wpa_versions: indicates which, if any, WPA versions are enabled 600 * @wpa_versions: indicates which, if any, WPA versions are enabled
@@ -610,6 +612,9 @@ struct survey_info {
610 * allowed through even on unauthorized ports 612 * allowed through even on unauthorized ports
611 * @control_port_no_encrypt: TRUE to prevent encryption of control port 613 * @control_port_no_encrypt: TRUE to prevent encryption of control port
612 * protocol frames. 614 * protocol frames.
615 * @wep_keys: static WEP keys, if not NULL points to an array of
616 * CFG80211_MAX_WEP_KEYS WEP keys
617 * @wep_tx_key: key index (0..3) of the default TX static WEP key
613 */ 618 */
614struct cfg80211_crypto_settings { 619struct cfg80211_crypto_settings {
615 u32 wpa_versions; 620 u32 wpa_versions;
@@ -621,6 +626,8 @@ struct cfg80211_crypto_settings {
621 bool control_port; 626 bool control_port;
622 __be16 control_port_ethertype; 627 __be16 control_port_ethertype;
623 bool control_port_no_encrypt; 628 bool control_port_no_encrypt;
629 struct key_params *wep_keys;
630 int wep_tx_key;
624}; 631};
625 632
626/** 633/**
@@ -676,6 +683,18 @@ struct cfg80211_acl_data {
676 struct mac_address mac_addrs[]; 683 struct mac_address mac_addrs[];
677}; 684};
678 685
686/*
687 * cfg80211_bitrate_mask - masks for bitrate control
688 */
689struct cfg80211_bitrate_mask {
690 struct {
691 u32 legacy;
692 u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
693 u16 vht_mcs[NL80211_VHT_NSS_MAX];
694 enum nl80211_txrate_gi gi;
695 } control[NUM_NL80211_BANDS];
696};
697
679/** 698/**
680 * struct cfg80211_ap_settings - AP configuration 699 * struct cfg80211_ap_settings - AP configuration
681 * 700 *
@@ -700,6 +719,7 @@ struct cfg80211_acl_data {
700 * MAC address based access control 719 * MAC address based access control
701 * @pbss: If set, start as a PCP instead of AP. Relevant for DMG 720 * @pbss: If set, start as a PCP instead of AP. Relevant for DMG
702 * networks. 721 * networks.
722 * @beacon_rate: bitrate to be used for beacons
703 */ 723 */
704struct cfg80211_ap_settings { 724struct cfg80211_ap_settings {
705 struct cfg80211_chan_def chandef; 725 struct cfg80211_chan_def chandef;
@@ -719,6 +739,7 @@ struct cfg80211_ap_settings {
719 bool p2p_opp_ps; 739 bool p2p_opp_ps;
720 const struct cfg80211_acl_data *acl; 740 const struct cfg80211_acl_data *acl;
721 bool pbss; 741 bool pbss;
742 struct cfg80211_bitrate_mask beacon_rate;
722}; 743};
723 744
724/** 745/**
@@ -1351,6 +1372,7 @@ struct mesh_config {
1351 * @beacon_interval: beacon interval to use 1372 * @beacon_interval: beacon interval to use
1352 * @mcast_rate: multicat rate for Mesh Node [6Mbps is the default for 802.11a] 1373 * @mcast_rate: multicat rate for Mesh Node [6Mbps is the default for 802.11a]
1353 * @basic_rates: basic rates to use when creating the mesh 1374 * @basic_rates: basic rates to use when creating the mesh
1375 * @beacon_rate: bitrate to be used for beacons
1354 * 1376 *
1355 * These parameters are fixed when the mesh is created. 1377 * These parameters are fixed when the mesh is created.
1356 */ 1378 */
@@ -1371,6 +1393,7 @@ struct mesh_setup {
1371 u16 beacon_interval; 1393 u16 beacon_interval;
1372 int mcast_rate[NUM_NL80211_BANDS]; 1394 int mcast_rate[NUM_NL80211_BANDS];
1373 u32 basic_rates; 1395 u32 basic_rates;
1396 struct cfg80211_bitrate_mask beacon_rate;
1374}; 1397};
1375 1398
1376/** 1399/**
@@ -2010,17 +2033,6 @@ enum wiphy_params_flags {
2010 WIPHY_PARAM_DYN_ACK = 1 << 5, 2033 WIPHY_PARAM_DYN_ACK = 1 << 5,
2011}; 2034};
2012 2035
2013/*
2014 * cfg80211_bitrate_mask - masks for bitrate control
2015 */
2016struct cfg80211_bitrate_mask {
2017 struct {
2018 u32 legacy;
2019 u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
2020 u16 vht_mcs[NL80211_VHT_NSS_MAX];
2021 enum nl80211_txrate_gi gi;
2022 } control[NUM_NL80211_BANDS];
2023};
2024/** 2036/**
2025 * struct cfg80211_pmksa - PMK Security Association 2037 * struct cfg80211_pmksa - PMK Security Association
2026 * 2038 *
@@ -2302,6 +2314,98 @@ struct cfg80211_qos_map {
2302}; 2314};
2303 2315
2304/** 2316/**
2317 * struct cfg80211_nan_conf - NAN configuration
2318 *
2319 * This struct defines NAN configuration parameters
2320 *
2321 * @master_pref: master preference (1 - 255)
2322 * @dual: dual band operation mode, see &enum nl80211_nan_dual_band_conf
2323 */
2324struct cfg80211_nan_conf {
2325 u8 master_pref;
2326 u8 dual;
2327};
2328
2329/**
2330 * enum cfg80211_nan_conf_changes - indicates changed fields in NAN
2331 * configuration
2332 *
2333 * @CFG80211_NAN_CONF_CHANGED_PREF: master preference
2334 * @CFG80211_NAN_CONF_CHANGED_DUAL: dual band operation
2335 */
2336enum cfg80211_nan_conf_changes {
2337 CFG80211_NAN_CONF_CHANGED_PREF = BIT(0),
2338 CFG80211_NAN_CONF_CHANGED_DUAL = BIT(1),
2339};
2340
2341/**
2342 * struct cfg80211_nan_func_filter - a NAN function Rx / Tx filter
2343 *
2344 * @filter: the content of the filter
2345 * @len: the length of the filter
2346 */
2347struct cfg80211_nan_func_filter {
2348 const u8 *filter;
2349 u8 len;
2350};
2351
2352/**
2353 * struct cfg80211_nan_func - a NAN function
2354 *
2355 * @type: &enum nl80211_nan_function_type
2356 * @service_id: the service ID of the function
2357 * @publish_type: &nl80211_nan_publish_type
2358 * @close_range: if true, the range should be limited. Threshold is
2359 * implementation specific.
2360 * @publish_bcast: if true, the solicited publish should be broadcasted
2361 * @subscribe_active: if true, the subscribe is active
2362 * @followup_id: the instance ID for follow up
2363 * @followup_reqid: the requestor instance ID for follow up
2364 * @followup_dest: MAC address of the recipient of the follow up
2365 * @ttl: time to live counter in DW.
2366 * @serv_spec_info: Service Specific Info
2367 * @serv_spec_info_len: Service Specific Info length
2368 * @srf_include: if true, SRF is inclusive
2369 * @srf_bf: Bloom Filter
2370 * @srf_bf_len: Bloom Filter length
2371 * @srf_bf_idx: Bloom Filter index
2372 * @srf_macs: SRF MAC addresses
2373 * @srf_num_macs: number of MAC addresses in SRF
2374 * @rx_filters: rx filters that are matched with corresponding peer's tx_filter
2375 * @tx_filters: filters that should be transmitted in the SDF.
2376 * @num_rx_filters: length of &rx_filters.
2377 * @num_tx_filters: length of &tx_filters.
2378 * @instance_id: driver allocated id of the function.
2379 * @cookie: unique NAN function identifier.
2380 */
2381struct cfg80211_nan_func {
2382 enum nl80211_nan_function_type type;
2383 u8 service_id[NL80211_NAN_FUNC_SERVICE_ID_LEN];
2384 u8 publish_type;
2385 bool close_range;
2386 bool publish_bcast;
2387 bool subscribe_active;
2388 u8 followup_id;
2389 u8 followup_reqid;
2390 struct mac_address followup_dest;
2391 u32 ttl;
2392 const u8 *serv_spec_info;
2393 u8 serv_spec_info_len;
2394 bool srf_include;
2395 const u8 *srf_bf;
2396 u8 srf_bf_len;
2397 u8 srf_bf_idx;
2398 struct mac_address *srf_macs;
2399 int srf_num_macs;
2400 struct cfg80211_nan_func_filter *rx_filters;
2401 struct cfg80211_nan_func_filter *tx_filters;
2402 u8 num_tx_filters;
2403 u8 num_rx_filters;
2404 u8 instance_id;
2405 u64 cookie;
2406};
2407
2408/**
2305 * struct cfg80211_ops - backend description for wireless configuration 2409 * struct cfg80211_ops - backend description for wireless configuration
2306 * 2410 *
2307 * This struct is registered by fullmac card drivers and/or wireless stacks 2411 * This struct is registered by fullmac card drivers and/or wireless stacks
@@ -2432,7 +2536,8 @@ struct cfg80211_qos_map {
2432 * cases, the result of roaming is indicated with a call to 2536 * cases, the result of roaming is indicated with a call to
2433 * cfg80211_roamed() or cfg80211_roamed_bss(). 2537 * cfg80211_roamed() or cfg80211_roamed_bss().
2434 * (invoked with the wireless_dev mutex held) 2538 * (invoked with the wireless_dev mutex held)
2435 * @disconnect: Disconnect from the BSS/ESS. 2539 * @disconnect: Disconnect from the BSS/ESS. Once done, call
2540 * cfg80211_disconnected().
2436 * (invoked with the wireless_dev mutex held) 2541 * (invoked with the wireless_dev mutex held)
2437 * 2542 *
2438 * @join_ibss: Join the specified IBSS (or create if necessary). Once done, call 2543 * @join_ibss: Join the specified IBSS (or create if necessary). Once done, call
@@ -2588,6 +2693,19 @@ struct cfg80211_qos_map {
2588 * and returning to the base channel for communication with the AP. 2693 * and returning to the base channel for communication with the AP.
2589 * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both 2694 * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both
2590 * peers must be on the base channel when the call completes. 2695 * peers must be on the base channel when the call completes.
2696 * @start_nan: Start the NAN interface.
2697 * @stop_nan: Stop the NAN interface.
2698 * @add_nan_func: Add a NAN function. Returns negative value on failure.
2699 * On success @nan_func ownership is transferred to the driver and
2700 * it may access it outside of the scope of this function. The driver
2701 * should free the @nan_func when no longer needed by calling
2702 * cfg80211_free_nan_func().
2703 * On success the driver should assign an instance_id in the
2704 * provided @nan_func.
2705 * @del_nan_func: Delete a NAN function.
2706 * @nan_change_conf: changes NAN configuration. The changed parameters must
2707 * be specified in @changes (using &enum cfg80211_nan_conf_changes);
2708 * All other parameters must be ignored.
2591 */ 2709 */
2592struct cfg80211_ops { 2710struct cfg80211_ops {
2593 int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); 2711 int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2853,6 +2971,17 @@ struct cfg80211_ops {
2853 void (*tdls_cancel_channel_switch)(struct wiphy *wiphy, 2971 void (*tdls_cancel_channel_switch)(struct wiphy *wiphy,
2854 struct net_device *dev, 2972 struct net_device *dev,
2855 const u8 *addr); 2973 const u8 *addr);
2974 int (*start_nan)(struct wiphy *wiphy, struct wireless_dev *wdev,
2975 struct cfg80211_nan_conf *conf);
2976 void (*stop_nan)(struct wiphy *wiphy, struct wireless_dev *wdev);
2977 int (*add_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev,
2978 struct cfg80211_nan_func *nan_func);
2979 void (*del_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev,
2980 u64 cookie);
2981 int (*nan_change_conf)(struct wiphy *wiphy,
2982 struct wireless_dev *wdev,
2983 struct cfg80211_nan_conf *conf,
2984 u32 changes);
2856}; 2985};
2857 2986
2858/* 2987/*
@@ -2899,6 +3028,8 @@ struct cfg80211_ops {
2899 * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels. 3028 * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
2900 * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in 3029 * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
2901 * beaconing mode (AP, IBSS, Mesh, ...). 3030 * beaconing mode (AP, IBSS, Mesh, ...).
3031 * @WIPHY_FLAG_HAS_STATIC_WEP: The device supports static WEP key installation
3032 * before connection.
2902 */ 3033 */
2903enum wiphy_flags { 3034enum wiphy_flags {
2904 /* use hole at 0 */ 3035 /* use hole at 0 */
@@ -2924,6 +3055,7 @@ enum wiphy_flags {
2924 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21), 3055 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
2925 WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22), 3056 WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22),
2926 WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23), 3057 WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23),
3058 WIPHY_FLAG_HAS_STATIC_WEP = BIT(24),
2927}; 3059};
2928 3060
2929/** 3061/**
@@ -3301,6 +3433,8 @@ struct wiphy_iftype_ext_capab {
3301 * @bss_select_support: bitmask indicating the BSS selection criteria supported 3433 * @bss_select_support: bitmask indicating the BSS selection criteria supported
3302 * by the driver in the .connect() callback. The bit position maps to the 3434 * by the driver in the .connect() callback. The bit position maps to the
3303 * attribute indices defined in &enum nl80211_bss_select_attr. 3435 * attribute indices defined in &enum nl80211_bss_select_attr.
3436 *
3437 * @cookie_counter: unique generic cookie counter, used to identify objects.
3304 */ 3438 */
3305struct wiphy { 3439struct wiphy {
3306 /* assign these fields before you register the wiphy */ 3440 /* assign these fields before you register the wiphy */
@@ -3430,6 +3564,8 @@ struct wiphy {
3430 3564
3431 u32 bss_select_support; 3565 u32 bss_select_support;
3432 3566
3567 u64 cookie_counter;
3568
3433 char priv[0] __aligned(NETDEV_ALIGN); 3569 char priv[0] __aligned(NETDEV_ALIGN);
3434}; 3570};
3435 3571
@@ -3610,6 +3746,7 @@ struct cfg80211_cached_keys;
3610 * beacons, 0 when not valid 3746 * beacons, 0 when not valid
3611 * @address: The address for this device, valid only if @netdev is %NULL 3747 * @address: The address for this device, valid only if @netdev is %NULL
3612 * @p2p_started: true if this is a P2P Device that has been started 3748 * @p2p_started: true if this is a P2P Device that has been started
3749 * @nan_started: true if this is a NAN interface that has been started
3613 * @cac_started: true if DFS channel availability check has been started 3750 * @cac_started: true if DFS channel availability check has been started
3614 * @cac_start_time: timestamp (jiffies) when the dfs state was entered. 3751 * @cac_start_time: timestamp (jiffies) when the dfs state was entered.
3615 * @cac_time_ms: CAC time in ms 3752 * @cac_time_ms: CAC time in ms
@@ -3641,7 +3778,7 @@ struct wireless_dev {
3641 3778
3642 struct mutex mtx; 3779 struct mutex mtx;
3643 3780
3644 bool use_4addr, p2p_started; 3781 bool use_4addr, p2p_started, nan_started;
3645 3782
3646 u8 address[ETH_ALEN] __aligned(sizeof(u16)); 3783 u8 address[ETH_ALEN] __aligned(sizeof(u16));
3647 3784
@@ -3955,6 +4092,34 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
3955 struct cfg80211_qos_map *qos_map); 4092 struct cfg80211_qos_map *qos_map);
3956 4093
3957/** 4094/**
4095 * cfg80211_find_ie_match - match information element and byte array in data
4096 *
4097 * @eid: element ID
4098 * @ies: data consisting of IEs
4099 * @len: length of data
4100 * @match: byte array to match
4101 * @match_len: number of bytes in the match array
4102 * @match_offset: offset in the IE where the byte array should match.
4103 * If match_len is zero, this must also be set to zero.
4104 * Otherwise this must be set to 2 or more, because the first
4105 * byte is the element id, which is already compared to eid, and
4106 * the second byte is the IE length.
4107 *
4108 * Return: %NULL if the element ID could not be found or if
4109 * the element is invalid (claims to be longer than the given
4110 * data) or if the byte array doesn't match, or a pointer to the first
4111 * byte of the requested element, that is the byte containing the
4112 * element ID.
4113 *
4114 * Note: There are no checks on the element length other than
4115 * having to fit into the given data and being large enough for the
4116 * byte array to match.
4117 */
4118const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
4119 const u8 *match, int match_len,
4120 int match_offset);
4121
4122/**
3958 * cfg80211_find_ie - find information element in data 4123 * cfg80211_find_ie - find information element in data
3959 * 4124 *
3960 * @eid: element ID 4125 * @eid: element ID
@@ -3969,7 +4134,10 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
3969 * Note: There are no checks on the element length other than 4134 * Note: There are no checks on the element length other than
3970 * having to fit into the given data. 4135 * having to fit into the given data.
3971 */ 4136 */
3972const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len); 4137static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
4138{
4139 return cfg80211_find_ie_match(eid, ies, len, NULL, 0, 0);
4140}
3973 4141
3974/** 4142/**
3975 * cfg80211_find_vendor_ie - find vendor specific information element in data 4143 * cfg80211_find_vendor_ie - find vendor specific information element in data
@@ -5518,6 +5686,67 @@ wiphy_ext_feature_isset(struct wiphy *wiphy,
5518 return (ft_byte & BIT(ftidx % 8)) != 0; 5686 return (ft_byte & BIT(ftidx % 8)) != 0;
5519} 5687}
5520 5688
5689/**
5690 * cfg80211_free_nan_func - free NAN function
5691 * @f: NAN function that should be freed
5692 *
5693 * Frees all the NAN function and all it's allocated members.
5694 */
5695void cfg80211_free_nan_func(struct cfg80211_nan_func *f);
5696
5697/**
5698 * struct cfg80211_nan_match_params - NAN match parameters
5699 * @type: the type of the function that triggered a match. If it is
5700 * %NL80211_NAN_FUNC_SUBSCRIBE it means that we replied to a subscriber.
5701 * If it is %NL80211_NAN_FUNC_PUBLISH, it means that we got a discovery
5702 * result.
5703 * If it is %NL80211_NAN_FUNC_FOLLOW_UP, we received a follow up.
5704 * @inst_id: the local instance id
5705 * @peer_inst_id: the instance id of the peer's function
5706 * @addr: the MAC address of the peer
5707 * @info_len: the length of the &info
5708 * @info: the Service Specific Info from the peer (if any)
5709 * @cookie: unique identifier of the corresponding function
5710 */
5711struct cfg80211_nan_match_params {
5712 enum nl80211_nan_function_type type;
5713 u8 inst_id;
5714 u8 peer_inst_id;
5715 const u8 *addr;
5716 u8 info_len;
5717 const u8 *info;
5718 u64 cookie;
5719};
5720
5721/**
5722 * cfg80211_nan_match - report a match for a NAN function.
5723 * @wdev: the wireless device reporting the match
5724 * @match: match notification parameters
5725 * @gfp: allocation flags
5726 *
5727 * This function reports that the a NAN function had a match. This
5728 * can be a subscribe that had a match or a solicited publish that
5729 * was sent. It can also be a follow up that was received.
5730 */
5731void cfg80211_nan_match(struct wireless_dev *wdev,
5732 struct cfg80211_nan_match_params *match, gfp_t gfp);
5733
5734/**
5735 * cfg80211_nan_func_terminated - notify about NAN function termination.
5736 *
5737 * @wdev: the wireless device reporting the match
5738 * @inst_id: the local instance id
5739 * @reason: termination reason (one of the NL80211_NAN_FUNC_TERM_REASON_*)
5740 * @cookie: unique NAN function identifier
5741 * @gfp: allocation flags
5742 *
5743 * This function reports that the a NAN function is terminated.
5744 */
5745void cfg80211_nan_func_terminated(struct wireless_dev *wdev,
5746 u8 inst_id,
5747 enum nl80211_nan_func_term_reason reason,
5748 u64 cookie, gfp_t gfp);
5749
5521/* ethtool helper */ 5750/* ethtool helper */
5522void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info); 5751void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
5523 5752
diff --git a/include/net/devlink.h b/include/net/devlink.h
index c99ffe8cef3c..211bd3c37028 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -50,7 +50,6 @@ struct devlink_sb_pool_info {
50}; 50};
51 51
52struct devlink_ops { 52struct devlink_ops {
53 size_t priv_size;
54 int (*port_type_set)(struct devlink_port *devlink_port, 53 int (*port_type_set)(struct devlink_port *devlink_port,
55 enum devlink_port_type port_type); 54 enum devlink_port_type port_type);
56 int (*port_split)(struct devlink *devlink, unsigned int port_index, 55 int (*port_split)(struct devlink *devlink, unsigned int port_index,
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 2217a3f817f8..b122196d5a1f 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -26,6 +26,7 @@ enum dsa_tag_protocol {
26 DSA_TAG_PROTO_TRAILER, 26 DSA_TAG_PROTO_TRAILER,
27 DSA_TAG_PROTO_EDSA, 27 DSA_TAG_PROTO_EDSA,
28 DSA_TAG_PROTO_BRCM, 28 DSA_TAG_PROTO_BRCM,
29 DSA_TAG_PROTO_QCA,
29 DSA_TAG_LAST, /* MUST BE LAST */ 30 DSA_TAG_LAST, /* MUST BE LAST */
30}; 31};
31 32
@@ -142,6 +143,7 @@ struct dsa_port {
142 struct net_device *netdev; 143 struct net_device *netdev;
143 struct device_node *dn; 144 struct device_node *dn;
144 unsigned int ageing_time; 145 unsigned int ageing_time;
146 u8 stp_state;
145}; 147};
146 148
147struct dsa_switch { 149struct dsa_switch {
@@ -165,9 +167,9 @@ struct dsa_switch {
165 struct dsa_chip_data *cd; 167 struct dsa_chip_data *cd;
166 168
167 /* 169 /*
168 * The used switch driver. 170 * The switch operations.
169 */ 171 */
170 struct dsa_switch_driver *drv; 172 struct dsa_switch_ops *ops;
171 173
172 /* 174 /*
173 * An array of which element [a] indicates which port on this 175 * An array of which element [a] indicates which port on this
@@ -234,19 +236,21 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
234struct switchdev_trans; 236struct switchdev_trans;
235struct switchdev_obj; 237struct switchdev_obj;
236struct switchdev_obj_port_fdb; 238struct switchdev_obj_port_fdb;
239struct switchdev_obj_port_mdb;
237struct switchdev_obj_port_vlan; 240struct switchdev_obj_port_vlan;
238 241
239struct dsa_switch_driver { 242struct dsa_switch_ops {
240 struct list_head list; 243 struct list_head list;
241 244
242 enum dsa_tag_protocol tag_protocol;
243
244 /* 245 /*
245 * Probing and setup. 246 * Probing and setup.
246 */ 247 */
247 const char *(*probe)(struct device *dsa_dev, 248 const char *(*probe)(struct device *dsa_dev,
248 struct device *host_dev, int sw_addr, 249 struct device *host_dev, int sw_addr,
249 void **priv); 250 void **priv);
251
252 enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds);
253
250 int (*setup)(struct dsa_switch *ds); 254 int (*setup)(struct dsa_switch *ds);
251 int (*set_addr)(struct dsa_switch *ds, u8 *addr); 255 int (*set_addr)(struct dsa_switch *ds, u8 *addr);
252 u32 (*get_phy_flags)(struct dsa_switch *ds, int port); 256 u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
@@ -336,6 +340,7 @@ struct dsa_switch_driver {
336 void (*port_bridge_leave)(struct dsa_switch *ds, int port); 340 void (*port_bridge_leave)(struct dsa_switch *ds, int port);
337 void (*port_stp_state_set)(struct dsa_switch *ds, int port, 341 void (*port_stp_state_set)(struct dsa_switch *ds, int port,
338 u8 state); 342 u8 state);
343 void (*port_fast_age)(struct dsa_switch *ds, int port);
339 344
340 /* 345 /*
341 * VLAN support 346 * VLAN support
@@ -368,17 +373,27 @@ struct dsa_switch_driver {
368 int (*port_fdb_dump)(struct dsa_switch *ds, int port, 373 int (*port_fdb_dump)(struct dsa_switch *ds, int port,
369 struct switchdev_obj_port_fdb *fdb, 374 struct switchdev_obj_port_fdb *fdb,
370 int (*cb)(struct switchdev_obj *obj)); 375 int (*cb)(struct switchdev_obj *obj));
376
377 /*
378 * Multicast database
379 */
380 int (*port_mdb_prepare)(struct dsa_switch *ds, int port,
381 const struct switchdev_obj_port_mdb *mdb,
382 struct switchdev_trans *trans);
383 void (*port_mdb_add)(struct dsa_switch *ds, int port,
384 const struct switchdev_obj_port_mdb *mdb,
385 struct switchdev_trans *trans);
386 int (*port_mdb_del)(struct dsa_switch *ds, int port,
387 const struct switchdev_obj_port_mdb *mdb);
388 int (*port_mdb_dump)(struct dsa_switch *ds, int port,
389 struct switchdev_obj_port_mdb *mdb,
390 int (*cb)(struct switchdev_obj *obj));
371}; 391};
372 392
373void register_switch_driver(struct dsa_switch_driver *type); 393void register_switch_driver(struct dsa_switch_ops *type);
374void unregister_switch_driver(struct dsa_switch_driver *type); 394void unregister_switch_driver(struct dsa_switch_ops *type);
375struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); 395struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
376 396
377static inline void *ds_to_priv(struct dsa_switch *ds)
378{
379 return ds->priv;
380}
381
382static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) 397static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
383{ 398{
384 return dst->rcv != NULL; 399 return dst->rcv != NULL;
@@ -386,4 +401,18 @@ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
386 401
387void dsa_unregister_switch(struct dsa_switch *ds); 402void dsa_unregister_switch(struct dsa_switch *ds);
388int dsa_register_switch(struct dsa_switch *ds, struct device_node *np); 403int dsa_register_switch(struct dsa_switch *ds, struct device_node *np);
404#ifdef CONFIG_PM_SLEEP
405int dsa_switch_suspend(struct dsa_switch *ds);
406int dsa_switch_resume(struct dsa_switch *ds);
407#else
408static inline int dsa_switch_suspend(struct dsa_switch *ds)
409{
410 return 0;
411}
412static inline int dsa_switch_resume(struct dsa_switch *ds)
413{
414 return 0;
415}
416#endif /* CONFIG_PM_SLEEP */
417
389#endif 418#endif
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 5db9f5910428..6965c8f68ade 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -112,12 +112,13 @@ static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb
112 return &dst->u.tun_info; 112 return &dst->u.tun_info;
113} 113}
114 114
115static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb, 115static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
116 __be16 flags, 116 __be32 daddr,
117 __be64 tunnel_id, 117 __u8 tos, __u8 ttl,
118 int md_size) 118 __be16 flags,
119 __be64 tunnel_id,
120 int md_size)
119{ 121{
120 const struct iphdr *iph = ip_hdr(skb);
121 struct metadata_dst *tun_dst; 122 struct metadata_dst *tun_dst;
122 123
123 tun_dst = tun_rx_dst(md_size); 124 tun_dst = tun_rx_dst(md_size);
@@ -125,17 +126,30 @@ static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
125 return NULL; 126 return NULL;
126 127
127 ip_tunnel_key_init(&tun_dst->u.tun_info.key, 128 ip_tunnel_key_init(&tun_dst->u.tun_info.key,
128 iph->saddr, iph->daddr, iph->tos, iph->ttl, 129 saddr, daddr, tos, ttl,
129 0, 0, 0, tunnel_id, flags); 130 0, 0, 0, tunnel_id, flags);
130 return tun_dst; 131 return tun_dst;
131} 132}
132 133
133static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb, 134static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
134 __be16 flags, 135 __be16 flags,
135 __be64 tunnel_id, 136 __be64 tunnel_id,
136 int md_size) 137 int md_size)
137{ 138{
138 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 139 const struct iphdr *iph = ip_hdr(skb);
140
141 return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
142 flags, tunnel_id, md_size);
143}
144
145static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
146 const struct in6_addr *daddr,
147 __u8 tos, __u8 ttl,
148 __be32 label,
149 __be16 flags,
150 __be64 tunnel_id,
151 int md_size)
152{
139 struct metadata_dst *tun_dst; 153 struct metadata_dst *tun_dst;
140 struct ip_tunnel_info *info; 154 struct ip_tunnel_info *info;
141 155
@@ -150,14 +164,26 @@ static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
150 info->key.tp_src = 0; 164 info->key.tp_src = 0;
151 info->key.tp_dst = 0; 165 info->key.tp_dst = 0;
152 166
153 info->key.u.ipv6.src = ip6h->saddr; 167 info->key.u.ipv6.src = *saddr;
154 info->key.u.ipv6.dst = ip6h->daddr; 168 info->key.u.ipv6.dst = *daddr;
155 169
156 info->key.tos = ipv6_get_dsfield(ip6h); 170 info->key.tos = tos;
157 info->key.ttl = ip6h->hop_limit; 171 info->key.ttl = ttl;
158 info->key.label = ip6_flowlabel(ip6h); 172 info->key.label = label;
159 173
160 return tun_dst; 174 return tun_dst;
161} 175}
162 176
177static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
178 __be16 flags,
179 __be64 tunnel_id,
180 int md_size)
181{
182 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
183
184 return __ipv6_tun_set_dst(&ip6h->saddr, &ip6h->daddr,
185 ipv6_get_dsfield(ip6h), ip6h->hop_limit,
186 ip6_flowlabel(ip6h), flags, tunnel_id,
187 md_size);
188}
163#endif /* __NET_DST_METADATA_H */ 189#endif /* __NET_DST_METADATA_H */
diff --git a/include/net/flow.h b/include/net/flow.h
index d47ef4bb5423..035aa7716967 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -34,8 +34,7 @@ struct flowi_common {
34 __u8 flowic_flags; 34 __u8 flowic_flags;
35#define FLOWI_FLAG_ANYSRC 0x01 35#define FLOWI_FLAG_ANYSRC 0x01
36#define FLOWI_FLAG_KNOWN_NH 0x02 36#define FLOWI_FLAG_KNOWN_NH 0x02
37#define FLOWI_FLAG_L3MDEV_SRC 0x04 37#define FLOWI_FLAG_SKIP_NH_OIF 0x04
38#define FLOWI_FLAG_SKIP_NH_OIF 0x08
39 __u32 flowic_secid; 38 __u32 flowic_secid;
40 struct flowi_tunnel flowic_tun_key; 39 struct flowi_tunnel flowic_tun_key;
41}; 40};
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index d3d60dccd19f..d9534927d93b 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -32,8 +32,13 @@ struct flow_dissector_key_basic {
32}; 32};
33 33
34struct flow_dissector_key_tags { 34struct flow_dissector_key_tags {
35 u32 vlan_id:12, 35 u32 flow_label;
36 flow_label:20; 36};
37
38struct flow_dissector_key_vlan {
39 u16 vlan_id:12,
40 vlan_priority:3;
41 u16 padding;
37}; 42};
38 43
39struct flow_dissector_key_keyid { 44struct flow_dissector_key_keyid {
@@ -119,7 +124,7 @@ enum flow_dissector_key_id {
119 FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */ 124 FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
120 FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ 125 FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
121 FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */ 126 FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
122 FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_flow_tags */ 127 FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */
123 FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */ 128 FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */
124 FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */ 129 FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
125 FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */ 130 FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
@@ -148,6 +153,7 @@ struct flow_keys {
148#define FLOW_KEYS_HASH_START_FIELD basic 153#define FLOW_KEYS_HASH_START_FIELD basic
149 struct flow_dissector_key_basic basic; 154 struct flow_dissector_key_basic basic;
150 struct flow_dissector_key_tags tags; 155 struct flow_dissector_key_tags tags;
156 struct flow_dissector_key_vlan vlan;
151 struct flow_dissector_key_keyid keyid; 157 struct flow_dissector_key_keyid keyid;
152 struct flow_dissector_key_ports ports; 158 struct flow_dissector_key_ports ports;
153 struct flow_dissector_key_addrs addrs; 159 struct flow_dissector_key_addrs addrs;
@@ -177,7 +183,7 @@ struct flow_keys_digest {
177void make_flow_keys_digest(struct flow_keys_digest *digest, 183void make_flow_keys_digest(struct flow_keys_digest *digest,
178 const struct flow_keys *flow); 184 const struct flow_keys *flow);
179 185
180static inline bool flow_keys_have_l4(struct flow_keys *keys) 186static inline bool flow_keys_have_l4(const struct flow_keys *keys)
181{ 187{
182 return (keys->ports.ports || keys->tags.flow_label); 188 return (keys->ports.ports || keys->tags.flow_label);
183} 189}
diff --git a/include/net/fq.h b/include/net/fq.h
index 268b49049c37..6d8521a30c5c 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -72,9 +72,12 @@ struct fq {
72 u32 flows_cnt; 72 u32 flows_cnt;
73 u32 perturbation; 73 u32 perturbation;
74 u32 limit; 74 u32 limit;
75 u32 memory_limit;
76 u32 memory_usage;
75 u32 quantum; 77 u32 quantum;
76 u32 backlog; 78 u32 backlog;
77 u32 overlimit; 79 u32 overlimit;
80 u32 overmemory;
78 u32 collisions; 81 u32 collisions;
79}; 82};
80 83
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 163f3ed0f05a..4e6131cd3f43 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -29,6 +29,7 @@ static struct sk_buff *fq_flow_dequeue(struct fq *fq,
29 tin->backlog_packets--; 29 tin->backlog_packets--;
30 flow->backlog -= skb->len; 30 flow->backlog -= skb->len;
31 fq->backlog--; 31 fq->backlog--;
32 fq->memory_usage -= skb->truesize;
32 33
33 if (flow->backlog == 0) { 34 if (flow->backlog == 0) {
34 list_del_init(&flow->backlogchain); 35 list_del_init(&flow->backlogchain);
@@ -154,6 +155,7 @@ static void fq_tin_enqueue(struct fq *fq,
154 flow->backlog += skb->len; 155 flow->backlog += skb->len;
155 tin->backlog_bytes += skb->len; 156 tin->backlog_bytes += skb->len;
156 tin->backlog_packets++; 157 tin->backlog_packets++;
158 fq->memory_usage += skb->truesize;
157 fq->backlog++; 159 fq->backlog++;
158 160
159 fq_recalc_backlog(fq, tin, flow); 161 fq_recalc_backlog(fq, tin, flow);
@@ -166,7 +168,7 @@ static void fq_tin_enqueue(struct fq *fq,
166 168
167 __skb_queue_tail(&flow->queue, skb); 169 __skb_queue_tail(&flow->queue, skb);
168 170
169 if (fq->backlog > fq->limit) { 171 if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) {
170 flow = list_first_entry_or_null(&fq->backlogs, 172 flow = list_first_entry_or_null(&fq->backlogs,
171 struct fq_flow, 173 struct fq_flow,
172 backlogchain); 174 backlogchain);
@@ -181,6 +183,8 @@ static void fq_tin_enqueue(struct fq *fq,
181 183
182 flow->tin->overlimit++; 184 flow->tin->overlimit++;
183 fq->overlimit++; 185 fq->overlimit++;
186 if (fq->memory_usage > fq->memory_limit)
187 fq->overmemory++;
184 } 188 }
185} 189}
186 190
@@ -251,6 +255,7 @@ static int fq_init(struct fq *fq, int flows_cnt)
251 fq->perturbation = prandom_u32(); 255 fq->perturbation = prandom_u32();
252 fq->quantum = 300; 256 fq->quantum = 300;
253 fq->limit = 8192; 257 fq->limit = 8192;
258 fq->memory_limit = 16 << 20; /* 16 MBytes */
254 259
255 fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL); 260 fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
256 if (!fq->flows) 261 if (!fq->flows)
diff --git a/include/net/gre.h b/include/net/gre.h
index 73ea256eb7d7..d25d836c129b 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -7,7 +7,15 @@
7struct gre_base_hdr { 7struct gre_base_hdr {
8 __be16 flags; 8 __be16 flags;
9 __be16 protocol; 9 __be16 protocol;
10}; 10} __packed;
11
12struct gre_full_hdr {
13 struct gre_base_hdr fixed_header;
14 __be16 csum;
15 __be16 reserved1;
16 __be32 key;
17 __be32 seq;
18} __packed;
11#define GRE_HEADER_SECTION 4 19#define GRE_HEADER_SECTION 4
12 20
13#define GREPROTO_CISCO 0 21#define GREPROTO_CISCO 0
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index b0fd9476c538..ba07b9d8ed63 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -190,6 +190,10 @@ struct ieee80211_radiotap_header {
190 * IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 190 * IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16
191 * 191 *
192 * Contains VHT information about this frame. 192 * Contains VHT information about this frame.
193 *
194 * IEEE80211_RADIOTAP_TIMESTAMP u64, u16, u8, u8 variable
195 *
196 * Contains timestamp information for this frame.
193 */ 197 */
194enum ieee80211_radiotap_type { 198enum ieee80211_radiotap_type {
195 IEEE80211_RADIOTAP_TSFT = 0, 199 IEEE80211_RADIOTAP_TSFT = 0,
@@ -214,6 +218,7 @@ enum ieee80211_radiotap_type {
214 IEEE80211_RADIOTAP_MCS = 19, 218 IEEE80211_RADIOTAP_MCS = 19,
215 IEEE80211_RADIOTAP_AMPDU_STATUS = 20, 219 IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
216 IEEE80211_RADIOTAP_VHT = 21, 220 IEEE80211_RADIOTAP_VHT = 21,
221 IEEE80211_RADIOTAP_TIMESTAMP = 22,
217 222
218 /* valid in every it_present bitmap, even vendor namespaces */ 223 /* valid in every it_present bitmap, even vendor namespaces */
219 IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29, 224 IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -321,6 +326,22 @@ enum ieee80211_radiotap_type {
321#define IEEE80211_RADIOTAP_CODING_LDPC_USER2 0x04 326#define IEEE80211_RADIOTAP_CODING_LDPC_USER2 0x04
322#define IEEE80211_RADIOTAP_CODING_LDPC_USER3 0x08 327#define IEEE80211_RADIOTAP_CODING_LDPC_USER3 0x08
323 328
329/* For IEEE80211_RADIOTAP_TIMESTAMP */
330#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK 0x000F
331#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MS 0x0000
332#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US 0x0001
333#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS 0x0003
334#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK 0x00F0
335#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU 0x0000
336#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU 0x0010
337#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU 0x0020
338#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ 0x0030
339#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN 0x00F0
340
341#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT 0x00
342#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT 0x01
343#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY 0x02
344
324/* helpers */ 345/* helpers */
325static inline int ieee80211_get_radiotap_len(unsigned char *data) 346static inline int ieee80211_get_radiotap_len(unsigned char *data)
326{ 347{
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 1c8b6820b694..515352c6280a 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -201,6 +201,7 @@ struct inet6_dev {
201 struct ipv6_devstat stats; 201 struct ipv6_devstat stats;
202 202
203 struct timer_list rs_timer; 203 struct timer_list rs_timer;
204 __s32 rs_interval; /* in jiffies */
204 __u8 rs_probes; 205 __u8 rs_probes;
205 206
206 __u8 addr_gen_mode; 207 __u8 addr_gen_mode;
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 49dcad4fe99e..197a30d221e9 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -134,8 +134,8 @@ struct inet_connection_sock {
134 } icsk_mtup; 134 } icsk_mtup;
135 u32 icsk_user_timeout; 135 u32 icsk_user_timeout;
136 136
137 u64 icsk_ca_priv[64 / sizeof(u64)]; 137 u64 icsk_ca_priv[88 / sizeof(u64)];
138#define ICSK_CA_PRIV_SIZE (8 * sizeof(u64)) 138#define ICSK_CA_PRIV_SIZE (11 * sizeof(u64))
139}; 139};
140 140
141#define ICSK_TIME_RETRANS 1 /* Retransmit timer */ 141#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
diff --git a/include/net/ip.h b/include/net/ip.h
index 9742b92dc933..bc43c0fcae12 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -219,6 +219,29 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
219} 219}
220#endif 220#endif
221 221
222#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
223{ \
224 int i, c; \
225 for_each_possible_cpu(c) { \
226 for (i = 0; stats_list[i].name; i++) \
227 buff64[i] += snmp_get_cpu_field64( \
228 mib_statistic, \
229 c, stats_list[i].entry, \
230 offset); \
231 } \
232}
233
234#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
235{ \
236 int i, c; \
237 for_each_possible_cpu(c) { \
238 for (i = 0; stats_list[i].name; i++) \
239 buff[i] += snmp_get_cpu_field( \
240 mib_statistic, \
241 c, stats_list[i].entry); \
242 } \
243}
244
222void inet_get_local_port_range(struct net *net, int *low, int *high); 245void inet_get_local_port_range(struct net *net, int *low, int *high);
223 246
224#ifdef CONFIG_SYSCTL 247#ifdef CONFIG_SYSCTL
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index d97305d0e71f..e0cd318d5103 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -64,6 +64,9 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
64} 64}
65 65
66void ip6_route_input(struct sk_buff *skb); 66void ip6_route_input(struct sk_buff *skb);
67struct dst_entry *ip6_route_input_lookup(struct net *net,
68 struct net_device *dev,
69 struct flowi6 *fl6, int flags);
67 70
68struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, 71struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
69 struct flowi6 *fl6, int flags); 72 struct flowi6 *fl6, int flags);
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 43a5a0e4524c..20ed9699fcd4 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -23,6 +23,7 @@ struct __ip6_tnl_parm {
23 __u8 proto; /* tunnel protocol */ 23 __u8 proto; /* tunnel protocol */
24 __u8 encap_limit; /* encapsulation limit for tunnel */ 24 __u8 encap_limit; /* encapsulation limit for tunnel */
25 __u8 hop_limit; /* hop limit for tunnel */ 25 __u8 hop_limit; /* hop limit for tunnel */
26 bool collect_md;
26 __be32 flowinfo; /* traffic class and flowlabel for tunnel */ 27 __be32 flowinfo; /* traffic class and flowlabel for tunnel */
27 __u32 flags; /* tunnel flags */ 28 __u32 flags; /* tunnel flags */
28 struct in6_addr laddr; /* local tunnel end-point address */ 29 struct in6_addr laddr; /* local tunnel end-point address */
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 7d4a72e75f33..b9314b48e39f 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -22,6 +22,7 @@
22#include <net/fib_rules.h> 22#include <net/fib_rules.h>
23#include <net/inetpeer.h> 23#include <net/inetpeer.h>
24#include <linux/percpu.h> 24#include <linux/percpu.h>
25#include <linux/notifier.h>
25 26
26struct fib_config { 27struct fib_config {
27 u8 fc_dst_len; 28 u8 fc_dst_len;
@@ -122,6 +123,7 @@ struct fib_info {
122#ifdef CONFIG_IP_ROUTE_MULTIPATH 123#ifdef CONFIG_IP_ROUTE_MULTIPATH
123 int fib_weight; 124 int fib_weight;
124#endif 125#endif
126 unsigned int fib_offload_cnt;
125 struct rcu_head rcu; 127 struct rcu_head rcu;
126 struct fib_nh fib_nh[0]; 128 struct fib_nh fib_nh[0];
127#define fib_dev fib_nh[0].nh_dev 129#define fib_dev fib_nh[0].nh_dev
@@ -173,6 +175,18 @@ struct fib_result_nl {
173 175
174__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); 176__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
175 177
178static inline void fib_info_offload_inc(struct fib_info *fi)
179{
180 fi->fib_offload_cnt++;
181 fi->fib_flags |= RTNH_F_OFFLOAD;
182}
183
184static inline void fib_info_offload_dec(struct fib_info *fi)
185{
186 if (--fi->fib_offload_cnt == 0)
187 fi->fib_flags &= ~RTNH_F_OFFLOAD;
188}
189
176#define FIB_RES_SADDR(net, res) \ 190#define FIB_RES_SADDR(net, res) \
177 ((FIB_RES_NH(res).nh_saddr_genid == \ 191 ((FIB_RES_NH(res).nh_saddr_genid == \
178 atomic_read(&(net)->ipv4.dev_addr_genid)) ? \ 192 atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
@@ -185,6 +199,33 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
185#define FIB_RES_PREFSRC(net, res) ((res).fi->fib_prefsrc ? : \ 199#define FIB_RES_PREFSRC(net, res) ((res).fi->fib_prefsrc ? : \
186 FIB_RES_SADDR(net, res)) 200 FIB_RES_SADDR(net, res))
187 201
202struct fib_notifier_info {
203 struct net *net;
204};
205
206struct fib_entry_notifier_info {
207 struct fib_notifier_info info; /* must be first */
208 u32 dst;
209 int dst_len;
210 struct fib_info *fi;
211 u8 tos;
212 u8 type;
213 u32 tb_id;
214 u32 nlflags;
215};
216
217enum fib_event_type {
218 FIB_EVENT_ENTRY_ADD,
219 FIB_EVENT_ENTRY_DEL,
220 FIB_EVENT_RULE_ADD,
221 FIB_EVENT_RULE_DEL,
222};
223
224int register_fib_notifier(struct notifier_block *nb);
225int unregister_fib_notifier(struct notifier_block *nb);
226int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
227 struct fib_notifier_info *info);
228
188struct fib_table { 229struct fib_table {
189 struct hlist_node tb_hlist; 230 struct hlist_node tb_hlist;
190 u32 tb_id; 231 u32 tb_id;
@@ -196,13 +237,12 @@ struct fib_table {
196 237
197int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, 238int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
198 struct fib_result *res, int fib_flags); 239 struct fib_result *res, int fib_flags);
199int fib_table_insert(struct fib_table *, struct fib_config *); 240int fib_table_insert(struct net *, struct fib_table *, struct fib_config *);
200int fib_table_delete(struct fib_table *, struct fib_config *); 241int fib_table_delete(struct net *, struct fib_table *, struct fib_config *);
201int fib_table_dump(struct fib_table *table, struct sk_buff *skb, 242int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
202 struct netlink_callback *cb); 243 struct netlink_callback *cb);
203int fib_table_flush(struct fib_table *table); 244int fib_table_flush(struct net *net, struct fib_table *table);
204struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); 245struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
205void fib_table_flush_external(struct fib_table *table);
206void fib_free_table(struct fib_table *tb); 246void fib_free_table(struct fib_table *tb);
207 247
208#ifndef CONFIG_IP_MULTIPLE_TABLES 248#ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -315,7 +355,6 @@ static inline int fib_num_tclassid_users(struct net *net)
315} 355}
316#endif 356#endif
317int fib_unmerge(struct net *net); 357int fib_unmerge(struct net *net);
318void fib_flush_external(struct net *net);
319 358
320/* Exported by fib_semantics.c */ 359/* Exported by fib_semantics.c */
321int ip_fib_check_default(__be32 gw, struct net_device *dev); 360int ip_fib_check_default(__be32 gw, struct net_device *dev);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index a5e7035fb93f..59557c07904b 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -222,6 +222,25 @@ static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
222 return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET; 222 return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
223} 223}
224 224
225static inline __be64 key32_to_tunnel_id(__be32 key)
226{
227#ifdef __BIG_ENDIAN
228 return (__force __be64)key;
229#else
230 return (__force __be64)((__force u64)key << 32);
231#endif
232}
233
234/* Returns the least-significant 32 bits of a __be64. */
235static inline __be32 tunnel_id_to_key32(__be64 tun_id)
236{
237#ifdef __BIG_ENDIAN
238 return (__force __be32)tun_id;
239#else
240 return (__force __be32)((__force u64)tun_id >> 32);
241#endif
242}
243
225#ifdef CONFIG_INET 244#ifdef CONFIG_INET
226 245
227int ip_tunnel_init(struct net_device *dev); 246int ip_tunnel_init(struct net_device *dev);
@@ -236,6 +255,8 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
236 255
237void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, 256void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
238 const struct iphdr *tnl_params, const u8 protocol); 257 const struct iphdr *tnl_params, const u8 protocol);
258void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
259 const u8 proto);
239int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); 260int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
240int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict); 261int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
241int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); 262int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
diff --git a/include/net/kcm.h b/include/net/kcm.h
index 2840b5825dcc..2a8965819db0 100644
--- a/include/net/kcm.h
+++ b/include/net/kcm.h
@@ -13,6 +13,7 @@
13 13
14#include <linux/skbuff.h> 14#include <linux/skbuff.h>
15#include <net/sock.h> 15#include <net/sock.h>
16#include <net/strparser.h>
16#include <uapi/linux/kcm.h> 17#include <uapi/linux/kcm.h>
17 18
18extern unsigned int kcm_net_id; 19extern unsigned int kcm_net_id;
@@ -21,16 +22,8 @@ extern unsigned int kcm_net_id;
21#define KCM_STATS_INCR(stat) ((stat)++) 22#define KCM_STATS_INCR(stat) ((stat)++)
22 23
23struct kcm_psock_stats { 24struct kcm_psock_stats {
24 unsigned long long rx_msgs;
25 unsigned long long rx_bytes;
26 unsigned long long tx_msgs; 25 unsigned long long tx_msgs;
27 unsigned long long tx_bytes; 26 unsigned long long tx_bytes;
28 unsigned int rx_aborts;
29 unsigned int rx_mem_fail;
30 unsigned int rx_need_more_hdr;
31 unsigned int rx_msg_too_big;
32 unsigned int rx_msg_timeouts;
33 unsigned int rx_bad_hdr_len;
34 unsigned long long reserved; 27 unsigned long long reserved;
35 unsigned long long unreserved; 28 unsigned long long unreserved;
36 unsigned int tx_aborts; 29 unsigned int tx_aborts;
@@ -64,13 +57,6 @@ struct kcm_tx_msg {
64 struct sk_buff *last_skb; 57 struct sk_buff *last_skb;
65}; 58};
66 59
67struct kcm_rx_msg {
68 int full_len;
69 int accum_len;
70 int offset;
71 int early_eaten;
72};
73
74/* Socket structure for KCM client sockets */ 60/* Socket structure for KCM client sockets */
75struct kcm_sock { 61struct kcm_sock {
76 struct sock sk; 62 struct sock sk;
@@ -87,6 +73,7 @@ struct kcm_sock {
87 struct work_struct tx_work; 73 struct work_struct tx_work;
88 struct list_head wait_psock_list; 74 struct list_head wait_psock_list;
89 struct sk_buff *seq_skb; 75 struct sk_buff *seq_skb;
76 u32 tx_stopped : 1;
90 77
91 /* Don't use bit fields here, these are set under different locks */ 78 /* Don't use bit fields here, these are set under different locks */
92 bool tx_wait; 79 bool tx_wait;
@@ -104,11 +91,11 @@ struct bpf_prog;
104/* Structure for an attached lower socket */ 91/* Structure for an attached lower socket */
105struct kcm_psock { 92struct kcm_psock {
106 struct sock *sk; 93 struct sock *sk;
94 struct strparser strp;
107 struct kcm_mux *mux; 95 struct kcm_mux *mux;
108 int index; 96 int index;
109 97
110 u32 tx_stopped : 1; 98 u32 tx_stopped : 1;
111 u32 rx_stopped : 1;
112 u32 done : 1; 99 u32 done : 1;
113 u32 unattaching : 1; 100 u32 unattaching : 1;
114 101
@@ -121,18 +108,12 @@ struct kcm_psock {
121 struct kcm_psock_stats stats; 108 struct kcm_psock_stats stats;
122 109
123 /* Receive */ 110 /* Receive */
124 struct sk_buff *rx_skb_head;
125 struct sk_buff **rx_skb_nextp;
126 struct sk_buff *ready_rx_msg;
127 struct list_head psock_ready_list; 111 struct list_head psock_ready_list;
128 struct work_struct rx_work;
129 struct delayed_work rx_delayed_work;
130 struct bpf_prog *bpf_prog; 112 struct bpf_prog *bpf_prog;
131 struct kcm_sock *rx_kcm; 113 struct kcm_sock *rx_kcm;
132 unsigned long long saved_rx_bytes; 114 unsigned long long saved_rx_bytes;
133 unsigned long long saved_rx_msgs; 115 unsigned long long saved_rx_msgs;
134 struct timer_list rx_msg_timer; 116 struct sk_buff *ready_rx_msg;
135 unsigned int rx_need_bytes;
136 117
137 /* Transmit */ 118 /* Transmit */
138 struct kcm_sock *tx_kcm; 119 struct kcm_sock *tx_kcm;
@@ -146,6 +127,7 @@ struct kcm_net {
146 struct mutex mutex; 127 struct mutex mutex;
147 struct kcm_psock_stats aggregate_psock_stats; 128 struct kcm_psock_stats aggregate_psock_stats;
148 struct kcm_mux_stats aggregate_mux_stats; 129 struct kcm_mux_stats aggregate_mux_stats;
130 struct strp_aggr_stats aggregate_strp_stats;
149 struct list_head mux_list; 131 struct list_head mux_list;
150 int count; 132 int count;
151}; 133};
@@ -163,6 +145,7 @@ struct kcm_mux {
163 145
164 struct kcm_mux_stats stats; 146 struct kcm_mux_stats stats;
165 struct kcm_psock_stats aggregate_psock_stats; 147 struct kcm_psock_stats aggregate_psock_stats;
148 struct strp_aggr_stats aggregate_strp_stats;
166 149
167 /* Receive */ 150 /* Receive */
168 spinlock_t rx_lock ____cacheline_aligned_in_smp; 151 spinlock_t rx_lock ____cacheline_aligned_in_smp;
@@ -190,14 +173,6 @@ static inline void aggregate_psock_stats(struct kcm_psock_stats *stats,
190 /* Save psock statistics in the mux when psock is being unattached. */ 173 /* Save psock statistics in the mux when psock is being unattached. */
191 174
192#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat) 175#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat)
193 SAVE_PSOCK_STATS(rx_msgs);
194 SAVE_PSOCK_STATS(rx_bytes);
195 SAVE_PSOCK_STATS(rx_aborts);
196 SAVE_PSOCK_STATS(rx_mem_fail);
197 SAVE_PSOCK_STATS(rx_need_more_hdr);
198 SAVE_PSOCK_STATS(rx_msg_too_big);
199 SAVE_PSOCK_STATS(rx_msg_timeouts);
200 SAVE_PSOCK_STATS(rx_bad_hdr_len);
201 SAVE_PSOCK_STATS(tx_msgs); 176 SAVE_PSOCK_STATS(tx_msgs);
202 SAVE_PSOCK_STATS(tx_bytes); 177 SAVE_PSOCK_STATS(tx_bytes);
203 SAVE_PSOCK_STATS(reserved); 178 SAVE_PSOCK_STATS(reserved);
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index e90095091aa0..b220dabeab45 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -11,6 +11,7 @@
11#ifndef _NET_L3MDEV_H_ 11#ifndef _NET_L3MDEV_H_
12#define _NET_L3MDEV_H_ 12#define _NET_L3MDEV_H_
13 13
14#include <net/dst.h>
14#include <net/fib_rules.h> 15#include <net/fib_rules.h>
15 16
16/** 17/**
@@ -18,30 +19,24 @@
18 * 19 *
19 * @l3mdev_fib_table: Get FIB table id to use for lookups 20 * @l3mdev_fib_table: Get FIB table id to use for lookups
20 * 21 *
21 * @l3mdev_get_rtable: Get cached IPv4 rtable (dst_entry) for device 22 * @l3mdev_l3_rcv: Hook in L3 receive path
22 * 23 *
23 * @l3mdev_get_saddr: Get source address for a flow 24 * @l3mdev_l3_out: Hook in L3 output path
24 * 25 *
25 * @l3mdev_get_rt6_dst: Get cached IPv6 rt6_info (dst_entry) for device 26 * @l3mdev_link_scope_lookup: IPv6 lookup for linklocal and mcast destinations
26 */ 27 */
27 28
28struct l3mdev_ops { 29struct l3mdev_ops {
29 u32 (*l3mdev_fib_table)(const struct net_device *dev); 30 u32 (*l3mdev_fib_table)(const struct net_device *dev);
30 struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *dev, 31 struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *dev,
31 struct sk_buff *skb, u16 proto); 32 struct sk_buff *skb, u16 proto);
32 33 struct sk_buff * (*l3mdev_l3_out)(struct net_device *dev,
33 /* IPv4 ops */ 34 struct sock *sk, struct sk_buff *skb,
34 struct rtable * (*l3mdev_get_rtable)(const struct net_device *dev, 35 u16 proto);
35 const struct flowi4 *fl4);
36 int (*l3mdev_get_saddr)(struct net_device *dev,
37 struct flowi4 *fl4);
38 36
39 /* IPv6 ops */ 37 /* IPv6 ops */
40 struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *dev, 38 struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *dev,
41 struct flowi6 *fl6); 39 struct flowi6 *fl6);
42 int (*l3mdev_get_saddr6)(struct net_device *dev,
43 const struct sock *sk,
44 struct flowi6 *fl6);
45}; 40};
46 41
47#ifdef CONFIG_NET_L3_MASTER_DEV 42#ifdef CONFIG_NET_L3_MASTER_DEV
@@ -49,6 +44,8 @@ struct l3mdev_ops {
49int l3mdev_fib_rule_match(struct net *net, struct flowi *fl, 44int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
50 struct fib_lookup_arg *arg); 45 struct fib_lookup_arg *arg);
51 46
47void l3mdev_update_flow(struct net *net, struct flowi *fl);
48
52int l3mdev_master_ifindex_rcu(const struct net_device *dev); 49int l3mdev_master_ifindex_rcu(const struct net_device *dev);
53static inline int l3mdev_master_ifindex(struct net_device *dev) 50static inline int l3mdev_master_ifindex(struct net_device *dev)
54{ 51{
@@ -80,7 +77,7 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
80} 77}
81 78
82static inline 79static inline
83const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev) 80struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
84{ 81{
85 /* netdev_master_upper_dev_get_rcu calls 82 /* netdev_master_upper_dev_get_rcu calls
86 * list_first_or_null_rcu to walk the upper dev list. 83 * list_first_or_null_rcu to walk the upper dev list.
@@ -89,7 +86,7 @@ const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
89 * typecast to remove the const 86 * typecast to remove the const
90 */ 87 */
91 struct net_device *dev = (struct net_device *)_dev; 88 struct net_device *dev = (struct net_device *)_dev;
92 const struct net_device *master; 89 struct net_device *master;
93 90
94 if (!dev) 91 if (!dev)
95 return NULL; 92 return NULL;
@@ -104,26 +101,6 @@ const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
104 return master; 101 return master;
105} 102}
106 103
107/* get index of an interface to use for FIB lookups. For devices
108 * enslaved to an L3 master device FIB lookups are based on the
109 * master index
110 */
111static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
112{
113 return l3mdev_master_ifindex_rcu(dev) ? : dev->ifindex;
114}
115
116static inline int l3mdev_fib_oif(struct net_device *dev)
117{
118 int oif;
119
120 rcu_read_lock();
121 oif = l3mdev_fib_oif_rcu(dev);
122 rcu_read_unlock();
123
124 return oif;
125}
126
127u32 l3mdev_fib_table_rcu(const struct net_device *dev); 104u32 l3mdev_fib_table_rcu(const struct net_device *dev);
128u32 l3mdev_fib_table_by_index(struct net *net, int ifindex); 105u32 l3mdev_fib_table_by_index(struct net *net, int ifindex);
129static inline u32 l3mdev_fib_table(const struct net_device *dev) 106static inline u32 l3mdev_fib_table(const struct net_device *dev)
@@ -137,39 +114,7 @@ static inline u32 l3mdev_fib_table(const struct net_device *dev)
137 return tb_id; 114 return tb_id;
138} 115}
139 116
140static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev, 117struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6);
141 const struct flowi4 *fl4)
142{
143 if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rtable)
144 return dev->l3mdev_ops->l3mdev_get_rtable(dev, fl4);
145
146 return NULL;
147}
148
149static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
150{
151 struct net_device *dev;
152 bool rc = false;
153
154 if (ifindex == 0)
155 return false;
156
157 rcu_read_lock();
158
159 dev = dev_get_by_index_rcu(net, ifindex);
160 if (dev)
161 rc = netif_is_l3_master(dev);
162
163 rcu_read_unlock();
164
165 return rc;
166}
167
168int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4);
169
170struct dst_entry *l3mdev_get_rt6_dst(struct net *net, struct flowi6 *fl6);
171int l3mdev_get_saddr6(struct net *net, const struct sock *sk,
172 struct flowi6 *fl6);
173 118
174static inline 119static inline
175struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto) 120struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
@@ -199,6 +144,34 @@ struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
199 return l3mdev_l3_rcv(skb, AF_INET6); 144 return l3mdev_l3_rcv(skb, AF_INET6);
200} 145}
201 146
147static inline
148struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
149{
150 struct net_device *dev = skb_dst(skb)->dev;
151
152 if (netif_is_l3_slave(dev)) {
153 struct net_device *master;
154
155 master = netdev_master_upper_dev_get_rcu(dev);
156 if (master && master->l3mdev_ops->l3mdev_l3_out)
157 skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
158 skb, proto);
159 }
160
161 return skb;
162}
163
164static inline
165struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
166{
167 return l3mdev_l3_out(sk, skb, AF_INET);
168}
169
170static inline
171struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
172{
173 return l3mdev_l3_out(sk, skb, AF_INET6);
174}
202#else 175#else
203 176
204static inline int l3mdev_master_ifindex_rcu(const struct net_device *dev) 177static inline int l3mdev_master_ifindex_rcu(const struct net_device *dev)
@@ -216,20 +189,11 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
216} 189}
217 190
218static inline 191static inline
219const struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev) 192struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
220{ 193{
221 return NULL; 194 return NULL;
222} 195}
223 196
224static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
225{
226 return dev ? dev->ifindex : 0;
227}
228static inline int l3mdev_fib_oif(struct net_device *dev)
229{
230 return dev ? dev->ifindex : 0;
231}
232
233static inline u32 l3mdev_fib_table_rcu(const struct net_device *dev) 197static inline u32 l3mdev_fib_table_rcu(const struct net_device *dev)
234{ 198{
235 return 0; 199 return 0;
@@ -243,43 +207,32 @@ static inline u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
243 return 0; 207 return 0;
244} 208}
245 209
246static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev, 210static inline
247 const struct flowi4 *fl4) 211struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6)
248{ 212{
249 return NULL; 213 return NULL;
250} 214}
251 215
252static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
253{
254 return false;
255}
256
257static inline int l3mdev_get_saddr(struct net *net, int ifindex,
258 struct flowi4 *fl4)
259{
260 return 0;
261}
262
263static inline 216static inline
264struct dst_entry *l3mdev_get_rt6_dst(struct net *net, struct flowi6 *fl6) 217struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
265{ 218{
266 return NULL; 219 return skb;
267} 220}
268 221
269static inline int l3mdev_get_saddr6(struct net *net, const struct sock *sk, 222static inline
270 struct flowi6 *fl6) 223struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
271{ 224{
272 return 0; 225 return skb;
273} 226}
274 227
275static inline 228static inline
276struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb) 229struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
277{ 230{
278 return skb; 231 return skb;
279} 232}
280 233
281static inline 234static inline
282struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb) 235struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
283{ 236{
284 return skb; 237 return skb;
285} 238}
@@ -290,6 +243,10 @@ int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
290{ 243{
291 return 1; 244 return 1;
292} 245}
246static inline
247void l3mdev_update_flow(struct net *net, struct flowi *fl)
248{
249}
293#endif 250#endif
294 251
295#endif /* _NET_L3MDEV_H_ */ 252#endif /* _NET_L3MDEV_H_ */
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index e9f116e29c22..ea3f80f58fd6 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -13,6 +13,13 @@
13/* lw tunnel state flags */ 13/* lw tunnel state flags */
14#define LWTUNNEL_STATE_OUTPUT_REDIRECT BIT(0) 14#define LWTUNNEL_STATE_OUTPUT_REDIRECT BIT(0)
15#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1) 15#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1)
16#define LWTUNNEL_STATE_XMIT_REDIRECT BIT(2)
17
18enum {
19 LWTUNNEL_XMIT_DONE,
20 LWTUNNEL_XMIT_CONTINUE,
21};
22
16 23
17struct lwtunnel_state { 24struct lwtunnel_state {
18 __u16 type; 25 __u16 type;
@@ -21,6 +28,7 @@ struct lwtunnel_state {
21 int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb); 28 int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
22 int (*orig_input)(struct sk_buff *); 29 int (*orig_input)(struct sk_buff *);
23 int len; 30 int len;
31 __u16 headroom;
24 __u8 data[0]; 32 __u8 data[0];
25}; 33};
26 34
@@ -34,6 +42,7 @@ struct lwtunnel_encap_ops {
34 struct lwtunnel_state *lwtstate); 42 struct lwtunnel_state *lwtstate);
35 int (*get_encap_size)(struct lwtunnel_state *lwtstate); 43 int (*get_encap_size)(struct lwtunnel_state *lwtstate);
36 int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b); 44 int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
45 int (*xmit)(struct sk_buff *skb);
37}; 46};
38 47
39#ifdef CONFIG_LWTUNNEL 48#ifdef CONFIG_LWTUNNEL
@@ -75,6 +84,24 @@ static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
75 84
76 return false; 85 return false;
77} 86}
87
88static inline bool lwtunnel_xmit_redirect(struct lwtunnel_state *lwtstate)
89{
90 if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_XMIT_REDIRECT))
91 return true;
92
93 return false;
94}
95
96static inline unsigned int lwtunnel_headroom(struct lwtunnel_state *lwtstate,
97 unsigned int mtu)
98{
99 if (lwtunnel_xmit_redirect(lwtstate) && lwtstate->headroom < mtu)
100 return lwtstate->headroom;
101
102 return 0;
103}
104
78int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op, 105int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
79 unsigned int num); 106 unsigned int num);
80int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, 107int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
@@ -90,6 +117,7 @@ struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
90int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b); 117int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
91int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb); 118int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb);
92int lwtunnel_input(struct sk_buff *skb); 119int lwtunnel_input(struct sk_buff *skb);
120int lwtunnel_xmit(struct sk_buff *skb);
93 121
94#else 122#else
95 123
@@ -117,6 +145,17 @@ static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
117 return false; 145 return false;
118} 146}
119 147
148static inline bool lwtunnel_xmit_redirect(struct lwtunnel_state *lwtstate)
149{
150 return false;
151}
152
153static inline unsigned int lwtunnel_headroom(struct lwtunnel_state *lwtstate,
154 unsigned int mtu)
155{
156 return 0;
157}
158
120static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op, 159static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
121 unsigned int num) 160 unsigned int num)
122{ 161{
@@ -170,6 +209,11 @@ static inline int lwtunnel_input(struct sk_buff *skb)
170 return -EOPNOTSUPP; 209 return -EOPNOTSUPP;
171} 210}
172 211
212static inline int lwtunnel_xmit(struct sk_buff *skb)
213{
214 return -EOPNOTSUPP;
215}
216
173#endif /* CONFIG_LWTUNNEL */ 217#endif /* CONFIG_LWTUNNEL */
174 218
175#define MODULE_ALIAS_RTNL_LWT(encap_type) MODULE_ALIAS("rtnl-lwt-" __stringify(encap_type)) 219#define MODULE_ALIAS_RTNL_LWT(encap_type) MODULE_ALIAS("rtnl-lwt-" __stringify(encap_type))
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index cca510a585c3..a810dfcb83c2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -715,6 +715,7 @@ enum mac80211_tx_info_flags {
715 * frame (PS-Poll or uAPSD). 715 * frame (PS-Poll or uAPSD).
716 * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information 716 * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
717 * @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame 717 * @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
718 * @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
718 * 719 *
719 * These flags are used in tx_info->control.flags. 720 * These flags are used in tx_info->control.flags.
720 */ 721 */
@@ -723,6 +724,7 @@ enum mac80211_tx_control_flags {
723 IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1), 724 IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
724 IEEE80211_TX_CTRL_RATE_INJECT = BIT(2), 725 IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
725 IEEE80211_TX_CTRL_AMSDU = BIT(3), 726 IEEE80211_TX_CTRL_AMSDU = BIT(3),
727 IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
726}; 728};
727 729
728/* 730/*
@@ -1735,6 +1737,9 @@ struct ieee80211_sta_rates {
1735 * @supp_rates: Bitmap of supported rates (per band) 1737 * @supp_rates: Bitmap of supported rates (per band)
1736 * @ht_cap: HT capabilities of this STA; restricted to our own capabilities 1738 * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
1737 * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities 1739 * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
1740 * @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU
1741 * that this station is allowed to transmit to us.
1742 * Can be modified by driver.
1738 * @wme: indicates whether the STA supports QoS/WME (if local devices does, 1743 * @wme: indicates whether the STA supports QoS/WME (if local devices does,
1739 * otherwise always false) 1744 * otherwise always false)
1740 * @drv_priv: data area for driver use, will always be aligned to 1745 * @drv_priv: data area for driver use, will always be aligned to
@@ -1775,6 +1780,7 @@ struct ieee80211_sta {
1775 u16 aid; 1780 u16 aid;
1776 struct ieee80211_sta_ht_cap ht_cap; 1781 struct ieee80211_sta_ht_cap ht_cap;
1777 struct ieee80211_sta_vht_cap vht_cap; 1782 struct ieee80211_sta_vht_cap vht_cap;
1783 u8 max_rx_aggregation_subframes;
1778 bool wme; 1784 bool wme;
1779 u8 uapsd_queues; 1785 u8 uapsd_queues;
1780 u8 max_sp; 1786 u8 max_sp;
@@ -2014,6 +2020,11 @@ struct ieee80211_txq {
2014 * @IEEE80211_HW_TX_FRAG_LIST: Hardware (or driver) supports sending frag_list 2020 * @IEEE80211_HW_TX_FRAG_LIST: Hardware (or driver) supports sending frag_list
2015 * skbs, needed for zero-copy software A-MSDU. 2021 * skbs, needed for zero-copy software A-MSDU.
2016 * 2022 *
2023 * @IEEE80211_HW_REPORTS_LOW_ACK: The driver (or firmware) reports low ack event
2024 * by ieee80211_report_low_ack() based on its own algorithm. For such
2025 * drivers, mac80211 packet loss mechanism will not be triggered and driver
2026 * is completely depending on firmware event for station kickout.
2027 *
2017 * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays 2028 * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
2018 */ 2029 */
2019enum ieee80211_hw_flags { 2030enum ieee80211_hw_flags {
@@ -2054,6 +2065,7 @@ enum ieee80211_hw_flags {
2054 IEEE80211_HW_USES_RSS, 2065 IEEE80211_HW_USES_RSS,
2055 IEEE80211_HW_TX_AMSDU, 2066 IEEE80211_HW_TX_AMSDU,
2056 IEEE80211_HW_TX_FRAG_LIST, 2067 IEEE80211_HW_TX_FRAG_LIST,
2068 IEEE80211_HW_REPORTS_LOW_ACK,
2057 2069
2058 /* keep last, obviously */ 2070 /* keep last, obviously */
2059 NUM_IEEE80211_HW_FLAGS 2071 NUM_IEEE80211_HW_FLAGS
@@ -2141,6 +2153,14 @@ enum ieee80211_hw_flags {
2141 * the default is _GI | _BANDWIDTH. 2153 * the default is _GI | _BANDWIDTH.
2142 * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values. 2154 * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
2143 * 2155 *
2156 * @radiotap_timestamp: Information for the radiotap timestamp field; if the
2157 * 'units_pos' member is set to a non-negative value it must be set to
2158 * a combination of a IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a
2159 * IEEE80211_RADIOTAP_TIMESTAMP_SPOS_* value, and then the timestamp
2160 * field will be added and populated from the &struct ieee80211_rx_status
2161 * device_timestamp. If the 'accuracy' member is non-negative, it's put
2162 * into the accuracy radiotap field and the accuracy known flag is set.
2163 *
2144 * @netdev_features: netdev features to be set in each netdev created 2164 * @netdev_features: netdev features to be set in each netdev created
2145 * from this HW. Note that not all features are usable with mac80211, 2165 * from this HW. Note that not all features are usable with mac80211,
2146 * other features will be rejected during HW registration. 2166 * other features will be rejected during HW registration.
@@ -2159,6 +2179,8 @@ enum ieee80211_hw_flags {
2159 * @n_cipher_schemes: a size of an array of cipher schemes definitions. 2179 * @n_cipher_schemes: a size of an array of cipher schemes definitions.
2160 * @cipher_schemes: a pointer to an array of cipher scheme definitions 2180 * @cipher_schemes: a pointer to an array of cipher scheme definitions
2161 * supported by HW. 2181 * supported by HW.
2182 * @max_nan_de_entries: maximum number of NAN DE functions supported by the
2183 * device.
2162 */ 2184 */
2163struct ieee80211_hw { 2185struct ieee80211_hw {
2164 struct ieee80211_conf conf; 2186 struct ieee80211_conf conf;
@@ -2184,11 +2206,16 @@ struct ieee80211_hw {
2184 u8 offchannel_tx_hw_queue; 2206 u8 offchannel_tx_hw_queue;
2185 u8 radiotap_mcs_details; 2207 u8 radiotap_mcs_details;
2186 u16 radiotap_vht_details; 2208 u16 radiotap_vht_details;
2209 struct {
2210 int units_pos;
2211 s16 accuracy;
2212 } radiotap_timestamp;
2187 netdev_features_t netdev_features; 2213 netdev_features_t netdev_features;
2188 u8 uapsd_queues; 2214 u8 uapsd_queues;
2189 u8 uapsd_max_sp_len; 2215 u8 uapsd_max_sp_len;
2190 u8 n_cipher_schemes; 2216 u8 n_cipher_schemes;
2191 const struct ieee80211_cipher_scheme *cipher_schemes; 2217 const struct ieee80211_cipher_scheme *cipher_schemes;
2218 u8 max_nan_de_entries;
2192}; 2219};
2193 2220
2194static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw, 2221static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
@@ -3085,11 +3112,8 @@ enum ieee80211_reconfig_type {
3085 * 3112 *
3086 * @sta_add_debugfs: Drivers can use this callback to add debugfs files 3113 * @sta_add_debugfs: Drivers can use this callback to add debugfs files
3087 * when a station is added to mac80211's station list. This callback 3114 * when a station is added to mac80211's station list. This callback
3088 * and @sta_remove_debugfs should be within a CONFIG_MAC80211_DEBUGFS 3115 * should be within a CONFIG_MAC80211_DEBUGFS conditional. This
3089 * conditional. This callback can sleep. 3116 * callback can sleep.
3090 *
3091 * @sta_remove_debugfs: Remove the debugfs files which were added using
3092 * @sta_add_debugfs. This callback can sleep.
3093 * 3117 *
3094 * @sta_notify: Notifies low level driver about power state transition of an 3118 * @sta_notify: Notifies low level driver about power state transition of an
3095 * associated station, AP, IBSS/WDS/mesh peer etc. For a VIF operating 3119 * associated station, AP, IBSS/WDS/mesh peer etc. For a VIF operating
@@ -3147,6 +3171,12 @@ enum ieee80211_reconfig_type {
3147 * required function. 3171 * required function.
3148 * The callback can sleep. 3172 * The callback can sleep.
3149 * 3173 *
3174 * @offset_tsf: Offset the TSF timer by the specified value in the
3175 * firmware/hardware. Preferred to set_tsf as it avoids delay between
3176 * calling set_tsf() and hardware getting programmed, which will show up
3177 * as TSF delay. Is not a required function.
3178 * The callback can sleep.
3179 *
3150 * @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize 3180 * @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize
3151 * with other STAs in the IBSS. This is only used in IBSS mode. This 3181 * with other STAs in the IBSS. This is only used in IBSS mode. This
3152 * function is optional if the firmware/hardware takes full care of 3182 * function is optional if the firmware/hardware takes full care of
@@ -3401,6 +3431,21 @@ enum ieee80211_reconfig_type {
3401 * synchronization which is needed in case driver has in its RSS queues 3431 * synchronization which is needed in case driver has in its RSS queues
3402 * pending frames that were received prior to the control path action 3432 * pending frames that were received prior to the control path action
3403 * currently taken (e.g. disassociation) but are not processed yet. 3433 * currently taken (e.g. disassociation) but are not processed yet.
3434 *
3435 * @start_nan: join an existing NAN cluster, or create a new one.
3436 * @stop_nan: leave the NAN cluster.
3437 * @nan_change_conf: change NAN configuration. The data in cfg80211_nan_conf
3438 * contains full new configuration and changes specify which parameters
3439 * are changed with respect to the last NAN config.
3440 * The driver gets both full configuration and the changed parameters since
3441 * some devices may need the full configuration while others need only the
3442 * changed parameters.
3443 * @add_nan_func: Add a NAN function. Returns 0 on success. The data in
3444 * cfg80211_nan_func must not be referenced outside the scope of
3445 * this call.
3446 * @del_nan_func: Remove a NAN function. The driver must call
3447 * ieee80211_nan_func_terminated() with
3448 * NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST reason code upon removal.
3404 */ 3449 */
3405struct ieee80211_ops { 3450struct ieee80211_ops {
3406 void (*tx)(struct ieee80211_hw *hw, 3451 void (*tx)(struct ieee80211_hw *hw,
@@ -3485,10 +3530,6 @@ struct ieee80211_ops {
3485 struct ieee80211_vif *vif, 3530 struct ieee80211_vif *vif,
3486 struct ieee80211_sta *sta, 3531 struct ieee80211_sta *sta,
3487 struct dentry *dir); 3532 struct dentry *dir);
3488 void (*sta_remove_debugfs)(struct ieee80211_hw *hw,
3489 struct ieee80211_vif *vif,
3490 struct ieee80211_sta *sta,
3491 struct dentry *dir);
3492#endif 3533#endif
3493 void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 3534 void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3494 enum sta_notify_cmd, struct ieee80211_sta *sta); 3535 enum sta_notify_cmd, struct ieee80211_sta *sta);
@@ -3516,6 +3557,8 @@ struct ieee80211_ops {
3516 u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); 3557 u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
3517 void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 3558 void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3518 u64 tsf); 3559 u64 tsf);
3560 void (*offset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3561 s64 offset);
3519 void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); 3562 void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
3520 int (*tx_last_beacon)(struct ieee80211_hw *hw); 3563 int (*tx_last_beacon)(struct ieee80211_hw *hw);
3521 int (*ampdu_action)(struct ieee80211_hw *hw, 3564 int (*ampdu_action)(struct ieee80211_hw *hw,
@@ -3640,6 +3683,21 @@ struct ieee80211_ops {
3640 void (*wake_tx_queue)(struct ieee80211_hw *hw, 3683 void (*wake_tx_queue)(struct ieee80211_hw *hw,
3641 struct ieee80211_txq *txq); 3684 struct ieee80211_txq *txq);
3642 void (*sync_rx_queues)(struct ieee80211_hw *hw); 3685 void (*sync_rx_queues)(struct ieee80211_hw *hw);
3686
3687 int (*start_nan)(struct ieee80211_hw *hw,
3688 struct ieee80211_vif *vif,
3689 struct cfg80211_nan_conf *conf);
3690 int (*stop_nan)(struct ieee80211_hw *hw,
3691 struct ieee80211_vif *vif);
3692 int (*nan_change_conf)(struct ieee80211_hw *hw,
3693 struct ieee80211_vif *vif,
3694 struct cfg80211_nan_conf *conf, u32 changes);
3695 int (*add_nan_func)(struct ieee80211_hw *hw,
3696 struct ieee80211_vif *vif,
3697 const struct cfg80211_nan_func *nan_func);
3698 void (*del_nan_func)(struct ieee80211_hw *hw,
3699 struct ieee80211_vif *vif,
3700 u8 instance_id);
3643}; 3701};
3644 3702
3645/** 3703/**
@@ -5713,4 +5771,36 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
5713void ieee80211_txq_get_depth(struct ieee80211_txq *txq, 5771void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
5714 unsigned long *frame_cnt, 5772 unsigned long *frame_cnt,
5715 unsigned long *byte_cnt); 5773 unsigned long *byte_cnt);
5774
5775/**
5776 * ieee80211_nan_func_terminated - notify about NAN function termination.
5777 *
5778 * This function is used to notify mac80211 about NAN function termination.
5779 * Note that this function can't be called from hard irq.
5780 *
5781 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
5782 * @inst_id: the local instance id
5783 * @reason: termination reason (one of the NL80211_NAN_FUNC_TERM_REASON_*)
5784 * @gfp: allocation flags
5785 */
5786void ieee80211_nan_func_terminated(struct ieee80211_vif *vif,
5787 u8 inst_id,
5788 enum nl80211_nan_func_term_reason reason,
5789 gfp_t gfp);
5790
5791/**
5792 * ieee80211_nan_func_match - notify about NAN function match event.
5793 *
5794 * This function is used to notify mac80211 about NAN function match. The
5795 * cookie inside the match struct will be assigned by mac80211.
5796 * Note that this function can't be called from hard irq.
5797 *
5798 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
5799 * @match: match event information
5800 * @gfp: allocation flags
5801 */
5802void ieee80211_nan_func_match(struct ieee80211_vif *vif,
5803 struct cfg80211_nan_match_params *match,
5804 gfp_t gfp);
5805
5716#endif /* MAC80211_H */ 5806#endif /* MAC80211_H */
diff --git a/include/net/mpls.h b/include/net/mpls.h
index 5b3b5addfb08..1dbc669b770e 100644
--- a/include/net/mpls.h
+++ b/include/net/mpls.h
@@ -19,21 +19,18 @@
19 19
20#define MPLS_HLEN 4 20#define MPLS_HLEN 4
21 21
22struct mpls_shim_hdr {
23 __be32 label_stack_entry;
24};
25
22static inline bool eth_p_mpls(__be16 eth_type) 26static inline bool eth_p_mpls(__be16 eth_type)
23{ 27{
24 return eth_type == htons(ETH_P_MPLS_UC) || 28 return eth_type == htons(ETH_P_MPLS_UC) ||
25 eth_type == htons(ETH_P_MPLS_MC); 29 eth_type == htons(ETH_P_MPLS_MC);
26} 30}
27 31
28/* 32static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
29 * For non-MPLS skbs this will correspond to the network header.
30 * For MPLS skbs it will be before the network_header as the MPLS
31 * label stack lies between the end of the mac header and the network
32 * header. That is, for MPLS skbs the end of the mac header
33 * is the top of the MPLS label stack.
34 */
35static inline unsigned char *skb_mpls_header(struct sk_buff *skb)
36{ 33{
37 return skb_mac_header(skb) + skb->mac_len; 34 return (struct mpls_shim_hdr *)skb_network_header(skb);
38} 35}
39#endif 36#endif
diff --git a/include/net/ncsi.h b/include/net/ncsi.h
index 1dbf42f79750..68680baac0fd 100644
--- a/include/net/ncsi.h
+++ b/include/net/ncsi.h
@@ -31,6 +31,7 @@ struct ncsi_dev {
31struct ncsi_dev *ncsi_register_dev(struct net_device *dev, 31struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
32 void (*notifier)(struct ncsi_dev *nd)); 32 void (*notifier)(struct ncsi_dev *nd));
33int ncsi_start_dev(struct ncsi_dev *nd); 33int ncsi_start_dev(struct ncsi_dev *nd);
34void ncsi_stop_dev(struct ncsi_dev *nd);
34void ncsi_unregister_dev(struct ncsi_dev *nd); 35void ncsi_unregister_dev(struct ncsi_dev *nd);
35#else /* !CONFIG_NET_NCSI */ 36#else /* !CONFIG_NET_NCSI */
36static inline struct ncsi_dev *ncsi_register_dev(struct net_device *dev, 37static inline struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
@@ -44,6 +45,10 @@ static inline int ncsi_start_dev(struct ncsi_dev *nd)
44 return -ENOTTY; 45 return -ENOTTY;
45} 46}
46 47
48static void ncsi_stop_dev(struct ncsi_dev *nd)
49{
50}
51
47static inline void ncsi_unregister_dev(struct ncsi_dev *nd) 52static inline void ncsi_unregister_dev(struct ncsi_dev *nd)
48{ 53{
49} 54}
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index e8d1448425a7..0b0c35c37125 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -15,6 +15,12 @@ static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
15 15
16void nf_bridge_update_protocol(struct sk_buff *skb); 16void nf_bridge_update_protocol(struct sk_buff *skb);
17 17
18int br_nf_hook_thresh(unsigned int hook, struct net *net, struct sock *sk,
19 struct sk_buff *skb, struct net_device *indev,
20 struct net_device *outdev,
21 int (*okfn)(struct net *, struct sock *,
22 struct sk_buff *));
23
18static inline struct nf_bridge_info * 24static inline struct nf_bridge_info *
19nf_bridge_info_get(const struct sk_buff *skb) 25nf_bridge_info_get(const struct sk_buff *skb)
20{ 26{
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 445b019c2078..50418052a520 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -42,7 +42,6 @@ union nf_conntrack_expect_proto {
42 42
43#include <linux/types.h> 43#include <linux/types.h>
44#include <linux/skbuff.h> 44#include <linux/skbuff.h>
45#include <linux/timer.h>
46 45
47#ifdef CONFIG_NETFILTER_DEBUG 46#ifdef CONFIG_NETFILTER_DEBUG
48#define NF_CT_ASSERT(x) WARN_ON(!(x)) 47#define NF_CT_ASSERT(x) WARN_ON(!(x))
@@ -73,7 +72,7 @@ struct nf_conn_help {
73#include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 72#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
74 73
75struct nf_conn { 74struct nf_conn {
76 /* Usage count in here is 1 for hash table/destruct timer, 1 per skb, 75 /* Usage count in here is 1 for hash table, 1 per skb,
77 * plus 1 for any connection(s) we are `master' for 76 * plus 1 for any connection(s) we are `master' for
78 * 77 *
79 * Hint, SKB address this struct and refcnt via skb->nfct and 78 * Hint, SKB address this struct and refcnt via skb->nfct and
@@ -96,8 +95,8 @@ struct nf_conn {
96 /* Have we seen traffic both ways yet? (bitset) */ 95 /* Have we seen traffic both ways yet? (bitset) */
97 unsigned long status; 96 unsigned long status;
98 97
99 /* Timer function; drops refcnt when it goes off. */ 98 /* jiffies32 when this ct is considered dead */
100 struct timer_list timeout; 99 u32 timeout;
101 100
102 possible_net_t ct_net; 101 possible_net_t ct_net;
103 102
@@ -220,21 +219,14 @@ static inline void nf_ct_refresh(struct nf_conn *ct,
220 __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0); 219 __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
221} 220}
222 221
223bool __nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
224 const struct sk_buff *skb, int do_acct);
225
226/* kill conntrack and do accounting */ 222/* kill conntrack and do accounting */
227static inline bool nf_ct_kill_acct(struct nf_conn *ct, 223bool nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
228 enum ip_conntrack_info ctinfo, 224 const struct sk_buff *skb);
229 const struct sk_buff *skb)
230{
231 return __nf_ct_kill_acct(ct, ctinfo, skb, 1);
232}
233 225
234/* kill conntrack without accounting */ 226/* kill conntrack without accounting */
235static inline bool nf_ct_kill(struct nf_conn *ct) 227static inline bool nf_ct_kill(struct nf_conn *ct)
236{ 228{
237 return __nf_ct_kill_acct(ct, 0, NULL, 0); 229 return nf_ct_delete(ct, 0, 0);
238} 230}
239 231
240/* These are for NAT. Icky. */ 232/* These are for NAT. Icky. */
@@ -291,21 +283,55 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
291 return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK; 283 return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
292} 284}
293 285
286#define nfct_time_stamp ((u32)(jiffies))
287
294/* jiffies until ct expires, 0 if already expired */ 288/* jiffies until ct expires, 0 if already expired */
295static inline unsigned long nf_ct_expires(const struct nf_conn *ct) 289static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
296{ 290{
297 long timeout = (long)ct->timeout.expires - (long)jiffies; 291 s32 timeout = ct->timeout - nfct_time_stamp;
298 292
299 return timeout > 0 ? timeout : 0; 293 return timeout > 0 ? timeout : 0;
300} 294}
301 295
296static inline bool nf_ct_is_expired(const struct nf_conn *ct)
297{
298 return (__s32)(ct->timeout - nfct_time_stamp) <= 0;
299}
300
301/* use after obtaining a reference count */
302static inline bool nf_ct_should_gc(const struct nf_conn *ct)
303{
304 return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
305 !nf_ct_is_dying(ct);
306}
307
302struct kernel_param; 308struct kernel_param;
303 309
304int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); 310int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
305int nf_conntrack_hash_resize(unsigned int hashsize); 311int nf_conntrack_hash_resize(unsigned int hashsize);
312
313extern struct hlist_nulls_head *nf_conntrack_hash;
306extern unsigned int nf_conntrack_htable_size; 314extern unsigned int nf_conntrack_htable_size;
315extern seqcount_t nf_conntrack_generation;
307extern unsigned int nf_conntrack_max; 316extern unsigned int nf_conntrack_max;
308 317
318/* must be called with rcu read lock held */
319static inline void
320nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
321{
322 struct hlist_nulls_head *hptr;
323 unsigned int sequence, hsz;
324
325 do {
326 sequence = read_seqcount_begin(&nf_conntrack_generation);
327 hsz = nf_conntrack_htable_size;
328 hptr = nf_conntrack_hash;
329 } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
330
331 *hash = hptr;
332 *hsize = hsz;
333}
334
309struct nf_conn *nf_ct_tmpl_alloc(struct net *net, 335struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
310 const struct nf_conntrack_zone *zone, 336 const struct nf_conntrack_zone *zone,
311 gfp_t flags); 337 gfp_t flags);
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 79d7ac5c9740..62e17d1319ff 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -51,8 +51,6 @@ bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
51 const struct nf_conntrack_l3proto *l3proto, 51 const struct nf_conntrack_l3proto *l3proto,
52 const struct nf_conntrack_l4proto *l4proto); 52 const struct nf_conntrack_l4proto *l4proto);
53 53
54void nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize);
55
56/* Find a connection corresponding to a tuple. */ 54/* Find a connection corresponding to a tuple. */
57struct nf_conntrack_tuple_hash * 55struct nf_conntrack_tuple_hash *
58nf_conntrack_find_get(struct net *net, 56nf_conntrack_find_get(struct net *net,
@@ -83,7 +81,6 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
83 81
84#define CONNTRACK_LOCKS 1024 82#define CONNTRACK_LOCKS 1024
85 83
86extern struct hlist_nulls_head *nf_conntrack_hash;
87extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS]; 84extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
88void nf_conntrack_lock(spinlock_t *lock); 85void nf_conntrack_lock(spinlock_t *lock);
89 86
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index fa36447371c6..12d967b58726 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -12,12 +12,19 @@
12#include <linux/netfilter/nf_conntrack_tuple_common.h> 12#include <linux/netfilter/nf_conntrack_tuple_common.h>
13#include <net/netfilter/nf_conntrack_extend.h> 13#include <net/netfilter/nf_conntrack_extend.h>
14 14
15enum nf_ct_ecache_state {
16 NFCT_ECACHE_UNKNOWN, /* destroy event not sent */
17 NFCT_ECACHE_DESTROY_FAIL, /* tried but failed to send destroy event */
18 NFCT_ECACHE_DESTROY_SENT, /* sent destroy event after failure */
19};
20
15struct nf_conntrack_ecache { 21struct nf_conntrack_ecache {
16 unsigned long cache; /* bitops want long */ 22 unsigned long cache; /* bitops want long */
17 unsigned long missed; /* missed events */ 23 unsigned long missed; /* missed events */
18 u16 ctmask; /* bitmask of ct events to be delivered */ 24 u16 ctmask; /* bitmask of ct events to be delivered */
19 u16 expmask; /* bitmask of expect events to be delivered */ 25 u16 expmask; /* bitmask of expect events to be delivered */
20 u32 portid; /* netlink portid of destroyer */ 26 u32 portid; /* netlink portid of destroyer */
27 enum nf_ct_ecache_state state; /* ecache state */
21}; 28};
22 29
23static inline struct nf_conntrack_ecache * 30static inline struct nf_conntrack_ecache *
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index cdc920b4c4c2..8992e4229da9 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -63,10 +63,6 @@ struct nf_conntrack_l3proto {
63 63
64 size_t nla_size; 64 size_t nla_size;
65 65
66#ifdef CONFIG_SYSCTL
67 const char *ctl_table_path;
68#endif /* CONFIG_SYSCTL */
69
70 /* Init l3proto pernet data */ 66 /* Init l3proto pernet data */
71 int (*init_net)(struct net *net); 67 int (*init_net)(struct net *net);
72 68
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 1a5fb36f165f..de629f1520df 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -134,14 +134,6 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
134int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto); 134int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto);
135void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto); 135void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto);
136 136
137static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn)
138{
139#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
140 kfree(pn->ctl_compat_table);
141 pn->ctl_compat_table = NULL;
142#endif
143}
144
145/* Generic netlink helpers */ 137/* Generic netlink helpers */
146int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, 138int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
147 const struct nf_conntrack_tuple *tuple); 139 const struct nf_conntrack_tuple *tuple);
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 83d855ba6af1..309cd267be4f 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -2,15 +2,10 @@
2#define _NF_LOG_H 2#define _NF_LOG_H
3 3
4#include <linux/netfilter.h> 4#include <linux/netfilter.h>
5#include <linux/netfilter/nf_log.h>
5 6
6/* those NF_LOG_* defines and struct nf_loginfo are legacy definitios that will 7/* Log tcp sequence, tcp options, ip options and uid owning local socket */
7 * disappear once iptables is replaced with pkttables. Please DO NOT use them 8#define NF_LOG_DEFAULT_MASK 0x0f
8 * for any new code! */
9#define NF_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
10#define NF_LOG_TCPOPT 0x02 /* Log TCP options */
11#define NF_LOG_IPOPT 0x04 /* Log IP options */
12#define NF_LOG_UID 0x08 /* Log UID owning local socket */
13#define NF_LOG_MASK 0x0f
14 9
15/* This flag indicates that copy_len field in nf_loginfo is set */ 10/* This flag indicates that copy_len field in nf_loginfo is set */
16#define NF_LOG_F_COPY_LEN 0x1 11#define NF_LOG_F_COPY_LEN 0x1
@@ -60,8 +55,7 @@ struct nf_logger {
60int nf_log_register(u_int8_t pf, struct nf_logger *logger); 55int nf_log_register(u_int8_t pf, struct nf_logger *logger);
61void nf_log_unregister(struct nf_logger *logger); 56void nf_log_unregister(struct nf_logger *logger);
62 57
63void nf_log_set(struct net *net, u_int8_t pf, 58int nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger);
64 const struct nf_logger *logger);
65void nf_log_unset(struct net *net, const struct nf_logger *logger); 59void nf_log_unset(struct net *net, const struct nf_logger *logger);
66 60
67int nf_log_bind_pf(struct net *net, u_int8_t pf, 61int nf_log_bind_pf(struct net *net, u_int8_t pf,
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 0dbce55437f2..2280cfe86c56 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -11,7 +11,6 @@ struct nf_queue_entry {
11 struct sk_buff *skb; 11 struct sk_buff *skb;
12 unsigned int id; 12 unsigned int id;
13 13
14 struct nf_hook_ops *elem;
15 struct nf_hook_state state; 14 struct nf_hook_state state;
16 u16 size; /* sizeof(entry) + saved route keys */ 15 u16 size; /* sizeof(entry) + saved route keys */
17 16
@@ -22,10 +21,10 @@ struct nf_queue_entry {
22 21
23/* Packet queuing */ 22/* Packet queuing */
24struct nf_queue_handler { 23struct nf_queue_handler {
25 int (*outfn)(struct nf_queue_entry *entry, 24 int (*outfn)(struct nf_queue_entry *entry,
26 unsigned int queuenum); 25 unsigned int queuenum);
27 void (*nf_hook_drop)(struct net *net, 26 void (*nf_hook_drop)(struct net *net,
28 struct nf_hook_ops *ops); 27 const struct nf_hook_entry *hooks);
29}; 28};
30 29
31void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh); 30void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
@@ -41,23 +40,19 @@ static inline void init_hashrandom(u32 *jhash_initval)
41 *jhash_initval = prandom_u32(); 40 *jhash_initval = prandom_u32();
42} 41}
43 42
44static inline u32 hash_v4(const struct sk_buff *skb, u32 jhash_initval) 43static inline u32 hash_v4(const struct iphdr *iph, u32 initval)
45{ 44{
46 const struct iphdr *iph = ip_hdr(skb);
47
48 /* packets in either direction go into same queue */ 45 /* packets in either direction go into same queue */
49 if ((__force u32)iph->saddr < (__force u32)iph->daddr) 46 if ((__force u32)iph->saddr < (__force u32)iph->daddr)
50 return jhash_3words((__force u32)iph->saddr, 47 return jhash_3words((__force u32)iph->saddr,
51 (__force u32)iph->daddr, iph->protocol, jhash_initval); 48 (__force u32)iph->daddr, iph->protocol, initval);
52 49
53 return jhash_3words((__force u32)iph->daddr, 50 return jhash_3words((__force u32)iph->daddr,
54 (__force u32)iph->saddr, iph->protocol, jhash_initval); 51 (__force u32)iph->saddr, iph->protocol, initval);
55} 52}
56 53
57#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) 54static inline u32 hash_v6(const struct ipv6hdr *ip6h, u32 initval)
58static inline u32 hash_v6(const struct sk_buff *skb, u32 jhash_initval)
59{ 55{
60 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
61 u32 a, b, c; 56 u32 a, b, c;
62 57
63 if ((__force u32)ip6h->saddr.s6_addr32[3] < 58 if ((__force u32)ip6h->saddr.s6_addr32[3] <
@@ -75,20 +70,50 @@ static inline u32 hash_v6(const struct sk_buff *skb, u32 jhash_initval)
75 else 70 else
76 c = (__force u32) ip6h->daddr.s6_addr32[1]; 71 c = (__force u32) ip6h->daddr.s6_addr32[1];
77 72
78 return jhash_3words(a, b, c, jhash_initval); 73 return jhash_3words(a, b, c, initval);
74}
75
76static inline u32 hash_bridge(const struct sk_buff *skb, u32 initval)
77{
78 struct ipv6hdr *ip6h, _ip6h;
79 struct iphdr *iph, _iph;
80
81 switch (eth_hdr(skb)->h_proto) {
82 case htons(ETH_P_IP):
83 iph = skb_header_pointer(skb, skb_network_offset(skb),
84 sizeof(*iph), &_iph);
85 if (iph)
86 return hash_v4(iph, initval);
87 break;
88 case htons(ETH_P_IPV6):
89 ip6h = skb_header_pointer(skb, skb_network_offset(skb),
90 sizeof(*ip6h), &_ip6h);
91 if (ip6h)
92 return hash_v6(ip6h, initval);
93 break;
94 }
95
96 return 0;
79} 97}
80#endif
81 98
82static inline u32 99static inline u32
83nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family, 100nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
84 u32 jhash_initval) 101 u32 initval)
85{ 102{
86 if (family == NFPROTO_IPV4) 103 switch (family) {
87 queue += ((u64) hash_v4(skb, jhash_initval) * queues_total) >> 32; 104 case NFPROTO_IPV4:
88#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) 105 queue += reciprocal_scale(hash_v4(ip_hdr(skb), initval),
89 else if (family == NFPROTO_IPV6) 106 queues_total);
90 queue += ((u64) hash_v6(skb, jhash_initval) * queues_total) >> 32; 107 break;
91#endif 108 case NFPROTO_IPV6:
109 queue += reciprocal_scale(hash_v6(ipv6_hdr(skb), initval),
110 queues_total);
111 break;
112 case NFPROTO_BRIDGE:
113 queue += reciprocal_scale(hash_bridge(skb, initval),
114 queues_total);
115 break;
116 }
92 117
93 return queue; 118 return queue;
94} 119}
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index f2f13399ce44..5031e072567b 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -19,6 +19,7 @@ struct nft_pktinfo {
19 const struct net_device *out; 19 const struct net_device *out;
20 u8 pf; 20 u8 pf;
21 u8 hook; 21 u8 hook;
22 bool tprot_set;
22 u8 tprot; 23 u8 tprot;
23 /* for x_tables compatibility */ 24 /* for x_tables compatibility */
24 struct xt_action_param xt; 25 struct xt_action_param xt;
@@ -36,6 +37,23 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
36 pkt->pf = pkt->xt.family = state->pf; 37 pkt->pf = pkt->xt.family = state->pf;
37} 38}
38 39
40static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
41 struct sk_buff *skb)
42{
43 pkt->tprot_set = false;
44 pkt->tprot = 0;
45 pkt->xt.thoff = 0;
46 pkt->xt.fragoff = 0;
47}
48
49static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
50 struct sk_buff *skb,
51 const struct nf_hook_state *state)
52{
53 nft_set_pktinfo(pkt, skb, state);
54 nft_set_pktinfo_proto_unspec(pkt, skb);
55}
56
39/** 57/**
40 * struct nft_verdict - nf_tables verdict 58 * struct nft_verdict - nf_tables verdict
41 * 59 *
@@ -127,6 +145,7 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
127 return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE; 145 return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
128} 146}
129 147
148unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
130unsigned int nft_parse_register(const struct nlattr *attr); 149unsigned int nft_parse_register(const struct nlattr *attr);
131int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg); 150int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
132 151
@@ -251,7 +270,8 @@ struct nft_set_ops {
251 270
252 int (*insert)(const struct net *net, 271 int (*insert)(const struct net *net,
253 const struct nft_set *set, 272 const struct nft_set *set,
254 const struct nft_set_elem *elem); 273 const struct nft_set_elem *elem,
274 struct nft_set_ext **ext);
255 void (*activate)(const struct net *net, 275 void (*activate)(const struct net *net,
256 const struct nft_set *set, 276 const struct nft_set *set,
257 const struct nft_set_elem *elem); 277 const struct nft_set_elem *elem);
diff --git a/include/net/netfilter/nf_tables_bridge.h b/include/net/netfilter/nf_tables_bridge.h
deleted file mode 100644
index 511fb79f6dad..000000000000
--- a/include/net/netfilter/nf_tables_bridge.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef _NET_NF_TABLES_BRIDGE_H
2#define _NET_NF_TABLES_BRIDGE_H
3
4int nft_bridge_iphdr_validate(struct sk_buff *skb);
5int nft_bridge_ip6hdr_validate(struct sk_buff *skb);
6
7#endif /* _NET_NF_TABLES_BRIDGE_H */
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index a9060dd99db7..00f4f6b1b1ba 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -28,6 +28,9 @@ extern const struct nft_expr_ops nft_cmp_fast_ops;
28int nft_cmp_module_init(void); 28int nft_cmp_module_init(void);
29void nft_cmp_module_exit(void); 29void nft_cmp_module_exit(void);
30 30
31int nft_range_module_init(void);
32void nft_range_module_exit(void);
33
31int nft_lookup_module_init(void); 34int nft_lookup_module_init(void);
32void nft_lookup_module_exit(void); 35void nft_lookup_module_exit(void);
33 36
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index ca6ef6bf775e..968f00b82fb5 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -14,11 +14,54 @@ nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
14 nft_set_pktinfo(pkt, skb, state); 14 nft_set_pktinfo(pkt, skb, state);
15 15
16 ip = ip_hdr(pkt->skb); 16 ip = ip_hdr(pkt->skb);
17 pkt->tprot_set = true;
17 pkt->tprot = ip->protocol; 18 pkt->tprot = ip->protocol;
18 pkt->xt.thoff = ip_hdrlen(pkt->skb); 19 pkt->xt.thoff = ip_hdrlen(pkt->skb);
19 pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET; 20 pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
20} 21}
21 22
23static inline int
24__nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
25 struct sk_buff *skb,
26 const struct nf_hook_state *state)
27{
28 struct iphdr *iph, _iph;
29 u32 len, thoff;
30
31 iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
32 &_iph);
33 if (!iph)
34 return -1;
35
36 iph = ip_hdr(skb);
37 if (iph->ihl < 5 || iph->version != 4)
38 return -1;
39
40 len = ntohs(iph->tot_len);
41 thoff = iph->ihl * 4;
42 if (skb->len < len)
43 return -1;
44 else if (len < thoff)
45 return -1;
46
47 pkt->tprot_set = true;
48 pkt->tprot = iph->protocol;
49 pkt->xt.thoff = thoff;
50 pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
51
52 return 0;
53}
54
55static inline void
56nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
57 struct sk_buff *skb,
58 const struct nf_hook_state *state)
59{
60 nft_set_pktinfo(pkt, skb, state);
61 if (__nft_set_pktinfo_ipv4_validate(pkt, skb, state) < 0)
62 nft_set_pktinfo_proto_unspec(pkt, skb);
63}
64
22extern struct nft_af_info nft_af_ipv4; 65extern struct nft_af_info nft_af_ipv4;
23 66
24#endif 67#endif
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 8ad39a6a5fe1..d150b5066201 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -4,7 +4,7 @@
4#include <linux/netfilter_ipv6/ip6_tables.h> 4#include <linux/netfilter_ipv6/ip6_tables.h>
5#include <net/ipv6.h> 5#include <net/ipv6.h>
6 6
7static inline int 7static inline void
8nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, 8nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
9 struct sk_buff *skb, 9 struct sk_buff *skb,
10 const struct nf_hook_state *state) 10 const struct nf_hook_state *state)
@@ -15,15 +15,64 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
15 nft_set_pktinfo(pkt, skb, state); 15 nft_set_pktinfo(pkt, skb, state);
16 16
17 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); 17 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
18 /* If malformed, drop it */ 18 if (protohdr < 0) {
19 nft_set_pktinfo_proto_unspec(pkt, skb);
20 return;
21 }
22
23 pkt->tprot_set = true;
24 pkt->tprot = protohdr;
25 pkt->xt.thoff = thoff;
26 pkt->xt.fragoff = frag_off;
27}
28
29static inline int
30__nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
31 struct sk_buff *skb,
32 const struct nf_hook_state *state)
33{
34#if IS_ENABLED(CONFIG_IPV6)
35 struct ipv6hdr *ip6h, _ip6h;
36 unsigned int thoff = 0;
37 unsigned short frag_off;
38 int protohdr;
39 u32 pkt_len;
40
41 ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h),
42 &_ip6h);
43 if (!ip6h)
44 return -1;
45
46 if (ip6h->version != 6)
47 return -1;
48
49 pkt_len = ntohs(ip6h->payload_len);
50 if (pkt_len + sizeof(*ip6h) > skb->len)
51 return -1;
52
53 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
19 if (protohdr < 0) 54 if (protohdr < 0)
20 return -1; 55 return -1;
21 56
57 pkt->tprot_set = true;
22 pkt->tprot = protohdr; 58 pkt->tprot = protohdr;
23 pkt->xt.thoff = thoff; 59 pkt->xt.thoff = thoff;
24 pkt->xt.fragoff = frag_off; 60 pkt->xt.fragoff = frag_off;
25 61
26 return 0; 62 return 0;
63#else
64 return -1;
65#endif
66}
67
68static inline void
69nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
70 struct sk_buff *skb,
71 const struct nf_hook_state *state)
72{
73 nft_set_pktinfo(pkt, skb, state);
74 if (__nft_set_pktinfo_ipv6_validate(pkt, skb, state) < 0)
75 nft_set_pktinfo_proto_unspec(pkt, skb);
27} 76}
28 77
29extern struct nft_af_info nft_af_ipv6; 78extern struct nft_af_info nft_af_ipv6;
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 38b1a80517f0..e469e85de3f9 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -15,10 +15,6 @@ struct nf_proto_net {
15#ifdef CONFIG_SYSCTL 15#ifdef CONFIG_SYSCTL
16 struct ctl_table_header *ctl_table_header; 16 struct ctl_table_header *ctl_table_header;
17 struct ctl_table *ctl_table; 17 struct ctl_table *ctl_table;
18#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
19 struct ctl_table_header *ctl_compat_header;
20 struct ctl_table *ctl_compat_table;
21#endif
22#endif 18#endif
23 unsigned int users; 19 unsigned int users;
24}; 20};
@@ -58,10 +54,6 @@ struct nf_ip_net {
58 struct nf_udp_net udp; 54 struct nf_udp_net udp;
59 struct nf_icmp_net icmp; 55 struct nf_icmp_net icmp;
60 struct nf_icmp_net icmpv6; 56 struct nf_icmp_net icmpv6;
61#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
62 struct ctl_table_header *ctl_table_header;
63 struct ctl_table *ctl_table;
64#endif
65}; 57};
66 58
67struct ct_pcpu { 59struct ct_pcpu {
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index d061ffeb1e71..7adf4386ac8f 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -40,7 +40,6 @@ struct netns_ipv4 {
40#ifdef CONFIG_IP_MULTIPLE_TABLES 40#ifdef CONFIG_IP_MULTIPLE_TABLES
41 struct fib_rules_ops *rules_ops; 41 struct fib_rules_ops *rules_ops;
42 bool fib_has_custom_rules; 42 bool fib_has_custom_rules;
43 struct fib_table __rcu *fib_local;
44 struct fib_table __rcu *fib_main; 43 struct fib_table __rcu *fib_main;
45 struct fib_table __rcu *fib_default; 44 struct fib_table __rcu *fib_default;
46#endif 45#endif
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 36d723579af2..58487b1cc99a 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -16,6 +16,6 @@ struct netns_nf {
16#ifdef CONFIG_SYSCTL 16#ifdef CONFIG_SYSCTL
17 struct ctl_table_header *nf_log_dir_header; 17 struct ctl_table_header *nf_log_dir_header;
18#endif 18#endif
19 struct list_head hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; 19 struct nf_hook_entry __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
20}; 20};
21#endif 21#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 24cd3949a9a4..27bb9633c69d 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -11,7 +11,7 @@
11struct ctl_table_header; 11struct ctl_table_header;
12 12
13struct xfrm_policy_hash { 13struct xfrm_policy_hash {
14 struct hlist_head *table; 14 struct hlist_head __rcu *table;
15 unsigned int hmask; 15 unsigned int hmask;
16 u8 dbits4; 16 u8 dbits4;
17 u8 sbits4; 17 u8 sbits4;
@@ -38,14 +38,12 @@ struct netns_xfrm {
38 * mode. Also, it can be used by ah/esp icmp error handler to find 38 * mode. Also, it can be used by ah/esp icmp error handler to find
39 * offending SA. 39 * offending SA.
40 */ 40 */
41 struct hlist_head *state_bydst; 41 struct hlist_head __rcu *state_bydst;
42 struct hlist_head *state_bysrc; 42 struct hlist_head __rcu *state_bysrc;
43 struct hlist_head *state_byspi; 43 struct hlist_head __rcu *state_byspi;
44 unsigned int state_hmask; 44 unsigned int state_hmask;
45 unsigned int state_num; 45 unsigned int state_num;
46 struct work_struct state_hash_work; 46 struct work_struct state_hash_work;
47 struct hlist_head state_gc_list;
48 struct work_struct state_gc_work;
49 47
50 struct list_head policy_all; 48 struct list_head policy_all;
51 struct hlist_head *policy_byidx; 49 struct hlist_head *policy_byidx;
@@ -73,7 +71,7 @@ struct netns_xfrm {
73 struct dst_ops xfrm6_dst_ops; 71 struct dst_ops xfrm6_dst_ops;
74#endif 72#endif
75 spinlock_t xfrm_state_lock; 73 spinlock_t xfrm_state_lock;
76 rwlock_t xfrm_policy_lock; 74 spinlock_t xfrm_policy_lock;
77 struct mutex xfrm_cfg_mutex; 75 struct mutex xfrm_cfg_mutex;
78 76
79 /* flow cache part */ 77 /* flow cache part */
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index c99508d426cc..767b03a3fe67 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -69,17 +69,19 @@ struct tcf_exts {
69 int police; 69 int police;
70}; 70};
71 71
72static inline void tcf_exts_init(struct tcf_exts *exts, int action, int police) 72static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
73{ 73{
74#ifdef CONFIG_NET_CLS_ACT 74#ifdef CONFIG_NET_CLS_ACT
75 exts->type = 0; 75 exts->type = 0;
76 exts->nr_actions = 0; 76 exts->nr_actions = 0;
77 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *), 77 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
78 GFP_KERNEL); 78 GFP_KERNEL);
79 WARN_ON(!exts->actions); /* TODO: propagate the error to callers */ 79 if (!exts->actions)
80 return -ENOMEM;
80#endif 81#endif
81 exts->action = action; 82 exts->action = action;
82 exts->police = police; 83 exts->police = police;
84 return 0;
83} 85}
84 86
85/** 87/**
@@ -121,7 +123,7 @@ static inline void tcf_exts_to_list(const struct tcf_exts *exts,
121 for (i = 0; i < exts->nr_actions; i++) { 123 for (i = 0; i < exts->nr_actions; i++) {
122 struct tc_action *a = exts->actions[i]; 124 struct tc_action *a = exts->actions[i];
123 125
124 list_add(&a->list, actions); 126 list_add_tail(&a->list, actions);
125 } 127 }
126#endif 128#endif
127} 129}
@@ -484,4 +486,20 @@ struct tc_cls_matchall_offload {
484 unsigned long cookie; 486 unsigned long cookie;
485}; 487};
486 488
489enum tc_clsbpf_command {
490 TC_CLSBPF_ADD,
491 TC_CLSBPF_REPLACE,
492 TC_CLSBPF_DESTROY,
493 TC_CLSBPF_STATS,
494};
495
496struct tc_cls_bpf_offload {
497 enum tc_clsbpf_command command;
498 struct tcf_exts *exts;
499 struct bpf_prog *prog;
500 const char *name;
501 bool exts_integrated;
502 u32 gen_flags;
503};
504
487#endif 505#endif
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 7caa99b482c6..cd334c9584e9 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -90,8 +90,8 @@ int unregister_qdisc(struct Qdisc_ops *qops);
90void qdisc_get_default(char *id, size_t len); 90void qdisc_get_default(char *id, size_t len);
91int qdisc_set_default(const char *id); 91int qdisc_set_default(const char *id);
92 92
93void qdisc_list_add(struct Qdisc *q); 93void qdisc_hash_add(struct Qdisc *q);
94void qdisc_list_del(struct Qdisc *q); 94void qdisc_hash_del(struct Qdisc *q);
95struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); 95struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
96struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle); 96struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
97struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, 97struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
diff --git a/include/net/pptp.h b/include/net/pptp.h
new file mode 100644
index 000000000000..92e9f1fe2628
--- /dev/null
+++ b/include/net/pptp.h
@@ -0,0 +1,23 @@
1#ifndef _NET_PPTP_H
2#define _NET_PPTP_H
3
4#define PPP_LCP_ECHOREQ 0x09
5#define PPP_LCP_ECHOREP 0x0A
6#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
7
8#define MISSING_WINDOW 20
9#define WRAPPED(curseq, lastseq)\
10 ((((curseq) & 0xffffff00) == 0) &&\
11 (((lastseq) & 0xffffff00) == 0xffffff00))
12
13#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header))
14struct pptp_gre_header {
15 struct gre_base_hdr gre_hd;
16 __be16 payload_len;
17 __be16 call_id;
18 __be32 seq;
19 __be32 ack;
20} __packed;
21
22
23#endif
diff --git a/include/net/route.h b/include/net/route.h
index ad777d79af94..0429d47cad25 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -29,7 +29,6 @@
29#include <net/flow.h> 29#include <net/flow.h>
30#include <net/inet_sock.h> 30#include <net/inet_sock.h>
31#include <net/ip_fib.h> 31#include <net/ip_fib.h>
32#include <net/l3mdev.h>
33#include <linux/in_route.h> 32#include <linux/in_route.h>
34#include <linux/rtnetlink.h> 33#include <linux/rtnetlink.h>
35#include <linux/rcupdate.h> 34#include <linux/rcupdate.h>
@@ -285,15 +284,6 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
285 ip_route_connect_init(fl4, dst, src, tos, oif, protocol, 284 ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
286 sport, dport, sk); 285 sport, dport, sk);
287 286
288 if (!src && oif) {
289 int rc;
290
291 rc = l3mdev_get_saddr(net, oif, fl4);
292 if (rc < 0)
293 return ERR_PTR(rc);
294
295 src = fl4->saddr;
296 }
297 if (!dst || !src) { 287 if (!dst || !src) {
298 rt = __ip_route_output_key(net, fl4); 288 rt = __ip_route_output_key(net, fl4);
299 if (IS_ERR(rt)) 289 if (IS_ERR(rt))
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 909aff2db2b3..e6aa0a249672 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -36,6 +36,14 @@ struct qdisc_size_table {
36 u16 data[]; 36 u16 data[];
37}; 37};
38 38
39/* similar to sk_buff_head, but skb->prev pointer is undefined. */
40struct qdisc_skb_head {
41 struct sk_buff *head;
42 struct sk_buff *tail;
43 __u32 qlen;
44 spinlock_t lock;
45};
46
39struct Qdisc { 47struct Qdisc {
40 int (*enqueue)(struct sk_buff *skb, 48 int (*enqueue)(struct sk_buff *skb,
41 struct Qdisc *sch, 49 struct Qdisc *sch,
@@ -61,7 +69,7 @@ struct Qdisc {
61 u32 limit; 69 u32 limit;
62 const struct Qdisc_ops *ops; 70 const struct Qdisc_ops *ops;
63 struct qdisc_size_table __rcu *stab; 71 struct qdisc_size_table __rcu *stab;
64 struct list_head list; 72 struct hlist_node hash;
65 u32 handle; 73 u32 handle;
66 u32 parent; 74 u32 parent;
67 void *u32_node; 75 void *u32_node;
@@ -76,7 +84,7 @@ struct Qdisc {
76 * For performance sake on SMP, we put highly modified fields at the end 84 * For performance sake on SMP, we put highly modified fields at the end
77 */ 85 */
78 struct sk_buff *gso_skb ____cacheline_aligned_in_smp; 86 struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
79 struct sk_buff_head q; 87 struct qdisc_skb_head q;
80 struct gnet_stats_basic_packed bstats; 88 struct gnet_stats_basic_packed bstats;
81 seqcount_t running; 89 seqcount_t running;
82 struct gnet_stats_queue qstats; 90 struct gnet_stats_queue qstats;
@@ -592,7 +600,7 @@ static inline void qdisc_qstats_drop(struct Qdisc *sch)
592 600
593static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch) 601static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
594{ 602{
595 qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats)); 603 this_cpu_inc(sch->cpu_qstats->drops);
596} 604}
597 605
598static inline void qdisc_qstats_overlimit(struct Qdisc *sch) 606static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
@@ -600,10 +608,27 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
600 sch->qstats.overlimits++; 608 sch->qstats.overlimits++;
601} 609}
602 610
611static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
612{
613 qh->head = NULL;
614 qh->tail = NULL;
615 qh->qlen = 0;
616}
617
603static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, 618static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
604 struct sk_buff_head *list) 619 struct qdisc_skb_head *qh)
605{ 620{
606 __skb_queue_tail(list, skb); 621 struct sk_buff *last = qh->tail;
622
623 if (last) {
624 skb->next = NULL;
625 last->next = skb;
626 qh->tail = skb;
627 } else {
628 qh->tail = skb;
629 qh->head = skb;
630 }
631 qh->qlen++;
607 qdisc_qstats_backlog_inc(sch, skb); 632 qdisc_qstats_backlog_inc(sch, skb);
608 633
609 return NET_XMIT_SUCCESS; 634 return NET_XMIT_SUCCESS;
@@ -614,14 +639,16 @@ static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
614 return __qdisc_enqueue_tail(skb, sch, &sch->q); 639 return __qdisc_enqueue_tail(skb, sch, &sch->q);
615} 640}
616 641
617static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch, 642static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
618 struct sk_buff_head *list)
619{ 643{
620 struct sk_buff *skb = __skb_dequeue(list); 644 struct sk_buff *skb = qh->head;
621 645
622 if (likely(skb != NULL)) { 646 if (likely(skb != NULL)) {
623 qdisc_qstats_backlog_dec(sch, skb); 647 qh->head = skb->next;
624 qdisc_bstats_update(sch, skb); 648 qh->qlen--;
649 if (qh->head == NULL)
650 qh->tail = NULL;
651 skb->next = NULL;
625 } 652 }
626 653
627 return skb; 654 return skb;
@@ -629,7 +656,14 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
629 656
630static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch) 657static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
631{ 658{
632 return __qdisc_dequeue_head(sch, &sch->q); 659 struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
660
661 if (likely(skb != NULL)) {
662 qdisc_qstats_backlog_dec(sch, skb);
663 qdisc_bstats_update(sch, skb);
664 }
665
666 return skb;
633} 667}
634 668
635/* Instead of calling kfree_skb() while root qdisc lock is held, 669/* Instead of calling kfree_skb() while root qdisc lock is held,
@@ -642,10 +676,10 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
642} 676}
643 677
644static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, 678static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
645 struct sk_buff_head *list, 679 struct qdisc_skb_head *qh,
646 struct sk_buff **to_free) 680 struct sk_buff **to_free)
647{ 681{
648 struct sk_buff *skb = __skb_dequeue(list); 682 struct sk_buff *skb = __qdisc_dequeue_head(qh);
649 683
650 if (likely(skb != NULL)) { 684 if (likely(skb != NULL)) {
651 unsigned int len = qdisc_pkt_len(skb); 685 unsigned int len = qdisc_pkt_len(skb);
@@ -666,7 +700,9 @@ static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
666 700
667static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch) 701static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
668{ 702{
669 return skb_peek(&sch->q); 703 const struct qdisc_skb_head *qh = &sch->q;
704
705 return qh->head;
670} 706}
671 707
672/* generic pseudo peek method for non-work-conserving qdisc */ 708/* generic pseudo peek method for non-work-conserving qdisc */
@@ -701,15 +737,19 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
701 return skb; 737 return skb;
702} 738}
703 739
704static inline void __qdisc_reset_queue(struct sk_buff_head *list) 740static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
705{ 741{
706 /* 742 /*
707 * We do not know the backlog in bytes of this list, it 743 * We do not know the backlog in bytes of this list, it
708 * is up to the caller to correct it 744 * is up to the caller to correct it
709 */ 745 */
710 if (!skb_queue_empty(list)) { 746 ASSERT_RTNL();
711 rtnl_kfree_skbs(list->next, list->prev); 747 if (qh->qlen) {
712 __skb_queue_head_init(list); 748 rtnl_kfree_skbs(qh->head, qh->tail);
749
750 qh->head = NULL;
751 qh->tail = NULL;
752 qh->qlen = 0;
713 } 753 }
714} 754}
715 755
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 632e205ca54b..87a7f42e7639 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -83,9 +83,9 @@
83#endif 83#endif
84 84
85/* Round an int up to the next multiple of 4. */ 85/* Round an int up to the next multiple of 4. */
86#define WORD_ROUND(s) (((s)+3)&~3) 86#define SCTP_PAD4(s) (((s)+3)&~3)
87/* Truncate to the previous multiple of 4. */ 87/* Truncate to the previous multiple of 4. */
88#define WORD_TRUNC(s) ((s)&~3) 88#define SCTP_TRUNC4(s) ((s)&~3)
89 89
90/* 90/*
91 * Function declarations. 91 * Function declarations.
@@ -433,7 +433,7 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
433 if (asoc->user_frag) 433 if (asoc->user_frag)
434 frag = min_t(int, frag, asoc->user_frag); 434 frag = min_t(int, frag, asoc->user_frag);
435 435
436 frag = WORD_TRUNC(min_t(int, frag, SCTP_MAX_CHUNK_LEN)); 436 frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
437 437
438 return frag; 438 return frag;
439} 439}
@@ -462,7 +462,7 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
462for (pos.v = chunk->member;\ 462for (pos.v = chunk->member;\
463 pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\ 463 pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
464 ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\ 464 ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
465 pos.v += WORD_ROUND(ntohs(pos.p->length))) 465 pos.v += SCTP_PAD4(ntohs(pos.p->length)))
466 466
467#define sctp_walk_errors(err, chunk_hdr)\ 467#define sctp_walk_errors(err, chunk_hdr)\
468_sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length)) 468_sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
@@ -472,7 +472,7 @@ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
472 sizeof(sctp_chunkhdr_t));\ 472 sizeof(sctp_chunkhdr_t));\
473 (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\ 473 (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
474 ntohs(err->length) >= sizeof(sctp_errhdr_t); \ 474 ntohs(err->length) >= sizeof(sctp_errhdr_t); \
475 err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length)))) 475 err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
476 476
477#define sctp_walk_fwdtsn(pos, chunk)\ 477#define sctp_walk_fwdtsn(pos, chunk)\
478_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk)) 478_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk))
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index bafe2a0ab908..ca6c971dd74a 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -307,85 +307,27 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
307} 307}
308 308
309/* Compare two TSNs */ 309/* Compare two TSNs */
310#define TSN_lt(a,b) \
311 (typecheck(__u32, a) && \
312 typecheck(__u32, b) && \
313 ((__s32)((a) - (b)) < 0))
310 314
311/* RFC 1982 - Serial Number Arithmetic 315#define TSN_lte(a,b) \
312 * 316 (typecheck(__u32, a) && \
313 * 2. Comparison 317 typecheck(__u32, b) && \
314 * Then, s1 is said to be equal to s2 if and only if i1 is equal to i2, 318 ((__s32)((a) - (b)) <= 0))
315 * in all other cases, s1 is not equal to s2.
316 *
317 * s1 is said to be less than s2 if, and only if, s1 is not equal to s2,
318 * and
319 *
320 * (i1 < i2 and i2 - i1 < 2^(SERIAL_BITS - 1)) or
321 * (i1 > i2 and i1 - i2 > 2^(SERIAL_BITS - 1))
322 *
323 * s1 is said to be greater than s2 if, and only if, s1 is not equal to
324 * s2, and
325 *
326 * (i1 < i2 and i2 - i1 > 2^(SERIAL_BITS - 1)) or
327 * (i1 > i2 and i1 - i2 < 2^(SERIAL_BITS - 1))
328 */
329
330/*
331 * RFC 2960
332 * 1.6 Serial Number Arithmetic
333 *
334 * Comparisons and arithmetic on TSNs in this document SHOULD use Serial
335 * Number Arithmetic as defined in [RFC1982] where SERIAL_BITS = 32.
336 */
337
338enum {
339 TSN_SIGN_BIT = (1<<31)
340};
341
342static inline int TSN_lt(__u32 s, __u32 t)
343{
344 return ((s) - (t)) & TSN_SIGN_BIT;
345}
346
347static inline int TSN_lte(__u32 s, __u32 t)
348{
349 return ((s) == (t)) || (((s) - (t)) & TSN_SIGN_BIT);
350}
351 319
352/* Compare two SSNs */ 320/* Compare two SSNs */
353 321#define SSN_lt(a,b) \
354/* 322 (typecheck(__u16, a) && \
355 * RFC 2960 323 typecheck(__u16, b) && \
356 * 1.6 Serial Number Arithmetic 324 ((__s16)((a) - (b)) < 0))
357 * 325
358 * Comparisons and arithmetic on Stream Sequence Numbers in this document 326/* ADDIP 3.1.1 */
359 * SHOULD use Serial Number Arithmetic as defined in [RFC1982] where 327#define ADDIP_SERIAL_gte(a,b) \
360 * SERIAL_BITS = 16. 328 (typecheck(__u32, a) && \
361 */ 329 typecheck(__u32, b) && \
362enum { 330 ((__s32)((b) - (a)) <= 0))
363 SSN_SIGN_BIT = (1<<15)
364};
365
366static inline int SSN_lt(__u16 s, __u16 t)
367{
368 return ((s) - (t)) & SSN_SIGN_BIT;
369}
370
371static inline int SSN_lte(__u16 s, __u16 t)
372{
373 return ((s) == (t)) || (((s) - (t)) & SSN_SIGN_BIT);
374}
375
376/*
377 * ADDIP 3.1.1
378 * The valid range of Serial Number is from 0 to 4294967295 (2**32 - 1). Serial
379 * Numbers wrap back to 0 after reaching 4294967295.
380 */
381enum {
382 ADDIP_SERIAL_SIGN_BIT = (1<<31)
383};
384
385static inline int ADDIP_SERIAL_gte(__u32 s, __u32 t)
386{
387 return ((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT);
388}
389 331
390/* Check VTAG of the packet matches the sender's own tag. */ 332/* Check VTAG of the packet matches the sender's own tag. */
391static inline int 333static inline int
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index ced0df374e60..11c3bf262a85 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -537,6 +537,7 @@ struct sctp_datamsg {
537struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *, 537struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
538 struct sctp_sndrcvinfo *, 538 struct sctp_sndrcvinfo *,
539 struct iov_iter *); 539 struct iov_iter *);
540void sctp_datamsg_free(struct sctp_datamsg *);
540void sctp_datamsg_put(struct sctp_datamsg *); 541void sctp_datamsg_put(struct sctp_datamsg *);
541void sctp_chunk_fail(struct sctp_chunk *, int error); 542void sctp_chunk_fail(struct sctp_chunk *, int error);
542int sctp_chunk_abandoned(struct sctp_chunk *); 543int sctp_chunk_abandoned(struct sctp_chunk *);
@@ -1069,7 +1070,7 @@ struct sctp_outq {
1069void sctp_outq_init(struct sctp_association *, struct sctp_outq *); 1070void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
1070void sctp_outq_teardown(struct sctp_outq *); 1071void sctp_outq_teardown(struct sctp_outq *);
1071void sctp_outq_free(struct sctp_outq*); 1072void sctp_outq_free(struct sctp_outq*);
1072int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t); 1073void sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
1073int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *); 1074int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *);
1074int sctp_outq_is_empty(const struct sctp_outq *); 1075int sctp_outq_is_empty(const struct sctp_outq *);
1075void sctp_outq_restart(struct sctp_outq *); 1076void sctp_outq_restart(struct sctp_outq *);
@@ -1077,7 +1078,7 @@ void sctp_outq_restart(struct sctp_outq *);
1077void sctp_retransmit(struct sctp_outq *, struct sctp_transport *, 1078void sctp_retransmit(struct sctp_outq *, struct sctp_transport *,
1078 sctp_retransmit_reason_t); 1079 sctp_retransmit_reason_t);
1079void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8); 1080void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
1080int sctp_outq_uncork(struct sctp_outq *, gfp_t gfp); 1081void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
1081void sctp_prsctp_prune(struct sctp_association *asoc, 1082void sctp_prsctp_prune(struct sctp_association *asoc,
1082 struct sctp_sndrcvinfo *sinfo, int msg_len); 1083 struct sctp_sndrcvinfo *sinfo, int msg_len);
1083/* Uncork and flush an outqueue. */ 1084/* Uncork and flush an outqueue. */
diff --git a/include/net/sock.h b/include/net/sock.h
index 8741988e6880..ebf75db08e06 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1020,7 +1020,6 @@ struct proto {
1020 void (*unhash)(struct sock *sk); 1020 void (*unhash)(struct sock *sk);
1021 void (*rehash)(struct sock *sk); 1021 void (*rehash)(struct sock *sk);
1022 int (*get_port)(struct sock *sk, unsigned short snum); 1022 int (*get_port)(struct sock *sk, unsigned short snum);
1023 void (*clear_sk)(struct sock *sk, int size);
1024 1023
1025 /* Keeping track of sockets in use */ 1024 /* Keeping track of sockets in use */
1026#ifdef CONFIG_PROC_FS 1025#ifdef CONFIG_PROC_FS
@@ -1114,6 +1113,16 @@ static inline bool sk_stream_is_writeable(const struct sock *sk)
1114 sk_stream_memory_free(sk); 1113 sk_stream_memory_free(sk);
1115} 1114}
1116 1115
1116static inline int sk_under_cgroup_hierarchy(struct sock *sk,
1117 struct cgroup *ancestor)
1118{
1119#ifdef CONFIG_SOCK_CGROUP_DATA
1120 return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
1121 ancestor);
1122#else
1123 return -ENOTSUPP;
1124#endif
1125}
1117 1126
1118static inline bool sk_has_memory_pressure(const struct sock *sk) 1127static inline bool sk_has_memory_pressure(const struct sock *sk)
1119{ 1128{
@@ -1232,8 +1241,6 @@ static inline int __sk_prot_rehash(struct sock *sk)
1232 return sk->sk_prot->hash(sk); 1241 return sk->sk_prot->hash(sk);
1233} 1242}
1234 1243
1235void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
1236
1237/* About 10 seconds */ 1244/* About 10 seconds */
1238#define SOCK_DESTROY_TIME (10*HZ) 1245#define SOCK_DESTROY_TIME (10*HZ)
1239 1246
diff --git a/include/net/strparser.h b/include/net/strparser.h
new file mode 100644
index 000000000000..0c28ad97c52f
--- /dev/null
+++ b/include/net/strparser.h
@@ -0,0 +1,142 @@
1/*
2 * Stream Parser
3 *
4 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 */
10
11#ifndef __NET_STRPARSER_H_
12#define __NET_STRPARSER_H_
13
14#include <linux/skbuff.h>
15#include <net/sock.h>
16
17#define STRP_STATS_ADD(stat, count) ((stat) += (count))
18#define STRP_STATS_INCR(stat) ((stat)++)
19
20struct strp_stats {
21 unsigned long long rx_msgs;
22 unsigned long long rx_bytes;
23 unsigned int rx_mem_fail;
24 unsigned int rx_need_more_hdr;
25 unsigned int rx_msg_too_big;
26 unsigned int rx_msg_timeouts;
27 unsigned int rx_bad_hdr_len;
28};
29
30struct strp_aggr_stats {
31 unsigned long long rx_msgs;
32 unsigned long long rx_bytes;
33 unsigned int rx_mem_fail;
34 unsigned int rx_need_more_hdr;
35 unsigned int rx_msg_too_big;
36 unsigned int rx_msg_timeouts;
37 unsigned int rx_bad_hdr_len;
38 unsigned int rx_aborts;
39 unsigned int rx_interrupted;
40 unsigned int rx_unrecov_intr;
41};
42
43struct strparser;
44
45/* Callbacks are called with lock held for the attached socket */
46struct strp_callbacks {
47 int (*parse_msg)(struct strparser *strp, struct sk_buff *skb);
48 void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb);
49 int (*read_sock_done)(struct strparser *strp, int err);
50 void (*abort_parser)(struct strparser *strp, int err);
51};
52
53struct strp_rx_msg {
54 int full_len;
55 int offset;
56};
57
58static inline struct strp_rx_msg *strp_rx_msg(struct sk_buff *skb)
59{
60 return (struct strp_rx_msg *)((void *)skb->cb +
61 offsetof(struct qdisc_skb_cb, data));
62}
63
64/* Structure for an attached lower socket */
65struct strparser {
66 struct sock *sk;
67
68 u32 rx_stopped : 1;
69 u32 rx_paused : 1;
70 u32 rx_aborted : 1;
71 u32 rx_interrupted : 1;
72 u32 rx_unrecov_intr : 1;
73
74 struct sk_buff **rx_skb_nextp;
75 struct timer_list rx_msg_timer;
76 struct sk_buff *rx_skb_head;
77 unsigned int rx_need_bytes;
78 struct delayed_work rx_delayed_work;
79 struct work_struct rx_work;
80 struct strp_stats stats;
81 struct strp_callbacks cb;
82};
83
84/* Must be called with lock held for attached socket */
85static inline void strp_pause(struct strparser *strp)
86{
87 strp->rx_paused = 1;
88}
89
90/* May be called without holding lock for attached socket */
91void strp_unpause(struct strparser *strp);
92
93static inline void save_strp_stats(struct strparser *strp,
94 struct strp_aggr_stats *agg_stats)
95{
96 /* Save psock statistics in the mux when psock is being unattached. */
97
98#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += \
99 strp->stats._stat)
100 SAVE_PSOCK_STATS(rx_msgs);
101 SAVE_PSOCK_STATS(rx_bytes);
102 SAVE_PSOCK_STATS(rx_mem_fail);
103 SAVE_PSOCK_STATS(rx_need_more_hdr);
104 SAVE_PSOCK_STATS(rx_msg_too_big);
105 SAVE_PSOCK_STATS(rx_msg_timeouts);
106 SAVE_PSOCK_STATS(rx_bad_hdr_len);
107#undef SAVE_PSOCK_STATS
108
109 if (strp->rx_aborted)
110 agg_stats->rx_aborts++;
111 if (strp->rx_interrupted)
112 agg_stats->rx_interrupted++;
113 if (strp->rx_unrecov_intr)
114 agg_stats->rx_unrecov_intr++;
115}
116
117static inline void aggregate_strp_stats(struct strp_aggr_stats *stats,
118 struct strp_aggr_stats *agg_stats)
119{
120#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat)
121 SAVE_PSOCK_STATS(rx_msgs);
122 SAVE_PSOCK_STATS(rx_bytes);
123 SAVE_PSOCK_STATS(rx_mem_fail);
124 SAVE_PSOCK_STATS(rx_need_more_hdr);
125 SAVE_PSOCK_STATS(rx_msg_too_big);
126 SAVE_PSOCK_STATS(rx_msg_timeouts);
127 SAVE_PSOCK_STATS(rx_bad_hdr_len);
128 SAVE_PSOCK_STATS(rx_aborts);
129 SAVE_PSOCK_STATS(rx_interrupted);
130 SAVE_PSOCK_STATS(rx_unrecov_intr);
131#undef SAVE_PSOCK_STATS
132
133}
134
135void strp_done(struct strparser *strp);
136void strp_stop(struct strparser *strp);
137void strp_check_rcv(struct strparser *strp);
138int strp_init(struct strparser *strp, struct sock *csk,
139 struct strp_callbacks *cb);
140void strp_data_ready(struct strparser *strp);
141
142#endif /* __NET_STRPARSER_H_ */
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 62f6a967a1b7..eba80c4fc56f 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -68,7 +68,6 @@ struct switchdev_attr {
68enum switchdev_obj_id { 68enum switchdev_obj_id {
69 SWITCHDEV_OBJ_ID_UNDEFINED, 69 SWITCHDEV_OBJ_ID_UNDEFINED,
70 SWITCHDEV_OBJ_ID_PORT_VLAN, 70 SWITCHDEV_OBJ_ID_PORT_VLAN,
71 SWITCHDEV_OBJ_ID_IPV4_FIB,
72 SWITCHDEV_OBJ_ID_PORT_FDB, 71 SWITCHDEV_OBJ_ID_PORT_FDB,
73 SWITCHDEV_OBJ_ID_PORT_MDB, 72 SWITCHDEV_OBJ_ID_PORT_MDB,
74}; 73};
@@ -92,21 +91,6 @@ struct switchdev_obj_port_vlan {
92#define SWITCHDEV_OBJ_PORT_VLAN(obj) \ 91#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
93 container_of(obj, struct switchdev_obj_port_vlan, obj) 92 container_of(obj, struct switchdev_obj_port_vlan, obj)
94 93
95/* SWITCHDEV_OBJ_ID_IPV4_FIB */
96struct switchdev_obj_ipv4_fib {
97 struct switchdev_obj obj;
98 u32 dst;
99 int dst_len;
100 struct fib_info *fi;
101 u8 tos;
102 u8 type;
103 u32 nlflags;
104 u32 tb_id;
105};
106
107#define SWITCHDEV_OBJ_IPV4_FIB(obj) \
108 container_of(obj, struct switchdev_obj_ipv4_fib, obj)
109
110/* SWITCHDEV_OBJ_ID_PORT_FDB */ 94/* SWITCHDEV_OBJ_ID_PORT_FDB */
111struct switchdev_obj_port_fdb { 95struct switchdev_obj_port_fdb {
112 struct switchdev_obj obj; 96 struct switchdev_obj obj;
@@ -209,11 +193,6 @@ int switchdev_port_bridge_setlink(struct net_device *dev,
209 struct nlmsghdr *nlh, u16 flags); 193 struct nlmsghdr *nlh, u16 flags);
210int switchdev_port_bridge_dellink(struct net_device *dev, 194int switchdev_port_bridge_dellink(struct net_device *dev,
211 struct nlmsghdr *nlh, u16 flags); 195 struct nlmsghdr *nlh, u16 flags);
212int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
213 u8 tos, u8 type, u32 nlflags, u32 tb_id);
214int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
215 u8 tos, u8 type, u32 tb_id);
216void switchdev_fib_ipv4_abort(struct fib_info *fi);
217int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 196int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
218 struct net_device *dev, const unsigned char *addr, 197 struct net_device *dev, const unsigned char *addr,
219 u16 vid, u16 nlm_flags); 198 u16 vid, u16 nlm_flags);
@@ -222,7 +201,7 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
222 u16 vid); 201 u16 vid);
223int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, 202int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
224 struct net_device *dev, 203 struct net_device *dev,
225 struct net_device *filter_dev, int idx); 204 struct net_device *filter_dev, int *idx);
226void switchdev_port_fwd_mark_set(struct net_device *dev, 205void switchdev_port_fwd_mark_set(struct net_device *dev,
227 struct net_device *group_dev, 206 struct net_device *group_dev,
228 bool joining); 207 bool joining);
@@ -304,25 +283,6 @@ static inline int switchdev_port_bridge_dellink(struct net_device *dev,
304 return -EOPNOTSUPP; 283 return -EOPNOTSUPP;
305} 284}
306 285
307static inline int switchdev_fib_ipv4_add(u32 dst, int dst_len,
308 struct fib_info *fi,
309 u8 tos, u8 type,
310 u32 nlflags, u32 tb_id)
311{
312 return 0;
313}
314
315static inline int switchdev_fib_ipv4_del(u32 dst, int dst_len,
316 struct fib_info *fi,
317 u8 tos, u8 type, u32 tb_id)
318{
319 return 0;
320}
321
322static inline void switchdev_fib_ipv4_abort(struct fib_info *fi)
323{
324}
325
326static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 286static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
327 struct net_device *dev, 287 struct net_device *dev,
328 const unsigned char *addr, 288 const unsigned char *addr,
@@ -342,15 +302,9 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
342 struct netlink_callback *cb, 302 struct netlink_callback *cb,
343 struct net_device *dev, 303 struct net_device *dev,
344 struct net_device *filter_dev, 304 struct net_device *filter_dev,
345 int idx) 305 int *idx)
346{
347 return idx;
348}
349
350static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
351 struct net_device *group_dev,
352 bool joining)
353{ 306{
307 return *idx;
354} 308}
355 309
356static inline bool switchdev_port_same_parent_id(struct net_device *a, 310static inline bool switchdev_port_same_parent_id(struct net_device *a,
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
index 5164bd7a38fb..9fd2bea0a6e0 100644
--- a/include/net/tc_act/tc_ife.h
+++ b/include/net/tc_act/tc_ife.h
@@ -50,9 +50,11 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
50int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp); 50int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
51int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp); 51int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
52int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi); 52int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
53int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi);
53int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi); 54int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
54int ife_validate_meta_u32(void *val, int len); 55int ife_validate_meta_u32(void *val, int len);
55int ife_validate_meta_u16(void *val, int len); 56int ife_validate_meta_u16(void *val, int len);
57int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi);
56void ife_release_meta_gen(struct tcf_meta_info *mi); 58void ife_release_meta_gen(struct tcf_meta_info *mi);
57int register_ife_op(struct tcf_meta_ops *mops); 59int register_ife_op(struct tcf_meta_ops *mops);
58int unregister_ife_op(struct tcf_meta_ops *mops); 60int unregister_ife_op(struct tcf_meta_ops *mops);
diff --git a/include/net/tc_act/tc_skbmod.h b/include/net/tc_act/tc_skbmod.h
new file mode 100644
index 000000000000..644a2116b47b
--- /dev/null
+++ b/include/net/tc_act/tc_skbmod.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (c) 2016, Jamal Hadi Salim
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8*/
9
10#ifndef __NET_TC_SKBMOD_H
11#define __NET_TC_SKBMOD_H
12
13#include <net/act_api.h>
14#include <linux/tc_act/tc_skbmod.h>
15
16struct tcf_skbmod_params {
17 struct rcu_head rcu;
18 u64 flags; /*up to 64 types of operations; extend if needed */
19 u8 eth_dst[ETH_ALEN];
20 u16 eth_type;
21 u8 eth_src[ETH_ALEN];
22};
23
24struct tcf_skbmod {
25 struct tc_action common;
26 struct tcf_skbmod_params __rcu *skbmod_p;
27};
28#define to_skbmod(a) ((struct tcf_skbmod *)a)
29
30#endif /* __NET_TC_SKBMOD_H */
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
new file mode 100644
index 000000000000..253f8da6c2a6
--- /dev/null
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
3 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#ifndef __NET_TC_TUNNEL_KEY_H
12#define __NET_TC_TUNNEL_KEY_H
13
14#include <net/act_api.h>
15
16struct tcf_tunnel_key_params {
17 struct rcu_head rcu;
18 int tcft_action;
19 int action;
20 struct metadata_dst *tcft_enc_metadata;
21};
22
23struct tcf_tunnel_key {
24 struct tc_action common;
25 struct tcf_tunnel_key_params __rcu *params;
26};
27
28#define to_tunnel_key(a) ((struct tcf_tunnel_key *)a)
29
30#endif /* __NET_TC_TUNNEL_KEY_H */
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index e29f52e8bdf1..48cca321ee6c 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -11,6 +11,7 @@
11#define __NET_TC_VLAN_H 11#define __NET_TC_VLAN_H
12 12
13#include <net/act_api.h> 13#include <net/act_api.h>
14#include <linux/tc_act/tc_vlan.h>
14 15
15#define VLAN_F_POP 0x1 16#define VLAN_F_POP 0x1
16#define VLAN_F_PUSH 0x2 17#define VLAN_F_PUSH 0x2
@@ -20,7 +21,32 @@ struct tcf_vlan {
20 int tcfv_action; 21 int tcfv_action;
21 u16 tcfv_push_vid; 22 u16 tcfv_push_vid;
22 __be16 tcfv_push_proto; 23 __be16 tcfv_push_proto;
24 u8 tcfv_push_prio;
23}; 25};
24#define to_vlan(a) ((struct tcf_vlan *)a) 26#define to_vlan(a) ((struct tcf_vlan *)a)
25 27
28static inline bool is_tcf_vlan(const struct tc_action *a)
29{
30#ifdef CONFIG_NET_CLS_ACT
31 if (a->ops && a->ops->type == TCA_ACT_VLAN)
32 return true;
33#endif
34 return false;
35}
36
37static inline u32 tcf_vlan_action(const struct tc_action *a)
38{
39 return to_vlan(a)->tcfv_action;
40}
41
42static inline u16 tcf_vlan_push_vid(const struct tc_action *a)
43{
44 return to_vlan(a)->tcfv_push_vid;
45}
46
47static inline __be16 tcf_vlan_push_proto(const struct tc_action *a)
48{
49 return to_vlan(a)->tcfv_push_proto;
50}
51
26#endif /* __NET_TC_VLAN_H */ 52#endif /* __NET_TC_VLAN_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7717302cab91..f83b7f220a65 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -227,10 +227,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
227#define TFO_SERVER_COOKIE_NOT_REQD 0x200 227#define TFO_SERVER_COOKIE_NOT_REQD 0x200
228 228
229/* Force enable TFO on all listeners, i.e., not requiring the 229/* Force enable TFO on all listeners, i.e., not requiring the
230 * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen. 230 * TCP_FASTOPEN socket option.
231 */ 231 */
232#define TFO_SERVER_WO_SOCKOPT1 0x400 232#define TFO_SERVER_WO_SOCKOPT1 0x400
233#define TFO_SERVER_WO_SOCKOPT2 0x800
234 233
235extern struct inet_timewait_death_row tcp_death_row; 234extern struct inet_timewait_death_row tcp_death_row;
236 235
@@ -534,6 +533,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
534#endif 533#endif
535/* tcp_output.c */ 534/* tcp_output.c */
536 535
536u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
537 int min_tso_segs);
537void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 538void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
538 int nonagle); 539 int nonagle);
539bool tcp_may_send_now(struct sock *sk); 540bool tcp_may_send_now(struct sock *sk);
@@ -604,8 +605,6 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
604void tcp_get_info(struct sock *, struct tcp_info *); 605void tcp_get_info(struct sock *, struct tcp_info *);
605 606
606/* Read 'sendfile()'-style from a TCP socket */ 607/* Read 'sendfile()'-style from a TCP socket */
607typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
608 unsigned int, size_t);
609int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 608int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
610 sk_read_actor_t recv_actor); 609 sk_read_actor_t recv_actor);
611 610
@@ -643,7 +642,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
643{ 642{
644 struct tcp_sock *tp = tcp_sk(sk); 643 struct tcp_sock *tp = tcp_sk(sk);
645 644
646 if (skb_queue_empty(&tp->out_of_order_queue) && 645 if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
647 tp->rcv_wnd && 646 tp->rcv_wnd &&
648 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && 647 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
649 !tp->urg_data) 648 !tp->urg_data)
@@ -674,7 +673,7 @@ static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
674/* Minimum RTT in usec. ~0 means not available. */ 673/* Minimum RTT in usec. ~0 means not available. */
675static inline u32 tcp_min_rtt(const struct tcp_sock *tp) 674static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
676{ 675{
677 return tp->rtt_min[0].rtt; 676 return minmax_get(&tp->rtt_min);
678} 677}
679 678
680/* Compute the actual receive window we are currently advertising. 679/* Compute the actual receive window we are currently advertising.
@@ -766,8 +765,16 @@ struct tcp_skb_cb {
766 __u32 ack_seq; /* Sequence number ACK'd */ 765 __u32 ack_seq; /* Sequence number ACK'd */
767 union { 766 union {
768 struct { 767 struct {
769 /* There is space for up to 20 bytes */ 768 /* There is space for up to 24 bytes */
770 __u32 in_flight;/* Bytes in flight when packet sent */ 769 __u32 in_flight:30,/* Bytes in flight at transmit */
770 is_app_limited:1, /* cwnd not fully used? */
771 unused:1;
772 /* pkts S/ACKed so far upon tx of skb, incl retrans: */
773 __u32 delivered;
774 /* start of send pipeline phase */
775 struct skb_mstamp first_tx_mstamp;
776 /* when we reached the "delivered" count */
777 struct skb_mstamp delivered_mstamp;
771 } tx; /* only used for outgoing skbs */ 778 } tx; /* only used for outgoing skbs */
772 union { 779 union {
773 struct inet_skb_parm h4; 780 struct inet_skb_parm h4;
@@ -863,6 +870,27 @@ struct ack_sample {
863 u32 in_flight; 870 u32 in_flight;
864}; 871};
865 872
873/* A rate sample measures the number of (original/retransmitted) data
874 * packets delivered "delivered" over an interval of time "interval_us".
875 * The tcp_rate.c code fills in the rate sample, and congestion
876 * control modules that define a cong_control function to run at the end
877 * of ACK processing can optionally chose to consult this sample when
878 * setting cwnd and pacing rate.
879 * A sample is invalid if "delivered" or "interval_us" is negative.
880 */
881struct rate_sample {
882 struct skb_mstamp prior_mstamp; /* starting timestamp for interval */
883 u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
884 s32 delivered; /* number of packets delivered over interval */
885 long interval_us; /* time for tp->delivered to incr "delivered" */
886 long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
887 int losses; /* number of packets marked lost upon ACK */
888 u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
889 u32 prior_in_flight; /* in flight before this ACK */
890 bool is_app_limited; /* is sample from packet with bubble in pipe? */
891 bool is_retrans; /* is sample from retransmission? */
892};
893
866struct tcp_congestion_ops { 894struct tcp_congestion_ops {
867 struct list_head list; 895 struct list_head list;
868 u32 key; 896 u32 key;
@@ -887,6 +915,14 @@ struct tcp_congestion_ops {
887 u32 (*undo_cwnd)(struct sock *sk); 915 u32 (*undo_cwnd)(struct sock *sk);
888 /* hook for packet ack accounting (optional) */ 916 /* hook for packet ack accounting (optional) */
889 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); 917 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
918 /* suggest number of segments for each skb to transmit (optional) */
919 u32 (*tso_segs_goal)(struct sock *sk);
920 /* returns the multiplier used in tcp_sndbuf_expand (optional) */
921 u32 (*sndbuf_expand)(struct sock *sk);
922 /* call when packets are delivered to update cwnd and pacing rate,
923 * after all the ca_state processing. (optional)
924 */
925 void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
890 /* get info for inet_diag (optional) */ 926 /* get info for inet_diag (optional) */
891 size_t (*get_info)(struct sock *sk, u32 ext, int *attr, 927 size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
892 union tcp_cc_info *info); 928 union tcp_cc_info *info);
@@ -949,6 +985,14 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
949 icsk->icsk_ca_ops->cwnd_event(sk, event); 985 icsk->icsk_ca_ops->cwnd_event(sk, event);
950} 986}
951 987
988/* From tcp_rate.c */
989void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
990void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
991 struct rate_sample *rs);
992void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
993 struct skb_mstamp *now, struct rate_sample *rs);
994void tcp_rate_check_app_limited(struct sock *sk);
995
952/* These functions determine how the current flow behaves in respect of SACK 996/* These functions determine how the current flow behaves in respect of SACK
953 * handling. SACK is negotiated with the peer, and therefore it can vary 997 * handling. SACK is negotiated with the peer, and therefore it can vary
954 * between different flows. 998 * between different flows.
@@ -1164,6 +1208,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
1164} 1208}
1165 1209
1166bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); 1210bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
1211bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1167 1212
1168#undef STATE_TRACE 1213#undef STATE_TRACE
1169 1214
@@ -1853,6 +1898,8 @@ static inline int tcp_inq(struct sock *sk)
1853 return answ; 1898 return answ;
1854} 1899}
1855 1900
1901int tcp_peek_len(struct socket *sock);
1902
1856static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb) 1903static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
1857{ 1904{
1858 u16 segs_in; 1905 u16 segs_in;
diff --git a/include/net/udp.h b/include/net/udp.h
index 8894d7144189..ea53a87d880f 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -251,6 +251,7 @@ int udp_get_port(struct sock *sk, unsigned short snum,
251 int (*saddr_cmp)(const struct sock *, 251 int (*saddr_cmp)(const struct sock *,
252 const struct sock *)); 252 const struct sock *));
253void udp_err(struct sk_buff *, u32); 253void udp_err(struct sk_buff *, u32);
254int udp_abort(struct sock *sk, int err);
254int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); 255int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
255int udp_push_pending_frames(struct sock *sk); 256int udp_push_pending_frames(struct sock *sk);
256void udp_flush_pending_frames(struct sock *sk); 257void udp_flush_pending_frames(struct sock *sk);
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index b96d0360c095..0255613a54a4 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -350,24 +350,6 @@ static inline __be32 vxlan_vni_field(__be32 vni)
350#endif 350#endif
351} 351}
352 352
353static inline __be32 vxlan_tun_id_to_vni(__be64 tun_id)
354{
355#if defined(__BIG_ENDIAN)
356 return (__force __be32)tun_id;
357#else
358 return (__force __be32)((__force u64)tun_id >> 32);
359#endif
360}
361
362static inline __be64 vxlan_vni_to_tun_id(__be32 vni)
363{
364#if defined(__BIG_ENDIAN)
365 return (__force __be64)vni;
366#else
367 return (__force __be64)((u64)(__force u32)vni << 32);
368#endif
369}
370
371static inline size_t vxlan_rco_start(__be32 vni_field) 353static inline size_t vxlan_rco_start(__be32 vni_field)
372{ 354{
373 return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT; 355 return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 17934312eecb..31947b9c21d6 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -187,7 +187,7 @@ struct xfrm_state {
187 struct xfrm_replay_state_esn *preplay_esn; 187 struct xfrm_replay_state_esn *preplay_esn;
188 188
189 /* The functions for replay detection. */ 189 /* The functions for replay detection. */
190 struct xfrm_replay *repl; 190 const struct xfrm_replay *repl;
191 191
192 /* internal flag that only holds state for delayed aevent at the 192 /* internal flag that only holds state for delayed aevent at the
193 * moment 193 * moment
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index b2017440b765..703a64b4681a 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -24,6 +24,7 @@ typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */
24 */ 24 */
25struct rxrpc_wire_header { 25struct rxrpc_wire_header {
26 __be32 epoch; /* client boot timestamp */ 26 __be32 epoch; /* client boot timestamp */
27#define RXRPC_RANDOM_EPOCH 0x80000000 /* Random if set, date-based if not */
27 28
28 __be32 cid; /* connection and channel ID */ 29 __be32 cid; /* connection and channel ID */
29#define RXRPC_MAXCALLS 4 /* max active calls per conn */ 30#define RXRPC_MAXCALLS 4 /* max active calls per conn */
@@ -33,8 +34,6 @@ struct rxrpc_wire_header {
33#define RXRPC_CID_INC (1 << RXRPC_CIDSHIFT) /* connection ID increment */ 34#define RXRPC_CID_INC (1 << RXRPC_CIDSHIFT) /* connection ID increment */
34 35
35 __be32 callNumber; /* call ID (0 for connection-level packets) */ 36 __be32 callNumber; /* call ID (0 for connection-level packets) */
36#define RXRPC_PROCESS_MAXCALLS (1<<2) /* maximum number of active calls per conn (power of 2) */
37
38 __be32 seq; /* sequence number of pkt in call stream */ 37 __be32 seq; /* sequence number of pkt in call stream */
39 __be32 serial; /* serial number of pkt sent to network */ 38 __be32 serial; /* serial number of pkt sent to network */
40 39
@@ -92,10 +91,14 @@ struct rxrpc_wire_header {
92struct rxrpc_jumbo_header { 91struct rxrpc_jumbo_header {
93 uint8_t flags; /* packet flags (as per rxrpc_header) */ 92 uint8_t flags; /* packet flags (as per rxrpc_header) */
94 uint8_t pad; 93 uint8_t pad;
95 __be16 _rsvd; /* reserved (used by kerberos security as cksum) */ 94 union {
95 __be16 _rsvd; /* reserved */
96 __be16 cksum; /* kerberos security checksum */
97 };
96}; 98};
97 99
98#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */ 100#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */
101#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
99 102
100/*****************************************************************************/ 103/*****************************************************************************/
101/* 104/*
@@ -120,6 +123,7 @@ struct rxrpc_ackpacket {
120#define RXRPC_ACK_PING_RESPONSE 7 /* response to RXRPC_ACK_PING */ 123#define RXRPC_ACK_PING_RESPONSE 7 /* response to RXRPC_ACK_PING */
121#define RXRPC_ACK_DELAY 8 /* nothing happened since received packet */ 124#define RXRPC_ACK_DELAY 8 /* nothing happened since received packet */
122#define RXRPC_ACK_IDLE 9 /* ACK due to fully received ACK window */ 125#define RXRPC_ACK_IDLE 9 /* ACK due to fully received ACK window */
126#define RXRPC_ACK__INVALID 10 /* Representation of invalid ACK reason */
123 127
124 uint8_t nAcks; /* number of ACKs */ 128 uint8_t nAcks; /* number of ACKs */
125#define RXRPC_MAXACKS 255 129#define RXRPC_MAXACKS 255
@@ -130,6 +134,13 @@ struct rxrpc_ackpacket {
130 134
131} __packed; 135} __packed;
132 136
137/* Some ACKs refer to specific packets and some are general and can be updated. */
138#define RXRPC_ACK_UPDATEABLE ((1 << RXRPC_ACK_REQUESTED) | \
139 (1 << RXRPC_ACK_PING_RESPONSE) | \
140 (1 << RXRPC_ACK_DELAY) | \
141 (1 << RXRPC_ACK_IDLE))
142
143
133/* 144/*
134 * ACK packets can have a further piece of information tagged on the end 145 * ACK packets can have a further piece of information tagged on the end
135 */ 146 */
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
new file mode 100644
index 000000000000..0383e5e9a0f3
--- /dev/null
+++ b/include/trace/events/rxrpc.h
@@ -0,0 +1,625 @@
1/* AF_RXRPC tracepoints
2 *
3 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#undef TRACE_SYSTEM
12#define TRACE_SYSTEM rxrpc
13
14#if !defined(_TRACE_RXRPC_H) || defined(TRACE_HEADER_MULTI_READ)
15#define _TRACE_RXRPC_H
16
17#include <linux/tracepoint.h>
18
19TRACE_EVENT(rxrpc_conn,
20 TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
21 int usage, const void *where),
22
23 TP_ARGS(conn, op, usage, where),
24
25 TP_STRUCT__entry(
26 __field(struct rxrpc_connection *, conn )
27 __field(int, op )
28 __field(int, usage )
29 __field(const void *, where )
30 ),
31
32 TP_fast_assign(
33 __entry->conn = conn;
34 __entry->op = op;
35 __entry->usage = usage;
36 __entry->where = where;
37 ),
38
39 TP_printk("C=%p %s u=%d sp=%pSR",
40 __entry->conn,
41 rxrpc_conn_traces[__entry->op],
42 __entry->usage,
43 __entry->where)
44 );
45
46TRACE_EVENT(rxrpc_client,
47 TP_PROTO(struct rxrpc_connection *conn, int channel,
48 enum rxrpc_client_trace op),
49
50 TP_ARGS(conn, channel, op),
51
52 TP_STRUCT__entry(
53 __field(struct rxrpc_connection *, conn )
54 __field(u32, cid )
55 __field(int, channel )
56 __field(int, usage )
57 __field(enum rxrpc_client_trace, op )
58 __field(enum rxrpc_conn_cache_state, cs )
59 ),
60
61 TP_fast_assign(
62 __entry->conn = conn;
63 __entry->channel = channel;
64 __entry->usage = atomic_read(&conn->usage);
65 __entry->op = op;
66 __entry->cid = conn->proto.cid;
67 __entry->cs = conn->cache_state;
68 ),
69
70 TP_printk("C=%p h=%2d %s %s i=%08x u=%d",
71 __entry->conn,
72 __entry->channel,
73 rxrpc_client_traces[__entry->op],
74 rxrpc_conn_cache_states[__entry->cs],
75 __entry->cid,
76 __entry->usage)
77 );
78
79TRACE_EVENT(rxrpc_call,
80 TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op,
81 int usage, const void *where, const void *aux),
82
83 TP_ARGS(call, op, usage, where, aux),
84
85 TP_STRUCT__entry(
86 __field(struct rxrpc_call *, call )
87 __field(int, op )
88 __field(int, usage )
89 __field(const void *, where )
90 __field(const void *, aux )
91 ),
92
93 TP_fast_assign(
94 __entry->call = call;
95 __entry->op = op;
96 __entry->usage = usage;
97 __entry->where = where;
98 __entry->aux = aux;
99 ),
100
101 TP_printk("c=%p %s u=%d sp=%pSR a=%p",
102 __entry->call,
103 rxrpc_call_traces[__entry->op],
104 __entry->usage,
105 __entry->where,
106 __entry->aux)
107 );
108
109TRACE_EVENT(rxrpc_skb,
110 TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
111 int usage, int mod_count, const void *where),
112
113 TP_ARGS(skb, op, usage, mod_count, where),
114
115 TP_STRUCT__entry(
116 __field(struct sk_buff *, skb )
117 __field(enum rxrpc_skb_trace, op )
118 __field(int, usage )
119 __field(int, mod_count )
120 __field(const void *, where )
121 ),
122
123 TP_fast_assign(
124 __entry->skb = skb;
125 __entry->op = op;
126 __entry->usage = usage;
127 __entry->mod_count = mod_count;
128 __entry->where = where;
129 ),
130
131 TP_printk("s=%p %s u=%d m=%d p=%pSR",
132 __entry->skb,
133 rxrpc_skb_traces[__entry->op],
134 __entry->usage,
135 __entry->mod_count,
136 __entry->where)
137 );
138
139TRACE_EVENT(rxrpc_rx_packet,
140 TP_PROTO(struct rxrpc_skb_priv *sp),
141
142 TP_ARGS(sp),
143
144 TP_STRUCT__entry(
145 __field_struct(struct rxrpc_host_header, hdr )
146 ),
147
148 TP_fast_assign(
149 memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr));
150 ),
151
152 TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s",
153 __entry->hdr.epoch, __entry->hdr.cid,
154 __entry->hdr.callNumber, __entry->hdr.serviceId,
155 __entry->hdr.serial, __entry->hdr.seq,
156 __entry->hdr.type, __entry->hdr.flags,
157 __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
158 );
159
160TRACE_EVENT(rxrpc_rx_done,
161 TP_PROTO(int result, int abort_code),
162
163 TP_ARGS(result, abort_code),
164
165 TP_STRUCT__entry(
166 __field(int, result )
167 __field(int, abort_code )
168 ),
169
170 TP_fast_assign(
171 __entry->result = result;
172 __entry->abort_code = abort_code;
173 ),
174
175 TP_printk("r=%d a=%d", __entry->result, __entry->abort_code)
176 );
177
178TRACE_EVENT(rxrpc_abort,
179 TP_PROTO(const char *why, u32 cid, u32 call_id, rxrpc_seq_t seq,
180 int abort_code, int error),
181
182 TP_ARGS(why, cid, call_id, seq, abort_code, error),
183
184 TP_STRUCT__entry(
185 __array(char, why, 4 )
186 __field(u32, cid )
187 __field(u32, call_id )
188 __field(rxrpc_seq_t, seq )
189 __field(int, abort_code )
190 __field(int, error )
191 ),
192
193 TP_fast_assign(
194 memcpy(__entry->why, why, 4);
195 __entry->cid = cid;
196 __entry->call_id = call_id;
197 __entry->abort_code = abort_code;
198 __entry->error = error;
199 __entry->seq = seq;
200 ),
201
202 TP_printk("%08x:%08x s=%u a=%d e=%d %s",
203 __entry->cid, __entry->call_id, __entry->seq,
204 __entry->abort_code, __entry->error, __entry->why)
205 );
206
207TRACE_EVENT(rxrpc_transmit,
208 TP_PROTO(struct rxrpc_call *call, enum rxrpc_transmit_trace why),
209
210 TP_ARGS(call, why),
211
212 TP_STRUCT__entry(
213 __field(struct rxrpc_call *, call )
214 __field(enum rxrpc_transmit_trace, why )
215 __field(rxrpc_seq_t, tx_hard_ack )
216 __field(rxrpc_seq_t, tx_top )
217 ),
218
219 TP_fast_assign(
220 __entry->call = call;
221 __entry->why = why;
222 __entry->tx_hard_ack = call->tx_hard_ack;
223 __entry->tx_top = call->tx_top;
224 ),
225
226 TP_printk("c=%p %s f=%08x n=%u",
227 __entry->call,
228 rxrpc_transmit_traces[__entry->why],
229 __entry->tx_hard_ack + 1,
230 __entry->tx_top - __entry->tx_hard_ack)
231 );
232
233TRACE_EVENT(rxrpc_rx_ack,
234 TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t first, u8 reason, u8 n_acks),
235
236 TP_ARGS(call, first, reason, n_acks),
237
238 TP_STRUCT__entry(
239 __field(struct rxrpc_call *, call )
240 __field(rxrpc_seq_t, first )
241 __field(u8, reason )
242 __field(u8, n_acks )
243 ),
244
245 TP_fast_assign(
246 __entry->call = call;
247 __entry->first = first;
248 __entry->reason = reason;
249 __entry->n_acks = n_acks;
250 ),
251
252 TP_printk("c=%p %s f=%08x n=%u",
253 __entry->call,
254 rxrpc_ack_names[__entry->reason],
255 __entry->first,
256 __entry->n_acks)
257 );
258
259TRACE_EVENT(rxrpc_tx_data,
260 TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
261 rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
262
263 TP_ARGS(call, seq, serial, flags, retrans, lose),
264
265 TP_STRUCT__entry(
266 __field(struct rxrpc_call *, call )
267 __field(rxrpc_seq_t, seq )
268 __field(rxrpc_serial_t, serial )
269 __field(u8, flags )
270 __field(bool, retrans )
271 __field(bool, lose )
272 ),
273
274 TP_fast_assign(
275 __entry->call = call;
276 __entry->seq = seq;
277 __entry->serial = serial;
278 __entry->flags = flags;
279 __entry->retrans = retrans;
280 __entry->lose = lose;
281 ),
282
283 TP_printk("c=%p DATA %08x q=%08x fl=%02x%s%s",
284 __entry->call,
285 __entry->serial,
286 __entry->seq,
287 __entry->flags,
288 __entry->retrans ? " *RETRANS*" : "",
289 __entry->lose ? " *LOSE*" : "")
290 );
291
292TRACE_EVENT(rxrpc_tx_ack,
293 TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
294 rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
295 u8 reason, u8 n_acks),
296
297 TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks),
298
299 TP_STRUCT__entry(
300 __field(struct rxrpc_call *, call )
301 __field(rxrpc_serial_t, serial )
302 __field(rxrpc_seq_t, ack_first )
303 __field(rxrpc_serial_t, ack_serial )
304 __field(u8, reason )
305 __field(u8, n_acks )
306 ),
307
308 TP_fast_assign(
309 __entry->call = call;
310 __entry->serial = serial;
311 __entry->ack_first = ack_first;
312 __entry->ack_serial = ack_serial;
313 __entry->reason = reason;
314 __entry->n_acks = n_acks;
315 ),
316
317 TP_printk(" c=%p ACK %08x %s f=%08x r=%08x n=%u",
318 __entry->call,
319 __entry->serial,
320 rxrpc_ack_names[__entry->reason],
321 __entry->ack_first,
322 __entry->ack_serial,
323 __entry->n_acks)
324 );
325
326TRACE_EVENT(rxrpc_receive,
327 TP_PROTO(struct rxrpc_call *call, enum rxrpc_receive_trace why,
328 rxrpc_serial_t serial, rxrpc_seq_t seq),
329
330 TP_ARGS(call, why, serial, seq),
331
332 TP_STRUCT__entry(
333 __field(struct rxrpc_call *, call )
334 __field(enum rxrpc_receive_trace, why )
335 __field(rxrpc_serial_t, serial )
336 __field(rxrpc_seq_t, seq )
337 __field(rxrpc_seq_t, hard_ack )
338 __field(rxrpc_seq_t, top )
339 ),
340
341 TP_fast_assign(
342 __entry->call = call;
343 __entry->why = why;
344 __entry->serial = serial;
345 __entry->seq = seq;
346 __entry->hard_ack = call->rx_hard_ack;
347 __entry->top = call->rx_top;
348 ),
349
350 TP_printk("c=%p %s r=%08x q=%08x w=%08x-%08x",
351 __entry->call,
352 rxrpc_receive_traces[__entry->why],
353 __entry->serial,
354 __entry->seq,
355 __entry->hard_ack,
356 __entry->top)
357 );
358
359TRACE_EVENT(rxrpc_recvmsg,
360 TP_PROTO(struct rxrpc_call *call, enum rxrpc_recvmsg_trace why,
361 rxrpc_seq_t seq, unsigned int offset, unsigned int len,
362 int ret),
363
364 TP_ARGS(call, why, seq, offset, len, ret),
365
366 TP_STRUCT__entry(
367 __field(struct rxrpc_call *, call )
368 __field(enum rxrpc_recvmsg_trace, why )
369 __field(rxrpc_seq_t, seq )
370 __field(unsigned int, offset )
371 __field(unsigned int, len )
372 __field(int, ret )
373 ),
374
375 TP_fast_assign(
376 __entry->call = call;
377 __entry->why = why;
378 __entry->seq = seq;
379 __entry->offset = offset;
380 __entry->len = len;
381 __entry->ret = ret;
382 ),
383
384 TP_printk("c=%p %s q=%08x o=%u l=%u ret=%d",
385 __entry->call,
386 rxrpc_recvmsg_traces[__entry->why],
387 __entry->seq,
388 __entry->offset,
389 __entry->len,
390 __entry->ret)
391 );
392
393TRACE_EVENT(rxrpc_rtt_tx,
394 TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why,
395 rxrpc_serial_t send_serial),
396
397 TP_ARGS(call, why, send_serial),
398
399 TP_STRUCT__entry(
400 __field(struct rxrpc_call *, call )
401 __field(enum rxrpc_rtt_tx_trace, why )
402 __field(rxrpc_serial_t, send_serial )
403 ),
404
405 TP_fast_assign(
406 __entry->call = call;
407 __entry->why = why;
408 __entry->send_serial = send_serial;
409 ),
410
411 TP_printk("c=%p %s sr=%08x",
412 __entry->call,
413 rxrpc_rtt_tx_traces[__entry->why],
414 __entry->send_serial)
415 );
416
417TRACE_EVENT(rxrpc_rtt_rx,
418 TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
419 rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
420 s64 rtt, u8 nr, s64 avg),
421
422 TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg),
423
424 TP_STRUCT__entry(
425 __field(struct rxrpc_call *, call )
426 __field(enum rxrpc_rtt_rx_trace, why )
427 __field(u8, nr )
428 __field(rxrpc_serial_t, send_serial )
429 __field(rxrpc_serial_t, resp_serial )
430 __field(s64, rtt )
431 __field(u64, avg )
432 ),
433
434 TP_fast_assign(
435 __entry->call = call;
436 __entry->why = why;
437 __entry->send_serial = send_serial;
438 __entry->resp_serial = resp_serial;
439 __entry->rtt = rtt;
440 __entry->nr = nr;
441 __entry->avg = avg;
442 ),
443
444 TP_printk("c=%p %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
445 __entry->call,
446 rxrpc_rtt_rx_traces[__entry->why],
447 __entry->send_serial,
448 __entry->resp_serial,
449 __entry->rtt,
450 __entry->nr,
451 __entry->avg)
452 );
453
454TRACE_EVENT(rxrpc_timer,
455 TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
456 ktime_t now, unsigned long now_j),
457
458 TP_ARGS(call, why, now, now_j),
459
460 TP_STRUCT__entry(
461 __field(struct rxrpc_call *, call )
462 __field(enum rxrpc_timer_trace, why )
463 __field_struct(ktime_t, now )
464 __field_struct(ktime_t, expire_at )
465 __field_struct(ktime_t, ack_at )
466 __field_struct(ktime_t, resend_at )
467 __field(unsigned long, now_j )
468 __field(unsigned long, timer )
469 ),
470
471 TP_fast_assign(
472 __entry->call = call;
473 __entry->why = why;
474 __entry->now = now;
475 __entry->expire_at = call->expire_at;
476 __entry->ack_at = call->ack_at;
477 __entry->resend_at = call->resend_at;
478 __entry->now_j = now_j;
479 __entry->timer = call->timer.expires;
480 ),
481
482 TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
483 __entry->call,
484 rxrpc_timer_traces[__entry->why],
485 ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
486 ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
487 ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
488 __entry->timer - __entry->now_j)
489 );
490
491TRACE_EVENT(rxrpc_rx_lose,
492 TP_PROTO(struct rxrpc_skb_priv *sp),
493
494 TP_ARGS(sp),
495
496 TP_STRUCT__entry(
497 __field_struct(struct rxrpc_host_header, hdr )
498 ),
499
500 TP_fast_assign(
501 memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr));
502 ),
503
504 TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s *LOSE*",
505 __entry->hdr.epoch, __entry->hdr.cid,
506 __entry->hdr.callNumber, __entry->hdr.serviceId,
507 __entry->hdr.serial, __entry->hdr.seq,
508 __entry->hdr.type, __entry->hdr.flags,
509 __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
510 );
511
512TRACE_EVENT(rxrpc_propose_ack,
513 TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why,
514 u8 ack_reason, rxrpc_serial_t serial, bool immediate,
515 bool background, enum rxrpc_propose_ack_outcome outcome),
516
517 TP_ARGS(call, why, ack_reason, serial, immediate, background,
518 outcome),
519
520 TP_STRUCT__entry(
521 __field(struct rxrpc_call *, call )
522 __field(enum rxrpc_propose_ack_trace, why )
523 __field(rxrpc_serial_t, serial )
524 __field(u8, ack_reason )
525 __field(bool, immediate )
526 __field(bool, background )
527 __field(enum rxrpc_propose_ack_outcome, outcome )
528 ),
529
530 TP_fast_assign(
531 __entry->call = call;
532 __entry->why = why;
533 __entry->serial = serial;
534 __entry->ack_reason = ack_reason;
535 __entry->immediate = immediate;
536 __entry->background = background;
537 __entry->outcome = outcome;
538 ),
539
540 TP_printk("c=%p %s %s r=%08x i=%u b=%u%s",
541 __entry->call,
542 rxrpc_propose_ack_traces[__entry->why],
543 rxrpc_ack_names[__entry->ack_reason],
544 __entry->serial,
545 __entry->immediate,
546 __entry->background,
547 rxrpc_propose_ack_outcomes[__entry->outcome])
548 );
549
550TRACE_EVENT(rxrpc_retransmit,
551 TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, u8 annotation,
552 s64 expiry),
553
554 TP_ARGS(call, seq, annotation, expiry),
555
556 TP_STRUCT__entry(
557 __field(struct rxrpc_call *, call )
558 __field(rxrpc_seq_t, seq )
559 __field(u8, annotation )
560 __field(s64, expiry )
561 ),
562
563 TP_fast_assign(
564 __entry->call = call;
565 __entry->seq = seq;
566 __entry->annotation = annotation;
567 __entry->expiry = expiry;
568 ),
569
570 TP_printk("c=%p q=%x a=%02x xp=%lld",
571 __entry->call,
572 __entry->seq,
573 __entry->annotation,
574 __entry->expiry)
575 );
576
577TRACE_EVENT(rxrpc_congest,
578 TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
579 rxrpc_serial_t ack_serial, enum rxrpc_congest_change change),
580
581 TP_ARGS(call, summary, ack_serial, change),
582
583 TP_STRUCT__entry(
584 __field(struct rxrpc_call *, call )
585 __field(enum rxrpc_congest_change, change )
586 __field(rxrpc_seq_t, hard_ack )
587 __field(rxrpc_seq_t, top )
588 __field(rxrpc_seq_t, lowest_nak )
589 __field(rxrpc_serial_t, ack_serial )
590 __field_struct(struct rxrpc_ack_summary, sum )
591 ),
592
593 TP_fast_assign(
594 __entry->call = call;
595 __entry->change = change;
596 __entry->hard_ack = call->tx_hard_ack;
597 __entry->top = call->tx_top;
598 __entry->lowest_nak = call->acks_lowest_nak;
599 __entry->ack_serial = ack_serial;
600 memcpy(&__entry->sum, summary, sizeof(__entry->sum));
601 ),
602
603 TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
604 __entry->call,
605 __entry->ack_serial,
606 rxrpc_ack_names[__entry->sum.ack_reason],
607 __entry->hard_ack,
608 rxrpc_congest_modes[__entry->sum.mode],
609 __entry->sum.cwnd,
610 __entry->sum.ssthresh,
611 __entry->sum.nr_acks, __entry->sum.nr_nacks,
612 __entry->sum.nr_new_acks, __entry->sum.nr_new_nacks,
613 __entry->sum.nr_rot_new_acks,
614 __entry->top - __entry->hard_ack,
615 __entry->sum.cumulative_acks,
616 __entry->sum.dup_acks,
617 __entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "",
618 rxrpc_congest_changes[__entry->change],
619 __entry->sum.retrans_timeo ? " rTxTo" : "")
620 );
621
622#endif /* _TRACE_RXRPC_H */
623
624/* This part must be outside protection */
625#include <trace/define_trace.h>
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 185f8ea2702f..d0352a971ebd 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -71,6 +71,7 @@ header-y += binfmts.h
71header-y += blkpg.h 71header-y += blkpg.h
72header-y += blktrace_api.h 72header-y += blktrace_api.h
73header-y += bpf_common.h 73header-y += bpf_common.h
74header-y += bpf_perf_event.h
74header-y += bpf.h 75header-y += bpf.h
75header-y += bpqether.h 76header-y += bpqether.h
76header-y += bsg.h 77header-y += bsg.h
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index 0fbf6fd4711b..734fe83ab645 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -23,6 +23,42 @@
23#define BATADV_NL_MCAST_GROUP_TPMETER "tpmeter" 23#define BATADV_NL_MCAST_GROUP_TPMETER "tpmeter"
24 24
25/** 25/**
26 * enum batadv_tt_client_flags - TT client specific flags
27 * @BATADV_TT_CLIENT_DEL: the client has to be deleted from the table
28 * @BATADV_TT_CLIENT_ROAM: the client roamed to/from another node and the new
29 * update telling its new real location has not been received/sent yet
30 * @BATADV_TT_CLIENT_WIFI: this client is connected through a wifi interface.
31 * This information is used by the "AP Isolation" feature
32 * @BATADV_TT_CLIENT_ISOLA: this client is considered "isolated". This
33 * information is used by the Extended Isolation feature
34 * @BATADV_TT_CLIENT_NOPURGE: this client should never be removed from the table
35 * @BATADV_TT_CLIENT_NEW: this client has been added to the local table but has
36 * not been announced yet
37 * @BATADV_TT_CLIENT_PENDING: this client is marked for removal but it is kept
38 * in the table for one more originator interval for consistency purposes
39 * @BATADV_TT_CLIENT_TEMP: this global client has been detected to be part of
40 * the network but no nnode has already announced it
41 *
42 * Bits from 0 to 7 are called _remote flags_ because they are sent on the wire.
43 * Bits from 8 to 15 are called _local flags_ because they are used for local
44 * computations only.
45 *
46 * Bits from 4 to 7 - a subset of remote flags - are ensured to be in sync with
47 * the other nodes in the network. To achieve this goal these flags are included
48 * in the TT CRC computation.
49 */
50enum batadv_tt_client_flags {
51 BATADV_TT_CLIENT_DEL = (1 << 0),
52 BATADV_TT_CLIENT_ROAM = (1 << 1),
53 BATADV_TT_CLIENT_WIFI = (1 << 4),
54 BATADV_TT_CLIENT_ISOLA = (1 << 5),
55 BATADV_TT_CLIENT_NOPURGE = (1 << 8),
56 BATADV_TT_CLIENT_NEW = (1 << 9),
57 BATADV_TT_CLIENT_PENDING = (1 << 10),
58 BATADV_TT_CLIENT_TEMP = (1 << 11),
59};
60
61/**
26 * enum batadv_nl_attrs - batman-adv netlink attributes 62 * enum batadv_nl_attrs - batman-adv netlink attributes
27 * 63 *
28 * @BATADV_ATTR_UNSPEC: unspecified attribute to catch errors 64 * @BATADV_ATTR_UNSPEC: unspecified attribute to catch errors
@@ -40,6 +76,26 @@
40 * @BATADV_ATTR_TPMETER_BYTES: amount of acked bytes during run 76 * @BATADV_ATTR_TPMETER_BYTES: amount of acked bytes during run
41 * @BATADV_ATTR_TPMETER_COOKIE: session cookie to match tp_meter session 77 * @BATADV_ATTR_TPMETER_COOKIE: session cookie to match tp_meter session
42 * @BATADV_ATTR_PAD: attribute used for padding for 64-bit alignment 78 * @BATADV_ATTR_PAD: attribute used for padding for 64-bit alignment
79 * @BATADV_ATTR_ACTIVE: Flag indicating if the hard interface is active
80 * @BATADV_ATTR_TT_ADDRESS: Client MAC address
81 * @BATADV_ATTR_TT_TTVN: Translation table version
82 * @BATADV_ATTR_TT_LAST_TTVN: Previous translation table version
83 * @BATADV_ATTR_TT_CRC32: CRC32 over translation table
84 * @BATADV_ATTR_TT_VID: VLAN ID
85 * @BATADV_ATTR_TT_FLAGS: Translation table client flags
86 * @BATADV_ATTR_FLAG_BEST: Flags indicating entry is the best
87 * @BATADV_ATTR_LAST_SEEN_MSECS: Time in milliseconds since last seen
88 * @BATADV_ATTR_NEIGH_ADDRESS: Neighbour MAC address
89 * @BATADV_ATTR_TQ: TQ to neighbour
90 * @BATADV_ATTR_THROUGHPUT: Estimated throughput to Neighbour
91 * @BATADV_ATTR_BANDWIDTH_UP: Reported uplink bandwidth
92 * @BATADV_ATTR_BANDWIDTH_DOWN: Reported downlink bandwidth
93 * @BATADV_ATTR_ROUTER: Gateway router MAC address
94 * @BATADV_ATTR_BLA_OWN: Flag indicating own originator
95 * @BATADV_ATTR_BLA_ADDRESS: Bridge loop avoidance claim MAC address
96 * @BATADV_ATTR_BLA_VID: BLA VLAN ID
97 * @BATADV_ATTR_BLA_BACKBONE: BLA gateway originator MAC address
98 * @BATADV_ATTR_BLA_CRC: BLA CRC
43 * @__BATADV_ATTR_AFTER_LAST: internal use 99 * @__BATADV_ATTR_AFTER_LAST: internal use
44 * @NUM_BATADV_ATTR: total number of batadv_nl_attrs available 100 * @NUM_BATADV_ATTR: total number of batadv_nl_attrs available
45 * @BATADV_ATTR_MAX: highest attribute number currently defined 101 * @BATADV_ATTR_MAX: highest attribute number currently defined
@@ -60,6 +116,26 @@ enum batadv_nl_attrs {
60 BATADV_ATTR_TPMETER_BYTES, 116 BATADV_ATTR_TPMETER_BYTES,
61 BATADV_ATTR_TPMETER_COOKIE, 117 BATADV_ATTR_TPMETER_COOKIE,
62 BATADV_ATTR_PAD, 118 BATADV_ATTR_PAD,
119 BATADV_ATTR_ACTIVE,
120 BATADV_ATTR_TT_ADDRESS,
121 BATADV_ATTR_TT_TTVN,
122 BATADV_ATTR_TT_LAST_TTVN,
123 BATADV_ATTR_TT_CRC32,
124 BATADV_ATTR_TT_VID,
125 BATADV_ATTR_TT_FLAGS,
126 BATADV_ATTR_FLAG_BEST,
127 BATADV_ATTR_LAST_SEEN_MSECS,
128 BATADV_ATTR_NEIGH_ADDRESS,
129 BATADV_ATTR_TQ,
130 BATADV_ATTR_THROUGHPUT,
131 BATADV_ATTR_BANDWIDTH_UP,
132 BATADV_ATTR_BANDWIDTH_DOWN,
133 BATADV_ATTR_ROUTER,
134 BATADV_ATTR_BLA_OWN,
135 BATADV_ATTR_BLA_ADDRESS,
136 BATADV_ATTR_BLA_VID,
137 BATADV_ATTR_BLA_BACKBONE,
138 BATADV_ATTR_BLA_CRC,
63 /* add attributes above here, update the policy in netlink.c */ 139 /* add attributes above here, update the policy in netlink.c */
64 __BATADV_ATTR_AFTER_LAST, 140 __BATADV_ATTR_AFTER_LAST,
65 NUM_BATADV_ATTR = __BATADV_ATTR_AFTER_LAST, 141 NUM_BATADV_ATTR = __BATADV_ATTR_AFTER_LAST,
@@ -73,6 +149,15 @@ enum batadv_nl_attrs {
73 * @BATADV_CMD_GET_MESH_INFO: Query basic information about batman-adv device 149 * @BATADV_CMD_GET_MESH_INFO: Query basic information about batman-adv device
74 * @BATADV_CMD_TP_METER: Start a tp meter session 150 * @BATADV_CMD_TP_METER: Start a tp meter session
75 * @BATADV_CMD_TP_METER_CANCEL: Cancel a tp meter session 151 * @BATADV_CMD_TP_METER_CANCEL: Cancel a tp meter session
152 * @BATADV_CMD_GET_ROUTING_ALGOS: Query the list of routing algorithms.
153 * @BATADV_CMD_GET_HARDIFS: Query list of hard interfaces
154 * @BATADV_CMD_GET_TRANSTABLE_LOCAL: Query list of local translations
155 * @BATADV_CMD_GET_TRANSTABLE_GLOBAL Query list of global translations
156 * @BATADV_CMD_GET_ORIGINATORS: Query list of originators
157 * @BATADV_CMD_GET_NEIGHBORS: Query list of neighbours
158 * @BATADV_CMD_GET_GATEWAYS: Query list of gateways
159 * @BATADV_CMD_GET_BLA_CLAIM: Query list of bridge loop avoidance claims
160 * @BATADV_CMD_GET_BLA_BACKBONE: Query list of bridge loop avoidance backbones
76 * @__BATADV_CMD_AFTER_LAST: internal use 161 * @__BATADV_CMD_AFTER_LAST: internal use
77 * @BATADV_CMD_MAX: highest used command number 162 * @BATADV_CMD_MAX: highest used command number
78 */ 163 */
@@ -81,6 +166,15 @@ enum batadv_nl_commands {
81 BATADV_CMD_GET_MESH_INFO, 166 BATADV_CMD_GET_MESH_INFO,
82 BATADV_CMD_TP_METER, 167 BATADV_CMD_TP_METER,
83 BATADV_CMD_TP_METER_CANCEL, 168 BATADV_CMD_TP_METER_CANCEL,
169 BATADV_CMD_GET_ROUTING_ALGOS,
170 BATADV_CMD_GET_HARDIFS,
171 BATADV_CMD_GET_TRANSTABLE_LOCAL,
172 BATADV_CMD_GET_TRANSTABLE_GLOBAL,
173 BATADV_CMD_GET_ORIGINATORS,
174 BATADV_CMD_GET_NEIGHBORS,
175 BATADV_CMD_GET_GATEWAYS,
176 BATADV_CMD_GET_BLA_CLAIM,
177 BATADV_CMD_GET_BLA_BACKBONE,
84 /* add new commands above here */ 178 /* add new commands above here */
85 __BATADV_CMD_AFTER_LAST, 179 __BATADV_CMD_AFTER_LAST,
86 BATADV_CMD_MAX = __BATADV_CMD_AFTER_LAST - 1 180 BATADV_CMD_MAX = __BATADV_CMD_AFTER_LAST - 1
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 9e5fc168c8a3..f09c70b97eca 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -95,6 +95,7 @@ enum bpf_prog_type {
95 BPF_PROG_TYPE_SCHED_ACT, 95 BPF_PROG_TYPE_SCHED_ACT,
96 BPF_PROG_TYPE_TRACEPOINT, 96 BPF_PROG_TYPE_TRACEPOINT,
97 BPF_PROG_TYPE_XDP, 97 BPF_PROG_TYPE_XDP,
98 BPF_PROG_TYPE_PERF_EVENT,
98}; 99};
99 100
100#define BPF_PSEUDO_MAP_FD 1 101#define BPF_PSEUDO_MAP_FD 1
@@ -375,6 +376,56 @@ enum bpf_func_id {
375 */ 376 */
376 BPF_FUNC_probe_write_user, 377 BPF_FUNC_probe_write_user,
377 378
379 /**
380 * bpf_current_task_under_cgroup(map, index) - Check cgroup2 membership of current task
381 * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
382 * @index: index of the cgroup in the bpf_map
383 * Return:
384 * == 0 current failed the cgroup2 descendant test
385 * == 1 current succeeded the cgroup2 descendant test
386 * < 0 error
387 */
388 BPF_FUNC_current_task_under_cgroup,
389
390 /**
391 * bpf_skb_change_tail(skb, len, flags)
392 * The helper will resize the skb to the given new size,
393 * to be used f.e. with control messages.
394 * @skb: pointer to skb
395 * @len: new skb length
396 * @flags: reserved
397 * Return: 0 on success or negative error
398 */
399 BPF_FUNC_skb_change_tail,
400
401 /**
402 * bpf_skb_pull_data(skb, len)
403 * The helper will pull in non-linear data in case the
404 * skb is non-linear and not all of len are part of the
405 * linear section. Only needed for read/write with direct
406 * packet access.
407 * @skb: pointer to skb
408 * @len: len to make read/writeable
409 * Return: 0 on success or negative error
410 */
411 BPF_FUNC_skb_pull_data,
412
413 /**
414 * bpf_csum_update(skb, csum)
415 * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
416 * @skb: pointer to skb
417 * @csum: csum to add
418 * Return: csum on success or negative error
419 */
420 BPF_FUNC_csum_update,
421
422 /**
423 * bpf_set_hash_invalid(skb)
424 * Invalidate current skb>hash.
425 * @skb: pointer to skb
426 */
427 BPF_FUNC_set_hash_invalid,
428
378 __BPF_FUNC_MAX_ID, 429 __BPF_FUNC_MAX_ID,
379}; 430};
380 431
diff --git a/include/uapi/linux/bpf_perf_event.h b/include/uapi/linux/bpf_perf_event.h
new file mode 100644
index 000000000000..067427259820
--- /dev/null
+++ b/include/uapi/linux/bpf_perf_event.h
@@ -0,0 +1,18 @@
1/* Copyright (c) 2016 Facebook
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
8#define _UAPI__LINUX_BPF_PERF_EVENT_H__
9
10#include <linux/types.h>
11#include <linux/ptrace.h>
12
13struct bpf_perf_event_data {
14 struct pt_regs regs;
15 __u64 sample_period;
16};
17
18#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index b8f38e84d93a..099a4200732c 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1362,7 +1362,14 @@ enum ethtool_link_mode_bit_indices {
1362 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, 1362 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37,
1363 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, 1363 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38,
1364 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, 1364 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39,
1365 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, 1365 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40,
1366 ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41,
1367 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42,
1368 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43,
1369 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44,
1370 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
1371 ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46,
1372
1366 1373
1367 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit 1374 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
1368 * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* 1375 * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
@@ -1371,7 +1378,7 @@ enum ethtool_link_mode_bit_indices {
1371 */ 1378 */
1372 1379
1373 __ETHTOOL_LINK_MODE_LAST 1380 __ETHTOOL_LINK_MODE_LAST
1374 = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 1381 = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
1375}; 1382};
1376 1383
1377#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ 1384#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index c186f64fffca..ab92bca6d448 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -140,7 +140,7 @@ struct bridge_vlan_xstats {
140 __u64 tx_bytes; 140 __u64 tx_bytes;
141 __u64 tx_packets; 141 __u64 tx_packets;
142 __u16 vid; 142 __u16 vid;
143 __u16 pad1; 143 __u16 flags;
144 __u32 pad2; 144 __u32 pad2;
145}; 145};
146 146
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index a1b5202c5f6b..b4fba662cd32 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -318,6 +318,7 @@ enum {
318 IFLA_BRPORT_FLUSH, 318 IFLA_BRPORT_FLUSH,
319 IFLA_BRPORT_MULTICAST_ROUTER, 319 IFLA_BRPORT_MULTICAST_ROUTER,
320 IFLA_BRPORT_PAD, 320 IFLA_BRPORT_PAD,
321 IFLA_BRPORT_MCAST_FLOOD,
321 __IFLA_BRPORT_MAX 322 __IFLA_BRPORT_MAX
322}; 323};
323#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) 324#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -463,6 +464,7 @@ enum {
463enum ipvlan_mode { 464enum ipvlan_mode {
464 IPVLAN_MODE_L2 = 0, 465 IPVLAN_MODE_L2 = 0,
465 IPVLAN_MODE_L3, 466 IPVLAN_MODE_L3,
467 IPVLAN_MODE_L3S,
466 IPVLAN_MODE_MAX 468 IPVLAN_MODE_MAX
467}; 469};
468 470
@@ -617,7 +619,7 @@ enum {
617enum { 619enum {
618 IFLA_VF_UNSPEC, 620 IFLA_VF_UNSPEC,
619 IFLA_VF_MAC, /* Hardware queue specific attributes */ 621 IFLA_VF_MAC, /* Hardware queue specific attributes */
620 IFLA_VF_VLAN, 622 IFLA_VF_VLAN, /* VLAN ID and QoS */
621 IFLA_VF_TX_RATE, /* Max TX Bandwidth Allocation */ 623 IFLA_VF_TX_RATE, /* Max TX Bandwidth Allocation */
622 IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */ 624 IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */
623 IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */ 625 IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */
@@ -629,6 +631,7 @@ enum {
629 IFLA_VF_TRUST, /* Trust VF */ 631 IFLA_VF_TRUST, /* Trust VF */
630 IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */ 632 IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */
631 IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */ 633 IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */
634 IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */
632 __IFLA_VF_MAX, 635 __IFLA_VF_MAX,
633}; 636};
634 637
@@ -645,6 +648,22 @@ struct ifla_vf_vlan {
645 __u32 qos; 648 __u32 qos;
646}; 649};
647 650
651enum {
652 IFLA_VF_VLAN_INFO_UNSPEC,
653 IFLA_VF_VLAN_INFO, /* VLAN ID, QoS and VLAN protocol */
654 __IFLA_VF_VLAN_INFO_MAX,
655};
656
657#define IFLA_VF_VLAN_INFO_MAX (__IFLA_VF_VLAN_INFO_MAX - 1)
658#define MAX_VLAN_LIST_LEN 1
659
660struct ifla_vf_vlan_info {
661 __u32 vf;
662 __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
663 __u32 qos;
664 __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */
665};
666
648struct ifla_vf_tx_rate { 667struct ifla_vf_tx_rate {
649 __u32 vf; 668 __u32 vf;
650 __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */ 669 __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */
@@ -825,6 +844,7 @@ enum {
825 IFLA_STATS_LINK_64, 844 IFLA_STATS_LINK_64,
826 IFLA_STATS_LINK_XSTATS, 845 IFLA_STATS_LINK_XSTATS,
827 IFLA_STATS_LINK_XSTATS_SLAVE, 846 IFLA_STATS_LINK_XSTATS_SLAVE,
847 IFLA_STATS_LINK_OFFLOAD_XSTATS,
828 __IFLA_STATS_MAX, 848 __IFLA_STATS_MAX,
829}; 849};
830 850
@@ -844,6 +864,14 @@ enum {
844}; 864};
845#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1) 865#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
846 866
867/* These are stats embedded into IFLA_STATS_LINK_OFFLOAD_XSTATS */
868enum {
869 IFLA_OFFLOAD_XSTATS_UNSPEC,
870 IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */
871 __IFLA_OFFLOAD_XSTATS_MAX
872};
873#define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1)
874
847/* XDP section */ 875/* XDP section */
848 876
849enum { 877enum {
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 777b6cdb1b7b..92f3c8677523 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -27,9 +27,23 @@
27#define GRE_SEQ __cpu_to_be16(0x1000) 27#define GRE_SEQ __cpu_to_be16(0x1000)
28#define GRE_STRICT __cpu_to_be16(0x0800) 28#define GRE_STRICT __cpu_to_be16(0x0800)
29#define GRE_REC __cpu_to_be16(0x0700) 29#define GRE_REC __cpu_to_be16(0x0700)
30#define GRE_FLAGS __cpu_to_be16(0x00F8) 30#define GRE_ACK __cpu_to_be16(0x0080)
31#define GRE_FLAGS __cpu_to_be16(0x0078)
31#define GRE_VERSION __cpu_to_be16(0x0007) 32#define GRE_VERSION __cpu_to_be16(0x0007)
32 33
34#define GRE_IS_CSUM(f) ((f) & GRE_CSUM)
35#define GRE_IS_ROUTING(f) ((f) & GRE_ROUTING)
36#define GRE_IS_KEY(f) ((f) & GRE_KEY)
37#define GRE_IS_SEQ(f) ((f) & GRE_SEQ)
38#define GRE_IS_STRICT(f) ((f) & GRE_STRICT)
39#define GRE_IS_REC(f) ((f) & GRE_REC)
40#define GRE_IS_ACK(f) ((f) & GRE_ACK)
41
42#define GRE_VERSION_0 __cpu_to_be16(0x0000)
43#define GRE_VERSION_1 __cpu_to_be16(0x0001)
44#define GRE_PROTO_PPP __cpu_to_be16(0x880b)
45#define GRE_PPTP_KEY_MASK __cpu_to_be32(0xffff)
46
33struct ip_tunnel_parm { 47struct ip_tunnel_parm {
34 char name[IFNAMSIZ]; 48 char name[IFNAMSIZ];
35 int link; 49 int link;
@@ -60,6 +74,7 @@ enum {
60 IFLA_IPTUN_ENCAP_FLAGS, 74 IFLA_IPTUN_ENCAP_FLAGS,
61 IFLA_IPTUN_ENCAP_SPORT, 75 IFLA_IPTUN_ENCAP_SPORT,
62 IFLA_IPTUN_ENCAP_DPORT, 76 IFLA_IPTUN_ENCAP_DPORT,
77 IFLA_IPTUN_COLLECT_METADATA,
63 __IFLA_IPTUN_MAX, 78 __IFLA_IPTUN_MAX,
64}; 79};
65#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1) 80#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index abbd1dc5d683..509cd961068d 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -73,6 +73,7 @@ enum {
73 INET_DIAG_BC_S_COND, 73 INET_DIAG_BC_S_COND,
74 INET_DIAG_BC_D_COND, 74 INET_DIAG_BC_D_COND,
75 INET_DIAG_BC_DEV_COND, /* u32 ifindex */ 75 INET_DIAG_BC_DEV_COND, /* u32 ifindex */
76 INET_DIAG_BC_MARK_COND,
76}; 77};
77 78
78struct inet_diag_hostcond { 79struct inet_diag_hostcond {
@@ -82,6 +83,11 @@ struct inet_diag_hostcond {
82 __be32 addr[0]; 83 __be32 addr[0];
83}; 84};
84 85
86struct inet_diag_markcond {
87 __u32 mark;
88 __u32 mask;
89};
90
85/* Base info structure. It contains socket identity (addrs/ports/cookie) 91/* Base info structure. It contains socket identity (addrs/ports/cookie)
86 * and, alas, the information shown by netstat. */ 92 * and, alas, the information shown by netstat. */
87struct inet_diag_msg { 93struct inet_diag_msg {
@@ -117,6 +123,8 @@ enum {
117 INET_DIAG_LOCALS, 123 INET_DIAG_LOCALS,
118 INET_DIAG_PEERS, 124 INET_DIAG_PEERS,
119 INET_DIAG_PAD, 125 INET_DIAG_PAD,
126 INET_DIAG_MARK,
127 INET_DIAG_BBRINFO,
120 __INET_DIAG_MAX, 128 __INET_DIAG_MAX,
121}; 129};
122 130
@@ -150,8 +158,20 @@ struct tcp_dctcp_info {
150 __u32 dctcp_ab_tot; 158 __u32 dctcp_ab_tot;
151}; 159};
152 160
161/* INET_DIAG_BBRINFO */
162
163struct tcp_bbr_info {
164 /* u64 bw: max-filtered BW (app throughput) estimate in Byte per sec: */
165 __u32 bbr_bw_lo; /* lower 32 bits of bw */
166 __u32 bbr_bw_hi; /* upper 32 bits of bw */
167 __u32 bbr_min_rtt; /* min-filtered RTT in uSec */
168 __u32 bbr_pacing_gain; /* pacing gain shifted left 8 bits */
169 __u32 bbr_cwnd_gain; /* cwnd gain shifted left 8 bits */
170};
171
153union tcp_cc_info { 172union tcp_cc_info {
154 struct tcpvegas_info vegas; 173 struct tcpvegas_info vegas;
155 struct tcp_dctcp_info dctcp; 174 struct tcp_dctcp_info dctcp;
175 struct tcp_bbr_info bbr;
156}; 176};
157#endif /* _UAPI_INET_DIAG_H_ */ 177#endif /* _UAPI_INET_DIAG_H_ */
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 395876060f50..8c2772340c3f 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -177,6 +177,7 @@ enum {
177 DEVCONF_DROP_UNICAST_IN_L2_MULTICAST, 177 DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
178 DEVCONF_DROP_UNSOLICITED_NA, 178 DEVCONF_DROP_UNSOLICITED_NA,
179 DEVCONF_KEEP_ADDR_ON_DOWN, 179 DEVCONF_KEEP_ADDR_ON_DOWN,
180 DEVCONF_RTR_SOLICIT_MAX_INTERVAL,
180 DEVCONF_MAX 181 DEVCONF_MAX
181}; 182};
182 183
diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h
index 237fac4bc17b..15d8510cdae0 100644
--- a/include/uapi/linux/mii.h
+++ b/include/uapi/linux/mii.h
@@ -48,6 +48,7 @@
48#define BMCR_SPEED100 0x2000 /* Select 100Mbps */ 48#define BMCR_SPEED100 0x2000 /* Select 100Mbps */
49#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */ 49#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */
50#define BMCR_RESET 0x8000 /* Reset to default state */ 50#define BMCR_RESET 0x8000 /* Reset to default state */
51#define BMCR_SPEED10 0x0000 /* Select 10Mbps */
51 52
52/* Basic mode status register. */ 53/* Basic mode status register. */
53#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ 54#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h
new file mode 100644
index 000000000000..8be21e02387d
--- /dev/null
+++ b/include/uapi/linux/netfilter/nf_log.h
@@ -0,0 +1,12 @@
1#ifndef _NETFILTER_NF_LOG_H
2#define _NETFILTER_NF_LOG_H
3
4#define NF_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
5#define NF_LOG_TCPOPT 0x02 /* Log TCP options */
6#define NF_LOG_IPOPT 0x04 /* Log IP options */
7#define NF_LOG_UID 0x08 /* Log UID owning local socket */
8#define NF_LOG_NFLOG 0x10 /* Unsupported, don't reuse */
9#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */
10#define NF_LOG_MASK 0x2f
11
12#endif /* _NETFILTER_NF_LOG_H */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index c674ba2563b7..c6c4477c136b 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -546,6 +546,35 @@ enum nft_cmp_attributes {
546}; 546};
547#define NFTA_CMP_MAX (__NFTA_CMP_MAX - 1) 547#define NFTA_CMP_MAX (__NFTA_CMP_MAX - 1)
548 548
549/**
550 * enum nft_range_ops - nf_tables range operator
551 *
552 * @NFT_RANGE_EQ: equal
553 * @NFT_RANGE_NEQ: not equal
554 */
555enum nft_range_ops {
556 NFT_RANGE_EQ,
557 NFT_RANGE_NEQ,
558};
559
560/**
561 * enum nft_range_attributes - nf_tables range expression netlink attributes
562 *
563 * @NFTA_RANGE_SREG: source register of data to compare (NLA_U32: nft_registers)
564 * @NFTA_RANGE_OP: cmp operation (NLA_U32: nft_cmp_ops)
565 * @NFTA_RANGE_FROM_DATA: data range from (NLA_NESTED: nft_data_attributes)
566 * @NFTA_RANGE_TO_DATA: data range to (NLA_NESTED: nft_data_attributes)
567 */
568enum nft_range_attributes {
569 NFTA_RANGE_UNSPEC,
570 NFTA_RANGE_SREG,
571 NFTA_RANGE_OP,
572 NFTA_RANGE_FROM_DATA,
573 NFTA_RANGE_TO_DATA,
574 __NFTA_RANGE_MAX
575};
576#define NFTA_RANGE_MAX (__NFTA_RANGE_MAX - 1)
577
549enum nft_lookup_flags { 578enum nft_lookup_flags {
550 NFT_LOOKUP_F_INV = (1 << 0), 579 NFT_LOOKUP_F_INV = (1 << 0),
551}; 580};
@@ -575,6 +604,10 @@ enum nft_dynset_ops {
575 NFT_DYNSET_OP_UPDATE, 604 NFT_DYNSET_OP_UPDATE,
576}; 605};
577 606
607enum nft_dynset_flags {
608 NFT_DYNSET_F_INV = (1 << 0),
609};
610
578/** 611/**
579 * enum nft_dynset_attributes - dynset expression attributes 612 * enum nft_dynset_attributes - dynset expression attributes
580 * 613 *
@@ -585,6 +618,7 @@ enum nft_dynset_ops {
585 * @NFTA_DYNSET_SREG_DATA: source register of the data (NLA_U32) 618 * @NFTA_DYNSET_SREG_DATA: source register of the data (NLA_U32)
586 * @NFTA_DYNSET_TIMEOUT: timeout value for the new element (NLA_U64) 619 * @NFTA_DYNSET_TIMEOUT: timeout value for the new element (NLA_U64)
587 * @NFTA_DYNSET_EXPR: expression (NLA_NESTED: nft_expr_attributes) 620 * @NFTA_DYNSET_EXPR: expression (NLA_NESTED: nft_expr_attributes)
621 * @NFTA_DYNSET_FLAGS: flags (NLA_U32)
588 */ 622 */
589enum nft_dynset_attributes { 623enum nft_dynset_attributes {
590 NFTA_DYNSET_UNSPEC, 624 NFTA_DYNSET_UNSPEC,
@@ -596,6 +630,7 @@ enum nft_dynset_attributes {
596 NFTA_DYNSET_TIMEOUT, 630 NFTA_DYNSET_TIMEOUT,
597 NFTA_DYNSET_EXPR, 631 NFTA_DYNSET_EXPR,
598 NFTA_DYNSET_PAD, 632 NFTA_DYNSET_PAD,
633 NFTA_DYNSET_FLAGS,
599 __NFTA_DYNSET_MAX, 634 __NFTA_DYNSET_MAX,
600}; 635};
601#define NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1) 636#define NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1)
@@ -724,6 +759,28 @@ enum nft_meta_keys {
724}; 759};
725 760
726/** 761/**
762 * enum nft_hash_attributes - nf_tables hash expression netlink attributes
763 *
764 * @NFTA_HASH_SREG: source register (NLA_U32)
765 * @NFTA_HASH_DREG: destination register (NLA_U32)
766 * @NFTA_HASH_LEN: source data length (NLA_U32)
767 * @NFTA_HASH_MODULUS: modulus value (NLA_U32)
768 * @NFTA_HASH_SEED: seed value (NLA_U32)
769 * @NFTA_HASH_OFFSET: add this offset value to hash result (NLA_U32)
770 */
771enum nft_hash_attributes {
772 NFTA_HASH_UNSPEC,
773 NFTA_HASH_SREG,
774 NFTA_HASH_DREG,
775 NFTA_HASH_LEN,
776 NFTA_HASH_MODULUS,
777 NFTA_HASH_SEED,
778 NFTA_HASH_OFFSET,
779 __NFTA_HASH_MAX,
780};
781#define NFTA_HASH_MAX (__NFTA_HASH_MAX - 1)
782
783/**
727 * enum nft_meta_attributes - nf_tables meta expression netlink attributes 784 * enum nft_meta_attributes - nf_tables meta expression netlink attributes
728 * 785 *
729 * @NFTA_META_DREG: destination register (NLA_U32) 786 * @NFTA_META_DREG: destination register (NLA_U32)
@@ -866,12 +923,14 @@ enum nft_log_attributes {
866 * @NFTA_QUEUE_NUM: netlink queue to send messages to (NLA_U16) 923 * @NFTA_QUEUE_NUM: netlink queue to send messages to (NLA_U16)
867 * @NFTA_QUEUE_TOTAL: number of queues to load balance packets on (NLA_U16) 924 * @NFTA_QUEUE_TOTAL: number of queues to load balance packets on (NLA_U16)
868 * @NFTA_QUEUE_FLAGS: various flags (NLA_U16) 925 * @NFTA_QUEUE_FLAGS: various flags (NLA_U16)
926 * @NFTA_QUEUE_SREG_QNUM: source register of queue number (NLA_U32: nft_registers)
869 */ 927 */
870enum nft_queue_attributes { 928enum nft_queue_attributes {
871 NFTA_QUEUE_UNSPEC, 929 NFTA_QUEUE_UNSPEC,
872 NFTA_QUEUE_NUM, 930 NFTA_QUEUE_NUM,
873 NFTA_QUEUE_TOTAL, 931 NFTA_QUEUE_TOTAL,
874 NFTA_QUEUE_FLAGS, 932 NFTA_QUEUE_FLAGS,
933 NFTA_QUEUE_SREG_QNUM,
875 __NFTA_QUEUE_MAX 934 __NFTA_QUEUE_MAX
876}; 935};
877#define NFTA_QUEUE_MAX (__NFTA_QUEUE_MAX - 1) 936#define NFTA_QUEUE_MAX (__NFTA_QUEUE_MAX - 1)
@@ -880,6 +939,25 @@ enum nft_queue_attributes {
880#define NFT_QUEUE_FLAG_CPU_FANOUT 0x02 /* use current CPU (no hashing) */ 939#define NFT_QUEUE_FLAG_CPU_FANOUT 0x02 /* use current CPU (no hashing) */
881#define NFT_QUEUE_FLAG_MASK 0x03 940#define NFT_QUEUE_FLAG_MASK 0x03
882 941
942enum nft_quota_flags {
943 NFT_QUOTA_F_INV = (1 << 0),
944};
945
946/**
947 * enum nft_quota_attributes - nf_tables quota expression netlink attributes
948 *
949 * @NFTA_QUOTA_BYTES: quota in bytes (NLA_U16)
950 * @NFTA_QUOTA_FLAGS: flags (NLA_U32)
951 */
952enum nft_quota_attributes {
953 NFTA_QUOTA_UNSPEC,
954 NFTA_QUOTA_BYTES,
955 NFTA_QUOTA_FLAGS,
956 NFTA_QUOTA_PAD,
957 __NFTA_QUOTA_MAX
958};
959#define NFTA_QUOTA_MAX (__NFTA_QUOTA_MAX - 1)
960
883/** 961/**
884 * enum nft_reject_types - nf_tables reject expression reject types 962 * enum nft_reject_types - nf_tables reject expression reject types
885 * 963 *
@@ -1051,7 +1129,7 @@ enum nft_gen_attributes {
1051 * @NFTA_TRACE_NFPROTO: nf protocol processed (NLA_U32) 1129 * @NFTA_TRACE_NFPROTO: nf protocol processed (NLA_U32)
1052 * @NFTA_TRACE_POLICY: policy that decided fate of packet (NLA_U32) 1130 * @NFTA_TRACE_POLICY: policy that decided fate of packet (NLA_U32)
1053 */ 1131 */
1054enum nft_trace_attibutes { 1132enum nft_trace_attributes {
1055 NFTA_TRACE_UNSPEC, 1133 NFTA_TRACE_UNSPEC,
1056 NFTA_TRACE_TABLE, 1134 NFTA_TRACE_TABLE,
1057 NFTA_TRACE_CHAIN, 1135 NFTA_TRACE_CHAIN,
@@ -1082,4 +1160,30 @@ enum nft_trace_types {
1082 __NFT_TRACETYPE_MAX 1160 __NFT_TRACETYPE_MAX
1083}; 1161};
1084#define NFT_TRACETYPE_MAX (__NFT_TRACETYPE_MAX - 1) 1162#define NFT_TRACETYPE_MAX (__NFT_TRACETYPE_MAX - 1)
1163
1164/**
1165 * enum nft_ng_attributes - nf_tables number generator expression netlink attributes
1166 *
1167 * @NFTA_NG_DREG: destination register (NLA_U32)
1168 * @NFTA_NG_MODULUS: maximum counter value (NLA_U32)
1169 * @NFTA_NG_TYPE: operation type (NLA_U32)
1170 * @NFTA_NG_OFFSET: offset to be added to the counter (NLA_U32)
1171 */
1172enum nft_ng_attributes {
1173 NFTA_NG_UNSPEC,
1174 NFTA_NG_DREG,
1175 NFTA_NG_MODULUS,
1176 NFTA_NG_TYPE,
1177 NFTA_NG_OFFSET,
1178 __NFTA_NG_MAX
1179};
1180#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
1181
1182enum nft_ng_types {
1183 NFT_NG_INCREMENTAL,
1184 NFT_NG_RANDOM,
1185 __NFT_NG_MAX
1186};
1187#define NFT_NG_MAX (__NFT_NG_MAX - 1)
1188
1085#endif /* _LINUX_NF_TABLES_H */ 1189#endif /* _LINUX_NF_TABLES_H */
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index 9df789709abe..6deb8867c5fc 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -231,13 +231,13 @@ enum ctattr_secctx {
231 231
232enum ctattr_stats_cpu { 232enum ctattr_stats_cpu {
233 CTA_STATS_UNSPEC, 233 CTA_STATS_UNSPEC,
234 CTA_STATS_SEARCHED, 234 CTA_STATS_SEARCHED, /* no longer used */
235 CTA_STATS_FOUND, 235 CTA_STATS_FOUND,
236 CTA_STATS_NEW, 236 CTA_STATS_NEW, /* no longer used */
237 CTA_STATS_INVALID, 237 CTA_STATS_INVALID,
238 CTA_STATS_IGNORE, 238 CTA_STATS_IGNORE,
239 CTA_STATS_DELETE, 239 CTA_STATS_DELETE, /* no longer used */
240 CTA_STATS_DELETE_LIST, 240 CTA_STATS_DELETE_LIST, /* no longer used */
241 CTA_STATS_INSERT, 241 CTA_STATS_INSERT,
242 CTA_STATS_INSERT_FAILED, 242 CTA_STATS_INSERT_FAILED,
243 CTA_STATS_DROP, 243 CTA_STATS_DROP,
diff --git a/include/uapi/linux/netfilter/xt_hashlimit.h b/include/uapi/linux/netfilter/xt_hashlimit.h
index 6db90372f09c..3efc0ca18345 100644
--- a/include/uapi/linux/netfilter/xt_hashlimit.h
+++ b/include/uapi/linux/netfilter/xt_hashlimit.h
@@ -6,6 +6,7 @@
6 6
7/* timings are in milliseconds. */ 7/* timings are in milliseconds. */
8#define XT_HASHLIMIT_SCALE 10000 8#define XT_HASHLIMIT_SCALE 10000
9#define XT_HASHLIMIT_SCALE_v2 1000000llu
9/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490 10/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
10 * seconds, or one packet every 59 hours. 11 * seconds, or one packet every 59 hours.
11 */ 12 */
@@ -63,6 +64,20 @@ struct hashlimit_cfg1 {
63 __u8 srcmask, dstmask; 64 __u8 srcmask, dstmask;
64}; 65};
65 66
67struct hashlimit_cfg2 {
68 __u64 avg; /* Average secs between packets * scale */
69 __u64 burst; /* Period multiplier for upper limit. */
70 __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */
71
72 /* user specified */
73 __u32 size; /* how many buckets */
74 __u32 max; /* max number of entries */
75 __u32 gc_interval; /* gc interval */
76 __u32 expire; /* when do entries expire? */
77
78 __u8 srcmask, dstmask;
79};
80
66struct xt_hashlimit_mtinfo1 { 81struct xt_hashlimit_mtinfo1 {
67 char name[IFNAMSIZ]; 82 char name[IFNAMSIZ];
68 struct hashlimit_cfg1 cfg; 83 struct hashlimit_cfg1 cfg;
@@ -71,4 +86,12 @@ struct xt_hashlimit_mtinfo1 {
71 struct xt_hashlimit_htable *hinfo __attribute__((aligned(8))); 86 struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
72}; 87};
73 88
89struct xt_hashlimit_mtinfo2 {
90 char name[NAME_MAX];
91 struct hashlimit_cfg2 cfg;
92
93 /* Used internally by the kernel */
94 struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
95};
96
74#endif /* _UAPI_XT_HASHLIMIT_H */ 97#endif /* _UAPI_XT_HASHLIMIT_H */
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 220694151434..56368e9b4622 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -48,6 +48,7 @@
48#define NL80211_MULTICAST_GROUP_REG "regulatory" 48#define NL80211_MULTICAST_GROUP_REG "regulatory"
49#define NL80211_MULTICAST_GROUP_MLME "mlme" 49#define NL80211_MULTICAST_GROUP_MLME "mlme"
50#define NL80211_MULTICAST_GROUP_VENDOR "vendor" 50#define NL80211_MULTICAST_GROUP_VENDOR "vendor"
51#define NL80211_MULTICAST_GROUP_NAN "nan"
51#define NL80211_MULTICAST_GROUP_TESTMODE "testmode" 52#define NL80211_MULTICAST_GROUP_TESTMODE "testmode"
52 53
53/** 54/**
@@ -838,6 +839,41 @@
838 * not running. The driver indicates the status of the scan through 839 * not running. The driver indicates the status of the scan through
839 * cfg80211_scan_done(). 840 * cfg80211_scan_done().
840 * 841 *
842 * @NL80211_CMD_START_NAN: Start NAN operation, identified by its
843 * %NL80211_ATTR_WDEV interface. This interface must have been previously
844 * created with %NL80211_CMD_NEW_INTERFACE. After it has been started, the
845 * NAN interface will create or join a cluster. This command must have a
846 * valid %NL80211_ATTR_NAN_MASTER_PREF attribute and optional
847 * %NL80211_ATTR_NAN_DUAL attributes.
848 * After this command NAN functions can be added.
849 * @NL80211_CMD_STOP_NAN: Stop the NAN operation, identified by
850 * its %NL80211_ATTR_WDEV interface.
851 * @NL80211_CMD_ADD_NAN_FUNCTION: Add a NAN function. The function is defined
852 * with %NL80211_ATTR_NAN_FUNC nested attribute. When called, this
853 * operation returns the strictly positive and unique instance id
854 * (%NL80211_ATTR_NAN_FUNC_INST_ID) and a cookie (%NL80211_ATTR_COOKIE)
855 * of the function upon success.
856 * Since instance ID's can be re-used, this cookie is the right
857 * way to identify the function. This will avoid races when a termination
858 * event is handled by the user space after it has already added a new
859 * function that got the same instance id from the kernel as the one
860 * which just terminated.
861 * This cookie may be used in NAN events even before the command
862 * returns, so userspace shouldn't process NAN events until it processes
863 * the response to this command.
864 * Look at %NL80211_ATTR_SOCKET_OWNER as well.
865 * @NL80211_CMD_DEL_NAN_FUNCTION: Delete a NAN function by cookie.
866 * This command is also used as a notification sent when a NAN function is
867 * terminated. This will contain a %NL80211_ATTR_NAN_FUNC_INST_ID
868 * and %NL80211_ATTR_COOKIE attributes.
869 * @NL80211_CMD_CHANGE_NAN_CONFIG: Change current NAN configuration. NAN
870 * must be operational (%NL80211_CMD_START_NAN was executed).
871 * It must contain at least one of the following attributes:
872 * %NL80211_ATTR_NAN_MASTER_PREF, %NL80211_ATTR_NAN_DUAL.
873 * @NL80211_CMD_NAN_FUNC_MATCH: Notification sent when a match is reported.
874 * This will contain a %NL80211_ATTR_NAN_MATCH nested attribute and
875 * %NL80211_ATTR_COOKIE.
876 *
841 * @NL80211_CMD_MAX: highest used command number 877 * @NL80211_CMD_MAX: highest used command number
842 * @__NL80211_CMD_AFTER_LAST: internal use 878 * @__NL80211_CMD_AFTER_LAST: internal use
843 */ 879 */
@@ -1026,6 +1062,13 @@ enum nl80211_commands {
1026 1062
1027 NL80211_CMD_ABORT_SCAN, 1063 NL80211_CMD_ABORT_SCAN,
1028 1064
1065 NL80211_CMD_START_NAN,
1066 NL80211_CMD_STOP_NAN,
1067 NL80211_CMD_ADD_NAN_FUNCTION,
1068 NL80211_CMD_DEL_NAN_FUNCTION,
1069 NL80211_CMD_CHANGE_NAN_CONFIG,
1070 NL80211_CMD_NAN_MATCH,
1071
1029 /* add new commands above here */ 1072 /* add new commands above here */
1030 1073
1031 /* used to define NL80211_CMD_MAX below */ 1074 /* used to define NL80211_CMD_MAX below */
@@ -1343,7 +1386,13 @@ enum nl80211_commands {
1343 * enum nl80211_band value is used as the index (nla_type() of the nested 1386 * enum nl80211_band value is used as the index (nla_type() of the nested
1344 * data. If a band is not included, it will be configured to allow all 1387 * data. If a band is not included, it will be configured to allow all
1345 * rates based on negotiated supported rates information. This attribute 1388 * rates based on negotiated supported rates information. This attribute
1346 * is used with %NL80211_CMD_SET_TX_BITRATE_MASK. 1389 * is used with %NL80211_CMD_SET_TX_BITRATE_MASK and with starting AP,
1390 * and joining mesh networks (not IBSS yet). In the latter case, it must
1391 * specify just a single bitrate, which is to be used for the beacon.
1392 * The driver must also specify support for this with the extended
1393 * features NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
1394 * NL80211_EXT_FEATURE_BEACON_RATE_HT and
1395 * NL80211_EXT_FEATURE_BEACON_RATE_VHT.
1347 * 1396 *
1348 * @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain 1397 * @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain
1349 * at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME. 1398 * at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME.
@@ -1733,6 +1782,12 @@ enum nl80211_commands {
1733 * regulatory indoor configuration would be owned by the netlink socket 1782 * regulatory indoor configuration would be owned by the netlink socket
1734 * that configured the indoor setting, and the indoor operation would be 1783 * that configured the indoor setting, and the indoor operation would be
1735 * cleared when the socket is closed. 1784 * cleared when the socket is closed.
1785 * If set during NAN interface creation, the interface will be destroyed
1786 * if the socket is closed just like any other interface. Moreover, only
1787 * the netlink socket that created the interface will be allowed to add
1788 * and remove functions. NAN notifications will be sent in unicast to that
1789 * socket. Without this attribute, any socket can add functions and the
1790 * notifications will be sent to the %NL80211_MCGRP_NAN multicast group.
1736 * 1791 *
1737 * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is 1792 * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
1738 * the TDLS link initiator. 1793 * the TDLS link initiator.
@@ -1867,6 +1922,21 @@ enum nl80211_commands {
1867 * @NL80211_ATTR_MESH_PEER_AID: Association ID for the mesh peer (u16). This is 1922 * @NL80211_ATTR_MESH_PEER_AID: Association ID for the mesh peer (u16). This is
1868 * used to pull the stored data for mesh peer in power save state. 1923 * used to pull the stored data for mesh peer in power save state.
1869 * 1924 *
1925 * @NL80211_ATTR_NAN_MASTER_PREF: the master preference to be used by
1926 * %NL80211_CMD_START_NAN and optionally with
1927 * %NL80211_CMD_CHANGE_NAN_CONFIG. Its type is u8 and it can't be 0.
1928 * Also, values 1 and 255 are reserved for certification purposes and
1929 * should not be used during a normal device operation.
1930 * @NL80211_ATTR_NAN_DUAL: NAN dual band operation config (see
1931 * &enum nl80211_nan_dual_band_conf). This attribute is used with
1932 * %NL80211_CMD_START_NAN and optionally with
1933 * %NL80211_CMD_CHANGE_NAN_CONFIG.
1934 * @NL80211_ATTR_NAN_FUNC: a function that can be added to NAN. See
1935 * &enum nl80211_nan_func_attributes for description of this nested
1936 * attribute.
1937 * @NL80211_ATTR_NAN_MATCH: used to report a match. This is a nested attribute.
1938 * See &enum nl80211_nan_match_attributes.
1939 *
1870 * @NUM_NL80211_ATTR: total number of nl80211_attrs available 1940 * @NUM_NL80211_ATTR: total number of nl80211_attrs available
1871 * @NL80211_ATTR_MAX: highest attribute number currently defined 1941 * @NL80211_ATTR_MAX: highest attribute number currently defined
1872 * @__NL80211_ATTR_AFTER_LAST: internal use 1942 * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2261,6 +2331,11 @@ enum nl80211_attrs {
2261 2331
2262 NL80211_ATTR_MESH_PEER_AID, 2332 NL80211_ATTR_MESH_PEER_AID,
2263 2333
2334 NL80211_ATTR_NAN_MASTER_PREF,
2335 NL80211_ATTR_NAN_DUAL,
2336 NL80211_ATTR_NAN_FUNC,
2337 NL80211_ATTR_NAN_MATCH,
2338
2264 /* add attributes here, update the policy in nl80211.c */ 2339 /* add attributes here, update the policy in nl80211.c */
2265 2340
2266 __NL80211_ATTR_AFTER_LAST, 2341 __NL80211_ATTR_AFTER_LAST,
@@ -2339,6 +2414,7 @@ enum nl80211_attrs {
2339 * commands to create and destroy one 2414 * commands to create and destroy one
2340 * @NL80211_IFTYPE_OCB: Outside Context of a BSS 2415 * @NL80211_IFTYPE_OCB: Outside Context of a BSS
2341 * This mode corresponds to the MIB variable dot11OCBActivated=true 2416 * This mode corresponds to the MIB variable dot11OCBActivated=true
2417 * @NL80211_IFTYPE_NAN: NAN device interface type (not a netdev)
2342 * @NL80211_IFTYPE_MAX: highest interface type number currently defined 2418 * @NL80211_IFTYPE_MAX: highest interface type number currently defined
2343 * @NUM_NL80211_IFTYPES: number of defined interface types 2419 * @NUM_NL80211_IFTYPES: number of defined interface types
2344 * 2420 *
@@ -2359,6 +2435,7 @@ enum nl80211_iftype {
2359 NL80211_IFTYPE_P2P_GO, 2435 NL80211_IFTYPE_P2P_GO,
2360 NL80211_IFTYPE_P2P_DEVICE, 2436 NL80211_IFTYPE_P2P_DEVICE,
2361 NL80211_IFTYPE_OCB, 2437 NL80211_IFTYPE_OCB,
2438 NL80211_IFTYPE_NAN,
2362 2439
2363 /* keep last */ 2440 /* keep last */
2364 NUM_NL80211_IFTYPES, 2441 NUM_NL80211_IFTYPES,
@@ -4551,6 +4628,12 @@ enum nl80211_feature_flags {
4551 * (if available). 4628 * (if available).
4552 * @NL80211_EXT_FEATURE_SET_SCAN_DWELL: This driver supports configuration of 4629 * @NL80211_EXT_FEATURE_SET_SCAN_DWELL: This driver supports configuration of
4553 * channel dwell time. 4630 * channel dwell time.
4631 * @NL80211_EXT_FEATURE_BEACON_RATE_LEGACY: Driver supports beacon rate
4632 * configuration (AP/mesh), supporting a legacy (non HT/VHT) rate.
4633 * @NL80211_EXT_FEATURE_BEACON_RATE_HT: Driver supports beacon rate
4634 * configuration (AP/mesh) with HT rates.
4635 * @NL80211_EXT_FEATURE_BEACON_RATE_VHT: Driver supports beacon rate
4636 * configuration (AP/mesh) with VHT rates.
4554 * 4637 *
4555 * @NUM_NL80211_EXT_FEATURES: number of extended features. 4638 * @NUM_NL80211_EXT_FEATURES: number of extended features.
4556 * @MAX_NL80211_EXT_FEATURES: highest extended feature index. 4639 * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4562,6 +4645,9 @@ enum nl80211_ext_feature_index {
4562 NL80211_EXT_FEATURE_SCAN_START_TIME, 4645 NL80211_EXT_FEATURE_SCAN_START_TIME,
4563 NL80211_EXT_FEATURE_BSS_PARENT_TSF, 4646 NL80211_EXT_FEATURE_BSS_PARENT_TSF,
4564 NL80211_EXT_FEATURE_SET_SCAN_DWELL, 4647 NL80211_EXT_FEATURE_SET_SCAN_DWELL,
4648 NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
4649 NL80211_EXT_FEATURE_BEACON_RATE_HT,
4650 NL80211_EXT_FEATURE_BEACON_RATE_VHT,
4565 4651
4566 /* add new features before the definition below */ 4652 /* add new features before the definition below */
4567 NUM_NL80211_EXT_FEATURES, 4653 NUM_NL80211_EXT_FEATURES,
@@ -4855,4 +4941,186 @@ enum nl80211_bss_select_attr {
4855 NL80211_BSS_SELECT_ATTR_MAX = __NL80211_BSS_SELECT_ATTR_AFTER_LAST - 1 4941 NL80211_BSS_SELECT_ATTR_MAX = __NL80211_BSS_SELECT_ATTR_AFTER_LAST - 1
4856}; 4942};
4857 4943
4944/**
4945 * enum nl80211_nan_dual_band_conf - NAN dual band configuration
4946 *
4947 * Defines the NAN dual band mode of operation
4948 *
4949 * @NL80211_NAN_BAND_DEFAULT: device default mode
4950 * @NL80211_NAN_BAND_2GHZ: 2.4GHz mode
4951 * @NL80211_NAN_BAND_5GHZ: 5GHz mode
4952 */
4953enum nl80211_nan_dual_band_conf {
4954 NL80211_NAN_BAND_DEFAULT = 1 << 0,
4955 NL80211_NAN_BAND_2GHZ = 1 << 1,
4956 NL80211_NAN_BAND_5GHZ = 1 << 2,
4957};
4958
4959/**
4960 * enum nl80211_nan_function_type - NAN function type
4961 *
4962 * Defines the function type of a NAN function
4963 *
4964 * @NL80211_NAN_FUNC_PUBLISH: function is publish
4965 * @NL80211_NAN_FUNC_SUBSCRIBE: function is subscribe
4966 * @NL80211_NAN_FUNC_FOLLOW_UP: function is follow-up
4967 */
4968enum nl80211_nan_function_type {
4969 NL80211_NAN_FUNC_PUBLISH,
4970 NL80211_NAN_FUNC_SUBSCRIBE,
4971 NL80211_NAN_FUNC_FOLLOW_UP,
4972
4973 /* keep last */
4974 __NL80211_NAN_FUNC_TYPE_AFTER_LAST,
4975 NL80211_NAN_FUNC_MAX_TYPE = __NL80211_NAN_FUNC_TYPE_AFTER_LAST - 1,
4976};
4977
4978/**
4979 * enum nl80211_nan_publish_type - NAN publish tx type
4980 *
4981 * Defines how to send publish Service Discovery Frames
4982 *
4983 * @NL80211_NAN_SOLICITED_PUBLISH: publish function is solicited
4984 * @NL80211_NAN_UNSOLICITED_PUBLISH: publish function is unsolicited
4985 */
4986enum nl80211_nan_publish_type {
4987 NL80211_NAN_SOLICITED_PUBLISH = 1 << 0,
4988 NL80211_NAN_UNSOLICITED_PUBLISH = 1 << 1,
4989};
4990
4991/**
4992 * enum nl80211_nan_func_term_reason - NAN functions termination reason
4993 *
4994 * Defines termination reasons of a NAN function
4995 *
4996 * @NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST: requested by user
4997 * @NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED: timeout
4998 * @NL80211_NAN_FUNC_TERM_REASON_ERROR: errored
4999 */
5000enum nl80211_nan_func_term_reason {
5001 NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST,
5002 NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED,
5003 NL80211_NAN_FUNC_TERM_REASON_ERROR,
5004};
5005
5006#define NL80211_NAN_FUNC_SERVICE_ID_LEN 6
5007#define NL80211_NAN_FUNC_SERVICE_SPEC_INFO_MAX_LEN 0xff
5008#define NL80211_NAN_FUNC_SRF_MAX_LEN 0xff
5009
5010/**
5011 * enum nl80211_nan_func_attributes - NAN function attributes
5012 * @__NL80211_NAN_FUNC_INVALID: invalid
5013 * @NL80211_NAN_FUNC_TYPE: &enum nl80211_nan_function_type (u8).
5014 * @NL80211_NAN_FUNC_SERVICE_ID: 6 bytes of the service ID hash as
5015 * specified in NAN spec. This is a binary attribute.
5016 * @NL80211_NAN_FUNC_PUBLISH_TYPE: relevant if the function's type is
5017 * publish. Defines the transmission type for the publish Service Discovery
5018 * Frame, see &enum nl80211_nan_publish_type. Its type is u8.
5019 * @NL80211_NAN_FUNC_PUBLISH_BCAST: relevant if the function is a solicited
5020 * publish. Should the solicited publish Service Discovery Frame be sent to
5021 * the NAN Broadcast address. This is a flag.
5022 * @NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE: relevant if the function's type is
5023 * subscribe. Is the subscribe active. This is a flag.
5024 * @NL80211_NAN_FUNC_FOLLOW_UP_ID: relevant if the function's type is follow up.
5025 * The instance ID for the follow up Service Discovery Frame. This is u8.
5026 * @NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID: relevant if the function's type
5027 * is follow up. This is a u8.
5028 * The requestor instance ID for the follow up Service Discovery Frame.
5029 * @NL80211_NAN_FUNC_FOLLOW_UP_DEST: the MAC address of the recipient of the
5030 * follow up Service Discovery Frame. This is a binary attribute.
5031 * @NL80211_NAN_FUNC_CLOSE_RANGE: is this function limited for devices in a
5032 * close range. The range itself (RSSI) is defined by the device.
5033 * This is a flag.
5034 * @NL80211_NAN_FUNC_TTL: strictly positive number of DWs this function should
5035 * stay active. If not present infinite TTL is assumed. This is a u32.
5036 * @NL80211_NAN_FUNC_SERVICE_INFO: array of bytes describing the service
5037 * specific info. This is a binary attribute.
5038 * @NL80211_NAN_FUNC_SRF: Service Receive Filter. This is a nested attribute.
5039 * See &enum nl80211_nan_srf_attributes.
5040 * @NL80211_NAN_FUNC_RX_MATCH_FILTER: Receive Matching filter. This is a nested
5041 * attribute. It is a list of binary values.
5042 * @NL80211_NAN_FUNC_TX_MATCH_FILTER: Transmit Matching filter. This is a
5043 * nested attribute. It is a list of binary values.
5044 * @NL80211_NAN_FUNC_INSTANCE_ID: The instance ID of the function.
5045 * Its type is u8 and it cannot be 0.
5046 * @NL80211_NAN_FUNC_TERM_REASON: NAN function termination reason.
5047 * See &enum nl80211_nan_func_term_reason.
5048 *
5049 * @NUM_NL80211_NAN_FUNC_ATTR: internal
5050 * @NL80211_NAN_FUNC_ATTR_MAX: highest NAN function attribute
5051 */
5052enum nl80211_nan_func_attributes {
5053 __NL80211_NAN_FUNC_INVALID,
5054 NL80211_NAN_FUNC_TYPE,
5055 NL80211_NAN_FUNC_SERVICE_ID,
5056 NL80211_NAN_FUNC_PUBLISH_TYPE,
5057 NL80211_NAN_FUNC_PUBLISH_BCAST,
5058 NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE,
5059 NL80211_NAN_FUNC_FOLLOW_UP_ID,
5060 NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID,
5061 NL80211_NAN_FUNC_FOLLOW_UP_DEST,
5062 NL80211_NAN_FUNC_CLOSE_RANGE,
5063 NL80211_NAN_FUNC_TTL,
5064 NL80211_NAN_FUNC_SERVICE_INFO,
5065 NL80211_NAN_FUNC_SRF,
5066 NL80211_NAN_FUNC_RX_MATCH_FILTER,
5067 NL80211_NAN_FUNC_TX_MATCH_FILTER,
5068 NL80211_NAN_FUNC_INSTANCE_ID,
5069 NL80211_NAN_FUNC_TERM_REASON,
5070
5071 /* keep last */
5072 NUM_NL80211_NAN_FUNC_ATTR,
5073 NL80211_NAN_FUNC_ATTR_MAX = NUM_NL80211_NAN_FUNC_ATTR - 1
5074};
5075
5076/**
5077 * enum nl80211_nan_srf_attributes - NAN Service Response filter attributes
5078 * @__NL80211_NAN_SRF_INVALID: invalid
5079 * @NL80211_NAN_SRF_INCLUDE: present if the include bit of the SRF is set.
5080 * This is a flag.
5081 * @NL80211_NAN_SRF_BF: Bloom Filter. Present if and only if
5082 * &NL80211_NAN_SRF_MAC_ADDRS isn't present. This attribute is binary.
5083 * @NL80211_NAN_SRF_BF_IDX: index of the Bloom Filter. Mandatory if
5084 * &NL80211_NAN_SRF_BF is present. This is a u8.
5085 * @NL80211_NAN_SRF_MAC_ADDRS: list of MAC addresses for the SRF. Present if
5086 * and only if &NL80211_NAN_SRF_BF isn't present. This is a nested
5087 * attribute. Each nested attribute is a MAC address.
5088 * @NUM_NL80211_NAN_SRF_ATTR: internal
5089 * @NL80211_NAN_SRF_ATTR_MAX: highest NAN SRF attribute
5090 */
5091enum nl80211_nan_srf_attributes {
5092 __NL80211_NAN_SRF_INVALID,
5093 NL80211_NAN_SRF_INCLUDE,
5094 NL80211_NAN_SRF_BF,
5095 NL80211_NAN_SRF_BF_IDX,
5096 NL80211_NAN_SRF_MAC_ADDRS,
5097
5098 /* keep last */
5099 NUM_NL80211_NAN_SRF_ATTR,
5100 NL80211_NAN_SRF_ATTR_MAX = NUM_NL80211_NAN_SRF_ATTR - 1,
5101};
5102
5103/**
5104 * enum nl80211_nan_match_attributes - NAN match attributes
5105 * @__NL80211_NAN_MATCH_INVALID: invalid
5106 * @NL80211_NAN_MATCH_FUNC_LOCAL: the local function that had the
5107 * match. This is a nested attribute.
5108 * See &enum nl80211_nan_func_attributes.
5109 * @NL80211_NAN_MATCH_FUNC_PEER: the peer function
5110 * that caused the match. This is a nested attribute.
5111 * See &enum nl80211_nan_func_attributes.
5112 *
5113 * @NUM_NL80211_NAN_MATCH_ATTR: internal
5114 * @NL80211_NAN_MATCH_ATTR_MAX: highest NAN match attribute
5115 */
5116enum nl80211_nan_match_attributes {
5117 __NL80211_NAN_MATCH_INVALID,
5118 NL80211_NAN_MATCH_FUNC_LOCAL,
5119 NL80211_NAN_MATCH_FUNC_PEER,
5120
5121 /* keep last */
5122 NUM_NL80211_NAN_MATCH_ATTR,
5123 NL80211_NAN_MATCH_ATTR_MAX = NUM_NL80211_NAN_MATCH_ATTR - 1
5124};
5125
4858#endif /* __LINUX_NL80211_H */ 5126#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 54c3b4f4aceb..59ed3992c760 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -605,13 +605,13 @@ struct ovs_action_push_mpls {
605 * @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set 605 * @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set
606 * (but it will not be set in the 802.1Q header that is pushed). 606 * (but it will not be set in the 802.1Q header that is pushed).
607 * 607 *
608 * The @vlan_tpid value is typically %ETH_P_8021Q. The only acceptable TPID 608 * The @vlan_tpid value is typically %ETH_P_8021Q or %ETH_P_8021AD.
609 * values are those that the kernel module also parses as 802.1Q headers, to 609 * The only acceptable TPID values are those that the kernel module also parses
610 * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN 610 * as 802.1Q or 802.1AD headers, to prevent %OVS_ACTION_ATTR_PUSH_VLAN followed
611 * from having surprising results. 611 * by %OVS_ACTION_ATTR_POP_VLAN from having surprising results.
612 */ 612 */
613struct ovs_action_push_vlan { 613struct ovs_action_push_vlan {
614 __be16 vlan_tpid; /* 802.1Q TPID. */ 614 __be16 vlan_tpid; /* 802.1Q or 802.1ad TPID. */
615 __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */ 615 __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */
616}; 616};
617 617
@@ -721,9 +721,10 @@ enum ovs_nat_attr {
721 * is copied from the value to the packet header field, rest of the bits are 721 * is copied from the value to the packet header field, rest of the bits are
722 * left unchanged. The non-masked value bits must be passed in as zeroes. 722 * left unchanged. The non-masked value bits must be passed in as zeroes.
723 * Masking is not supported for the %OVS_KEY_ATTR_TUNNEL attribute. 723 * Masking is not supported for the %OVS_KEY_ATTR_TUNNEL attribute.
724 * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the 724 * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q or 802.1ad header
725 * packet. 725 * onto the packet.
726 * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet. 726 * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q or 802.1ad header
727 * from the packet.
727 * @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in 728 * @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in
728 * the nested %OVS_SAMPLE_ATTR_* attributes. 729 * the nested %OVS_SAMPLE_ATTR_* attributes.
729 * @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the 730 * @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index d1c1ccaba787..8fd715f806a2 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -396,6 +396,7 @@ enum {
396 TCA_BPF_FD, 396 TCA_BPF_FD,
397 TCA_BPF_NAME, 397 TCA_BPF_NAME,
398 TCA_BPF_FLAGS, 398 TCA_BPF_FLAGS,
399 TCA_BPF_FLAGS_GEN,
399 __TCA_BPF_MAX, 400 __TCA_BPF_MAX,
400}; 401};
401 402
@@ -428,6 +429,24 @@ enum {
428 TCA_FLOWER_KEY_UDP_DST, /* be16 */ 429 TCA_FLOWER_KEY_UDP_DST, /* be16 */
429 430
430 TCA_FLOWER_FLAGS, 431 TCA_FLOWER_FLAGS,
432 TCA_FLOWER_KEY_VLAN_ID, /* be16 */
433 TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */
434 TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
435
436 TCA_FLOWER_KEY_ENC_KEY_ID, /* be32 */
437 TCA_FLOWER_KEY_ENC_IPV4_SRC, /* be32 */
438 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,/* be32 */
439 TCA_FLOWER_KEY_ENC_IPV4_DST, /* be32 */
440 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,/* be32 */
441 TCA_FLOWER_KEY_ENC_IPV6_SRC, /* struct in6_addr */
442 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,/* struct in6_addr */
443 TCA_FLOWER_KEY_ENC_IPV6_DST, /* struct in6_addr */
444 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,/* struct in6_addr */
445
446 TCA_FLOWER_KEY_TCP_SRC_MASK, /* be16 */
447 TCA_FLOWER_KEY_TCP_DST_MASK, /* be16 */
448 TCA_FLOWER_KEY_UDP_SRC_MASK, /* be16 */
449 TCA_FLOWER_KEY_UDP_DST_MASK, /* be16 */
431 __TCA_FLOWER_MAX, 450 __TCA_FLOWER_MAX,
432}; 451};
433 452
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 2382eed50278..df7451d35131 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -792,6 +792,8 @@ enum {
792 792
793 TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */ 793 TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
794 794
795 TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
796
795 __TCA_FQ_MAX 797 __TCA_FQ_MAX
796}; 798};
797 799
@@ -809,7 +811,7 @@ struct tc_fq_qd_stats {
809 __u32 flows; 811 __u32 flows;
810 __u32 inactive_flows; 812 __u32 inactive_flows;
811 __u32 throttled_flows; 813 __u32 throttled_flows;
812 __u32 pad; 814 __u32 unthrottle_latency_ns;
813}; 815};
814 816
815/* Heavy-Hitter Filter */ 817/* Heavy-Hitter Filter */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 25a9ad8bcef1..e7a31f830690 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -235,6 +235,7 @@ enum
235 LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */ 235 LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */
236 LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */ 236 LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */
237 LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */ 237 LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */
238 LINUX_MIB_TCPMD5FAILURE, /* TCPMD5Failure */
238 LINUX_MIB_SACKSHIFTED, 239 LINUX_MIB_SACKSHIFTED,
239 LINUX_MIB_SACKMERGED, 240 LINUX_MIB_SACKMERGED,
240 LINUX_MIB_SACKSHIFTFALLBACK, 241 LINUX_MIB_SACKSHIFTFALLBACK,
diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h
index 4ece02a77b9a..cd18360eca24 100644
--- a/include/uapi/linux/tc_act/tc_ife.h
+++ b/include/uapi/linux/tc_act/tc_ife.h
@@ -32,8 +32,9 @@ enum {
32#define IFE_META_HASHID 2 32#define IFE_META_HASHID 2
33#define IFE_META_PRIO 3 33#define IFE_META_PRIO 3
34#define IFE_META_QMAP 4 34#define IFE_META_QMAP 4
35#define IFE_META_TCINDEX 5
35/*Can be overridden at runtime by module option*/ 36/*Can be overridden at runtime by module option*/
36#define __IFE_META_MAX 5 37#define __IFE_META_MAX 6
37#define IFE_META_MAX (__IFE_META_MAX - 1) 38#define IFE_META_MAX (__IFE_META_MAX - 1)
38 39
39#endif 40#endif
diff --git a/include/uapi/linux/tc_act/tc_skbmod.h b/include/uapi/linux/tc_act/tc_skbmod.h
new file mode 100644
index 000000000000..10fc07da6c69
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_skbmod.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (c) 2016, Jamal Hadi Salim
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8*/
9
10#ifndef __LINUX_TC_SKBMOD_H
11#define __LINUX_TC_SKBMOD_H
12
13#include <linux/pkt_cls.h>
14
15#define TCA_ACT_SKBMOD 15
16
17#define SKBMOD_F_DMAC 0x1
18#define SKBMOD_F_SMAC 0x2
19#define SKBMOD_F_ETYPE 0x4
20#define SKBMOD_F_SWAPMAC 0x8
21
22struct tc_skbmod {
23 tc_gen;
24 __u64 flags;
25};
26
27enum {
28 TCA_SKBMOD_UNSPEC,
29 TCA_SKBMOD_TM,
30 TCA_SKBMOD_PARMS,
31 TCA_SKBMOD_DMAC,
32 TCA_SKBMOD_SMAC,
33 TCA_SKBMOD_ETYPE,
34 TCA_SKBMOD_PAD,
35 __TCA_SKBMOD_MAX
36};
37#define TCA_SKBMOD_MAX (__TCA_SKBMOD_MAX - 1)
38
39#endif
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
new file mode 100644
index 000000000000..890106ff16e6
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
3 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#ifndef __LINUX_TC_TUNNEL_KEY_H
12#define __LINUX_TC_TUNNEL_KEY_H
13
14#include <linux/pkt_cls.h>
15
16#define TCA_ACT_TUNNEL_KEY 17
17
18#define TCA_TUNNEL_KEY_ACT_SET 1
19#define TCA_TUNNEL_KEY_ACT_RELEASE 2
20
21struct tc_tunnel_key {
22 tc_gen;
23 int t_action;
24};
25
26enum {
27 TCA_TUNNEL_KEY_UNSPEC,
28 TCA_TUNNEL_KEY_TM,
29 TCA_TUNNEL_KEY_PARMS,
30 TCA_TUNNEL_KEY_ENC_IPV4_SRC, /* be32 */
31 TCA_TUNNEL_KEY_ENC_IPV4_DST, /* be32 */
32 TCA_TUNNEL_KEY_ENC_IPV6_SRC, /* struct in6_addr */
33 TCA_TUNNEL_KEY_ENC_IPV6_DST, /* struct in6_addr */
34 TCA_TUNNEL_KEY_ENC_KEY_ID, /* be64 */
35 TCA_TUNNEL_KEY_PAD,
36 __TCA_TUNNEL_KEY_MAX,
37};
38
39#define TCA_TUNNEL_KEY_MAX (__TCA_TUNNEL_KEY_MAX - 1)
40
41#endif
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
index 31151ff6264f..bddb272b843f 100644
--- a/include/uapi/linux/tc_act/tc_vlan.h
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -16,6 +16,7 @@
16 16
17#define TCA_VLAN_ACT_POP 1 17#define TCA_VLAN_ACT_POP 1
18#define TCA_VLAN_ACT_PUSH 2 18#define TCA_VLAN_ACT_PUSH 2
19#define TCA_VLAN_ACT_MODIFY 3
19 20
20struct tc_vlan { 21struct tc_vlan {
21 tc_gen; 22 tc_gen;
@@ -29,6 +30,7 @@ enum {
29 TCA_VLAN_PUSH_VLAN_ID, 30 TCA_VLAN_PUSH_VLAN_ID,
30 TCA_VLAN_PUSH_VLAN_PROTOCOL, 31 TCA_VLAN_PUSH_VLAN_PROTOCOL,
31 TCA_VLAN_PAD, 32 TCA_VLAN_PAD,
33 TCA_VLAN_PUSH_VLAN_PRIORITY,
32 __TCA_VLAN_MAX, 34 __TCA_VLAN_MAX,
33}; 35};
34#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1) 36#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 482898fc433a..73ac0db487f8 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -167,6 +167,7 @@ struct tcp_info {
167 __u8 tcpi_backoff; 167 __u8 tcpi_backoff;
168 __u8 tcpi_options; 168 __u8 tcpi_options;
169 __u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4; 169 __u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;
170 __u8 tcpi_delivery_rate_app_limited:1;
170 171
171 __u32 tcpi_rto; 172 __u32 tcpi_rto;
172 __u32 tcpi_ato; 173 __u32 tcpi_ato;
@@ -211,6 +212,8 @@ struct tcp_info {
211 __u32 tcpi_min_rtt; 212 __u32 tcpi_min_rtt;
212 __u32 tcpi_data_segs_in; /* RFC4898 tcpEStatsDataSegsIn */ 213 __u32 tcpi_data_segs_in; /* RFC4898 tcpEStatsDataSegsIn */
213 __u32 tcpi_data_segs_out; /* RFC4898 tcpEStatsDataSegsOut */ 214 __u32 tcpi_data_segs_out; /* RFC4898 tcpEStatsDataSegsOut */
215
216 __u64 tcpi_delivery_rate;
214}; 217};
215 218
216/* for TCP_MD5SIG socket option */ 219/* for TCP_MD5SIG socket option */
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 5f3f6d09fb79..f9edd20fe9ba 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -59,6 +59,9 @@ enum {
59 TIPC_NL_MON_SET, 59 TIPC_NL_MON_SET,
60 TIPC_NL_MON_GET, 60 TIPC_NL_MON_GET,
61 TIPC_NL_MON_PEER_GET, 61 TIPC_NL_MON_PEER_GET,
62 TIPC_NL_PEER_REMOVE,
63 TIPC_NL_BEARER_ADD,
64 TIPC_NL_UDP_GET_REMOTEIP,
62 65
63 __TIPC_NL_CMD_MAX, 66 __TIPC_NL_CMD_MAX,
64 TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1 67 TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1
@@ -98,6 +101,7 @@ enum {
98 TIPC_NLA_UDP_UNSPEC, 101 TIPC_NLA_UDP_UNSPEC,
99 TIPC_NLA_UDP_LOCAL, /* sockaddr_storage */ 102 TIPC_NLA_UDP_LOCAL, /* sockaddr_storage */
100 TIPC_NLA_UDP_REMOTE, /* sockaddr_storage */ 103 TIPC_NLA_UDP_REMOTE, /* sockaddr_storage */
104 TIPC_NLA_UDP_MULTI_REMOTEIP, /* flag */
101 105
102 __TIPC_NLA_UDP_MAX, 106 __TIPC_NLA_UDP_MAX,
103 TIPC_NLA_UDP_MAX = __TIPC_NLA_UDP_MAX - 1 107 TIPC_NLA_UDP_MAX = __TIPC_NLA_UDP_MAX - 1
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 143338978b48..1fc62b239f1b 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -298,7 +298,7 @@ enum xfrm_attr_type_t {
298 XFRMA_ALG_AUTH_TRUNC, /* struct xfrm_algo_auth */ 298 XFRMA_ALG_AUTH_TRUNC, /* struct xfrm_algo_auth */
299 XFRMA_MARK, /* struct xfrm_mark */ 299 XFRMA_MARK, /* struct xfrm_mark */
300 XFRMA_TFCPAD, /* __u32 */ 300 XFRMA_TFCPAD, /* __u32 */
301 XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_esn */ 301 XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_state_esn */
302 XFRMA_SA_EXTRA_FLAGS, /* __u32 */ 302 XFRMA_SA_EXTRA_FLAGS, /* __u32 */
303 XFRMA_PROTO, /* __u8 */ 303 XFRMA_PROTO, /* __u8 */
304 XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */ 304 XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */