author		Linus Torvalds <torvalds@linux-foundation.org>	2016-10-05 13:11:24 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-05 13:11:24 -0400
commit		687ee0ad4e897e29f4b41f7a20c866d74c5e0660 (patch)
tree		b31a2af35c24a54823674cdd126993b80daeac67 /include/linux
parent		3ddf40e8c31964b744ff10abb48c8e36a83ec6e7 (diff)
parent		03a1eabc3f54469abd4f1784182851b2e29630cc (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) BBR TCP congestion control, from Neal Cardwell, Yuchung Cheng and
    co. at Google. https://lwn.net/Articles/701165/

 2) Do TCP Small Queues for retransmits, from Eric Dumazet.

 3) Support collect_md mode for all IPV4 and IPV6 tunnels, from Alexei
    Starovoitov.

 4) Allow cls_flower to classify packets in ip tunnels, from Amir Vadai.

 5) Support DSA tagging in older mv88e6xxx switches, from Andrew Lunn.

 6) Support GMAC protocol in iwlwifi mvm, from Ayala Beker.

 7) Support ndo_poll_controller in mlx5, from Calvin Owens.

 8) Move VRF processing to an output hook and allow l3mdev to be
    loopback, from David Ahern.

 9) Support SOCK_DESTROY for UDP sockets. Also from David Ahern.

10) Congestion control in RXRPC, from David Howells.

11) Support geneve RX offload in ixgbe, from Emil Tantilov.

12) When hitting pressure for new incoming TCP data SKBs, perform a
    partial rather than a full purge of the OFO queue (which could be
    huge). From Eric Dumazet.

13) Convert XFRM state and policy lookups to RCU, from Florian Westphal.

14) Support RX network flow classification to igb, from Gangfeng Huang.

15) Hardware offloading of eBPF in nfp driver, from Jakub Kicinski.

16) New skbmod packet action, from Jamal Hadi Salim.

17) Remove some inefficiencies in snmp proc output, from Jia He.

18) Add FIB notifications to properly propagate route changes to
    hardware which is doing forwarding offloading. From Jiri Pirko.

19) New dsa driver for qca8xxx chips, from John Crispin.

20) Implement RFC7559 ipv6 router solicitation backoff, from Maciej
    Żenczykowski.

21) Add L3 mode to ipvlan, from Mahesh Bandewar.

22) Support 802.1ad in mlx4, from Moshe Shemesh.

23) Support hardware LRO in mediatek driver, from Nelson Chang.

24) Add TC offloading to mlx5, from Or Gerlitz.

25) Convert various drivers to ethtool ksettings interfaces, from
    Philippe Reynes.

26) TX max rate limiting for cxgb4, from Rahul Lakkireddy.

27) NAPI support for ath10k, from Rajkumar Manoharan.

28) Support XDP in mlx5, from Rana Shahout and Saeed Mahameed.

29) UDP replicast support in TIPC, from Richard Alpe.

30) Per-queue statistics for qed driver, from Sudarsana Reddy Kalluru.

31) Support BQL in thunderx driver, from Sunil Goutham.

32) TSO support in alx driver, from Tobias Regnery.

33) Add stream parser engine and use it in kcm.

34) Support async DHCP replies in ipconfig module, from Uwe
    Kleine-König.

35) DSA port fast aging for mv88e6xxx driver, from Vivien Didelot.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1715 commits)
  mlxsw: switchx2: Fix misuse of hard_header_len
  mlxsw: spectrum: Fix misuse of hard_header_len
  net/faraday: Stop NCSI device on shutdown
  net/ncsi: Introduce ncsi_stop_dev()
  net/ncsi: Rework the channel monitoring
  net/ncsi: Allow to extend NCSI request properties
  net/ncsi: Rework request index allocation
  net/ncsi: Don't probe on the reserved channel ID (0x1f)
  net/ncsi: Introduce NCSI_RESERVED_CHANNEL
  net/ncsi: Avoid unused-value build warning from ia64-linux-gcc
  net: Add netdev all_adj_list refcnt propagation to fix panic
  net: phy: Add Edge-rate driver for Microsemi PHYs.
  vmxnet3: Wake queue from reset work
  i40e: avoid NULL pointer dereference and recursive errors on early PCI error
  qed: Add RoCE ll2 & GSI support
  qed: Add support for memory registeration verbs
  qed: Add support for QP verbs
  qed: PD,PKEY and CQ verb support
  qed: Add support for RoCE hw init
  qede: Add qedr framework
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bcma/bcma.h | 3
-rw-r--r--  include/linux/bcma/bcma_regs.h | 1
-rw-r--r--  include/linux/bitfield.h | 93
-rw-r--r--  include/linux/bpf.h | 15
-rw-r--r--  include/linux/bpf_verifier.h | 102
-rw-r--r--  include/linux/bug.h | 3
-rw-r--r--  include/linux/cgroup.h | 23
-rw-r--r--  include/linux/filter.h | 64
-rw-r--r--  include/linux/hyperv.h | 7
-rw-r--r--  include/linux/if_bridge.h | 1
-rw-r--r--  include/linux/if_link.h | 1
-rw-r--r--  include/linux/if_vlan.h | 34
-rw-r--r--  include/linux/inet_diag.h | 4
-rw-r--r--  include/linux/ipv6.h | 1
-rw-r--r--  include/linux/ktime.h | 5
-rw-r--r--  include/linux/mlx4/cmd.h | 3
-rw-r--r--  include/linux/mlx4/device.h | 3
-rw-r--r--  include/linux/mlx4/qp.h | 2
-rw-r--r--  include/linux/mlx5/cq.h | 6
-rw-r--r--  include/linux/mlx5/device.h | 441
-rw-r--r--  include/linux/mlx5/driver.h | 35
-rw-r--r--  include/linux/mlx5/fs.h | 6
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 297
-rw-r--r--  include/linux/mlx5/port.h | 40
-rw-r--r--  include/linux/mlx5/qp.h | 128
-rw-r--r--  include/linux/mlx5/vport.h | 2
-rw-r--r--  include/linux/mmc/sdio_ids.h | 1
-rw-r--r--  include/linux/net.h | 6
-rw-r--r--  include/linux/netdevice.h | 35
-rw-r--r--  include/linux/netfilter.h | 63
-rw-r--r--  include/linux/netfilter/nf_conntrack_common.h | 4
-rw-r--r--  include/linux/netfilter/nf_conntrack_proto_gre.h | 64
-rw-r--r--  include/linux/netfilter_ingress.h | 18
-rw-r--r--  include/linux/perf_event.h | 9
-rw-r--r--  include/linux/phy.h | 3
-rw-r--r--  include/linux/ptp_clock_kernel.h | 5
-rw-r--r--  include/linux/qed/common_hsi.h | 359
-rw-r--r--  include/linux/qed/eth_common.h | 155
-rw-r--r--  include/linux/qed/iscsi_common.h | 28
-rw-r--r--  include/linux/qed/qed_chain.h | 13
-rw-r--r--  include/linux/qed/qed_eth_if.h | 3
-rw-r--r--  include/linux/qed/qed_if.h | 36
-rw-r--r--  include/linux/qed/qed_ll2_if.h | 139
-rw-r--r--  include/linux/qed/qed_roce_if.h | 604
-rw-r--r--  include/linux/qed/qede_roce.h | 88
-rw-r--r--  include/linux/qed/rdma_common.h | 1
-rw-r--r--  include/linux/qed/tcp_common.h | 16
-rw-r--r--  include/linux/rhashtable.h | 543
-rw-r--r--  include/linux/rtnetlink.h | 2
-rw-r--r--  include/linux/skbuff.h | 73
-rw-r--r--  include/linux/sysctl.h | 4
-rw-r--r--  include/linux/tcp.h | 21
-rw-r--r--  include/linux/win_minmax.h | 37
53 files changed, 2680 insertions, 970 deletions
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 3db25df396cb..8eeedb2db924 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -205,6 +205,9 @@ struct bcma_host_ops {
 #define BCMA_PKG_ID_BCM4709	0
 #define BCMA_CHIP_ID_BCM47094	53030
 #define BCMA_CHIP_ID_BCM53018	53018
+#define BCMA_CHIP_ID_BCM53573	53573
+#define BCMA_PKG_ID_BCM53573	0
+#define BCMA_PKG_ID_BCM47189	1
 
 /* Board types (on PCI usually equals to the subsystem dev id) */
 /* BCM4313 */
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index 4901fb358b07..9986f8288d01 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -24,6 +24,7 @@
 #define  BCMA_CLKCTLST_4328A0_HAVEALP	0x00020000 /* 4328a0 has reversed bits */
 
 /* Agent registers (common for every core) */
+#define BCMA_OOB_SEL_OUT_A30		0x0100
 #define BCMA_IOCTL			0x0408 /* IO control */
 #define  BCMA_IOCTL_CLK			0x0001
 #define  BCMA_IOCTL_FGC			0x0002
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
new file mode 100644
index 000000000000..f6505d83069d
--- /dev/null
+++ b/include/linux/bitfield.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BITFIELD_H
+#define _LINUX_BITFIELD_H
+
+#include <linux/bug.h>
+
+/*
+ * Bitfield access macros
+ *
+ * FIELD_{GET,PREP} macros take as first parameter shifted mask
+ * from which they extract the base mask and shift amount.
+ * Mask must be a compilation time constant.
+ *
+ * Example:
+ *
+ *  #define REG_FIELD_A  GENMASK(6, 0)
+ *  #define REG_FIELD_B  BIT(7)
+ *  #define REG_FIELD_C  GENMASK(15, 8)
+ *  #define REG_FIELD_D  GENMASK(31, 16)
+ *
+ * Get:
+ *  a = FIELD_GET(REG_FIELD_A, reg);
+ *  b = FIELD_GET(REG_FIELD_B, reg);
+ *
+ * Set:
+ *  reg = FIELD_PREP(REG_FIELD_A, 1) |
+ *	  FIELD_PREP(REG_FIELD_B, 0) |
+ *	  FIELD_PREP(REG_FIELD_C, c) |
+ *	  FIELD_PREP(REG_FIELD_D, 0x40);
+ *
+ * Modify:
+ *  reg &= ~REG_FIELD_C;
+ *  reg |= FIELD_PREP(REG_FIELD_C, c);
+ */
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx)			\
+	({								\
+		BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask),		\
+				 _pfx "mask is not constant");		\
+		BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero");	\
+		BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ?		\
+				 ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
+				 _pfx "value too large for the field"); \
+		BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull,		\
+				 _pfx "type of reg too small for mask"); \
+		__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) +			\
+					      (1ULL << __bf_shf(_mask))); \
+	})
+
+/**
+ * FIELD_PREP() - prepare a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val:  value to put in the field
+ *
+ * FIELD_PREP() masks and shifts up the value. The result should
+ * be combined with other fields of the bitfield using logical OR.
+ */
+#define FIELD_PREP(_mask, _val)						\
+	({								\
+		__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: ");	\
+		((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask);	\
+	})
+
+/**
+ * FIELD_GET() - extract a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_reg:  32bit value of entire bitfield
+ *
+ * FIELD_GET() extracts the field specified by @_mask from the
+ * bitfield passed in as @_reg by masking and shifting it down.
+ */
+#define FIELD_GET(_mask, _reg)						\
+	({								\
+		__BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: ");	\
+		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask));	\
+	})
+
+#endif
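The comment block above already sketches the idiom; as a self-contained illustration (CTRL_SPEED and CTRL_ENABLE are hypothetical fields invented for this example, not part of the patch):

    #include <linux/bitfield.h>
    #include <linux/bitops.h>	/* GENMASK(), BIT() */

    #define CTRL_SPEED	GENMASK(3, 0)	/* hypothetical 4-bit field */
    #define CTRL_ENABLE	BIT(4)		/* hypothetical single-bit flag */

    u32 ctrl  = FIELD_PREP(CTRL_SPEED, 7) | FIELD_PREP(CTRL_ENABLE, 1);
    u32 speed = FIELD_GET(CTRL_SPEED, ctrl);	/* speed == 7 */

Because the mask must be a compile-time constant, an out-of-range constant value such as FIELD_PREP(CTRL_SPEED, 16) is rejected at build time by __BF_FIELD_CHECK() rather than silently truncated.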
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 11134238417d..c201017b5730 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -96,6 +96,7 @@ enum bpf_return_type {
 struct bpf_func_proto {
 	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 	bool gpl_only;
+	bool pkt_access;
 	enum bpf_return_type ret_type;
 	enum bpf_arg_type arg1_type;
 	enum bpf_arg_type arg2_type;
@@ -138,6 +139,13 @@ enum bpf_reg_type {
 	 */
 	PTR_TO_PACKET,
 	PTR_TO_PACKET_END,	 /* skb->data + headlen */
+
+	/* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map
+	 * elem value. We only allow this if we can statically verify that
+	 * access from this register are going to fall within the size of the
+	 * map element.
+	 */
+	PTR_TO_MAP_VALUE_ADJ,
 };
 
 struct bpf_prog;
@@ -151,7 +159,8 @@ struct bpf_verifier_ops {
 	 */
 	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
 				enum bpf_reg_type *reg_type);
-
+	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
+			    const struct bpf_prog *prog);
 	u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
 				  int src_reg, int ctx_off,
 				  struct bpf_insn *insn, struct bpf_prog *prog);
@@ -297,6 +306,10 @@ static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
 static inline void bpf_prog_put(struct bpf_prog *prog)
 {
 }
+static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 /* verifier prototypes for helper functions called from eBPF programs */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
new file mode 100644
index 000000000000..7035b997aaa5
--- /dev/null
+++ b/include/linux/bpf_verifier.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _LINUX_BPF_VERIFIER_H
+#define _LINUX_BPF_VERIFIER_H 1
+
+#include <linux/bpf.h> /* for enum bpf_reg_type */
+#include <linux/filter.h> /* for MAX_BPF_STACK */
+
+ /* Just some arbitrary values so we can safely do math without overflowing and
+  * are obviously wrong for any sort of memory access.
+  */
+#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
+#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024)
+
+struct bpf_reg_state {
+	enum bpf_reg_type type;
+	/*
+	 * Used to determine if any memory access using this register will
+	 * result in a bad access.
+	 */
+	u64 min_value, max_value;
+	union {
+		/* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
+		s64 imm;
+
+		/* valid when type == PTR_TO_PACKET* */
+		struct {
+			u32 id;
+			u16 off;
+			u16 range;
+		};
+
+		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
+		 *   PTR_TO_MAP_VALUE_OR_NULL
+		 */
+		struct bpf_map *map_ptr;
+	};
+};
+
+enum bpf_stack_slot_type {
+	STACK_INVALID,    /* nothing was stored in this stack slot */
+	STACK_SPILL,      /* register spilled into stack */
+	STACK_MISC	  /* BPF program wrote some data into this slot */
+};
+
+#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
+
+/* state of the program:
+ * type of all registers and stack info
+ */
+struct bpf_verifier_state {
+	struct bpf_reg_state regs[MAX_BPF_REG];
+	u8 stack_slot_type[MAX_BPF_STACK];
+	struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
+};
+
+/* linked list of verifier states used to prune search */
+struct bpf_verifier_state_list {
+	struct bpf_verifier_state state;
+	struct bpf_verifier_state_list *next;
+};
+
+struct bpf_insn_aux_data {
+	enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
+};
+
+#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+
+struct bpf_verifier_env;
+struct bpf_ext_analyzer_ops {
+	int (*insn_hook)(struct bpf_verifier_env *env,
+			 int insn_idx, int prev_insn_idx);
+};
+
+/* single container for all structs
+ * one verifier_env per bpf_check() call
+ */
+struct bpf_verifier_env {
+	struct bpf_prog *prog;		/* eBPF program being verified */
+	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
+	int stack_size;			/* number of states to be processed */
+	struct bpf_verifier_state cur_state; /* current verifier state */
+	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
+	const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
+	void *analyzer_priv; /* pointer to external analyzer's private data */
+	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
+	u32 used_map_cnt;		/* number of used maps */
+	u32 id_gen;			/* used to generate unique reg IDs */
+	bool allow_ptr_leaks;
+	bool seen_direct_write;
+	bool varlen_map_value_access;
+	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
+};
+
+int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
+		 void *priv);
+
+#endif /* _LINUX_BPF_VERIFIER_H */
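The analyzer hook is the piece the nfp eBPF offload elsewhere in this series plugs into so it can observe each instruction as the verifier walks the program. A minimal client sketch, assuming a hypothetical my_insn_hook callback and caller-provided prog/priv:

    static int my_insn_hook(struct bpf_verifier_env *env,
			    int insn_idx, int prev_insn_idx)
    {
	    /* env->cur_state.regs[] is the verifier's view of all registers
	     * at this instruction; a nonzero return aborts verification.
	     */
	    return 0;
    }

    static const struct bpf_ext_analyzer_ops my_analyzer_ops = {
	    .insn_hook = my_insn_hook,
    };

    err = bpf_analyzer(prog, &my_analyzer_ops, priv);	/* priv lands in env->analyzer_priv */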
diff --git a/include/linux/bug.h b/include/linux/bug.h
index e51b0709e78d..292d6a10b0c2 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -13,6 +13,7 @@ enum bug_trap_type {
 struct pt_regs;
 
 #ifdef __CHECKER__
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
 #define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
 #define BUILD_BUG_ON_ZERO(e) (0)
 #define BUILD_BUG_ON_NULL(e) ((void*)0)
@@ -24,6 +25,8 @@ struct pt_regs;
 #else /* __CHECKER__ */
 
 /* Force a compilation error if a constant expression is not a power of 2 */
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n)	\
+	BUILD_BUG_ON(((n) & ((n) - 1)) != 0)
 #define BUILD_BUG_ON_NOT_POWER_OF_2(n)			\
 	BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
 
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 984f73b719a9..a4414a11eea7 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -497,6 +497,23 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
 	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
 }
 
+/**
+ * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
+ * @task: the task to be tested
+ * @ancestor: possible ancestor of @task's cgroup
+ *
+ * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
+ * It follows all the same rules as cgroup_is_descendant, and only applies
+ * to the default hierarchy.
+ */
+static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
+					       struct cgroup *ancestor)
+{
+	struct css_set *cset = task_css_set(task);
+
+	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
+}
+
 /* no synchronization, the result can only be used as a hint */
 static inline bool cgroup_is_populated(struct cgroup *cgrp)
 {
@@ -557,6 +574,7 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
 #else /* !CONFIG_CGROUPS */
 
 struct cgroup_subsys_state;
+struct cgroup;
 
 static inline void css_put(struct cgroup_subsys_state *css) {}
 static inline int cgroup_attach_task_all(struct task_struct *from,
@@ -574,6 +592,11 @@ static inline void cgroup_free(struct task_struct *p) {}
 static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
 
+static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
+					       struct cgroup *ancestor)
+{
+	return true;
+}
 #endif /* !CONFIG_CGROUPS */
 
 /*
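A short usage sketch of the new helper (cgrp and do_scoped_work() are hypothetical; the caller is assumed to already hold a reference on cgrp):

    /* true when current's css_set on the default hierarchy is at or below cgrp */
    if (task_under_cgroup_hierarchy(current, cgrp))
	    do_scoped_work();

Note that the !CONFIG_CGROUPS stub returns true, so callers degrade to treating every task as in scope when cgroups are compiled out.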
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a16439b99fd9..1f09c521adfe 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -314,6 +314,70 @@ struct bpf_prog_aux;
 	bpf_size;				\
 })
 
+#define BPF_SIZEOF(type)					\
+	({							\
+		const int __size = bytes_to_bpf_size(sizeof(type)); \
+		BUILD_BUG_ON(__size < 0);			\
+		__size;						\
+	})
+
+#define BPF_FIELD_SIZEOF(type, field)				\
+	({							\
+		const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
+		BUILD_BUG_ON(__size < 0);			\
+		__size;						\
+	})
+
+#define __BPF_MAP_0(m, v, ...) v
+#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
+#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
+#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
+#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
+#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)
+
+#define __BPF_REG_0(...) __BPF_PAD(5)
+#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
+#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
+#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
+#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
+#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)
+
+#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
+#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)
+
+#define __BPF_CAST(t, a)						       \
+	(__force t)							       \
+	(__force							       \
+	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long),     \
+				      (unsigned long)0, (t)0))) a
+#define __BPF_V void
+#define __BPF_N
+
+#define __BPF_DECL_ARGS(t, a) t   a
+#define __BPF_DECL_REGS(t, a) u64 a
+
+#define __BPF_PAD(n)							       \
+	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2,       \
+		  u64, __ur_3, u64, __ur_4, u64, __ur_5)
+
+#define BPF_CALL_x(x, name, ...)					       \
+	static __always_inline						       \
+	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__));  \
+	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));	       \
+	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))	       \
+	{								       \
+		return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
+	}								       \
+	static __always_inline						       \
+	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
+
+#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
+#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
+#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
+#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
+#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
+#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)
+
 #ifdef CONFIG_COMPAT
 /* A struct sock_filter is architecture independent. */
 struct compat_sock_fprog {
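BPF_CALL_x() lets a helper body be written with typed parameters while the macro emits the five-u64 wrapper that struct bpf_func_proto::func expects, casting each register into the declared type. A sketch of a two-argument helper in this style (bpf_my_lookup and its proto are hypothetical; the series converts the real helpers the same way):

    BPF_CALL_2(bpf_my_lookup, struct bpf_map *, map, void *, key)
    {
	    /* the body sees typed arguments; callers still pass u64 registers */
	    return (unsigned long)map->ops->map_lookup_elem(map, key);
    }

    static const struct bpf_func_proto bpf_my_lookup_proto = {
	    .func		= bpf_my_lookup,
	    .gpl_only	= false,
	    .ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	    .arg1_type	= ARG_CONST_MAP_PTR,
	    .arg2_type	= ARG_PTR_TO_MAP_KEY,
    };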
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index cd184bdca58f..6824556d37ed 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1169,6 +1169,13 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
 				      const char *mod_name);
 void vmbus_driver_unregister(struct hv_driver *hv_driver);
 
+static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
+{
+	const struct kobject *kobj = &device_obj->device.kobj;
+
+	return kobj->name;
+}
+
 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
 
 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
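vmbus_dev_name() simply exposes the device's kobject name; a one-line usage sketch (hdev is a hypothetical struct hv_device pointer, e.g. in a VMBus driver's probe path):

    pr_info("probing VMBus device %s\n", vmbus_dev_name(hdev));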
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index dcb89e3515db..c6587c01d951 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -45,6 +45,7 @@ struct br_ip_list {
 #define BR_PROXYARP		BIT(8)
 #define BR_LEARNING_SYNC	BIT(9)
 #define BR_PROXYARP_WIFI	BIT(10)
+#define BR_MCAST_FLOOD		BIT(11)
 
 #define BR_DEFAULT_AGEING_TIME	(300 * HZ)
 
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index f923d15b432c..0b17c585b5cd 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -25,5 +25,6 @@ struct ifla_vf_info {
 	__u32 max_tx_rate;
 	__u32 rss_query_en;
 	__u32 trusted;
+	__be16 vlan_proto;
 };
 #endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index a5f6ce6b578c..3319d97d789d 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -81,6 +81,7 @@ static inline bool is_vlan_dev(const struct net_device *dev)
 #define skb_vlan_tag_present(__skb)	((__skb)->vlan_tci & VLAN_TAG_PRESENT)
 #define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
 #define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
+#define skb_vlan_tag_get_prio(__skb)	((__skb)->vlan_tci & VLAN_PRIO_MASK)
 
 /**
  *	struct vlan_pcpu_stats - VLAN percpu rx/tx stats
@@ -271,6 +272,23 @@ static inline int vlan_get_encap_level(struct net_device *dev)
 }
 #endif
 
+/**
+ * eth_type_vlan - check for valid vlan ether type.
+ * @ethertype: ether type to check
+ *
+ * Returns true if the ether type is a vlan ether type.
+ */
+static inline bool eth_type_vlan(__be16 ethertype)
+{
+	switch (ethertype) {
+	case htons(ETH_P_8021Q):
+	case htons(ETH_P_8021AD):
+		return true;
+	default:
+		return false;
+	}
+}
+
 static inline bool vlan_hw_offload_capable(netdev_features_t features,
 					   __be16 proto)
 {
@@ -424,8 +442,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 {
 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
 
-	if (veth->h_vlan_proto != htons(ETH_P_8021Q) &&
-	    veth->h_vlan_proto != htons(ETH_P_8021AD))
+	if (!eth_type_vlan(veth->h_vlan_proto))
 		return -EINVAL;
 
 	*vlan_tci = ntohs(veth->h_vlan_TCI);
@@ -487,7 +504,7 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
 	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
 	 * ETH_HLEN otherwise
 	 */
-	if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+	if (eth_type_vlan(type)) {
 		if (vlan_depth) {
 			if (WARN_ON(vlan_depth < VLAN_HLEN))
 				return 0;
@@ -505,8 +522,7 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
 			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
 			type = vh->h_vlan_encapsulated_proto;
 			vlan_depth += VLAN_HLEN;
-		} while (type == htons(ETH_P_8021Q) ||
-			 type == htons(ETH_P_8021AD));
+		} while (eth_type_vlan(type));
 	}
 
 	if (depth)
@@ -571,8 +587,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
 static inline bool skb_vlan_tagged(const struct sk_buff *skb)
 {
 	if (!skb_vlan_tag_present(skb) &&
-	    likely(skb->protocol != htons(ETH_P_8021Q) &&
-		   skb->protocol != htons(ETH_P_8021AD)))
+	    likely(!eth_type_vlan(skb->protocol)))
 		return false;
 
 	return true;
@@ -592,15 +607,14 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
 	if (!skb_vlan_tag_present(skb)) {
 		struct vlan_ethhdr *veh;
 
-		if (likely(protocol != htons(ETH_P_8021Q) &&
-			   protocol != htons(ETH_P_8021AD)))
+		if (likely(!eth_type_vlan(protocol)))
 			return false;
 
 		veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
 	}
 
-	if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD))
+	if (!eth_type_vlan(protocol))
 		return false;
 
 	return true;
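The hunks above are mechanical substitutions of the new predicate; the resulting caller pattern is simply (mirroring __vlan_get_tag() above):

    if (eth_type_vlan(veth->h_vlan_proto))	/* 802.1Q or 802.1ad? */
	    tci = ntohs(veth->h_vlan_TCI);

Note that the argument is a __be16 ethertype already in network byte order, so fields such as skb->protocol or h_vlan_proto can be passed without conversion.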
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index feb04ea20f11..65da430e260f 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -37,7 +37,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
 		      struct user_namespace *user_ns,
 		      u32 pid, u32 seq, u16 nlmsg_flags,
-		      const struct nlmsghdr *unlh);
+		      const struct nlmsghdr *unlh, bool net_admin);
 void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
 			 struct netlink_callback *cb,
 			 const struct inet_diag_req_v2 *r,
@@ -56,7 +56,7 @@ void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
 
 int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
 			     struct inet_diag_msg *r, int ext,
-			     struct user_namespace *user_ns);
+			     struct user_namespace *user_ns, bool net_admin);
 
 extern int inet_diag_register(const struct inet_diag_handler *handler);
 extern void inet_diag_unregister(const struct inet_diag_handler *handler);
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index c6dbcd84a2c7..7e9a789be5e0 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -18,6 +18,7 @@ struct ipv6_devconf {
 	__s32		dad_transmits;
 	__s32		rtr_solicits;
 	__s32		rtr_solicit_interval;
+	__s32		rtr_solicit_max_interval;
 	__s32		rtr_solicit_delay;
 	__s32		force_mld_version;
 	__s32		mldv1_unsolicited_report_interval;
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 3ffc69ebe967..0fb7ffb1775f 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -238,6 +238,11 @@ static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
 	return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
 }
 
+static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
+{
+	return ktime_sub_ns(kt, msec * NSEC_PER_MSEC);
+}
+
 extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
 
 /**
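ktime_sub_ms() rounds out the existing ktime_sub_us()/ktime_sub_ns() family; usage mirrors its siblings (hard_deadline is a hypothetical ktime_t):

    ktime_t soft_deadline = ktime_sub_ms(hard_deadline, 200);	/* 200 ms earlier */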
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 116b284bc4ce..1f3568694a57 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -309,7 +309,8 @@ int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
 			  struct ifla_vf_stats *vf_stats);
 u32 mlx4_comm_get_version(void);
 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
-int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
+int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan,
+		     u8 qos, __be16 proto);
 int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
 		     int max_tx_rate);
 int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 42da3552f7cb..59b50d3eedb4 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -221,6 +221,7 @@ enum {
 	MLX4_DEV_CAP_FLAG2_ROCE_V1_V2		= 1ULL << 33,
 	MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER	= 1ULL << 34,
 	MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT	= 1ULL << 35,
+	MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP		= 1ULL << 36,
 };
 
 enum {
@@ -1371,6 +1372,8 @@ int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
 int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
 int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
+int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
+				      bool *vlan_offload_disabled);
 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index deaa2217214d..b4ee8f62ce8d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -160,6 +160,7 @@ struct mlx4_qp_path {
 
 enum { /* fl */
 	MLX4_FL_CV	= 1 << 6,
+	MLX4_FL_SV	= 1 << 5,
 	MLX4_FL_ETH_HIDE_CQE_VLAN	= 1 << 2,
 	MLX4_FL_ETH_SRC_CHECK_MC_LB	= 1 << 1,
 	MLX4_FL_ETH_SRC_CHECK_UC_LB	= 1 << 0,
@@ -267,6 +268,7 @@ enum {
 	MLX4_UPD_QP_PATH_MASK_FVL_RX			= 16 + 32,
 	MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB	= 18 + 32,
 	MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB	= 19 + 32,
+	MLX4_UPD_QP_PATH_MASK_SV			= 22 + 32,
 };
 
 enum { /* param3 */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 2566f6d6444f..7c3c0d3aca37 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -170,12 +170,12 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
 int mlx5_init_cq_table(struct mlx5_core_dev *dev);
 void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			struct mlx5_create_cq_mbox_in *in, int inlen);
+			u32 *in, int inlen);
 int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-		       struct mlx5_query_cq_mbox_out *out);
+		       u32 *out, int outlen);
 int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			struct mlx5_modify_cq_mbox_in *in, int in_sz);
+			u32 *in, int inlen);
 int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
 				   struct mlx5_core_cq *cq, u16 cq_period,
 				   u16 cq_max_count);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0b6d15cddb2f..77c141797152 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -198,19 +198,6 @@ enum {
 };
 
 enum {
-	MLX5_ACCESS_MODE_PA	= 0,
-	MLX5_ACCESS_MODE_MTT	= 1,
-	MLX5_ACCESS_MODE_KLM	= 2
-};
-
-enum {
-	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
-	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
-	MLX5_MKEY_BSF_EN	= 1 << 30,
-	MLX5_MKEY_LEN64		= 1 << 31,
-};
-
-enum {
 	MLX5_EN_RD	= (u64)1,
 	MLX5_EN_WR	= (u64)2
 };
@@ -411,33 +398,6 @@ enum {
 	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
 };
 
-struct mlx5_inbox_hdr {
-	__be16		opcode;
-	u8		rsvd[4];
-	__be16		opmod;
-};
-
-struct mlx5_outbox_hdr {
-	u8		status;
-	u8		rsvd[3];
-	__be32		syndrome;
-};
-
-struct mlx5_cmd_query_adapter_mbox_in {
-	struct mlx5_inbox_hdr hdr;
-	u8 rsvd[8];
-};
-
-struct mlx5_cmd_query_adapter_mbox_out {
-	struct mlx5_outbox_hdr hdr;
-	u8 rsvd0[24];
-	u8 intapin;
-	u8 rsvd1[13];
-	__be16 vsd_vendor_id;
-	u8 vsd[208];
-	u8 vsd_psid[16];
-};
-
 enum mlx5_odp_transport_cap_bits {
 	MLX5_ODP_SUPPORT_SEND	 = 1 << 31,
 	MLX5_ODP_SUPPORT_RECV	 = 1 << 30,
@@ -455,30 +415,6 @@ struct mlx5_odp_caps {
 	char reserved2[0xe4];
 };
 
-struct mlx5_cmd_init_hca_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd0[2];
-	__be16			profile;
-	u8			rsvd1[4];
-};
-
-struct mlx5_cmd_init_hca_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_cmd_teardown_hca_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd0[2];
-	__be16			profile;
-	u8			rsvd1[4];
-};
-
-struct mlx5_cmd_teardown_hca_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
 struct mlx5_cmd_layout {
 	u8		type;
 	u8		rsvd0[3];
@@ -494,7 +430,6 @@ struct mlx5_cmd_layout {
 	u8		status_own;
 };
 
-
 struct health_buffer {
 	__be32		assert_var[5];
 	__be32		rsvd0[3];
@@ -856,245 +791,15 @@ struct mlx5_cqe128 {
 	struct mlx5_cqe64	cqe64;
 };
 
-struct mlx5_srq_ctx {
-	u8			state_log_sz;
-	u8			rsvd0[3];
-	__be32			flags_xrcd;
-	__be32			pgoff_cqn;
-	u8			rsvd1[4];
-	u8			log_pg_sz;
-	u8			rsvd2[7];
-	__be32			pd;
-	__be16			lwm;
-	__be16			wqe_cnt;
-	u8			rsvd3[8];
-	__be64			db_record;
-};
-
-struct mlx5_create_srq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			input_srqn;
-	u8			rsvd0[4];
-	struct mlx5_srq_ctx	ctx;
-	u8			rsvd1[208];
-	__be64			pas[0];
-};
-
-struct mlx5_create_srq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			srqn;
-	u8			rsvd[4];
-};
-
-struct mlx5_destroy_srq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			srqn;
-	u8			rsvd[4];
-};
-
-struct mlx5_destroy_srq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_query_srq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			srqn;
-	u8			rsvd0[4];
-};
-
-struct mlx5_query_srq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[8];
-	struct mlx5_srq_ctx	ctx;
-	u8			rsvd1[32];
-	__be64			pas[0];
-};
-
-struct mlx5_arm_srq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			srqn;
-	__be16			rsvd;
-	__be16			lwm;
-};
-
-struct mlx5_arm_srq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_cq_context {
-	u8			status;
-	u8			cqe_sz_flags;
-	u8			st;
-	u8			rsvd3;
-	u8			rsvd4[6];
-	__be16			page_offset;
-	__be32			log_sz_usr_page;
-	__be16			cq_period;
-	__be16			cq_max_count;
-	__be16			rsvd20;
-	__be16			c_eqn;
-	u8			log_pg_sz;
-	u8			rsvd25[7];
-	__be32			last_notified_index;
-	__be32			solicit_producer_index;
-	__be32			consumer_counter;
-	__be32			producer_counter;
-	u8			rsvd48[8];
-	__be64			db_record_addr;
-};
-
-struct mlx5_create_cq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			input_cqn;
-	u8			rsvdx[4];
-	struct mlx5_cq_context	ctx;
-	u8			rsvd6[192];
-	__be64			pas[0];
-};
-
-struct mlx5_create_cq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			cqn;
-	u8			rsvd0[4];
-};
-
-struct mlx5_destroy_cq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			cqn;
-	u8			rsvd0[4];
-};
-
-struct mlx5_destroy_cq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[8];
-};
-
-struct mlx5_query_cq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			cqn;
-	u8			rsvd0[4];
-};
-
-struct mlx5_query_cq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[8];
-	struct mlx5_cq_context	ctx;
-	u8			rsvd6[16];
-	__be64			pas[0];
-};
-
-struct mlx5_modify_cq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			cqn;
-	__be32			field_select;
-	struct mlx5_cq_context	ctx;
-	u8			rsvd[192];
-	__be64			pas[0];
-};
-
-struct mlx5_modify_cq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_enable_hca_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_enable_hca_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_disable_hca_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_disable_hca_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_eq_context {
-	u8			status;
-	u8			ec_oi;
-	u8			st;
-	u8			rsvd2[7];
-	__be16			page_pffset;
-	__be32			log_sz_usr_page;
-	u8			rsvd3[7];
-	u8			intr;
-	u8			log_page_size;
-	u8			rsvd4[15];
-	__be32			consumer_counter;
-	__be32			produser_counter;
-	u8			rsvd5[16];
-};
-
-struct mlx5_create_eq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd0[3];
-	u8			input_eqn;
-	u8			rsvd1[4];
-	struct mlx5_eq_context	ctx;
-	u8			rsvd2[8];
-	__be64			events_mask;
-	u8			rsvd3[176];
-	__be64			pas[0];
-};
-
-struct mlx5_create_eq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[3];
-	u8			eq_number;
-	u8			rsvd1[4];
-};
-
-struct mlx5_destroy_eq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd0[3];
-	u8			eqn;
-	u8			rsvd1[4];
-};
-
-struct mlx5_destroy_eq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_map_eq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be64			mask;
-	u8			mu;
-	u8			rsvd0[2];
-	u8			eqn;
-	u8			rsvd1[24];
-};
-
-struct mlx5_map_eq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_query_eq_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd0[3];
-	u8			eqn;
-	u8			rsvd1[4];
-};
-
-struct mlx5_query_eq_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-	struct mlx5_eq_context	ctx;
+enum {
+	MLX5_MKEY_STATUS_FREE	= 1 << 6,
 };
 
 enum {
-	MLX5_MKEY_STATUS_FREE	= 1 << 6,
+	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
+	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
+	MLX5_MKEY_BSF_EN	= 1 << 30,
+	MLX5_MKEY_LEN64		= 1 << 31,
 };
 
 struct mlx5_mkey_seg {
@@ -1119,134 +824,12 @@ struct mlx5_mkey_seg {
 	u8	rsvd4[4];
 };
 
-struct mlx5_query_special_ctxs_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_query_special_ctxs_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			dump_fill_mkey;
-	__be32			reserved_lkey;
-};
-
-struct mlx5_create_mkey_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			input_mkey_index;
-	__be32			flags;
-	struct mlx5_mkey_seg	seg;
-	u8			rsvd1[16];
-	__be32			xlat_oct_act_size;
-	__be32			rsvd2;
-	u8			rsvd3[168];
-	__be64			pas[0];
-};
-
-struct mlx5_create_mkey_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			mkey;
-	u8			rsvd[4];
-};
-
-struct mlx5_destroy_mkey_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			mkey;
-	u8			rsvd[4];
-};
-
-struct mlx5_destroy_mkey_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_query_mkey_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			mkey;
-};
-
-struct mlx5_query_mkey_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be64			pas[0];
-};
-
-struct mlx5_modify_mkey_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			mkey;
-	__be64			pas[0];
-};
-
-struct mlx5_modify_mkey_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_dump_mkey_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-};
-
-struct mlx5_dump_mkey_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32			mkey;
-};
-
-struct mlx5_mad_ifc_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be16			remote_lid;
-	u8			rsvd0;
-	u8			port;
-	u8			rsvd1[4];
-	u8			data[256];
-};
-
-struct mlx5_mad_ifc_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-	u8			data[256];
-};
-
-struct mlx5_access_reg_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd0[2];
-	__be16			register_id;
-	__be32			arg;
-	__be32			data[0];
-};
-
-struct mlx5_access_reg_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-	__be32			data[0];
-};
-
 #define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
 
 enum {
 	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 << 0
 };
 
-struct mlx5_allocate_psv_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			npsv_pd;
-	__be32			rsvd_psv0;
-};
-
-struct mlx5_allocate_psv_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-	__be32			psv_idx[4];
-};
-
-struct mlx5_destroy_psv_in {
-	struct mlx5_inbox_hdr	hdr;
-	__be32			psv_number;
-	u8			rsvd[4];
-};
-
-struct mlx5_destroy_psv_out {
-	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
 enum {
 	VPORT_STATE_DOWN		= 0x0,
 	VPORT_STATE_UP			= 0x1,
@@ -1381,6 +964,18 @@ enum mlx5_cap_type {
 #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
 	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
 
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
 	MLX5_GET(flow_table_eswitch_cap, \
 		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ccea6fb16482..85c4786427e4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -49,10 +49,6 @@
 #include <linux/mlx5/srq.h>
 
 enum {
-	MLX5_RQ_BITMASK_VSD = 1 << 1,
-};
-
-enum {
 	MLX5_BOARD_ID_LEN = 64,
 	MLX5_MAX_NAME_LEN = 16,
 };
@@ -481,6 +477,7 @@ struct mlx5_fc_stats {
 };
 
 struct mlx5_eswitch;
+struct mlx5_lag;
 
 struct mlx5_rl_entry {
 	u32			rate;
@@ -554,6 +551,7 @@ struct mlx5_priv {
 	struct mlx5_flow_steering *steering;
 	struct mlx5_eswitch	*eswitch;
 	struct mlx5_core_sriov	sriov;
+	struct mlx5_lag		*lag;
 	unsigned long		pci_dev_data;
 	struct mlx5_fc_stats	fc_stats;
 	struct mlx5_rl_table	rl_table;
@@ -771,14 +769,15 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev);
 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
-int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
-int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
+
 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 		  int out_size);
 int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
 		     void *out, int out_size, mlx5_cmd_cbk_t callback,
 		     void *context);
+void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
+
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
 int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
@@ -807,15 +806,18 @@ int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
 		      u16 lwm, int is_srq);
 void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
 void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
+int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
+			     struct mlx5_core_mkey *mkey,
+			     u32 *in, int inlen,
+			     u32 *out, int outlen,
+			     mlx5_cmd_cbk_t callback, void *context);
 int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
 			  struct mlx5_core_mkey *mkey,
-			  struct mlx5_create_mkey_mbox_in *in, int inlen,
-			  mlx5_cmd_cbk_t callback, void *context,
-			  struct mlx5_create_mkey_mbox_out *out);
+			  u32 *in, int inlen);
 int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
 			   struct mlx5_core_mkey *mkey);
 int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
-			 struct mlx5_query_mkey_mbox_out *out, int outlen);
+			 u32 *out, int outlen);
 int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
 			     u32 *mkey);
 int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
@@ -826,8 +828,6 @@ void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
 int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
-int mlx5_sriov_init(struct mlx5_core_dev *dev);
-int mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
 				 s32 npages);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
@@ -865,7 +865,7 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
-		       struct mlx5_query_eq_mbox_out *out, int outlen);
+		       u32 *out, int outlen);
 int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
@@ -930,6 +930,8 @@ enum {
 struct mlx5_interface {
 	void *			(*add)(struct mlx5_core_dev *dev);
 	void			(*remove)(struct mlx5_core_dev *dev, void *context);
+	int			(*attach)(struct mlx5_core_dev *dev, void *context);
+	void			(*detach)(struct mlx5_core_dev *dev, void *context);
 	void			(*event)(struct mlx5_core_dev *dev, void *context,
 					 enum mlx5_dev_event event, unsigned long param);
 	void *			(*get_dev)(void *context);
@@ -942,6 +944,11 @@ int mlx5_register_interface(struct mlx5_interface *intf);
942void mlx5_unregister_interface(struct mlx5_interface *intf); 944void mlx5_unregister_interface(struct mlx5_interface *intf);
943int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); 945int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
944 946
947int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
948int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
949bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
950struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
951
945struct mlx5_profile { 952struct mlx5_profile {
946 u64 mask; 953 u64 mask;
947 u8 log_max_qp; 954 u8 log_max_qp;
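
The attach()/detach() pair added to struct mlx5_interface above lets a sub-driver be quiesced and re-activated without the heavier add()/remove() cycle, and the new mlx5_lag_* declarations expose bonding state to those consumers. A minimal sketch of a consumer, assuming a hypothetical foo_ driver; only the callback signatures, mlx5_lag_is_active() and the register/unregister calls come from this header:

#include <linux/mlx5/driver.h>
#include <linux/slab.h>

struct foo_ctx {
	bool lag_active;
};

static void *foo_add(struct mlx5_core_dev *dev)
{
	/* heavyweight setup: allocate the per-device context once */
	return kzalloc(sizeof(struct foo_ctx), GFP_KERNEL);
}

static void foo_remove(struct mlx5_core_dev *dev, void *context)
{
	kfree(context);
}

static int foo_attach(struct mlx5_core_dev *dev, void *context)
{
	struct foo_ctx *ctx = context;

	/* lightweight re-activation: the context from add() survives,
	 * e.g. across PCI error recovery or LAG reconfiguration */
	ctx->lag_active = mlx5_lag_is_active(dev);
	return 0;
}

static void foo_detach(struct mlx5_core_dev *dev, void *context)
{
	/* quiesce I/O without tearing the per-device context down */
}

static struct mlx5_interface foo_intf = {
	.add    = foo_add,
	.remove = foo_remove,
	.attach = foo_attach,
	.detach = foo_detach,
};

/* module init/exit would call mlx5_register_interface(&foo_intf)
 * and mlx5_unregister_interface(&foo_intf) respectively */

The split matters because add()/remove() own the context's lifetime, while attach()/detach() must leave it intact across the event that triggered them.
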
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index e036d6030867..93ebc5e21334 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -54,6 +54,7 @@ static inline void build_leftovers_ft_param(int *priority,
54 54
55enum mlx5_flow_namespace_type { 55enum mlx5_flow_namespace_type {
56 MLX5_FLOW_NAMESPACE_BYPASS, 56 MLX5_FLOW_NAMESPACE_BYPASS,
57 MLX5_FLOW_NAMESPACE_LAG,
57 MLX5_FLOW_NAMESPACE_OFFLOADS, 58 MLX5_FLOW_NAMESPACE_OFFLOADS,
58 MLX5_FLOW_NAMESPACE_ETHTOOL, 59 MLX5_FLOW_NAMESPACE_ETHTOOL,
59 MLX5_FLOW_NAMESPACE_KERNEL, 60 MLX5_FLOW_NAMESPACE_KERNEL,
@@ -62,6 +63,8 @@ enum mlx5_flow_namespace_type {
62 MLX5_FLOW_NAMESPACE_FDB, 63 MLX5_FLOW_NAMESPACE_FDB,
63 MLX5_FLOW_NAMESPACE_ESW_EGRESS, 64 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
64 MLX5_FLOW_NAMESPACE_ESW_INGRESS, 65 MLX5_FLOW_NAMESPACE_ESW_INGRESS,
66 MLX5_FLOW_NAMESPACE_SNIFFER_RX,
67 MLX5_FLOW_NAMESPACE_SNIFFER_TX,
65}; 68};
66 69
67struct mlx5_flow_table; 70struct mlx5_flow_table;
@@ -106,6 +109,9 @@ mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
106 int prio, 109 int prio,
107 int num_flow_table_entries, 110 int num_flow_table_entries,
108 u32 level, u16 vport); 111 u32 level, u16 vport);
112struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
113 struct mlx5_flow_namespace *ns,
114 int prio, u32 level);
109int mlx5_destroy_flow_table(struct mlx5_flow_table *ft); 115int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
110 116
111/* inbox should be set with the following values: 117/* inbox should be set with the following values:
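
The two sniffer namespaces and the LAG demux table plug into the same flow-steering API as the existing namespace types. A hedged sketch of creating the demux table, assuming mlx5_get_flow_namespace() as declared elsewhere in this header; the prio/level values are illustrative:

#include <linux/err.h>
#include <linux/mlx5/fs.h>

static struct mlx5_flow_table *foo_create_lag_demux(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_LAG);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	/* a single demux table at the root (prio 0, level 0) of the
	 * LAG namespace */
	return mlx5_create_lag_demux_flow_table(ns, 0, 0);
}
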
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index d1f9a581aca8..6045d4d58065 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -152,7 +152,7 @@ enum {
152 MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, 152 MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
153 MLX5_CMD_OP_ACCESS_REG = 0x805, 153 MLX5_CMD_OP_ACCESS_REG = 0x805,
154 MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, 154 MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
155 MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807, 155 MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
156 MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, 156 MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
157 MLX5_CMD_OP_MAD_IFC = 0x50d, 157 MLX5_CMD_OP_MAD_IFC = 0x50d,
158 MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, 158 MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
@@ -174,6 +174,12 @@ enum {
174 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b, 174 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
175 MLX5_CMD_OP_SET_WOL_ROL = 0x830, 175 MLX5_CMD_OP_SET_WOL_ROL = 0x830,
176 MLX5_CMD_OP_QUERY_WOL_ROL = 0x831, 176 MLX5_CMD_OP_QUERY_WOL_ROL = 0x831,
177 MLX5_CMD_OP_CREATE_LAG = 0x840,
178 MLX5_CMD_OP_MODIFY_LAG = 0x841,
179 MLX5_CMD_OP_QUERY_LAG = 0x842,
180 MLX5_CMD_OP_DESTROY_LAG = 0x843,
181 MLX5_CMD_OP_CREATE_VPORT_LAG = 0x844,
182 MLX5_CMD_OP_DESTROY_VPORT_LAG = 0x845,
177 MLX5_CMD_OP_CREATE_TIR = 0x900, 183 MLX5_CMD_OP_CREATE_TIR = 0x900,
178 MLX5_CMD_OP_MODIFY_TIR = 0x901, 184 MLX5_CMD_OP_MODIFY_TIR = 0x901,
179 MLX5_CMD_OP_DESTROY_TIR = 0x902, 185 MLX5_CMD_OP_DESTROY_TIR = 0x902,
@@ -212,6 +218,8 @@ enum {
212 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, 218 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
213 MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, 219 MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
214 MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, 220 MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
221 MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d,
222 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
215 MLX5_CMD_OP_MAX 223 MLX5_CMD_OP_MAX
216}; 224};
217 225
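
Each new opcode follows the standard mlx5_ifc command pattern: build the *_in layout with MLX5_SET(), execute with mlx5_cmd_exec(), and read the *_out layout with MLX5_GET(). A hedged sketch for CREATE_LAG (the lagc context layout appears near the end of this file); the port-affinity values and the foo_ name are illustrative:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

static int foo_create_lag(struct mlx5_core_dev *dev, u8 remap1, u8 remap2)
{
	u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
	/* map the two TX affinity slots onto the two physical ports */
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap2);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
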
@@ -281,7 +289,9 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
281 u8 modify_root[0x1]; 289 u8 modify_root[0x1];
282 u8 identified_miss_table_mode[0x1]; 290 u8 identified_miss_table_mode[0x1];
283 u8 flow_table_modify[0x1]; 291 u8 flow_table_modify[0x1];
284 u8 reserved_at_7[0x19]; 292 u8 encap[0x1];
293 u8 decap[0x1];
294 u8 reserved_at_9[0x17];
285 295
286 u8 reserved_at_20[0x2]; 296 u8 reserved_at_20[0x2];
287 u8 log_max_ft_size[0x6]; 297 u8 log_max_ft_size[0x6];
@@ -473,7 +483,9 @@ struct mlx5_ifc_ads_bits {
473 483
474struct mlx5_ifc_flow_table_nic_cap_bits { 484struct mlx5_ifc_flow_table_nic_cap_bits {
475 u8 nic_rx_multi_path_tirs[0x1]; 485 u8 nic_rx_multi_path_tirs[0x1];
476 u8 reserved_at_1[0x1ff]; 486 u8 nic_rx_multi_path_tirs_fts[0x1];
487 u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
488 u8 reserved_at_3[0x1fd];
477 489
478 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; 490 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
479 491
@@ -512,7 +524,15 @@ struct mlx5_ifc_e_switch_cap_bits {
512 u8 nic_vport_node_guid_modify[0x1]; 524 u8 nic_vport_node_guid_modify[0x1];
513 u8 nic_vport_port_guid_modify[0x1]; 525 u8 nic_vport_port_guid_modify[0x1];
514 526
515 u8 reserved_at_20[0x7e0]; 527 u8 vxlan_encap_decap[0x1];
528 u8 nvgre_encap_decap[0x1];
529 u8 reserved_at_22[0x9];
530 u8 log_max_encap_headers[0x5];
531 u8 reserved_2b[0x6];
532 u8 max_encap_header_size[0xa];
533
534 u8 reserved_40[0x7c0];
535
516}; 536};
517 537
518struct mlx5_ifc_qos_cap_bits { 538struct mlx5_ifc_qos_cap_bits {
@@ -767,7 +787,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
767 u8 out_of_seq_cnt[0x1]; 787 u8 out_of_seq_cnt[0x1];
768 u8 vport_counters[0x1]; 788 u8 vport_counters[0x1];
769 u8 retransmission_q_counters[0x1]; 789 u8 retransmission_q_counters[0x1];
770 u8 reserved_at_183[0x3]; 790 u8 reserved_at_183[0x1];
791 u8 modify_rq_counter_set_id[0x1];
792 u8 reserved_at_185[0x1];
771 u8 max_qp_cnt[0xa]; 793 u8 max_qp_cnt[0xa];
772 u8 pkey_table_size[0x10]; 794 u8 pkey_table_size[0x10];
773 795
@@ -870,7 +892,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
870 u8 pad_tx_eth_packet[0x1]; 892 u8 pad_tx_eth_packet[0x1];
871 u8 reserved_at_263[0x8]; 893 u8 reserved_at_263[0x8];
872 u8 log_bf_reg_size[0x5]; 894 u8 log_bf_reg_size[0x5];
873 u8 reserved_at_270[0x10]; 895
896 u8 reserved_at_270[0xb];
897 u8 lag_master[0x1];
898 u8 num_lag_ports[0x4];
874 899
875 u8 reserved_at_280[0x10]; 900 u8 reserved_at_280[0x10];
876 u8 max_wqe_sz_sq[0x10]; 901 u8 max_wqe_sz_sq[0x10];
@@ -1904,7 +1929,7 @@ enum {
1904 1929
1905struct mlx5_ifc_qpc_bits { 1930struct mlx5_ifc_qpc_bits {
1906 u8 state[0x4]; 1931 u8 state[0x4];
1907 u8 reserved_at_4[0x4]; 1932 u8 lag_tx_port_affinity[0x4];
1908 u8 st[0x8]; 1933 u8 st[0x8];
1909 u8 reserved_at_10[0x3]; 1934 u8 reserved_at_10[0x3];
1910 u8 pm_state[0x2]; 1935 u8 pm_state[0x2];
@@ -1966,7 +1991,10 @@ struct mlx5_ifc_qpc_bits {
1966 u8 reserved_at_3e0[0x8]; 1991 u8 reserved_at_3e0[0x8];
1967 u8 cqn_snd[0x18]; 1992 u8 cqn_snd[0x18];
1968 1993
1969 u8 reserved_at_400[0x40]; 1994 u8 reserved_at_400[0x8];
1995 u8 deth_sqpn[0x18];
1996
1997 u8 reserved_at_420[0x20];
1970 1998
1971 u8 reserved_at_440[0x8]; 1999 u8 reserved_at_440[0x8];
1972 u8 last_acked_psn[0x18]; 2000 u8 last_acked_psn[0x18];
@@ -2064,6 +2092,8 @@ enum {
2064 MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, 2092 MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
2065 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, 2093 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
2066 MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, 2094 MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
2095 MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
2096 MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
2067}; 2097};
2068 2098
2069struct mlx5_ifc_flow_context_bits { 2099struct mlx5_ifc_flow_context_bits {
@@ -2083,7 +2113,9 @@ struct mlx5_ifc_flow_context_bits {
2083 u8 reserved_at_a0[0x8]; 2113 u8 reserved_at_a0[0x8];
2084 u8 flow_counter_list_size[0x18]; 2114 u8 flow_counter_list_size[0x18];
2085 2115
2086 u8 reserved_at_c0[0x140]; 2116 u8 encap_id[0x20];
2117
2118 u8 reserved_at_e0[0x120];
2087 2119
2088 struct mlx5_ifc_fte_match_param_bits match_value; 2120 struct mlx5_ifc_fte_match_param_bits match_value;
2089 2121
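
With the two new action bits and the encap_id field above, a flow entry can reference a previously allocated encap header. A hedged fragment that fills those fields in a flow_context layout (obtained, for example, via MLX5_ADDR_OF() from a SET_FLOW_TABLE_ENTRY input buffer); MLX5_SET() is the standard mlx5_ifc accessor:

static void foo_set_encap_action(void *flow_context, u32 encap_id)
{
	MLX5_SET(flow_context, flow_context, action,
		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
		 MLX5_FLOW_CONTEXT_ACTION_ENCAP);
	MLX5_SET(flow_context, flow_context, encap_id, encap_id);
}
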
@@ -2146,7 +2178,11 @@ struct mlx5_ifc_traffic_counter_bits {
2146}; 2178};
2147 2179
2148struct mlx5_ifc_tisc_bits { 2180struct mlx5_ifc_tisc_bits {
2149 u8 reserved_at_0[0xc]; 2181 u8 strict_lag_tx_port_affinity[0x1];
2182 u8 reserved_at_1[0x3];
2183 u8 lag_tx_port_affinity[0x04];
2184
2185 u8 reserved_at_8[0x4];
2150 u8 prio[0x4]; 2186 u8 prio[0x4];
2151 u8 reserved_at_10[0x10]; 2187 u8 reserved_at_10[0x10];
2152 2188
@@ -2808,7 +2844,7 @@ struct mlx5_ifc_xrqc_bits {
2808 2844
2809 struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context; 2845 struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
2810 2846
2811 u8 reserved_at_180[0x180]; 2847 u8 reserved_at_180[0x200];
2812 2848
2813 struct mlx5_ifc_wq_bits wq; 2849 struct mlx5_ifc_wq_bits wq;
2814}; 2850};
@@ -3489,7 +3525,7 @@ struct mlx5_ifc_query_special_contexts_out_bits {
3489 3525
3490 u8 syndrome[0x20]; 3526 u8 syndrome[0x20];
3491 3527
3492 u8 reserved_at_40[0x20]; 3528 u8 dump_fill_mkey[0x20];
3493 3529
3494 u8 resd_lkey[0x20]; 3530 u8 resd_lkey[0x20];
3495}; 3531};
@@ -4213,6 +4249,85 @@ struct mlx5_ifc_query_eq_in_bits {
4213 u8 reserved_at_60[0x20]; 4249 u8 reserved_at_60[0x20];
4214}; 4250};
4215 4251
4252struct mlx5_ifc_encap_header_in_bits {
4253 u8 reserved_at_0[0x5];
4254 u8 header_type[0x3];
4255 u8 reserved_at_8[0xe];
4256 u8 encap_header_size[0xa];
4257
4258 u8 reserved_at_20[0x10];
4259 u8 encap_header[2][0x8];
4260
4261 u8 more_encap_header[0][0x8];
4262};
4263
4264struct mlx5_ifc_query_encap_header_out_bits {
4265 u8 status[0x8];
4266 u8 reserved_at_8[0x18];
4267
4268 u8 syndrome[0x20];
4269
4270 u8 reserved_at_40[0xa0];
4271
4272 struct mlx5_ifc_encap_header_in_bits encap_header[0];
4273};
4274
4275struct mlx5_ifc_query_encap_header_in_bits {
4276 u8 opcode[0x10];
4277 u8 reserved_at_10[0x10];
4278
4279 u8 reserved_at_20[0x10];
4280 u8 op_mod[0x10];
4281
4282 u8 encap_id[0x20];
4283
4284 u8 reserved_at_60[0xa0];
4285};
4286
4287struct mlx5_ifc_alloc_encap_header_out_bits {
4288 u8 status[0x8];
4289 u8 reserved_at_8[0x18];
4290
4291 u8 syndrome[0x20];
4292
4293 u8 encap_id[0x20];
4294
4295 u8 reserved_at_60[0x20];
4296};
4297
4298struct mlx5_ifc_alloc_encap_header_in_bits {
4299 u8 opcode[0x10];
4300 u8 reserved_at_10[0x10];
4301
4302 u8 reserved_at_20[0x10];
4303 u8 op_mod[0x10];
4304
4305 u8 reserved_at_40[0xa0];
4306
4307 struct mlx5_ifc_encap_header_in_bits encap_header;
4308};
4309
4310struct mlx5_ifc_dealloc_encap_header_out_bits {
4311 u8 status[0x8];
4312 u8 reserved_at_8[0x18];
4313
4314 u8 syndrome[0x20];
4315
4316 u8 reserved_at_40[0x40];
4317};
4318
4319struct mlx5_ifc_dealloc_encap_header_in_bits {
4320 u8 opcode[0x10];
4321 u8 reserved_at_10[0x10];
4322
4323 u8 reserved_20[0x10];
4324 u8 op_mod[0x10];
4325
4326 u8 encap_id[0x20];
4327
4328 u8 reserved_60[0x20];
4329};
4330
4216struct mlx5_ifc_query_dct_out_bits { 4331struct mlx5_ifc_query_dct_out_bits {
4217 u8 status[0x8]; 4332 u8 status[0x8];
4218 u8 reserved_at_8[0x18]; 4333 u8 reserved_at_8[0x18];
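
A hedged sketch of allocating an encap header through the new ALLOC_ENCAP_HEADER command, following the usual variable-length mlx5 input pattern; foo_alloc_encap() is hypothetical and the inlen computation is slightly generous, purely to illustrate how the flexible encap_header array at the end of the input is sized:

#include <linux/slab.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

static int foo_alloc_encap(struct mlx5_core_dev *dev, u8 header_type,
			   void *header, size_t size, u32 *encap_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)] = {0};
	void *encap;
	u32 *in;
	int inlen, err;

	/* the input ends in a flexible byte array for the header */
	inlen = MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size;
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_encap_header_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
	encap = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
	MLX5_SET(encap_header_in, encap, header_type, header_type);
	MLX5_SET(encap_header_in, encap, encap_header_size, size);
	memcpy(MLX5_ADDR_OF(encap_header_in, encap, encap_header),
	       header, size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (!err)
		*encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
	kfree(in);
	return err;
}

The returned encap_id is what a flow context's encap_id field (and MLX5_FLOW_CONTEXT_ACTION_ENCAP action bit) later refer to.
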
@@ -4517,7 +4632,9 @@ struct mlx5_ifc_modify_tis_out_bits {
4517struct mlx5_ifc_modify_tis_bitmask_bits { 4632struct mlx5_ifc_modify_tis_bitmask_bits {
4518 u8 reserved_at_0[0x20]; 4633 u8 reserved_at_0[0x20];
4519 4634
4520 u8 reserved_at_20[0x1f]; 4635 u8 reserved_at_20[0x1d];
4636 u8 lag_tx_port_affinity[0x1];
4637 u8 strict_lag_tx_port_affinity[0x1];
4521 u8 prio[0x1]; 4638 u8 prio[0x1];
4522}; 4639};
4523 4640
@@ -4652,6 +4769,11 @@ struct mlx5_ifc_modify_rq_out_bits {
4652 u8 reserved_at_40[0x40]; 4769 u8 reserved_at_40[0x40];
4653}; 4770};
4654 4771
4772enum {
4773 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
4774 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3,
4775};
4776
4655struct mlx5_ifc_modify_rq_in_bits { 4777struct mlx5_ifc_modify_rq_in_bits {
4656 u8 opcode[0x10]; 4778 u8 opcode[0x10];
4657 u8 reserved_at_10[0x10]; 4779 u8 reserved_at_10[0x10];
@@ -4721,7 +4843,7 @@ struct mlx5_ifc_modify_nic_vport_field_select_bits {
4721 u8 reserved_at_0[0x16]; 4843 u8 reserved_at_0[0x16];
4722 u8 node_guid[0x1]; 4844 u8 node_guid[0x1];
4723 u8 port_guid[0x1]; 4845 u8 port_guid[0x1];
4724 u8 reserved_at_18[0x1]; 4846 u8 min_inline[0x1];
4725 u8 mtu[0x1]; 4847 u8 mtu[0x1];
4726 u8 change_event[0x1]; 4848 u8 change_event[0x1];
4727 u8 promisc[0x1]; 4849 u8 promisc[0x1];
@@ -6099,7 +6221,9 @@ struct mlx5_ifc_create_flow_table_in_bits {
6099 6221
6100 u8 reserved_at_a0[0x20]; 6222 u8 reserved_at_a0[0x20];
6101 6223
6102 u8 reserved_at_c0[0x4]; 6224 u8 encap_en[0x1];
6225 u8 decap_en[0x1];
6226 u8 reserved_at_c2[0x2];
6103 u8 table_miss_mode[0x4]; 6227 u8 table_miss_mode[0x4];
6104 u8 level[0x8]; 6228 u8 level[0x8];
6105 u8 reserved_at_d0[0x8]; 6229 u8 reserved_at_d0[0x8];
@@ -6108,7 +6232,10 @@ struct mlx5_ifc_create_flow_table_in_bits {
6108 u8 reserved_at_e0[0x8]; 6232 u8 reserved_at_e0[0x8];
6109 u8 table_miss_id[0x18]; 6233 u8 table_miss_id[0x18];
6110 6234
6111 u8 reserved_at_100[0x100]; 6235 u8 reserved_at_100[0x8];
6236 u8 lag_master_next_table_id[0x18];
6237
6238 u8 reserved_at_120[0x80];
6112}; 6239};
6113 6240
6114struct mlx5_ifc_create_flow_group_out_bits { 6241struct mlx5_ifc_create_flow_group_out_bits {
@@ -7563,7 +7690,8 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
7563}; 7690};
7564 7691
7565enum { 7692enum {
7566 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = 0x1, 7693 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = (1UL << 0),
7694 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID = (1UL << 15),
7567}; 7695};
7568 7696
7569struct mlx5_ifc_modify_flow_table_out_bits { 7697struct mlx5_ifc_modify_flow_table_out_bits {
@@ -7602,7 +7730,10 @@ struct mlx5_ifc_modify_flow_table_in_bits {
7602 u8 reserved_at_e0[0x8]; 7730 u8 reserved_at_e0[0x8];
7603 u8 table_miss_id[0x18]; 7731 u8 table_miss_id[0x18];
7604 7732
7605 u8 reserved_at_100[0x100]; 7733 u8 reserved_at_100[0x8];
7734 u8 lag_master_next_table_id[0x18];
7735
7736 u8 reserved_at_120[0x80];
7606}; 7737};
7607 7738
7608struct mlx5_ifc_ets_tcn_config_reg_bits { 7739struct mlx5_ifc_ets_tcn_config_reg_bits {
@@ -7710,4 +7841,134 @@ struct mlx5_ifc_dcbx_param_bits {
7710 u8 error[0x8]; 7841 u8 error[0x8];
7711 u8 reserved_at_a0[0x160]; 7842 u8 reserved_at_a0[0x160];
7712}; 7843};
7844
7845struct mlx5_ifc_lagc_bits {
7846 u8 reserved_at_0[0x1d];
7847 u8 lag_state[0x3];
7848
7849 u8 reserved_at_20[0x14];
7850 u8 tx_remap_affinity_2[0x4];
7851 u8 reserved_at_38[0x4];
7852 u8 tx_remap_affinity_1[0x4];
7853};
7854
7855struct mlx5_ifc_create_lag_out_bits {
7856 u8 status[0x8];
7857 u8 reserved_at_8[0x18];
7858
7859 u8 syndrome[0x20];
7860
7861 u8 reserved_at_40[0x40];
7862};
7863
7864struct mlx5_ifc_create_lag_in_bits {
7865 u8 opcode[0x10];
7866 u8 reserved_at_10[0x10];
7867
7868 u8 reserved_at_20[0x10];
7869 u8 op_mod[0x10];
7870
7871 struct mlx5_ifc_lagc_bits ctx;
7872};
7873
7874struct mlx5_ifc_modify_lag_out_bits {
7875 u8 status[0x8];
7876 u8 reserved_at_8[0x18];
7877
7878 u8 syndrome[0x20];
7879
7880 u8 reserved_at_40[0x40];
7881};
7882
7883struct mlx5_ifc_modify_lag_in_bits {
7884 u8 opcode[0x10];
7885 u8 reserved_at_10[0x10];
7886
7887 u8 reserved_at_20[0x10];
7888 u8 op_mod[0x10];
7889
7890 u8 reserved_at_40[0x20];
7891 u8 field_select[0x20];
7892
7893 struct mlx5_ifc_lagc_bits ctx;
7894};
7895
7896struct mlx5_ifc_query_lag_out_bits {
7897 u8 status[0x8];
7898 u8 reserved_at_8[0x18];
7899
7900 u8 syndrome[0x20];
7901
7902 u8 reserved_at_40[0x40];
7903
7904 struct mlx5_ifc_lagc_bits ctx;
7905};
7906
7907struct mlx5_ifc_query_lag_in_bits {
7908 u8 opcode[0x10];
7909 u8 reserved_at_10[0x10];
7910
7911 u8 reserved_at_20[0x10];
7912 u8 op_mod[0x10];
7913
7914 u8 reserved_at_40[0x40];
7915};
7916
7917struct mlx5_ifc_destroy_lag_out_bits {
7918 u8 status[0x8];
7919 u8 reserved_at_8[0x18];
7920
7921 u8 syndrome[0x20];
7922
7923 u8 reserved_at_40[0x40];
7924};
7925
7926struct mlx5_ifc_destroy_lag_in_bits {
7927 u8 opcode[0x10];
7928 u8 reserved_at_10[0x10];
7929
7930 u8 reserved_at_20[0x10];
7931 u8 op_mod[0x10];
7932
7933 u8 reserved_at_40[0x40];
7934};
7935
7936struct mlx5_ifc_create_vport_lag_out_bits {
7937 u8 status[0x8];
7938 u8 reserved_at_8[0x18];
7939
7940 u8 syndrome[0x20];
7941
7942 u8 reserved_at_40[0x40];
7943};
7944
7945struct mlx5_ifc_create_vport_lag_in_bits {
7946 u8 opcode[0x10];
7947 u8 reserved_at_10[0x10];
7948
7949 u8 reserved_at_20[0x10];
7950 u8 op_mod[0x10];
7951
7952 u8 reserved_at_40[0x40];
7953};
7954
7955struct mlx5_ifc_destroy_vport_lag_out_bits {
7956 u8 status[0x8];
7957 u8 reserved_at_8[0x18];
7958
7959 u8 syndrome[0x20];
7960
7961 u8 reserved_at_40[0x40];
7962};
7963
7964struct mlx5_ifc_destroy_vport_lag_in_bits {
7965 u8 opcode[0x10];
7966 u8 reserved_at_10[0x10];
7967
7968 u8 reserved_at_20[0x10];
7969 u8 op_mod[0x10];
7970
7971 u8 reserved_at_40[0x40];
7972};
7973
7713#endif /* MLX5_IFC_H */ 7974#endif /* MLX5_IFC_H */
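
The lagc context above is shared by the CREATE/MODIFY/QUERY LAG commands. A hedged sketch that reads back lag_state via QUERY_LAG, using the standard accessors; foo_query_lag_state() is hypothetical:

static int foo_query_lag_state(struct mlx5_core_dev *dev, u8 *state)
{
	u32 in[MLX5_ST_SZ_DW(query_lag_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(query_lag_out)] = {0};
	void *lag_ctx;
	int err;

	MLX5_SET(query_lag_in, in, opcode, MLX5_CMD_OP_QUERY_LAG);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	lag_ctx = MLX5_ADDR_OF(query_lag_out, out, ctx);
	*state = MLX5_GET(lagc, lag_ctx, lag_state);
	return 0;
}
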
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index e3012cc64b8a..b3065acd20b4 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -61,6 +61,39 @@ enum mlx5_an_status {
61#define MLX5_I2C_ADDR_HIGH 0x51 61#define MLX5_I2C_ADDR_HIGH 0x51
62#define MLX5_EEPROM_PAGE_LENGTH 256 62#define MLX5_EEPROM_PAGE_LENGTH 256
63 63
64enum mlx5e_link_mode {
65 MLX5E_1000BASE_CX_SGMII = 0,
66 MLX5E_1000BASE_KX = 1,
67 MLX5E_10GBASE_CX4 = 2,
68 MLX5E_10GBASE_KX4 = 3,
69 MLX5E_10GBASE_KR = 4,
70 MLX5E_20GBASE_KR2 = 5,
71 MLX5E_40GBASE_CR4 = 6,
72 MLX5E_40GBASE_KR4 = 7,
73 MLX5E_56GBASE_R4 = 8,
74 MLX5E_10GBASE_CR = 12,
75 MLX5E_10GBASE_SR = 13,
76 MLX5E_10GBASE_ER = 14,
77 MLX5E_40GBASE_SR4 = 15,
78 MLX5E_40GBASE_LR4 = 16,
79 MLX5E_50GBASE_SR2 = 18,
80 MLX5E_100GBASE_CR4 = 20,
81 MLX5E_100GBASE_SR4 = 21,
82 MLX5E_100GBASE_KR4 = 22,
83 MLX5E_100GBASE_LR4 = 23,
84 MLX5E_100BASE_TX = 24,
85 MLX5E_1000BASE_T = 25,
86 MLX5E_10GBASE_T = 26,
87 MLX5E_25GBASE_CR = 27,
88 MLX5E_25GBASE_KR = 28,
89 MLX5E_25GBASE_SR = 29,
90 MLX5E_50GBASE_CR2 = 30,
91 MLX5E_50GBASE_KR2 = 31,
92 MLX5E_LINK_MODES_NUMBER,
93};
94
95#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
96
64int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); 97int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
65int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, 98int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
66 int ptys_size, int proto_mask, u8 local_port); 99 int ptys_size, int proto_mask, u8 local_port);
@@ -70,9 +103,10 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
70 u32 *proto_admin, int proto_mask); 103 u32 *proto_admin, int proto_mask);
71int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, 104int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
72 u8 *link_width_oper, u8 local_port); 105 u8 *link_width_oper, u8 local_port);
73int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, 106int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
74 u8 *proto_oper, int proto_mask, 107 u8 *proto_oper, u8 local_port);
75 u8 local_port); 108int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
109 u32 *proto_oper, u8 local_port);
76int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, 110int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
77 u32 proto_admin, int proto_mask); 111 u32 proto_admin, int proto_mask);
78void mlx5_toggle_port_link(struct mlx5_core_dev *dev); 112void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
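
The link-mode enum plus MLX5E_PROT_MASK() turn the PTYS eth_proto_oper bitmap into testable bits, and the query helper split above returns that bitmap directly for Ethernet ports. A small sketch, assuming local port 1:

#include <linux/mlx5/port.h>

static bool foo_port_runs_100g(struct mlx5_core_dev *dev)
{
	u32 proto_oper = 0;

	if (mlx5_query_port_eth_proto_oper(dev, &proto_oper, 1))
		return false;

	return proto_oper & (MLX5E_PROT_MASK(MLX5E_100GBASE_CR4) |
			     MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) |
			     MLX5E_PROT_MASK(MLX5E_100GBASE_KR4) |
			     MLX5E_PROT_MASK(MLX5E_100GBASE_LR4));
}
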
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 7879bf411891..0aacb2a7480d 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -123,12 +123,13 @@ enum {
123}; 123};
124 124
125enum { 125enum {
126 MLX5_NON_ZERO_RQ = 0 << 24, 126 MLX5_NON_ZERO_RQ = 0x0,
127 MLX5_SRQ_RQ = 1 << 24, 127 MLX5_SRQ_RQ = 0x1,
128 MLX5_CRQ_RQ = 2 << 24, 128 MLX5_CRQ_RQ = 0x2,
129 MLX5_ZERO_LEN_RQ = 3 << 24 129 MLX5_ZERO_LEN_RQ = 0x3
130}; 130};
131 131
132/* TODO REM */
132enum { 133enum {
133 /* params1 */ 134 /* params1 */
134 MLX5_QP_BIT_SRE = 1 << 15, 135 MLX5_QP_BIT_SRE = 1 << 15,
@@ -178,12 +179,6 @@ enum {
178}; 179};
179 180
180enum { 181enum {
181 MLX5_QP_LAT_SENSITIVE = 1 << 28,
182 MLX5_QP_BLOCK_MCAST = 1 << 30,
183 MLX5_QP_ENABLE_SIG = 1 << 31,
184};
185
186enum {
187 MLX5_RCV_DBR = 0, 182 MLX5_RCV_DBR = 0,
188 MLX5_SND_DBR = 1, 183 MLX5_SND_DBR = 1,
189}; 184};
@@ -484,6 +479,7 @@ struct mlx5_qp_path {
484 u8 rmac[6]; 479 u8 rmac[6];
485}; 480};
486 481
482/* FIXME: use mlx5_ifc.h qpc */
487struct mlx5_qp_context { 483struct mlx5_qp_context {
488 __be32 flags; 484 __be32 flags;
489 __be32 flags_pd; 485 __be32 flags_pd;
@@ -525,99 +521,6 @@ struct mlx5_qp_context {
525 u8 rsvd1[24]; 521 u8 rsvd1[24];
526}; 522};
527 523
528struct mlx5_create_qp_mbox_in {
529 struct mlx5_inbox_hdr hdr;
530 __be32 input_qpn;
531 u8 rsvd0[4];
532 __be32 opt_param_mask;
533 u8 rsvd1[4];
534 struct mlx5_qp_context ctx;
535 u8 rsvd3[16];
536 __be64 pas[0];
537};
538
539struct mlx5_create_qp_mbox_out {
540 struct mlx5_outbox_hdr hdr;
541 __be32 qpn;
542 u8 rsvd0[4];
543};
544
545struct mlx5_destroy_qp_mbox_in {
546 struct mlx5_inbox_hdr hdr;
547 __be32 qpn;
548 u8 rsvd0[4];
549};
550
551struct mlx5_destroy_qp_mbox_out {
552 struct mlx5_outbox_hdr hdr;
553 u8 rsvd0[8];
554};
555
556struct mlx5_modify_qp_mbox_in {
557 struct mlx5_inbox_hdr hdr;
558 __be32 qpn;
559 u8 rsvd0[4];
560 __be32 optparam;
561 u8 rsvd1[4];
562 struct mlx5_qp_context ctx;
563 u8 rsvd2[16];
564};
565
566struct mlx5_modify_qp_mbox_out {
567 struct mlx5_outbox_hdr hdr;
568 u8 rsvd0[8];
569};
570
571struct mlx5_query_qp_mbox_in {
572 struct mlx5_inbox_hdr hdr;
573 __be32 qpn;
574 u8 rsvd[4];
575};
576
577struct mlx5_query_qp_mbox_out {
578 struct mlx5_outbox_hdr hdr;
579 u8 rsvd1[8];
580 __be32 optparam;
581 u8 rsvd0[4];
582 struct mlx5_qp_context ctx;
583 u8 rsvd2[16];
584 __be64 pas[0];
585};
586
587struct mlx5_conf_sqp_mbox_in {
588 struct mlx5_inbox_hdr hdr;
589 __be32 qpn;
590 u8 rsvd[3];
591 u8 type;
592};
593
594struct mlx5_conf_sqp_mbox_out {
595 struct mlx5_outbox_hdr hdr;
596 u8 rsvd[8];
597};
598
599struct mlx5_alloc_xrcd_mbox_in {
600 struct mlx5_inbox_hdr hdr;
601 u8 rsvd[8];
602};
603
604struct mlx5_alloc_xrcd_mbox_out {
605 struct mlx5_outbox_hdr hdr;
606 __be32 xrcdn;
607 u8 rsvd[4];
608};
609
610struct mlx5_dealloc_xrcd_mbox_in {
611 struct mlx5_inbox_hdr hdr;
612 __be32 xrcdn;
613 u8 rsvd[4];
614};
615
616struct mlx5_dealloc_xrcd_mbox_out {
617 struct mlx5_outbox_hdr hdr;
618 u8 rsvd[8];
619};
620
621static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) 524static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
622{ 525{
623 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); 526 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
@@ -628,28 +531,17 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
628 return radix_tree_lookup(&dev->priv.mkey_table.tree, key); 531 return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
629} 532}
630 533
631struct mlx5_page_fault_resume_mbox_in {
632 struct mlx5_inbox_hdr hdr;
633 __be32 flags_qpn;
634 u8 reserved[4];
635};
636
637struct mlx5_page_fault_resume_mbox_out {
638 struct mlx5_outbox_hdr hdr;
639 u8 rsvd[8];
640};
641
642int mlx5_core_create_qp(struct mlx5_core_dev *dev, 534int mlx5_core_create_qp(struct mlx5_core_dev *dev,
643 struct mlx5_core_qp *qp, 535 struct mlx5_core_qp *qp,
644 struct mlx5_create_qp_mbox_in *in, 536 u32 *in,
645 int inlen); 537 int inlen);
646int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation, 538int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
647 struct mlx5_modify_qp_mbox_in *in, int sqd_event, 539 u32 opt_param_mask, void *qpc,
648 struct mlx5_core_qp *qp); 540 struct mlx5_core_qp *qp);
649int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, 541int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
650 struct mlx5_core_qp *qp); 542 struct mlx5_core_qp *qp);
651int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, 543int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
652 struct mlx5_query_qp_mbox_out *out, int outlen); 544 u32 *out, int outlen);
653 545
654int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); 546int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
655int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); 547int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
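
With the hand-rolled mailbox structs removed, QP commands take raw mlx5_ifc layouts: mlx5_core_qp_modify() now receives the state-transition opcode, an optional-parameter mask, and a qpc built with the accessors. A hedged sketch of a RESET-to-INIT transition (MLX5_CMD_OP_RST2INIT_QP is an existing opcode; the single qpc field set here is illustrative):

#include <linux/mlx5/qp.h>
#include <linux/mlx5/mlx5_ifc.h>

static int foo_rst2init(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
			u32 pdn)
{
	u32 qpc[MLX5_ST_SZ_DW(qpc)] = {0};

	MLX5_SET(qpc, qpc, pd, pdn);
	/* opt_param_mask of 0: no optional parameters modified */
	return mlx5_core_qp_modify(dev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc, qp);
}
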
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index e087b7d047ac..451b0bde9083 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
45 u16 vport, u8 *addr); 45 u16 vport, u8 *addr);
46void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, 46void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
47 u8 *min_inline); 47 u8 *min_inline);
48int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
49 u16 vport, u8 min_inline);
48int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, 50int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
49 u16 vport, u8 *addr); 51 u16 vport, u8 *addr);
50int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); 52int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 0d126aeb3ec0..d43ef96bf075 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -32,6 +32,7 @@
32#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c 32#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
33#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d 33#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
34#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 34#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
35#define SDIO_DEVICE_ID_BROADCOM_4339 0x4339
35#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 36#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
36#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 37#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
37#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 38#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
diff --git a/include/linux/net.h b/include/linux/net.h
index b9f0ff4d489c..cd0c8bd0a1de 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -25,6 +25,7 @@
25#include <linux/kmemcheck.h> 25#include <linux/kmemcheck.h>
26#include <linux/rcupdate.h> 26#include <linux/rcupdate.h>
27#include <linux/once.h> 27#include <linux/once.h>
28#include <linux/fs.h>
28 29
29#include <uapi/linux/net.h> 30#include <uapi/linux/net.h>
30 31
@@ -128,6 +129,9 @@ struct page;
128struct sockaddr; 129struct sockaddr;
129struct msghdr; 130struct msghdr;
130struct module; 131struct module;
132struct sk_buff;
133typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
134 unsigned int, size_t);
131 135
132struct proto_ops { 136struct proto_ops {
133 int family; 137 int family;
@@ -186,6 +190,8 @@ struct proto_ops {
186 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 190 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
187 int (*set_peek_off)(struct sock *sk, int val); 191 int (*set_peek_off)(struct sock *sk, int val);
188 int (*peek_len)(struct socket *sock); 192 int (*peek_len)(struct socket *sock);
193 int (*read_sock)(struct sock *sk, read_descriptor_t *desc,
194 sk_read_actor_t recv_actor);
189}; 195};
190 196
191#define DECLARE_SOCKADDR(type, dst, src) \ 197#define DECLARE_SOCKADDR(type, dst, src) \
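
The new read_sock() operation (added for the stream parser / KCM work, with the <linux/fs.h> include supplying read_descriptor_t) hands each queued skb to an sk_read_actor_t, which reports how many bytes it consumed, in the style of tcp_read_sock(). A hedged sketch of such an actor; the actual payload consumption is stubbed out:

#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>

static int foo_recv_actor(read_descriptor_t *desc, struct sk_buff *skb,
			  unsigned int offset, size_t len)
{
	size_t used = min_t(size_t, len, desc->count);

	/* parse 'used' bytes of skb payload starting at 'offset' here */
	desc->count -= used;
	return used;
}

/* invoked by the owner as: sock->ops->read_sock(sk, &desc, foo_recv_actor) */
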
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e8d79d4ebcfe..136ae6bbe81e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -52,6 +52,7 @@
52#include <uapi/linux/netdevice.h> 52#include <uapi/linux/netdevice.h>
53#include <uapi/linux/if_bonding.h> 53#include <uapi/linux/if_bonding.h>
54#include <uapi/linux/pkt_cls.h> 54#include <uapi/linux/pkt_cls.h>
55#include <linux/hashtable.h>
55 56
56struct netpoll_info; 57struct netpoll_info;
57struct device; 58struct device;
@@ -788,6 +789,7 @@ enum {
788 TC_SETUP_CLSU32, 789 TC_SETUP_CLSU32,
789 TC_SETUP_CLSFLOWER, 790 TC_SETUP_CLSFLOWER,
790 TC_SETUP_MATCHALL, 791 TC_SETUP_MATCHALL,
792 TC_SETUP_CLSBPF,
791}; 793};
792 794
793struct tc_cls_u32_offload; 795struct tc_cls_u32_offload;
@@ -799,6 +801,7 @@ struct tc_to_netdev {
799 struct tc_cls_u32_offload *cls_u32; 801 struct tc_cls_u32_offload *cls_u32;
800 struct tc_cls_flower_offload *cls_flower; 802 struct tc_cls_flower_offload *cls_flower;
801 struct tc_cls_matchall_offload *cls_mall; 803 struct tc_cls_matchall_offload *cls_mall;
804 struct tc_cls_bpf_offload *cls_bpf;
802 }; 805 };
803}; 806};
804 807
@@ -923,6 +926,14 @@ struct netdev_xdp {
923 * 3. Update dev->stats asynchronously and atomically, and define 926 * 3. Update dev->stats asynchronously and atomically, and define
924 * neither operation. 927 * neither operation.
925 * 928 *
929 * bool (*ndo_has_offload_stats)(int attr_id)
930 * Return true if this device supports offload stats of this attr_id.
931 *
932 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
933 * void *attr_data)
934 * Get statistics for offload operations by attr_id. Write it into the
935 * attr_data pointer.
936 *
926 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); 937 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
927 * If device supports VLAN filtering this function is called when a 938 * If device supports VLAN filtering this function is called when a
928 * VLAN id is registered. 939 * VLAN id is registered.
@@ -935,7 +946,8 @@ struct netdev_xdp {
935 * 946 *
936 * SR-IOV management functions. 947 * SR-IOV management functions.
937 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); 948 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
938 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); 949 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
950 * u8 qos, __be16 proto);
939 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, 951 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
940 * int max_tx_rate); 952 * int max_tx_rate);
941 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); 953 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
@@ -1030,7 +1042,7 @@ struct netdev_xdp {
1030 * Deletes the FDB entry from dev corresponding to addr. 1042 * Deletes the FDB entry from dev corresponding to addr.
1031 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, 1043 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
1032 * struct net_device *dev, struct net_device *filter_dev, 1044 * struct net_device *dev, struct net_device *filter_dev,
1033 * int idx) 1045 * int *idx)
1034 * Used to add FDB entries to dump requests. Implementers should add 1046 * Used to add FDB entries to dump requests. Implementers should add
1035 * entries to skb and update idx with the number of entries. 1047 * entries to skb and update idx with the number of entries.
1036 * 1048 *
@@ -1154,6 +1166,10 @@ struct net_device_ops {
1154 1166
1155 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, 1167 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
1156 struct rtnl_link_stats64 *storage); 1168 struct rtnl_link_stats64 *storage);
1169 bool (*ndo_has_offload_stats)(int attr_id);
1170 int (*ndo_get_offload_stats)(int attr_id,
1171 const struct net_device *dev,
1172 void *attr_data);
1157 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 1173 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1158 1174
1159 int (*ndo_vlan_rx_add_vid)(struct net_device *dev, 1175 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
@@ -1172,7 +1188,8 @@ struct net_device_ops {
1172 int (*ndo_set_vf_mac)(struct net_device *dev, 1188 int (*ndo_set_vf_mac)(struct net_device *dev,
1173 int queue, u8 *mac); 1189 int queue, u8 *mac);
1174 int (*ndo_set_vf_vlan)(struct net_device *dev, 1190 int (*ndo_set_vf_vlan)(struct net_device *dev,
1175 int queue, u16 vlan, u8 qos); 1191 int queue, u16 vlan,
1192 u8 qos, __be16 proto);
1176 int (*ndo_set_vf_rate)(struct net_device *dev, 1193 int (*ndo_set_vf_rate)(struct net_device *dev,
1177 int vf, int min_tx_rate, 1194 int vf, int min_tx_rate,
1178 int max_tx_rate); 1195 int max_tx_rate);
@@ -1262,7 +1279,7 @@ struct net_device_ops {
1262 struct netlink_callback *cb, 1279 struct netlink_callback *cb,
1263 struct net_device *dev, 1280 struct net_device *dev,
1264 struct net_device *filter_dev, 1281 struct net_device *filter_dev,
1265 int idx); 1282 int *idx);
1266 1283
1267 int (*ndo_bridge_setlink)(struct net_device *dev, 1284 int (*ndo_bridge_setlink)(struct net_device *dev,
1268 struct nlmsghdr *nlh, 1285 struct nlmsghdr *nlh,
@@ -1561,8 +1578,6 @@ enum netdev_priv_flags {
1561 * 1578 *
1562 * @xps_maps: XXX: need comments on this one 1579 * @xps_maps: XXX: need comments on this one
1563 * 1580 *
1564 * @offload_fwd_mark: Offload device fwding mark
1565 *
1566 * @watchdog_timeo: Represents the timeout that is used by 1581 * @watchdog_timeo: Represents the timeout that is used by
1567 * the watchdog (see dev_watchdog()) 1582 * the watchdog (see dev_watchdog())
1568 * @watchdog_timer: List of timers 1583 * @watchdog_timer: List of timers
@@ -1784,7 +1799,7 @@ struct net_device {
1784#endif 1799#endif
1785 struct netdev_queue __rcu *ingress_queue; 1800 struct netdev_queue __rcu *ingress_queue;
1786#ifdef CONFIG_NETFILTER_INGRESS 1801#ifdef CONFIG_NETFILTER_INGRESS
1787 struct list_head nf_hooks_ingress; 1802 struct nf_hook_entry __rcu *nf_hooks_ingress;
1788#endif 1803#endif
1789 1804
1790 unsigned char broadcast[MAX_ADDR_LEN]; 1805 unsigned char broadcast[MAX_ADDR_LEN];
@@ -1800,6 +1815,9 @@ struct net_device {
1800 unsigned int num_tx_queues; 1815 unsigned int num_tx_queues;
1801 unsigned int real_num_tx_queues; 1816 unsigned int real_num_tx_queues;
1802 struct Qdisc *qdisc; 1817 struct Qdisc *qdisc;
1818#ifdef CONFIG_NET_SCHED
1819 DECLARE_HASHTABLE (qdisc_hash, 4);
1820#endif
1803 unsigned long tx_queue_len; 1821 unsigned long tx_queue_len;
1804 spinlock_t tx_global_lock; 1822 spinlock_t tx_global_lock;
1805 int watchdog_timeo; 1823 int watchdog_timeo;
@@ -1810,9 +1828,6 @@ struct net_device {
1810#ifdef CONFIG_NET_CLS_ACT 1828#ifdef CONFIG_NET_CLS_ACT
1811 struct tcf_proto __rcu *egress_cl_list; 1829 struct tcf_proto __rcu *egress_cl_list;
1812#endif 1830#endif
1813#ifdef CONFIG_NET_SWITCHDEV
1814 u32 offload_fwd_mark;
1815#endif
1816 1831
1817 /* These may be needed for future network-power-down code. */ 1832 /* These may be needed for future network-power-down code. */
1818 struct timer_list watchdog_timer; 1833 struct timer_list watchdog_timer;
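
The ndo_has_offload_stats()/ndo_get_offload_stats() pair documented and declared above lets switchdev drivers report hardware-gathered counters over rtnetlink. A hedged sketch; IFLA_OFFLOAD_XSTATS_CPU_HIT is the attr_id introduced alongside this interface, and the zeroed stats stand in for values read from hardware:

#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static bool foo_has_offload_stats(int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

static int foo_get_offload_stats(int attr_id, const struct net_device *dev,
				 void *attr_data)
{
	if (attr_id != IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return -EINVAL;

	/* fill attr_data with a struct rtnl_link_stats64 of counters
	 * for packets forwarded by the CPU rather than the hardware */
	memset(attr_data, 0, sizeof(struct rtnl_link_stats64));
	return 0;
}
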
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 9230f9aee896..abc7fdcb9eb1 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -55,12 +55,34 @@ struct nf_hook_state {
55 struct net_device *out; 55 struct net_device *out;
56 struct sock *sk; 56 struct sock *sk;
57 struct net *net; 57 struct net *net;
58 struct list_head *hook_list; 58 struct nf_hook_entry __rcu *hook_entries;
59 int (*okfn)(struct net *, struct sock *, struct sk_buff *); 59 int (*okfn)(struct net *, struct sock *, struct sk_buff *);
60}; 60};
61 61
62typedef unsigned int nf_hookfn(void *priv,
63 struct sk_buff *skb,
64 const struct nf_hook_state *state);
65struct nf_hook_ops {
66 struct list_head list;
67
68 /* User fills in from here down. */
69 nf_hookfn *hook;
70 struct net_device *dev;
71 void *priv;
72 u_int8_t pf;
73 unsigned int hooknum;
74 /* Hooks are ordered in ascending priority. */
75 int priority;
76};
77
78struct nf_hook_entry {
79 struct nf_hook_entry __rcu *next;
80 struct nf_hook_ops ops;
81 const struct nf_hook_ops *orig_ops;
82};
83
62static inline void nf_hook_state_init(struct nf_hook_state *p, 84static inline void nf_hook_state_init(struct nf_hook_state *p,
63 struct list_head *hook_list, 85 struct nf_hook_entry *hook_entry,
64 unsigned int hook, 86 unsigned int hook,
65 int thresh, u_int8_t pf, 87 int thresh, u_int8_t pf,
66 struct net_device *indev, 88 struct net_device *indev,
@@ -76,26 +98,11 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
76 p->out = outdev; 98 p->out = outdev;
77 p->sk = sk; 99 p->sk = sk;
78 p->net = net; 100 p->net = net;
79 p->hook_list = hook_list; 101 RCU_INIT_POINTER(p->hook_entries, hook_entry);
80 p->okfn = okfn; 102 p->okfn = okfn;
81} 103}
82 104
83typedef unsigned int nf_hookfn(void *priv,
84 struct sk_buff *skb,
85 const struct nf_hook_state *state);
86
87struct nf_hook_ops {
88 struct list_head list;
89 105
90 /* User fills in from here down. */
91 nf_hookfn *hook;
92 struct net_device *dev;
93 void *priv;
94 u_int8_t pf;
95 unsigned int hooknum;
96 /* Hooks are ordered in ascending priority. */
97 int priority;
98};
99 106
100struct nf_sockopt_ops { 107struct nf_sockopt_ops {
101 struct list_head list; 108 struct list_head list;
@@ -133,6 +140,8 @@ int nf_register_hook(struct nf_hook_ops *reg);
133void nf_unregister_hook(struct nf_hook_ops *reg); 140void nf_unregister_hook(struct nf_hook_ops *reg);
134int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n); 141int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
135void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n); 142void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
143int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
144void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
136 145
137/* Functions to register get/setsockopt ranges (non-inclusive). You 146/* Functions to register get/setsockopt ranges (non-inclusive). You
138 need to check permissions yourself! */ 147 need to check permissions yourself! */
@@ -161,7 +170,8 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
161 int (*okfn)(struct net *, struct sock *, struct sk_buff *), 170 int (*okfn)(struct net *, struct sock *, struct sk_buff *),
162 int thresh) 171 int thresh)
163{ 172{
164 struct list_head *hook_list; 173 struct nf_hook_entry *hook_head;
174 int ret = 1;
165 175
166#ifdef HAVE_JUMP_LABEL 176#ifdef HAVE_JUMP_LABEL
167 if (__builtin_constant_p(pf) && 177 if (__builtin_constant_p(pf) &&
@@ -170,16 +180,19 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
170 return 1; 180 return 1;
171#endif 181#endif
172 182
173 hook_list = &net->nf.hooks[pf][hook]; 183 rcu_read_lock();
174 184 hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
175 if (!list_empty(hook_list)) { 185 if (hook_head) {
176 struct nf_hook_state state; 186 struct nf_hook_state state;
177 187
178 nf_hook_state_init(&state, hook_list, hook, thresh, 188 nf_hook_state_init(&state, hook_head, hook, thresh,
179 pf, indev, outdev, sk, net, okfn); 189 pf, indev, outdev, sk, net, okfn);
180 return nf_hook_slow(skb, &state); 190
191 ret = nf_hook_slow(skb, &state);
181 } 192 }
182 return 1; 193 rcu_read_unlock();
194
195 return ret;
183} 196}
184 197
185static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, 198static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
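
The switch from a list_head to a singly linked, RCU-protected chain of nf_hook_entry means hook traversal becomes a plain pointer walk under rcu_read_lock(), as nf_hook_thresh() above already shows for the head. A hedged sketch of the traversal idea only (the kernel's real nf_hook_slow() additionally handles NF_QUEUE, NF_REPEAT and thresholds):

#include <linux/netfilter.h>

static unsigned int foo_run_hooks(struct sk_buff *skb,
				  struct nf_hook_state *state)
{
	struct nf_hook_entry *entry;
	unsigned int verdict = NF_ACCEPT;

	/* caller holds rcu_read_lock(); entries are in ascending
	 * priority order */
	entry = rcu_dereference(state->hook_entries);
	while (entry) {
		verdict = entry->ops.hook(entry->ops.priv, skb, state);
		if (verdict != NF_ACCEPT)
			break;
		entry = rcu_dereference(entry->next);
	}
	return verdict;
}
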
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 275505792664..1d1ef4e20512 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -4,13 +4,9 @@
4#include <uapi/linux/netfilter/nf_conntrack_common.h> 4#include <uapi/linux/netfilter/nf_conntrack_common.h>
5 5
6struct ip_conntrack_stat { 6struct ip_conntrack_stat {
7 unsigned int searched;
8 unsigned int found; 7 unsigned int found;
9 unsigned int new;
10 unsigned int invalid; 8 unsigned int invalid;
11 unsigned int ignore; 9 unsigned int ignore;
12 unsigned int delete;
13 unsigned int delete_list;
14 unsigned int insert; 10 unsigned int insert;
15 unsigned int insert_failed; 11 unsigned int insert_failed;
16 unsigned int drop; 12 unsigned int drop;
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index df78dc2b5524..dee0acd0dd31 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -1,68 +1,8 @@
1#ifndef _CONNTRACK_PROTO_GRE_H 1#ifndef _CONNTRACK_PROTO_GRE_H
2#define _CONNTRACK_PROTO_GRE_H 2#define _CONNTRACK_PROTO_GRE_H
3#include <asm/byteorder.h> 3#include <asm/byteorder.h>
4 4#include <net/gre.h>
5/* GRE PROTOCOL HEADER */ 5#include <net/pptp.h>
6
7/* GRE Version field */
8#define GRE_VERSION_1701 0x0
9#define GRE_VERSION_PPTP 0x1
10
11/* GRE Protocol field */
12#define GRE_PROTOCOL_PPTP 0x880B
13
14/* GRE Flags */
15#define GRE_FLAG_C 0x80
16#define GRE_FLAG_R 0x40
17#define GRE_FLAG_K 0x20
18#define GRE_FLAG_S 0x10
19#define GRE_FLAG_A 0x80
20
21#define GRE_IS_C(f) ((f)&GRE_FLAG_C)
22#define GRE_IS_R(f) ((f)&GRE_FLAG_R)
23#define GRE_IS_K(f) ((f)&GRE_FLAG_K)
24#define GRE_IS_S(f) ((f)&GRE_FLAG_S)
25#define GRE_IS_A(f) ((f)&GRE_FLAG_A)
26
27/* GRE is a mess: Four different standards */
28struct gre_hdr {
29#if defined(__LITTLE_ENDIAN_BITFIELD)
30 __u16 rec:3,
31 srr:1,
32 seq:1,
33 key:1,
34 routing:1,
35 csum:1,
36 version:3,
37 reserved:4,
38 ack:1;
39#elif defined(__BIG_ENDIAN_BITFIELD)
40 __u16 csum:1,
41 routing:1,
42 key:1,
43 seq:1,
44 srr:1,
45 rec:3,
46 ack:1,
47 reserved:4,
48 version:3;
49#else
50#error "Adjust your <asm/byteorder.h> defines"
51#endif
52 __be16 protocol;
53};
54
55/* modified GRE header for PPTP */
56struct gre_hdr_pptp {
57 __u8 flags; /* bitfield */
58 __u8 version; /* should be GRE_VERSION_PPTP */
59 __be16 protocol; /* should be GRE_PROTOCOL_PPTP */
60 __be16 payload_len; /* size of ppp payload, not inc. gre header */
61 __be16 call_id; /* peer's call_id for this session */
62 __be32 seq; /* sequence number. Present if S==1 */
63 __be32 ack; /* seq number of highest packet received by */
64 /* sender in this session */
65};
66 6
67struct nf_ct_gre { 7struct nf_ct_gre {
68 unsigned int stream_timeout; 8 unsigned int stream_timeout;
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 5fcd375ef175..33e37fb41d5d 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -11,22 +11,30 @@ static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
11 if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) 11 if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
12 return false; 12 return false;
13#endif 13#endif
14 return !list_empty(&skb->dev->nf_hooks_ingress); 14 return rcu_access_pointer(skb->dev->nf_hooks_ingress);
15} 15}
16 16
17/* caller must hold rcu_read_lock */
17static inline int nf_hook_ingress(struct sk_buff *skb) 18static inline int nf_hook_ingress(struct sk_buff *skb)
18{ 19{
20 struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress);
19 struct nf_hook_state state; 21 struct nf_hook_state state;
20 22
21 nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress, 23 /* Must recheck the ingress hook head, in the event it became NULL
22 NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, 24 * after the check in nf_hook_ingress_active evaluated to true.
23 skb->dev, NULL, NULL, dev_net(skb->dev), NULL); 25 */
26 if (unlikely(!e))
27 return 0;
28
29 nf_hook_state_init(&state, e, NF_NETDEV_INGRESS, INT_MIN,
30 NFPROTO_NETDEV, skb->dev, NULL, NULL,
31 dev_net(skb->dev), NULL);
24 return nf_hook_slow(skb, &state); 32 return nf_hook_slow(skb, &state);
25} 33}
26 34
27static inline void nf_hook_ingress_init(struct net_device *dev) 35static inline void nf_hook_ingress_init(struct net_device *dev)
28{ 36{
29 INIT_LIST_HEAD(&dev->nf_hooks_ingress); 37 RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
30} 38}
31#else /* CONFIG_NETFILTER_INGRESS */ 39#else /* CONFIG_NETFILTER_INGRESS */
32static inline int nf_hook_ingress_active(struct sk_buff *skb) 40static inline int nf_hook_ingress_active(struct sk_buff *skb)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5c5362584aba..060d0ede88df 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -690,6 +690,10 @@ struct perf_event {
690 u64 (*clock)(void); 690 u64 (*clock)(void);
691 perf_overflow_handler_t overflow_handler; 691 perf_overflow_handler_t overflow_handler;
692 void *overflow_handler_context; 692 void *overflow_handler_context;
693#ifdef CONFIG_BPF_SYSCALL
694 perf_overflow_handler_t orig_overflow_handler;
695 struct bpf_prog *prog;
696#endif
693 697
694#ifdef CONFIG_EVENT_TRACING 698#ifdef CONFIG_EVENT_TRACING
695 struct trace_event_call *tp_event; 699 struct trace_event_call *tp_event;
@@ -802,6 +806,11 @@ struct perf_output_handle {
802 int page; 806 int page;
803}; 807};
804 808
809struct bpf_perf_event_data_kern {
810 struct pt_regs *regs;
811 struct perf_sample_data *data;
812};
813
805#ifdef CONFIG_CGROUP_PERF 814#ifdef CONFIG_CGROUP_PERF
806 815
807/* 816/*
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 2d24b283aa2d..e25f1830fbcf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -80,6 +80,7 @@ typedef enum {
80 PHY_INTERFACE_MODE_XGMII, 80 PHY_INTERFACE_MODE_XGMII,
81 PHY_INTERFACE_MODE_MOCA, 81 PHY_INTERFACE_MODE_MOCA,
82 PHY_INTERFACE_MODE_QSGMII, 82 PHY_INTERFACE_MODE_QSGMII,
83 PHY_INTERFACE_MODE_TRGMII,
83 PHY_INTERFACE_MODE_MAX, 84 PHY_INTERFACE_MODE_MAX,
84} phy_interface_t; 85} phy_interface_t;
85 86
@@ -123,6 +124,8 @@ static inline const char *phy_modes(phy_interface_t interface)
123 return "moca"; 124 return "moca";
124 case PHY_INTERFACE_MODE_QSGMII: 125 case PHY_INTERFACE_MODE_QSGMII:
125 return "qsgmii"; 126 return "qsgmii";
127 case PHY_INTERFACE_MODE_TRGMII:
128 return "trgmii";
126 default: 129 default:
127 return "unknown"; 130 return "unknown";
128 } 131 }
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 6b15e168148a..5ad54fc66cf0 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -127,6 +127,11 @@ struct ptp_clock;
127 * 127 *
128 * @info: Structure describing the new clock. 128 * @info: Structure describing the new clock.
129 * @parent: Pointer to the parent device of the new clock. 129 * @parent: Pointer to the parent device of the new clock.
130 *
131 * Returns a valid pointer on success or PTR_ERR on failure. If PHC
132 * support is missing at the configuration level, this function
133 * returns NULL, and drivers are expected to gracefully handle that
134 * case separately.
130 */ 135 */
131 136
132extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, 137extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
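
A sketch of the contract documented above: ptp_clock_register() returns an ERR_PTR on real failure, NULL when PHC support is compiled out, and a valid pointer otherwise, so callers must distinguish all three. The foo_ wrapper is hypothetical:

#include <linux/err.h>
#include <linux/ptp_clock_kernel.h>

static struct ptp_clock *foo_register_phc(struct ptp_clock_info *info,
					  struct device *parent)
{
	struct ptp_clock *clock = ptp_clock_register(info, parent);

	if (IS_ERR(clock))
		return clock;		/* hard failure: propagate */
	if (!clock)
		pr_info("PHC support disabled, continuing without it\n");
	return clock;			/* may be NULL: handle gracefully */
}
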
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 40c0ada01806..734deb094618 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -5,28 +5,77 @@
5 * (GPL) Version 2, available from the file COPYING in the main directory of 5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree. 6 * this source tree.
7 */ 7 */
8#ifndef _COMMON_HSI_H
9#define _COMMON_HSI_H
10#include <linux/types.h>
11#include <asm/byteorder.h>
12#include <linux/bitops.h>
13#include <linux/slab.h>
14
15/* dma_addr_t manip */
16#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
17#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
18#define DMA_REGPAIR_LE(x, val) do { \
19 (x).hi = DMA_HI_LE((val)); \
20 (x).lo = DMA_LO_LE((val)); \
21 } while (0)
22
23#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
24#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
25#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
26#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
8 27
9#ifndef __COMMON_HSI__ 28#ifndef __COMMON_HSI__
10#define __COMMON_HSI__ 29#define __COMMON_HSI__
11 30
12#define CORE_SPQE_PAGE_SIZE_BYTES 4096
13 31
14#define X_FINAL_CLEANUP_AGG_INT 1 32#define X_FINAL_CLEANUP_AGG_INT 1
33
34#define EVENT_RING_PAGE_SIZE_BYTES 4096
35
15#define NUM_OF_GLOBAL_QUEUES 128 36#define NUM_OF_GLOBAL_QUEUES 128
37#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
38
39#define ISCSI_CDU_TASK_SEG_TYPE 0
40#define RDMA_CDU_TASK_SEG_TYPE 1
41
42#define FW_ASSERT_GENERAL_ATTN_IDX 32
43
44#define MAX_PINNED_CCFC 32
16 45
17/* Queue Zone sizes in bytes */ 46/* Queue Zone sizes in bytes */
18#define TSTORM_QZONE_SIZE 8 47#define TSTORM_QZONE_SIZE 8
19#define MSTORM_QZONE_SIZE 0 48#define MSTORM_QZONE_SIZE 16
20#define USTORM_QZONE_SIZE 8 49#define USTORM_QZONE_SIZE 8
21#define XSTORM_QZONE_SIZE 8 50#define XSTORM_QZONE_SIZE 8
22#define YSTORM_QZONE_SIZE 0 51#define YSTORM_QZONE_SIZE 0
23#define PSTORM_QZONE_SIZE 0 52#define PSTORM_QZONE_SIZE 0
24 53
25#define ETH_MAX_NUM_RX_QUEUES_PER_VF 16 54#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
55#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
56#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
57#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
58
59/********************************/
60/* CORE (LIGHT L2) FW CONSTANTS */
61/********************************/
62
63#define CORE_LL2_MAX_RAMROD_PER_CON 8
64#define CORE_LL2_TX_BD_PAGE_SIZE_BYTES 4096
65#define CORE_LL2_RX_BD_PAGE_SIZE_BYTES 4096
66#define CORE_LL2_RX_CQE_PAGE_SIZE_BYTES 4096
67#define CORE_LL2_RX_NUM_NEXT_PAGE_BDS 1
68
69#define CORE_LL2_TX_MAX_BDS_PER_PACKET 12
70
71#define CORE_SPQE_PAGE_SIZE_BYTES 4096
72
73#define MAX_NUM_LL2_RX_QUEUES 32
74#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
26 75
27#define FW_MAJOR_VERSION 8 76#define FW_MAJOR_VERSION 8
28#define FW_MINOR_VERSION 10 77#define FW_MINOR_VERSION 10
29#define FW_REVISION_VERSION 5 78#define FW_REVISION_VERSION 10
30#define FW_ENGINEERING_VERSION 0 79#define FW_ENGINEERING_VERSION 0
31 80
32/***********************/ 81/***********************/
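
The new regpair helpers above encapsulate the split of a 64-bit DMA address into the little-endian lo/hi halves used throughout the qed HSI. A short sketch, assuming struct regpair { __le32 lo; __le32 hi; } as defined in this header; the foo_ wrappers are illustrative:

#include <linux/qed/common_hsi.h>

static void foo_fill_regpair(struct regpair *rp, dma_addr_t addr)
{
	DMA_REGPAIR_LE(*rp, addr);	/* split into lo/hi LE32 halves */
}

static dma_addr_t foo_read_regpair(struct regpair rp)
{
	return HILO_DMA_REGPAIR(rp);	/* recombine into a dma_addr_t */
}
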
@@ -83,6 +132,20 @@
83#define NUM_OF_LCIDS (320) 132#define NUM_OF_LCIDS (320)
84#define NUM_OF_LTIDS (320) 133#define NUM_OF_LTIDS (320)
85 134
135/* Clock values */
136#define MASTER_CLK_FREQ_E4 (375e6)
137#define STORM_CLK_FREQ_E4 (1000e6)
138#define CLK25M_CLK_FREQ_E4 (25e6)
139
140/* Global PXP windows (GTT) */
141#define NUM_OF_GTT 19
142#define GTT_DWORD_SIZE_BITS 10
143#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
144#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
145
146/* Tools Version */
147#define TOOLS_VERSION 10
148
86/*****************/ 149/*****************/
87/* CDU CONSTANTS */ 150/* CDU CONSTANTS */
88/*****************/ 151/*****************/
@@ -90,6 +153,8 @@
90#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) 153#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
91#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) 154#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
92 155
156#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
157#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
93/*****************/ 158/*****************/
94/* DQ CONSTANTS */ 159/* DQ CONSTANTS */
95/*****************/ 160/*****************/
@@ -115,6 +180,11 @@
115#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 180#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
116#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 181#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
117#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 182#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
183#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
184#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
185#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
186#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
187#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
118 188
119/* UCM agg val selection (HW) */ 189/* UCM agg val selection (HW) */
120#define DQ_UCM_AGG_VAL_SEL_WORD0 0 190#define DQ_UCM_AGG_VAL_SEL_WORD0 0
@@ -159,13 +229,16 @@
159#define DQ_XCM_AGG_FLG_SHIFT_CF23 7 229#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
160 230
161/* XCM agg counter flag selection */ 231/* XCM agg counter flag selection */
162#define DQ_XCM_CORE_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18) 232#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
163#define DQ_XCM_CORE_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19) 233#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
164#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22) 234#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
165#define DQ_XCM_ETH_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18) 235#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
166#define DQ_XCM_ETH_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19) 236#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
167#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22) 237#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
168#define DQ_XCM_ETH_TPH_EN_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23) 238#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
239#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
240#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
241#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
169 242
170/* UCM agg counter flag selection (HW) */ 243/* UCM agg counter flag selection (HW) */
171#define DQ_UCM_AGG_FLG_SHIFT_CF0 0 244#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
@@ -178,9 +251,45 @@
178#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7 251#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7
179 252
180/* UCM agg counter flag selection (FW) */ 253/* UCM agg counter flag selection (FW) */
181#define DQ_UCM_ETH_PMD_TX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4) 254#define DQ_UCM_ETH_PMD_TX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
182#define DQ_UCM_ETH_PMD_RX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5) 255#define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
183 256#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
257#define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
258
259/* TCM agg counter flag selection (HW) */
260#define DQ_TCM_AGG_FLG_SHIFT_CF0 0
261#define DQ_TCM_AGG_FLG_SHIFT_CF1 1
262#define DQ_TCM_AGG_FLG_SHIFT_CF2 2
263#define DQ_TCM_AGG_FLG_SHIFT_CF3 3
264#define DQ_TCM_AGG_FLG_SHIFT_CF4 4
265#define DQ_TCM_AGG_FLG_SHIFT_CF5 5
266#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
267#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
268/* TCM agg counter flag selection (FW) */
269#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
270#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
271
272/* PWM address mapping */
273#define DQ_PWM_OFFSET_DPM_BASE 0x0
274#define DQ_PWM_OFFSET_DPM_END 0x27
275#define DQ_PWM_OFFSET_XCM16_BASE 0x40
276#define DQ_PWM_OFFSET_XCM32_BASE 0x44
277#define DQ_PWM_OFFSET_UCM16_BASE 0x48
278#define DQ_PWM_OFFSET_UCM32_BASE 0x4C
279#define DQ_PWM_OFFSET_UCM16_4 0x50
280#define DQ_PWM_OFFSET_TCM16_BASE 0x58
281#define DQ_PWM_OFFSET_TCM32_BASE 0x5C
282#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
283#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
284#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
285
286#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2)
287#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE)
288#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_16BIT (DQ_PWM_OFFSET_UCM16_4)
289#define DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT (DQ_PWM_OFFSET_UCM16_BASE + 2)
290#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS)
291#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
292#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
184#define DQ_REGION_SHIFT (12) 293#define DQ_REGION_SHIFT (12)
185 294
186/* DPM */ 295/* DPM */
@@ -214,15 +323,17 @@
214 */ 323 */
215#define CM_TX_PQ_BASE 0x200 324#define CM_TX_PQ_BASE 0x200
216 325
326/* number of global Vport/QCN rate limiters */
327#define MAX_QM_GLOBAL_RLS 256
217/* QM registers data */ 328/* QM registers data */
218#define QM_LINE_CRD_REG_WIDTH 16 329#define QM_LINE_CRD_REG_WIDTH 16
219#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1)) 330#define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1))
220#define QM_BYTE_CRD_REG_WIDTH 24 331#define QM_BYTE_CRD_REG_WIDTH 24
221#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1)) 332#define QM_BYTE_CRD_REG_SIGN_BIT BIT((QM_BYTE_CRD_REG_WIDTH - 1))
222#define QM_WFQ_CRD_REG_WIDTH 32 333#define QM_WFQ_CRD_REG_WIDTH 32
223#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1)) 334#define QM_WFQ_CRD_REG_SIGN_BIT BIT((QM_WFQ_CRD_REG_WIDTH - 1))
224#define QM_RL_CRD_REG_WIDTH 32 335#define QM_RL_CRD_REG_WIDTH 32
225#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1)) 336#define QM_RL_CRD_REG_SIGN_BIT BIT((QM_RL_CRD_REG_WIDTH - 1))
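
The sign-bit defines above now use the kernel's BIT() macro from linux/bitops.h (1UL << n) instead of open-coded shifts. For register widths below 32, a consumer can sign-extend a raw credit value against these widths; a minimal sketch, with qed_sign_extend() as a hypothetical helper rather than part of the qed API:

#include <linux/bitops.h>

/* Sign-extend a credit-register value of the given width (must be < 32).
 * If the register's sign bit is set, propagate it into the upper bits.
 */
static inline s32 qed_sign_extend(u32 val, u32 width)
{
	if (val & BIT(width - 1))
		val |= GENMASK(31, width);
	return (s32)val;
}

/* e.g. s32 crd = qed_sign_extend(reg_val, QM_BYTE_CRD_REG_WIDTH); */
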
226 337
227/*****************/ 338/*****************/
228/* CAU CONSTANTS */ 339/* CAU CONSTANTS */
@@ -287,6 +398,17 @@
287/* PXP CONSTANTS */ 398/* PXP CONSTANTS */
288/*****************/ 399/*****************/
289 400
401/* Bars for Blocks */
402#define PXP_BAR_GRC 0
403#define PXP_BAR_TSDM 0
404#define PXP_BAR_USDM 0
405#define PXP_BAR_XSDM 0
406#define PXP_BAR_MSDM 0
407#define PXP_BAR_YSDM 0
408#define PXP_BAR_PSDM 0
409#define PXP_BAR_IGU 0
410#define PXP_BAR_DQ 1
411
290/* PTT and GTT */ 412/* PTT and GTT */
291#define PXP_NUM_PF_WINDOWS 12 413#define PXP_NUM_PF_WINDOWS 12
292#define PXP_PER_PF_ENTRY_SIZE 8 414#define PXP_PER_PF_ENTRY_SIZE 8
@@ -334,6 +456,52 @@
334 (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \ 456 (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
335 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) 457 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
336 458
459/* PF BAR */
460#define PXP_BAR0_START_GRC 0x0000
461#define PXP_BAR0_GRC_LENGTH 0x1C00000
462#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \
463 PXP_BAR0_GRC_LENGTH - 1)
464
465#define PXP_BAR0_START_IGU 0x1C00000
466#define PXP_BAR0_IGU_LENGTH 0x10000
467#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \
468 PXP_BAR0_IGU_LENGTH - 1)
469
470#define PXP_BAR0_START_TSDM 0x1C80000
471#define PXP_BAR0_SDM_LENGTH 0x40000
472#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000
473#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \
474 PXP_BAR0_SDM_LENGTH - 1)
475
476#define PXP_BAR0_START_MSDM 0x1D00000
477#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \
478 PXP_BAR0_SDM_LENGTH - 1)
479
480#define PXP_BAR0_START_USDM 0x1D80000
481#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \
482 PXP_BAR0_SDM_LENGTH - 1)
483
484#define PXP_BAR0_START_XSDM 0x1E00000
485#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \
486 PXP_BAR0_SDM_LENGTH - 1)
487
488#define PXP_BAR0_START_YSDM 0x1E80000
489#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \
490 PXP_BAR0_SDM_LENGTH - 1)
491
492#define PXP_BAR0_START_PSDM 0x1F00000
493#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \
494 PXP_BAR0_SDM_LENGTH - 1)
495
496#define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1)
497
498/* VF BAR */
499#define PXP_VF_BAR0 0
500
501#define PXP_VF_BAR0_START_GRC 0x3E00
502#define PXP_VF_BAR0_GRC_LENGTH 0x200
503#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
504 PXP_VF_BAR0_GRC_LENGTH - 1)
337 505
338#define PXP_VF_BAR0_START_IGU 0 506#define PXP_VF_BAR0_START_IGU 0
339#define PXP_VF_BAR0_IGU_LENGTH 0x3000 507#define PXP_VF_BAR0_IGU_LENGTH 0x3000
@@ -399,6 +567,20 @@
399#define PXP_NUM_ILT_RECORDS_BB 7600 567#define PXP_NUM_ILT_RECORDS_BB 7600
400#define PXP_NUM_ILT_RECORDS_K2 11000 568#define PXP_NUM_ILT_RECORDS_K2 11000
401#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) 569#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
570#define PXP_QUEUES_ZONE_MAX_NUM 320
571/*****************/
572/* PRM CONSTANTS */
573/*****************/
574#define PRM_DMA_PAD_BYTES_NUM 2
575/******************/
576/* SDMs CONSTANTS */
577/******************/
578#define SDM_OP_GEN_TRIG_NONE 0
579#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
580#define SDM_OP_GEN_TRIG_AGG_INT 2
581#define SDM_OP_GEN_TRIG_LOADER 4
582#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
583#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7
402 584
403#define SDM_COMP_TYPE_NONE 0 585#define SDM_COMP_TYPE_NONE 0
404#define SDM_COMP_TYPE_WAKE_THREAD 1 586#define SDM_COMP_TYPE_WAKE_THREAD 1
@@ -424,6 +606,8 @@
424/* PRS CONSTANTS */ 606/* PRS CONSTANTS */
425/*****************/ 607/*****************/
426 608
609#define PRS_GFT_CAM_LINES_NO_MATCH 31
610
427/* Async data KCQ CQE */ 611/* Async data KCQ CQE */
428struct async_data { 612struct async_data {
429 __le32 cid; 613 __le32 cid;
@@ -440,20 +624,6 @@ struct coalescing_timeset {
440#define COALESCING_TIMESET_VALID_SHIFT 7 624#define COALESCING_TIMESET_VALID_SHIFT 7
441}; 625};
442 626
443struct common_prs_pf_msg_info {
444 __le32 value;
445#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_MASK 0x1
446#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_SHIFT 0
447#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_MASK 0x1
448#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_SHIFT 1
449#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_MASK 0x1
450#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_SHIFT 2
451#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_MASK 0x1
452#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_SHIFT 3
453#define COMMON_PRS_PF_MSG_INFO_RESERVED_MASK 0xFFFFFFF
454#define COMMON_PRS_PF_MSG_INFO_RESERVED_SHIFT 4
455};
456
457struct common_queue_zone { 627struct common_queue_zone {
458 __le16 ring_drv_data_consumer; 628 __le16 ring_drv_data_consumer;
459 __le16 reserved; 629 __le16 reserved;
@@ -473,6 +643,19 @@ struct vf_pf_channel_eqe_data {
473 struct regpair msg_addr; 643 struct regpair msg_addr;
474}; 644};
475 645
646struct iscsi_eqe_data {
647 __le32 cid;
648 __le16 conn_id;
649 u8 error_code;
650 u8 error_pdu_opcode_reserved;
651#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
652#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0
653#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1
654#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6
655#define ISCSI_EQE_DATA_RESERVED0_MASK 0x1
656#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
657};
658
476struct malicious_vf_eqe_data { 659struct malicious_vf_eqe_data {
477 u8 vf_id; 660 u8 vf_id;
478 u8 err_id; 661 u8 err_id;
@@ -488,8 +671,10 @@ struct initial_cleanup_eqe_data {
488union event_ring_data { 671union event_ring_data {
489 u8 bytes[8]; 672 u8 bytes[8];
490 struct vf_pf_channel_eqe_data vf_pf_channel; 673 struct vf_pf_channel_eqe_data vf_pf_channel;
674 struct iscsi_eqe_data iscsi_info;
491 struct malicious_vf_eqe_data malicious_vf; 675 struct malicious_vf_eqe_data malicious_vf;
492 struct initial_cleanup_eqe_data vf_init_cleanup; 676 struct initial_cleanup_eqe_data vf_init_cleanup;
677 struct regpair roce_handle;
493}; 678};
494 679
495/* Event Ring Entry */ 680/* Event Ring Entry */
@@ -616,6 +801,52 @@ enum db_dest {
616 MAX_DB_DEST 801 MAX_DB_DEST
617}; 802};
618 803
804/* Enum of doorbell DPM types */
805enum db_dpm_type {
806 DPM_LEGACY,
807 DPM_ROCE,
808 DPM_L2_INLINE,
809 DPM_L2_BD,
810 MAX_DB_DPM_TYPE
811};
812
813/* Structure for doorbell data, in L2 DPM mode, for 1st db in a DPM burst */
814struct db_l2_dpm_data {
815 __le16 icid;
816 __le16 bd_prod;
817 __le32 params;
818#define DB_L2_DPM_DATA_SIZE_MASK 0x3F
819#define DB_L2_DPM_DATA_SIZE_SHIFT 0
820#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3
821#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6
822#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF
823#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8
824#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF
825#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16
826#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1
827#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
828#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
829#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
830#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
831#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
832};
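
These MASK/SHIFT pairs are meant to be driven through the GET_FIELD()/SET_FIELD() mask-and-shift helpers defined alongside these structures in common_hsi.h. A hedged sketch of filling the first doorbell of an L2 DPM burst; the field choices are illustrative only:

/* Compose the params word of the first doorbell in an L2 DPM burst.
 * SET_FIELD() clears the field, then ORs in the shifted value.
 */
static void qed_db_l2_dpm_fill(struct db_l2_dpm_data *dpm,
			       u16 icid, u16 bd_prod, u8 num_bds)
{
	u32 params = 0;

	SET_FIELD(params, DB_L2_DPM_DATA_DPM_TYPE, DPM_L2_BD);
	SET_FIELD(params, DB_L2_DPM_DATA_NUM_BDS, num_bds);

	dpm->icid = cpu_to_le16(icid);
	dpm->bd_prod = cpu_to_le16(bd_prod);
	dpm->params = cpu_to_le32(params);
}
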
833
834/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */
835struct db_l2_dpm_sge {
836 struct regpair addr;
837 __le16 nbytes;
838 __le16 bitfields;
839#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
840#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
841#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
842#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
843#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
844#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
845#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
846#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
847 __le32 reserved2;
848};
849
619/* Structure for doorbell address, in legacy mode */ 850/* Structure for doorbell address, in legacy mode */
620struct db_legacy_addr { 851struct db_legacy_addr {
621 __le32 addr; 852 __le32 addr;
@@ -627,6 +858,49 @@ struct db_legacy_addr {
627#define DB_LEGACY_ADDR_ICID_SHIFT 5 858#define DB_LEGACY_ADDR_ICID_SHIFT 5
628}; 859};
629 860
861/* Structure for doorbell address, in PWM mode */
862struct db_pwm_addr {
863 __le32 addr;
864#define DB_PWM_ADDR_RESERVED0_MASK 0x7
865#define DB_PWM_ADDR_RESERVED0_SHIFT 0
866#define DB_PWM_ADDR_OFFSET_MASK 0x7F
867#define DB_PWM_ADDR_OFFSET_SHIFT 3
868#define DB_PWM_ADDR_WID_MASK 0x3
869#define DB_PWM_ADDR_WID_SHIFT 10
870#define DB_PWM_ADDR_DPI_MASK 0xFFFF
871#define DB_PWM_ADDR_DPI_SHIFT 12
872#define DB_PWM_ADDR_RESERVED1_MASK 0xF
873#define DB_PWM_ADDR_RESERVED1_SHIFT 28
874};
875
876/* Parameters to RoCE firmware, passed in EDPM doorbell */
877struct db_roce_dpm_params {
878 __le32 params;
879#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F
880#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0
881#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3
882#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6
883#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF
884#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8
885#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
886#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16
887#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1
888#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27
889#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
890#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
891#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1
892#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29
893#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3
894#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30
895};
896
897/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
898struct db_roce_dpm_data {
899 __le16 icid;
900 __le16 prod_val;
901 struct db_roce_dpm_params params;
902};
903
630/* Igu interrupt command */ 904/* Igu interrupt command */
631enum igu_int_cmd { 905enum igu_int_cmd {
632 IGU_INT_ENABLE = 0, 906 IGU_INT_ENABLE = 0,
@@ -764,6 +1038,19 @@ struct pxp_ptt_entry {
764 struct pxp_pretend_cmd pretend; 1038 struct pxp_pretend_cmd pretend;
765}; 1039};
766 1040
1041/* VF Zone A Permission Register. */
1042struct pxp_vf_zone_a_permission {
1043 __le32 control;
1044#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
1045#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
1046#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
1047#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
1048#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
1049#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
1050#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
1051#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
1052};
1053
767/* RSS hash type */ 1054/* RSS hash type */
768struct rdif_task_context { 1055struct rdif_task_context {
769 __le32 initial_ref_tag; 1056 __le32 initial_ref_tag;
@@ -831,6 +1118,7 @@ struct rdif_task_context {
831 __le32 reserved2; 1118 __le32 reserved2;
832}; 1119};
833 1120
1121/* RSS hash type */
834enum rss_hash_type { 1122enum rss_hash_type {
835 RSS_HASH_TYPE_DEFAULT = 0, 1123 RSS_HASH_TYPE_DEFAULT = 0,
836 RSS_HASH_TYPE_IPV4 = 1, 1124 RSS_HASH_TYPE_IPV4 = 1,
@@ -942,7 +1230,7 @@ struct tdif_task_context {
942}; 1230};
943 1231
944struct timers_context { 1232struct timers_context {
945 __le32 logical_client0; 1233 __le32 logical_client_0;
946#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF 1234#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF
947#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 1235#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
948#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 1236#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
@@ -951,7 +1239,7 @@ struct timers_context {
951#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 1239#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
952#define TIMERS_CONTEXT_RESERVED0_MASK 0x3 1240#define TIMERS_CONTEXT_RESERVED0_MASK 0x3
953#define TIMERS_CONTEXT_RESERVED0_SHIFT 30 1241#define TIMERS_CONTEXT_RESERVED0_SHIFT 30
954 __le32 logical_client1; 1242 __le32 logical_client_1;
955#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF 1243#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF
956#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 1244#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
957#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 1245#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
@@ -960,7 +1248,7 @@ struct timers_context {
960#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 1248#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
961#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 1249#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
962#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 1250#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
963 __le32 logical_client2; 1251 __le32 logical_client_2;
964#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF 1252#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF
965#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 1253#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
966#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 1254#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
@@ -978,3 +1266,4 @@ struct timers_context {
978#define TIMERS_CONTEXT_RESERVED3_SHIFT 29 1266#define TIMERS_CONTEXT_RESERVED3_SHIFT 29
979}; 1267};
980#endif /* __COMMON_HSI__ */ 1268#endif /* __COMMON_HSI__ */
1269#endif
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index b5ebc697d05f..1aa0727c4136 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -13,9 +13,12 @@
13/* ETH FW CONSTANTS */ 13/* ETH FW CONSTANTS */
14/********************/ 14/********************/
15#define ETH_HSI_VER_MAJOR 3 15#define ETH_HSI_VER_MAJOR 3
16#define ETH_HSI_VER_MINOR 0 16#define ETH_HSI_VER_MINOR 10
17#define ETH_CACHE_LINE_SIZE 64 17
18#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
18 19
20#define ETH_CACHE_LINE_SIZE 64
21#define ETH_RX_CQE_GAP 32
19#define ETH_MAX_RAMROD_PER_CON 8 22#define ETH_MAX_RAMROD_PER_CON 8
20#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 23#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
21#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 24#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
@@ -24,15 +27,25 @@
24 27
25#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 28#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
26#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 29#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
30#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
27#define ETH_TX_MAX_LSO_HDR_NBD 4 31#define ETH_TX_MAX_LSO_HDR_NBD 4
28#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 32#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
29#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 33#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
30#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 34#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
31#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 35#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
32#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8)) 36#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
33#define ETH_TX_MAX_LSO_HDR_BYTES 510 37#define ETH_TX_MAX_LSO_HDR_BYTES 510
38#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
39#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
40#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
41#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
42#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
34 43
35#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS 44#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
45#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
46 (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
47#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
48 (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
36 49
37/* Maximum number of buffers, used for RX packet placement */ 50/* Maximum number of buffers, used for RX packet placement */
38#define ETH_RX_MAX_BUFF_PER_PKT 5 51#define ETH_RX_MAX_BUFF_PER_PKT 5
@@ -59,6 +72,8 @@
59#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 72#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
60#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 73#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
61 74
75/* Control frame check constants */
76#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
62 77
63struct eth_tx_1st_bd_flags { 78struct eth_tx_1st_bd_flags {
64 u8 bitfields; 79 u8 bitfields;
@@ -82,10 +97,10 @@ struct eth_tx_1st_bd_flags {
82 97
 83/* The parsing information data for the first tx bd of a given packet. */ 98/* The parsing information data for the first tx bd of a given packet. */
84struct eth_tx_data_1st_bd { 99struct eth_tx_data_1st_bd {
85 __le16 vlan; 100 __le16 vlan;
86 u8 nbds; 101 u8 nbds;
87 struct eth_tx_1st_bd_flags bd_flags; 102 struct eth_tx_1st_bd_flags bd_flags;
88 __le16 bitfields; 103 __le16 bitfields;
89#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 104#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
90#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 105#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
91#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 106#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
@@ -96,7 +111,7 @@ struct eth_tx_data_1st_bd {
96 111
97/* The parsing information data for the second tx bd of a given packet. */ 112/* The parsing information data for the second tx bd of a given packet. */
98struct eth_tx_data_2nd_bd { 113struct eth_tx_data_2nd_bd {
99 __le16 tunn_ip_size; 114 __le16 tunn_ip_size;
100 __le16 bitfields1; 115 __le16 bitfields1;
101#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF 116#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
102#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 117#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
@@ -125,9 +140,14 @@ struct eth_tx_data_2nd_bd {
125#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 140#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
126}; 141};
127 142
143/* Firmware data for L2-EDPM packet. */
144struct eth_edpm_fw_data {
145 struct eth_tx_data_1st_bd data_1st_bd;
146 struct eth_tx_data_2nd_bd data_2nd_bd;
147 __le32 reserved;
148};
149
128struct eth_fast_path_cqe_fw_debug { 150struct eth_fast_path_cqe_fw_debug {
129 u8 reserved0;
130 u8 reserved1;
131 __le16 reserved2; 151 __le16 reserved2;
132}; 152};
133 153
@@ -148,6 +168,17 @@ struct eth_tunnel_parsing_flags {
148#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 168#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
149}; 169};
150 170
171/* PMD flow control bits */
172struct eth_pmd_flow_flags {
173 u8 flags;
174#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1
175#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
176#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
177#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
178#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
179#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
180};
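
The VALID/TOGGLE pair generalizes the per-CQE flag fields that the rest of this patch removes. In a poll-mode (PMD) receive loop, a CQE is typically treated as ready only when VALID is set and TOGGLE matches the phase the driver tracks per ring, which flips on every ring wrap. A minimal sketch using qed's GET_FIELD() helper; qed_pmd_cqe_ready() is hypothetical:

/* A PMD CQE is consumable when its valid bit is set and its toggle
 * bit matches the ring's current phase.
 */
static bool qed_pmd_cqe_ready(struct eth_pmd_flow_flags flags, u8 phase)
{
	return GET_FIELD(flags.flags, ETH_PMD_FLOW_FLAGS_VALID) &&
	       GET_FIELD(flags.flags, ETH_PMD_FLOW_FLAGS_TOGGLE) == phase;
}
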
181
151/* Regular ETH Rx FP CQE. */ 182/* Regular ETH Rx FP CQE. */
152struct eth_fast_path_rx_reg_cqe { 183struct eth_fast_path_rx_reg_cqe {
153 u8 type; 184 u8 type;
@@ -166,64 +197,63 @@ struct eth_fast_path_rx_reg_cqe {
166 u8 placement_offset; 197 u8 placement_offset;
167 struct eth_tunnel_parsing_flags tunnel_pars_flags; 198 struct eth_tunnel_parsing_flags tunnel_pars_flags;
168 u8 bd_num; 199 u8 bd_num;
169 u8 reserved[7]; 200 u8 reserved[9];
170 struct eth_fast_path_cqe_fw_debug fw_debug; 201 struct eth_fast_path_cqe_fw_debug fw_debug;
171 u8 reserved1[3]; 202 u8 reserved1[3];
172 u8 flags; 203 struct eth_pmd_flow_flags pmd_flags;
173#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1
174#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0
175#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1
176#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1
177#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F
178#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2
179}; 204};
180 205
181/* TPA-continue ETH Rx FP CQE. */ 206/* TPA-continue ETH Rx FP CQE. */
182struct eth_fast_path_rx_tpa_cont_cqe { 207struct eth_fast_path_rx_tpa_cont_cqe {
183 u8 type; 208 u8 type;
184 u8 tpa_agg_index; 209 u8 tpa_agg_index;
185 __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; 210 __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
186 u8 reserved[5]; 211 u8 reserved;
187 u8 reserved1; 212 u8 reserved1;
188 __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; 213 __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
214 u8 reserved3[3];
215 struct eth_pmd_flow_flags pmd_flags;
189}; 216};
190 217
191/* TPA-end ETH Rx FP CQE. */ 218/* TPA-end ETH Rx FP CQE. */
192struct eth_fast_path_rx_tpa_end_cqe { 219struct eth_fast_path_rx_tpa_end_cqe {
193 u8 type; 220 u8 type;
194 u8 tpa_agg_index; 221 u8 tpa_agg_index;
195 __le16 total_packet_len; 222 __le16 total_packet_len;
196 u8 num_of_bds; 223 u8 num_of_bds;
197 u8 end_reason; 224 u8 end_reason;
198 __le16 num_of_coalesced_segs; 225 __le16 num_of_coalesced_segs;
199 __le32 ts_delta; 226 __le32 ts_delta;
200 __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]; 227 __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
201 u8 reserved1[3]; 228 __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
202 u8 reserved2; 229 __le16 reserved1;
203 __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE]; 230 u8 reserved2;
231 struct eth_pmd_flow_flags pmd_flags;
204}; 232};
205 233
206/* TPA-start ETH Rx FP CQE. */ 234/* TPA-start ETH Rx FP CQE. */
207struct eth_fast_path_rx_tpa_start_cqe { 235struct eth_fast_path_rx_tpa_start_cqe {
208 u8 type; 236 u8 type;
209 u8 bitfields; 237 u8 bitfields;
210#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 238#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
211#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 239#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
212#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF 240#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
213#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 241#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
214#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 242#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
215#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 243#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
216 __le16 seg_len; 244 __le16 seg_len;
217 struct parsing_and_err_flags pars_flags; 245 struct parsing_and_err_flags pars_flags;
218 __le16 vlan_tag; 246 __le16 vlan_tag;
219 __le32 rss_hash; 247 __le32 rss_hash;
220 __le16 len_on_first_bd; 248 __le16 len_on_first_bd;
221 u8 placement_offset; 249 u8 placement_offset;
222 struct eth_tunnel_parsing_flags tunnel_pars_flags; 250 struct eth_tunnel_parsing_flags tunnel_pars_flags;
223 u8 tpa_agg_index; 251 u8 tpa_agg_index;
224 u8 header_len; 252 u8 header_len;
225 __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; 253 __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
226 struct eth_fast_path_cqe_fw_debug fw_debug; 254 struct eth_fast_path_cqe_fw_debug fw_debug;
255 u8 reserved;
256 struct eth_pmd_flow_flags pmd_flags;
227}; 257};
228 258
229/* The L4 pseudo checksum mode for Ethernet */ 259/* The L4 pseudo checksum mode for Ethernet */
@@ -245,15 +275,7 @@ struct eth_slow_path_rx_cqe {
245 u8 reserved[25]; 275 u8 reserved[25];
246 __le16 echo; 276 __le16 echo;
247 u8 reserved1; 277 u8 reserved1;
248 u8 flags; 278 struct eth_pmd_flow_flags pmd_flags;
249/* for PMD mode - valid indication */
250#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1
251#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0
252/* for PMD mode - valid toggle indication */
253#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1
254#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
255#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F
256#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2
257}; 279};
258 280
259/* union for all ETH Rx CQE types */ 281/* union for all ETH Rx CQE types */
@@ -276,6 +298,11 @@ enum eth_rx_cqe_type {
276 MAX_ETH_RX_CQE_TYPE 298 MAX_ETH_RX_CQE_TYPE
277}; 299};
278 300
301struct eth_rx_pmd_cqe {
302 union eth_rx_cqe cqe;
303 u8 reserved[ETH_RX_CQE_GAP];
304};
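
With ETH_RX_CQE_GAP at 32 and the CQE union itself summing to 32 bytes, each PMD ring entry spans exactly one 64-byte cache line (ETH_CACHE_LINE_SIZE). A sketch of a compile-time check, assuming those sizes hold:

#include <linux/bug.h>

static inline void eth_rx_pmd_cqe_size_check(void)
{
	/* One padded PMD CQE per cache line: 32-byte CQE + 32-byte gap. */
	BUILD_BUG_ON(sizeof(struct eth_rx_pmd_cqe) != ETH_CACHE_LINE_SIZE);
}
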
305
279enum eth_rx_tunn_type { 306enum eth_rx_tunn_type {
280 ETH_RX_NO_TUNN, 307 ETH_RX_NO_TUNN,
281 ETH_RX_TUNN_GENEVE, 308 ETH_RX_TUNN_GENEVE,
@@ -313,8 +340,8 @@ struct eth_tx_2nd_bd {
313 340
314/* The parsing information data for the third tx bd of a given packet. */ 341/* The parsing information data for the third tx bd of a given packet. */
315struct eth_tx_data_3rd_bd { 342struct eth_tx_data_3rd_bd {
316 __le16 lso_mss; 343 __le16 lso_mss;
317 __le16 bitfields; 344 __le16 bitfields;
318#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF 345#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
319#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 346#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
320#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF 347#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
@@ -323,8 +350,8 @@ struct eth_tx_data_3rd_bd {
323#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 350#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
324#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F 351#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
325#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 352#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
326 u8 tunn_l4_hdr_start_offset_w; 353 u8 tunn_l4_hdr_start_offset_w;
327 u8 tunn_hdr_size_w; 354 u8 tunn_hdr_size_w;
328}; 355};
329 356
330/* The third tx bd of a given packet */ 357/* The third tx bd of a given packet */
@@ -355,10 +382,10 @@ struct eth_tx_bd {
355}; 382};
356 383
357union eth_tx_bd_types { 384union eth_tx_bd_types {
358 struct eth_tx_1st_bd first_bd; 385 struct eth_tx_1st_bd first_bd;
359 struct eth_tx_2nd_bd second_bd; 386 struct eth_tx_2nd_bd second_bd;
360 struct eth_tx_3rd_bd third_bd; 387 struct eth_tx_3rd_bd third_bd;
361 struct eth_tx_bd reg_bd; 388 struct eth_tx_bd reg_bd;
362}; 389};
363 390
364/* Mstorm Queue Zone */ 391/* Mstorm Queue Zone */
@@ -389,8 +416,8 @@ struct eth_db_data {
389#define ETH_DB_DATA_RESERVED_SHIFT 5 416#define ETH_DB_DATA_RESERVED_SHIFT 5
390#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 417#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
391#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 418#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
392 u8 agg_flags; 419 u8 agg_flags;
393 __le16 bd_prod; 420 __le16 bd_prod;
394}; 421};
395 422
396#endif /* __ETH_COMMON__ */ 423#endif /* __ETH_COMMON__ */
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index b3c0feb15ae9..8f64b1223c2f 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -311,7 +311,7 @@ struct iscsi_login_req_hdr {
311#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 311#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
312#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF 312#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
313#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 313#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
314 __le32 isid_TABC; 314 __le32 isid_tabc;
315 __le16 tsih; 315 __le16 tsih;
316 __le16 isid_d; 316 __le16 isid_d;
317 __le32 itt; 317 __le32 itt;
@@ -464,7 +464,7 @@ struct iscsi_login_response_hdr {
464#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 464#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
465#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF 465#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
466#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 466#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
467 __le32 isid_TABC; 467 __le32 isid_tabc;
468 __le16 tsih; 468 __le16 tsih;
469 __le16 isid_d; 469 __le16 isid_d;
470 __le32 itt; 470 __le32 itt;
@@ -688,8 +688,7 @@ union iscsi_cqe {
688enum iscsi_cqes_type { 688enum iscsi_cqes_type {
689 ISCSI_CQE_TYPE_SOLICITED = 1, 689 ISCSI_CQE_TYPE_SOLICITED = 1,
690 ISCSI_CQE_TYPE_UNSOLICITED, 690 ISCSI_CQE_TYPE_UNSOLICITED,
691 ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE 691 ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE,
692 ,
693 ISCSI_CQE_TYPE_TASK_CLEANUP, 692 ISCSI_CQE_TYPE_TASK_CLEANUP,
694 ISCSI_CQE_TYPE_DUMMY, 693 ISCSI_CQE_TYPE_DUMMY,
695 MAX_ISCSI_CQES_TYPE 694 MAX_ISCSI_CQES_TYPE
@@ -769,9 +768,9 @@ enum iscsi_eqe_opcode {
769 ISCSI_EVENT_TYPE_UPDATE_CONN, 768 ISCSI_EVENT_TYPE_UPDATE_CONN,
770 ISCSI_EVENT_TYPE_CLEAR_SQ, 769 ISCSI_EVENT_TYPE_CLEAR_SQ,
771 ISCSI_EVENT_TYPE_TERMINATE_CONN, 770 ISCSI_EVENT_TYPE_TERMINATE_CONN,
771 ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
772 ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, 772 ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
773 ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, 773 ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
774 RESERVED8,
775 RESERVED9, 774 RESERVED9,
776 ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, 775 ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
777 ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, 776 ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
@@ -867,6 +866,7 @@ enum iscsi_ramrod_cmd_id {
867 ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4, 866 ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4,
868 ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, 867 ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
869 ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, 868 ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
869 ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
870 MAX_ISCSI_RAMROD_CMD_ID 870 MAX_ISCSI_RAMROD_CMD_ID
871}; 871};
872 872
@@ -883,6 +883,16 @@ union iscsi_seq_num {
883 __le16 r2t_sn; 883 __le16 r2t_sn;
884}; 884};
885 885
886struct iscsi_spe_conn_mac_update {
887 struct iscsi_slow_path_hdr hdr;
888 __le16 conn_id;
889 __le32 fw_cid;
890 __le16 remote_mac_addr_lo;
891 __le16 remote_mac_addr_mid;
892 __le16 remote_mac_addr_hi;
893 u8 reserved0[2];
894};
895
886struct iscsi_spe_conn_offload { 896struct iscsi_spe_conn_offload {
887 struct iscsi_slow_path_hdr hdr; 897 struct iscsi_slow_path_hdr hdr;
888 __le16 conn_id; 898 __le16 conn_id;
@@ -1302,14 +1312,6 @@ struct mstorm_iscsi_stats_drv {
1302 struct regpair iscsi_rx_dropped_pdus_task_not_valid; 1312 struct regpair iscsi_rx_dropped_pdus_task_not_valid;
1303}; 1313};
1304 1314
1305struct ooo_opaque {
1306 __le32 cid;
1307 u8 drop_isle;
1308 u8 drop_size;
1309 u8 ooo_opcode;
1310 u8 ooo_isle;
1311};
1312
1313struct pstorm_iscsi_stats_drv { 1315struct pstorm_iscsi_stats_drv {
1314 struct regpair iscsi_tx_bytes_cnt; 1316 struct regpair iscsi_tx_bytes_cnt;
1315 struct regpair iscsi_tx_packet_cnt; 1317 struct regpair iscsi_tx_packet_cnt;
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 7e441bdeabdc..72d88cf3ca25 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -16,19 +16,6 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/qed/common_hsi.h> 17#include <linux/qed/common_hsi.h>
18 18
19/* dma_addr_t manip */
20#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
21#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
22#define DMA_REGPAIR_LE(x, val) do { \
23 (x).hi = DMA_HI_LE((val)); \
24 (x).lo = DMA_LO_LE((val)); \
25 } while (0)
26
27#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
28#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
29#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
30#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
31
32enum qed_chain_mode { 19enum qed_chain_mode {
33 /* Each Page contains a next pointer at its end */ 20 /* Each Page contains a next pointer at its end */
34 QED_CHAIN_MODE_NEXT_PTR, 21 QED_CHAIN_MODE_NEXT_PTR,
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 4475a9d8ae15..33c24ebc9b7f 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -23,6 +23,9 @@ struct qed_dev_eth_info {
23 23
24 u8 port_mac[ETH_ALEN]; 24 u8 port_mac[ETH_ALEN];
25 u8 num_vlan_filters; 25 u8 num_vlan_filters;
26
27 /* Legacy VF - this affects the datapath, so qede has to know */
28 bool is_legacy;
26}; 29};
27 30
28struct qed_update_vport_rss_params { 31struct qed_update_vport_rss_params {
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index d6c4177df7cb..f9ae903bbb84 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -34,6 +34,8 @@ enum dcbx_protocol_type {
34 DCBX_MAX_PROTOCOL_TYPE 34 DCBX_MAX_PROTOCOL_TYPE
35}; 35};
36 36
37#define QED_ROCE_PROTOCOL_INDEX (3)
38
37#ifdef CONFIG_DCB 39#ifdef CONFIG_DCB
38#define QED_LLDP_CHASSIS_ID_STAT_LEN 4 40#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
39#define QED_LLDP_PORT_ID_STAT_LEN 4 41#define QED_LLDP_PORT_ID_STAT_LEN 4
@@ -260,15 +262,15 @@ struct qed_dev_info {
260 /* MFW version */ 262 /* MFW version */
261 u32 mfw_rev; 263 u32 mfw_rev;
262 264
263 bool rdma_supported;
264
265 u32 flash_size; 265 u32 flash_size;
266 u8 mf_mode; 266 u8 mf_mode;
267 bool tx_switching; 267 bool tx_switching;
268 bool rdma_supported;
268}; 269};
269 270
270enum qed_sb_type { 271enum qed_sb_type {
271 QED_SB_TYPE_L2_QUEUE, 272 QED_SB_TYPE_L2_QUEUE,
273 QED_SB_TYPE_CNQ,
272}; 274};
273 275
274enum qed_protocol { 276enum qed_protocol {
@@ -276,6 +278,21 @@ enum qed_protocol {
276 QED_PROTOCOL_ISCSI, 278 QED_PROTOCOL_ISCSI,
277}; 279};
278 280
281enum qed_link_mode_bits {
282 QED_LM_FIBRE_BIT = BIT(0),
283 QED_LM_Autoneg_BIT = BIT(1),
284 QED_LM_Asym_Pause_BIT = BIT(2),
285 QED_LM_Pause_BIT = BIT(3),
286 QED_LM_1000baseT_Half_BIT = BIT(4),
287 QED_LM_1000baseT_Full_BIT = BIT(5),
288 QED_LM_10000baseKR_Full_BIT = BIT(6),
289 QED_LM_25000baseKR_Full_BIT = BIT(7),
290 QED_LM_40000baseLR4_Full_BIT = BIT(8),
291 QED_LM_50000baseKR2_Full_BIT = BIT(9),
292 QED_LM_100000baseKR4_Full_BIT = BIT(10),
293 QED_LM_COUNT = 11
294};
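
qede reports these QED_LM_* bits through the ethtool ksettings interface this series converts drivers to. A hedged sketch of the kind of translation involved; the table below covers only a few modes and is not the exact qede mapping:

#include <linux/ethtool.h>

/* Translate QED_LM_* capability bits into ethtool link-mode bits for
 * get_link_ksettings(). 'modes' is an ethtool link-mode bitmap.
 */
static void qed_lm_to_ethtool(u32 caps, unsigned long *modes)
{
	if (caps & QED_LM_FIBRE_BIT)
		__set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, modes);
	if (caps & QED_LM_Autoneg_BIT)
		__set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, modes);
	if (caps & QED_LM_10000baseKR_Full_BIT)
		__set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, modes);
	if (caps & QED_LM_40000baseLR4_Full_BIT)
		__set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, modes);
}
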
295
279struct qed_link_params { 296struct qed_link_params {
280 bool link_up; 297 bool link_up;
281 298
@@ -303,9 +320,11 @@ struct qed_link_params {
303struct qed_link_output { 320struct qed_link_output {
304 bool link_up; 321 bool link_up;
305 322
306 u32 supported_caps; /* In SUPPORTED defs */ 323 /* In QED_LM_* defs */
307 u32 advertised_caps; /* In ADVERTISED defs */ 324 u32 supported_caps;
308 u32 lp_caps; /* In ADVERTISED defs */ 325 u32 advertised_caps;
326 u32 lp_caps;
327
309 u32 speed; /* In Mb/s */ 328 u32 speed; /* In Mb/s */
310 u8 duplex; /* In DUPLEX defs */ 329 u8 duplex; /* In DUPLEX defs */
311 u8 port; /* In PORT defs */ 330 u8 port; /* In PORT defs */
@@ -438,6 +457,10 @@ struct qed_common_ops {
438 void (*simd_handler_clean)(struct qed_dev *cdev, 457 void (*simd_handler_clean)(struct qed_dev *cdev,
439 int index); 458 int index);
440 459
460 int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);
461
462 int (*dbg_all_data_size) (struct qed_dev *cdev);
463
441/** 464/**
442 * @brief can_link_change - can the instance change the link or not 465 * @brief can_link_change - can the instance change the link or not
443 * 466 *
@@ -606,8 +629,9 @@ enum DP_MODULE {
606 QED_MSG_SP = 0x100000, 629 QED_MSG_SP = 0x100000,
607 QED_MSG_STORAGE = 0x200000, 630 QED_MSG_STORAGE = 0x200000,
608 QED_MSG_CXT = 0x800000, 631 QED_MSG_CXT = 0x800000,
632 QED_MSG_LL2 = 0x1000000,
609 QED_MSG_ILT = 0x2000000, 633 QED_MSG_ILT = 0x2000000,
610 QED_MSG_ROCE = 0x4000000, 634 QED_MSG_RDMA = 0x4000000,
611 QED_MSG_DEBUG = 0x8000000, 635 QED_MSG_DEBUG = 0x8000000,
612 /* to be added...up to 0x8000000 */ 636 /* to be added...up to 0x8000000 */
613}; 637};
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
new file mode 100644
index 000000000000..fd75c265dba3
--- /dev/null
+++ b/include/linux/qed/qed_ll2_if.h
@@ -0,0 +1,139 @@
1/* QLogic qed NIC Driver
2 *
3 * Copyright (c) 2015 QLogic Corporation
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#ifndef _QED_LL2_IF_H
11#define _QED_LL2_IF_H
12
13#include <linux/types.h>
14#include <linux/interrupt.h>
15#include <linux/netdevice.h>
16#include <linux/pci.h>
17#include <linux/skbuff.h>
18#include <linux/version.h>
19#include <linux/kernel.h>
20#include <linux/slab.h>
21#include <linux/qed/qed_if.h>
22
23struct qed_ll2_stats {
24 u64 gsi_invalid_hdr;
25 u64 gsi_invalid_pkt_length;
26 u64 gsi_unsupported_pkt_typ;
27 u64 gsi_crcchksm_error;
28
29 u64 packet_too_big_discard;
30 u64 no_buff_discard;
31
32 u64 rcv_ucast_bytes;
33 u64 rcv_mcast_bytes;
34 u64 rcv_bcast_bytes;
35 u64 rcv_ucast_pkts;
36 u64 rcv_mcast_pkts;
37 u64 rcv_bcast_pkts;
38
39 u64 sent_ucast_bytes;
40 u64 sent_mcast_bytes;
41 u64 sent_bcast_bytes;
42 u64 sent_ucast_pkts;
43 u64 sent_mcast_pkts;
44 u64 sent_bcast_pkts;
45};
46
47#define QED_LL2_UNUSED_HANDLE (0xff)
48
49struct qed_ll2_cb_ops {
50 int (*rx_cb)(void *, struct sk_buff *, u32, u32);
51 int (*tx_cb)(void *, struct sk_buff *, bool);
52};
53
54struct qed_ll2_params {
55 u16 mtu;
56 bool drop_ttl0_packets;
57 bool rx_vlan_stripping;
58 u8 tx_tc;
59 bool frags_mapped;
60 u8 ll2_mac_address[ETH_ALEN];
61};
62
63struct qed_ll2_ops {
64/**
65 * @brief start - initializes ll2
66 *
67 * @param cdev
68 * @param params - protocol driver configuration for the ll2.
69 *
70 * @return 0 on success, otherwise error value.
71 */
72 int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
73
74/**
75 * @brief stop - stops the ll2
76 *
77 * @param cdev
78 *
79 * @return 0 on success, otherwise error value.
80 */
81 int (*stop)(struct qed_dev *cdev);
82
83/**
84 * @brief start_xmit - transmits an skb over the ll2 interface
85 *
86 * @param cdev
87 * @param skb
88 *
89 * @return 0 on success, otherwise error value.
90 */
91 int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb);
92
93/**
 94 * @brief register_cb_ops - protocol driver registers its callbacks for Rx/Tx
95 * packets. Should be called before `start'.
96 *
97 * @param cdev
98 * @param cookie - to be passed to the callback functions.
99 * @param ops - the callback functions to register for Rx / Tx.
100 *
101 * @return 0 on success, otherwise error value.
102 */
103 void (*register_cb_ops)(struct qed_dev *cdev,
104 const struct qed_ll2_cb_ops *ops,
105 void *cookie);
106
107/**
108 * @brief get LL2 related statistics
109 *
110 * @param cdev
 111 * @param stats - pointer to a struct that will be filled with the stats
112 *
113 * @return 0 on success, error otherwise.
114 */
115 int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
116};
117
118#ifdef CONFIG_QED_LL2
119int qed_ll2_alloc_if(struct qed_dev *);
120void qed_ll2_dealloc_if(struct qed_dev *);
121#else
122static const struct qed_ll2_ops qed_ll2_ops_pass = {
123 .start = NULL,
124 .stop = NULL,
125 .start_xmit = NULL,
126 .register_cb_ops = NULL,
127 .get_stats = NULL,
128};
129
130static inline int qed_ll2_alloc_if(struct qed_dev *cdev)
131{
132 return 0;
133}
134
135static inline void qed_ll2_dealloc_if(struct qed_dev *cdev)
136{
137}
138#endif
139#endif
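
The comments above imply a fixed call order: register the Rx/Tx callbacks, then start LL2, then transmit. A hedged bring-up sketch; the my_* names are hypothetical and error unwinding is trimmed:

#include <linux/qed/qed_ll2_if.h>

static int my_rx_cb(void *ctx, struct sk_buff *skb, u32 arg1, u32 arg2)
{
	return 0;			/* Rx packet consumed */
}

static int my_tx_cb(void *ctx, struct sk_buff *skb, bool success)
{
	return 0;			/* Tx completion handled */
}

static const struct qed_ll2_cb_ops my_ll2_cbs = {
	.rx_cb = my_rx_cb,
	.tx_cb = my_tx_cb,
};

static int my_ll2_bringup(struct qed_dev *cdev,
			  const struct qed_ll2_ops *ops, void *ctx)
{
	struct qed_ll2_params params = {
		.mtu = 1500,
		.drop_ttl0_packets = true,
		.rx_vlan_stripping = true,
	};

	/* Callbacks must be in place before `start'. */
	ops->register_cb_ops(cdev, &my_ll2_cbs, ctx);
	return ops->start(cdev, &params);
}
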
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h
new file mode 100644
index 000000000000..53047d3fa678
--- /dev/null
+++ b/include/linux/qed/qed_roce_if.h
@@ -0,0 +1,604 @@
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _QED_ROCE_IF_H
33#define _QED_ROCE_IF_H
34#include <linux/types.h>
35#include <linux/delay.h>
36#include <linux/list.h>
37#include <linux/mutex.h>
38#include <linux/pci.h>
39#include <linux/slab.h>
40#include <linux/qed/qed_if.h>
41#include <linux/qed/qed_ll2_if.h>
42#include <linux/qed/rdma_common.h>
43
44enum qed_roce_ll2_tx_dest {
45 /* Light L2 TX Destination to the Network */
46 QED_ROCE_LL2_TX_DEST_NW,
47
48 /* Light L2 TX Destination to the Loopback */
49 QED_ROCE_LL2_TX_DEST_LB,
50 QED_ROCE_LL2_TX_DEST_MAX
51};
52
53#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
54
55/* rdma interface */
56
57enum qed_roce_qp_state {
58 QED_ROCE_QP_STATE_RESET,
59 QED_ROCE_QP_STATE_INIT,
60 QED_ROCE_QP_STATE_RTR,
61 QED_ROCE_QP_STATE_RTS,
62 QED_ROCE_QP_STATE_SQD,
63 QED_ROCE_QP_STATE_ERR,
64 QED_ROCE_QP_STATE_SQE
65};
66
67enum qed_rdma_tid_type {
68 QED_RDMA_TID_REGISTERED_MR,
69 QED_RDMA_TID_FMR,
70 QED_RDMA_TID_MW_TYPE1,
71 QED_RDMA_TID_MW_TYPE2A
72};
73
74struct qed_rdma_events {
75 void *context;
76 void (*affiliated_event)(void *context, u8 fw_event_code,
77 void *fw_handle);
78 void (*unaffiliated_event)(void *context, u8 event_code);
79};
80
81struct qed_rdma_device {
82 u32 vendor_id;
83 u32 vendor_part_id;
84 u32 hw_ver;
85 u64 fw_ver;
86
87 u64 node_guid;
88 u64 sys_image_guid;
89
90 u8 max_cnq;
91 u8 max_sge;
92 u8 max_srq_sge;
93 u16 max_inline;
94 u32 max_wqe;
95 u32 max_srq_wqe;
96 u8 max_qp_resp_rd_atomic_resc;
97 u8 max_qp_req_rd_atomic_resc;
98 u64 max_dev_resp_rd_atomic_resc;
99 u32 max_cq;
100 u32 max_qp;
101 u32 max_srq;
102 u32 max_mr;
103 u64 max_mr_size;
104 u32 max_cqe;
105 u32 max_mw;
106 u32 max_fmr;
107 u32 max_mr_mw_fmr_pbl;
108 u64 max_mr_mw_fmr_size;
109 u32 max_pd;
110 u32 max_ah;
111 u8 max_pkey;
112 u16 max_srq_wr;
113 u8 max_stats_queues;
114 u32 dev_caps;
115
 116 /* Ability to support RNR-NAK generation */
117
118#define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1
119#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0
 120 /* Ability to support port shutdown */
121#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1
122#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1
 123 /* Ability to support the port-active event */
124#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1
125#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2
 126 /* Ability to support the port-change event */
127#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1
128#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3
 129 /* Ability to support a system image GUID */
130#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1
131#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4
 132 /* Ability to support a bad P_Key counter */
133#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1
134#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5
 135 /* Ability to support atomic operations */
136#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1
137#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6
138#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1
139#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7
 140 /* Ability to support modifying the maximum number of
141 * outstanding work requests per QP
142 */
143#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1
144#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8
 145 /* Ability to support automatic path migration */
146#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1
147#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9
 148 /* Ability to support the base memory management extensions */
149#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1
150#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10
151#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1
152#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11
 153 /* Ability to support multiple page sizes per memory region */
154#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1
155#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12
 156 /* Ability to support a block-list physical buffer list */
157#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1
158#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13
 159 /* Ability to support zero-based virtual addresses */
160#define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1
161#define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14
 162 /* Ability to support local invalidate fencing */
163#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1
164#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15
 165 /* Ability to support loopback on QP */
166#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1
167#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16
168 u64 page_size_caps;
169 u8 dev_ack_delay;
170 u32 reserved_lkey;
171 u32 bad_pkey_counter;
172 struct qed_rdma_events events;
173};
174
175enum qed_port_state {
176 QED_RDMA_PORT_UP,
177 QED_RDMA_PORT_DOWN,
178};
179
180enum qed_roce_capability {
181 QED_ROCE_V1 = 1 << 0,
182 QED_ROCE_V2 = 1 << 1,
183};
184
185struct qed_rdma_port {
186 enum qed_port_state port_state;
187 int link_speed;
188 u64 max_msg_size;
189 u8 source_gid_table_len;
190 void *source_gid_table_ptr;
191 u8 pkey_table_len;
192 void *pkey_table_ptr;
193 u32 pkey_bad_counter;
194 enum qed_roce_capability capability;
195};
196
197struct qed_rdma_cnq_params {
198 u8 num_pbl_pages;
199 u64 pbl_ptr;
200};
201
202/* The CQ Mode affects the CQ doorbell transaction size.
203 * 64/32 bit machines should configure to 32/16 bits respectively.
204 */
205enum qed_rdma_cq_mode {
206 QED_RDMA_CQ_MODE_16_BITS,
207 QED_RDMA_CQ_MODE_32_BITS,
208};
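
Following the comment above, a 64-bit machine should use 32-bit CQ doorbells and a 32-bit machine 16-bit ones. One way a consumer might encode that choice (a sketch, not qedr's actual code):

static enum qed_rdma_cq_mode qed_pick_cq_mode(void)
{
#if BITS_PER_LONG == 64
	return QED_RDMA_CQ_MODE_32_BITS;
#else
	return QED_RDMA_CQ_MODE_16_BITS;
#endif
}
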
209
210struct qed_roce_dcqcn_params {
211 u8 notification_point;
212 u8 reaction_point;
213
214 /* fields for notification point */
215 u32 cnp_send_timeout;
216
217 /* fields for reaction point */
218 u32 rl_bc_rate;
219 u16 rl_max_rate;
220 u16 rl_r_ai;
221 u16 rl_r_hai;
222 u16 dcqcn_g;
223 u32 dcqcn_k_us;
224 u32 dcqcn_timeout_us;
225};
226
227struct qed_rdma_start_in_params {
228 struct qed_rdma_events *events;
229 struct qed_rdma_cnq_params cnq_pbl_list[128];
230 u8 desired_cnq;
231 enum qed_rdma_cq_mode cq_mode;
232 struct qed_roce_dcqcn_params dcqcn_params;
233 u16 max_mtu;
234 u8 mac_addr[ETH_ALEN];
235 u8 iwarp_flags;
236};
237
238struct qed_rdma_add_user_out_params {
239 u16 dpi;
240 u64 dpi_addr;
241 u64 dpi_phys_addr;
242 u32 dpi_size;
243};
244
245enum roce_mode {
246 ROCE_V1,
247 ROCE_V2_IPV4,
248 ROCE_V2_IPV6,
249 MAX_ROCE_MODE
250};
251
252union qed_gid {
253 u8 bytes[16];
254 u16 words[8];
255 u32 dwords[4];
256 u64 qwords[2];
257 u32 ipv4_addr;
258};
259
260struct qed_rdma_register_tid_in_params {
261 u32 itid;
262 enum qed_rdma_tid_type tid_type;
263 u8 key;
264 u16 pd;
265 bool local_read;
266 bool local_write;
267 bool remote_read;
268 bool remote_write;
269 bool remote_atomic;
270 bool mw_bind;
271 u64 pbl_ptr;
272 bool pbl_two_level;
273 u8 pbl_page_size_log;
274 u8 page_size_log;
275 u32 fbo;
276 u64 length;
277 u64 vaddr;
278 bool zbva;
279 bool phy_mr;
280 bool dma_mr;
281
282 bool dif_enabled;
283 u64 dif_error_addr;
284 u64 dif_runt_addr;
285};
286
287struct qed_rdma_create_cq_in_params {
288 u32 cq_handle_lo;
289 u32 cq_handle_hi;
290 u32 cq_size;
291 u16 dpi;
292 bool pbl_two_level;
293 u64 pbl_ptr;
294 u16 pbl_num_pages;
295 u8 pbl_page_size_log;
296 u8 cnq_id;
297 u16 int_timeout;
298};
299
300struct qed_rdma_create_srq_in_params {
301 u64 pbl_base_addr;
302 u64 prod_pair_addr;
303 u16 num_pages;
304 u16 pd_id;
305 u16 page_size;
306};
307
308struct qed_rdma_destroy_cq_in_params {
309 u16 icid;
310};
311
312struct qed_rdma_destroy_cq_out_params {
313 u16 num_cq_notif;
314};
315
316struct qed_rdma_create_qp_in_params {
317 u32 qp_handle_lo;
318 u32 qp_handle_hi;
319 u32 qp_handle_async_lo;
320 u32 qp_handle_async_hi;
321 bool use_srq;
322 bool signal_all;
323 bool fmr_and_reserved_lkey;
324 u16 pd;
325 u16 dpi;
326 u16 sq_cq_id;
327 u16 sq_num_pages;
328 u64 sq_pbl_ptr;
329 u8 max_sq_sges;
330 u16 rq_cq_id;
331 u16 rq_num_pages;
332 u64 rq_pbl_ptr;
333 u16 srq_id;
334 u8 stats_queue;
335};
336
337struct qed_rdma_create_qp_out_params {
338 u32 qp_id;
339 u16 icid;
340 void *rq_pbl_virt;
341 dma_addr_t rq_pbl_phys;
342 void *sq_pbl_virt;
343 dma_addr_t sq_pbl_phys;
344};
345
346struct qed_rdma_modify_qp_in_params {
347 u32 modify_flags;
348#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1
349#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0
350#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1
351#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1
352#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1
353#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2
354#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1
355#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3
356#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1
357#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4
358#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1
359#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5
360#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1
361#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6
362#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1
363#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7
364#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1
365#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8
366#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1
367#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9
368#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1
369#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10
370#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1
371#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11
372#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1
373#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12
374#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1
375#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13
376#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1
377#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14
378
379 enum qed_roce_qp_state new_state;
380 u16 pkey;
381 bool incoming_rdma_read_en;
382 bool incoming_rdma_write_en;
383 bool incoming_atomic_en;
384 bool e2e_flow_control_en;
385 u32 dest_qp;
386 bool lb_indication;
387 u16 mtu;
388 u8 traffic_class_tos;
389 u8 hop_limit_ttl;
390 u32 flow_label;
391 union qed_gid sgid;
392 union qed_gid dgid;
393 u16 udp_src_port;
394
395 u16 vlan_id;
396
397 u32 rq_psn;
398 u32 sq_psn;
399 u8 max_rd_atomic_resp;
400 u8 max_rd_atomic_req;
401 u32 ack_timeout;
402 u8 retry_cnt;
403 u8 rnr_retry_cnt;
404 u8 min_rnr_nak_timer;
405 bool sqd_async;
406 u8 remote_mac_addr[6];
407 u8 local_mac_addr[6];
408 bool use_local_mac;
409 enum roce_mode roce_mode;
410};
411
412struct qed_rdma_query_qp_out_params {
413 enum qed_roce_qp_state state;
414 u32 rq_psn;
415 u32 sq_psn;
416 bool draining;
417 u16 mtu;
418 u32 dest_qp;
419 bool incoming_rdma_read_en;
420 bool incoming_rdma_write_en;
421 bool incoming_atomic_en;
422 bool e2e_flow_control_en;
423 union qed_gid sgid;
424 union qed_gid dgid;
425 u32 flow_label;
426 u8 hop_limit_ttl;
427 u8 traffic_class_tos;
428 u32 timeout;
429 u8 rnr_retry;
430 u8 retry_cnt;
431 u8 min_rnr_nak_timer;
432 u16 pkey_index;
433 u8 max_rd_atomic;
434 u8 max_dest_rd_atomic;
435 bool sqd_async;
436};
437
438struct qed_rdma_create_srq_out_params {
439 u16 srq_id;
440};
441
442struct qed_rdma_destroy_srq_in_params {
443 u16 srq_id;
444};
445
446struct qed_rdma_modify_srq_in_params {
447 u32 wqe_limit;
448 u16 srq_id;
449};
450
451struct qed_rdma_stats_out_params {
452 u64 sent_bytes;
453 u64 sent_pkts;
454 u64 rcv_bytes;
455 u64 rcv_pkts;
456};
457
458struct qed_rdma_counters_out_params {
459 u64 pd_count;
460 u64 max_pd;
461 u64 dpi_count;
462 u64 max_dpi;
463 u64 cq_count;
464 u64 max_cq;
465 u64 qp_count;
466 u64 max_qp;
467 u64 tid_count;
468 u64 max_tid;
469};
470
471#define QED_ROCE_TX_HEAD_FAILURE (1)
472#define QED_ROCE_TX_FRAG_FAILURE (2)
473
474struct qed_roce_ll2_header {
475 void *vaddr;
476 dma_addr_t baddr;
477 size_t len;
478};
479
480struct qed_roce_ll2_buffer {
481 dma_addr_t baddr;
482 size_t len;
483};
484
485struct qed_roce_ll2_packet {
486 struct qed_roce_ll2_header header;
487 int n_seg;
488 struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
489 int roce_mode;
490 enum qed_roce_ll2_tx_dest tx_dest;
491};
492
493struct qed_roce_ll2_tx_params {
494 int reserved;
495};
496
497struct qed_roce_ll2_rx_params {
498 u16 vlan_id;
499 u8 smac[ETH_ALEN];
500 int rc;
501};
502
503struct qed_roce_ll2_cbs {
504 void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt);
505
506 void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt,
507 struct qed_roce_ll2_rx_params *params);
508};
509
510struct qed_roce_ll2_params {
511 u16 max_rx_buffers;
512 u16 max_tx_buffers;
513 u16 mtu;
514 u8 mac_address[ETH_ALEN];
515 struct qed_roce_ll2_cbs cbs;
516 void *cb_cookie;
517};
518
519struct qed_roce_ll2_info {
520 u8 handle;
521 struct qed_roce_ll2_cbs cbs;
522 u8 mac_address[ETH_ALEN];
523 void *cb_cookie;
524
525 /* Lock to protect ll2 */
526 struct mutex lock;
527};
528
529enum qed_rdma_type {
530 QED_RDMA_TYPE_ROCE,
531};
532
533struct qed_dev_rdma_info {
534 struct qed_dev_info common;
535 enum qed_rdma_type rdma_type;
536};
537
538struct qed_rdma_ops {
539 const struct qed_common_ops *common;
540
541 int (*fill_dev_info)(struct qed_dev *cdev,
542 struct qed_dev_rdma_info *info);
543 void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);
544
545 int (*rdma_init)(struct qed_dev *dev,
546 struct qed_rdma_start_in_params *iparams);
547
548 int (*rdma_add_user)(void *rdma_cxt,
549 struct qed_rdma_add_user_out_params *oparams);
550
551 void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
552 int (*rdma_stop)(void *rdma_cxt);
553 struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt);
554 struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt);
555 int (*rdma_get_start_sb)(struct qed_dev *cdev);
556 int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
557 void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
558 int (*rdma_get_rdma_int)(struct qed_dev *cdev,
559 struct qed_int_info *info);
560 int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
561 int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
562 void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
563 int (*rdma_create_cq)(void *rdma_cxt,
564 struct qed_rdma_create_cq_in_params *params,
565 u16 *icid);
566 int (*rdma_destroy_cq)(void *rdma_cxt,
567 struct qed_rdma_destroy_cq_in_params *iparams,
568 struct qed_rdma_destroy_cq_out_params *oparams);
569 struct qed_rdma_qp *
570 (*rdma_create_qp)(void *rdma_cxt,
571 struct qed_rdma_create_qp_in_params *iparams,
572 struct qed_rdma_create_qp_out_params *oparams);
573
574 int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
575 struct qed_rdma_modify_qp_in_params *iparams);
576
577 int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
578 struct qed_rdma_query_qp_out_params *oparams);
579 int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
580 int
581 (*rdma_register_tid)(void *rdma_cxt,
582 struct qed_rdma_register_tid_in_params *iparams);
583 int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
584 int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
585 void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
586 int (*roce_ll2_start)(struct qed_dev *cdev,
587 struct qed_roce_ll2_params *params);
588 int (*roce_ll2_stop)(struct qed_dev *cdev);
589 int (*roce_ll2_tx)(struct qed_dev *cdev,
590 struct qed_roce_ll2_packet *packet,
591 struct qed_roce_ll2_tx_params *params);
592 int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev,
593 struct qed_roce_ll2_buffer *buf,
594 u64 cookie, u8 notify_fw);
595 int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev,
596 u8 *old_mac_address,
597 u8 *new_mac_address);
598 int (*roce_ll2_stats)(struct qed_dev *cdev,
599 struct qed_ll2_stats *stats);
600};
601
602const struct qed_rdma_ops *qed_get_rdma_ops(void);
603
604#endif
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
new file mode 100644
index 000000000000..99fbe6d55acb
--- /dev/null
+++ b/include/linux/qed/qede_roce.h
@@ -0,0 +1,88 @@
1/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef QEDE_ROCE_H
33#define QEDE_ROCE_H
34
35struct qedr_dev;
36struct qed_dev;
37struct qede_dev;
38
39enum qede_roce_event {
40 QEDE_UP,
41 QEDE_DOWN,
42 QEDE_CHANGE_ADDR,
43 QEDE_CLOSE
44};
45
46struct qede_roce_event_work {
47 struct list_head list;
48 struct work_struct work;
49 void *ptr;
50 enum qede_roce_event event;
51};
52
53struct qedr_driver {
54 unsigned char name[32];
55
56 struct qedr_dev* (*add)(struct qed_dev *, struct pci_dev *,
57 struct net_device *);
58
59 void (*remove)(struct qedr_dev *);
60 void (*notify)(struct qedr_dev *, enum qede_roce_event);
61};
62
63/* APIs for the RoCE driver to register callback handlers, which are
64 * invoked when the device is added or removed, or on ifup/ifdown.
65 */
66int qede_roce_register_driver(struct qedr_driver *drv);
67void qede_roce_unregister_driver(struct qedr_driver *drv);
68
69bool qede_roce_supported(struct qede_dev *dev);
70
71#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
72int qede_roce_dev_add(struct qede_dev *dev);
73void qede_roce_dev_event_open(struct qede_dev *dev);
74void qede_roce_dev_event_close(struct qede_dev *dev);
75void qede_roce_dev_remove(struct qede_dev *dev);
76void qede_roce_event_changeaddr(struct qede_dev *qedr);
77#else
78static inline int qede_roce_dev_add(struct qede_dev *dev)
79{
80 return 0;
81}
82
83static inline void qede_roce_dev_event_open(struct qede_dev *dev) {}
84static inline void qede_roce_dev_event_close(struct qede_dev *dev) {}
85static inline void qede_roce_dev_remove(struct qede_dev *dev) {}
86static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {}
87#endif
88#endif
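
For illustration, here is a hedged sketch (not from the patch) of the registration dance a RoCE driver would go through with this API; all names and callback bodies are placeholders:

#include <linux/module.h>
#include <linux/qed/qede_roce.h>

static struct qedr_dev *example_add(struct qed_dev *cdev,
				    struct pci_dev *pdev,
				    struct net_device *ndev)
{
	/* allocate and initialise the qedr device here; NULL is a stub */
	return NULL;
}

static void example_remove(struct qedr_dev *dev)
{
}

static void example_notify(struct qedr_dev *dev, enum qede_roce_event event)
{
	/* react to QEDE_UP/QEDE_DOWN/QEDE_CHANGE_ADDR/QEDE_CLOSE */
}

static struct qedr_driver example_drv = {
	.name	= "example_qedr",
	.add	= example_add,
	.remove	= example_remove,
	.notify	= example_notify,
};

static int __init example_init(void)
{
	return qede_roce_register_driver(&example_drv);
}

static void __exit example_exit(void)
{
	qede_roce_unregister_driver(&example_drv);
}

module_init(example_init);
module_exit(example_exit);
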
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index 187991c1f439..7663725faa94 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -28,6 +28,7 @@
28#define RDMA_MAX_PDS (64 * 1024) 28#define RDMA_MAX_PDS (64 * 1024)
29 29
30#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS 30#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
31#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
31 32
32#define RDMA_TASK_TYPE (PROTOCOLID_ROCE) 33#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
33 34
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index accba0e6b704..dc3889d1bbe6 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -11,6 +11,14 @@
11 11
12#define TCP_INVALID_TIMEOUT_VAL -1 12#define TCP_INVALID_TIMEOUT_VAL -1
13 13
14struct ooo_opaque {
15 __le32 cid;
16 u8 drop_isle;
17 u8 drop_size;
18 u8 ooo_opcode;
19 u8 ooo_isle;
20};
21
14enum tcp_connect_mode { 22enum tcp_connect_mode {
15 TCP_CONNECT_ACTIVE, 23 TCP_CONNECT_ACTIVE,
16 TCP_CONNECT_PASSIVE, 24 TCP_CONNECT_PASSIVE,
@@ -18,14 +26,10 @@ enum tcp_connect_mode {
18}; 26};
19 27
20struct tcp_init_params { 28struct tcp_init_params {
21 __le32 max_cwnd; 29 __le32 two_msl_timer;
22 __le16 dup_ack_threshold;
23 __le16 tx_sws_timer; 30 __le16 tx_sws_timer;
24 __le16 min_rto;
25 __le16 min_rto_rt;
26 __le16 max_rto;
27 u8 maxfinrt; 31 u8 maxfinrt;
28 u8 reserved[1]; 32 u8 reserved[9];
29}; 33};
30 34
31enum tcp_ip_version { 35enum tcp_ip_version {
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 3eef0802a0cd..5c132d3188be 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Resizable, Scalable, Concurrent Hash Table 2 * Resizable, Scalable, Concurrent Hash Table
3 * 3 *
4 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> 4 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
5 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> 5 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
6 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> 6 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
7 * 7 *
@@ -53,6 +53,11 @@ struct rhash_head {
53 struct rhash_head __rcu *next; 53 struct rhash_head __rcu *next;
54}; 54};
55 55
56struct rhlist_head {
57 struct rhash_head rhead;
58 struct rhlist_head __rcu *next;
59};
60
56/** 61/**
57 * struct bucket_table - Table of hash buckets 62 * struct bucket_table - Table of hash buckets
58 * @size: Number of hash buckets 63 * @size: Number of hash buckets
@@ -137,6 +142,7 @@ struct rhashtable_params {
137 * @key_len: Key length for hashfn 142 * @key_len: Key length for hashfn
138 * @elasticity: Maximum chain length before rehash 143 * @elasticity: Maximum chain length before rehash
139 * @p: Configuration parameters 144 * @p: Configuration parameters
145 * @rhlist: True if this is an rhltable
140 * @run_work: Deferred worker to expand/shrink asynchronously 146 * @run_work: Deferred worker to expand/shrink asynchronously
141 * @mutex: Mutex to protect current/future table swapping 147 * @mutex: Mutex to protect current/future table swapping
142 * @lock: Spin lock to protect walker list 148 * @lock: Spin lock to protect walker list
@@ -147,12 +153,21 @@ struct rhashtable {
147 unsigned int key_len; 153 unsigned int key_len;
148 unsigned int elasticity; 154 unsigned int elasticity;
149 struct rhashtable_params p; 155 struct rhashtable_params p;
156 bool rhlist;
150 struct work_struct run_work; 157 struct work_struct run_work;
151 struct mutex mutex; 158 struct mutex mutex;
152 spinlock_t lock; 159 spinlock_t lock;
153}; 160};
154 161
155/** 162/**
163 * struct rhltable - Hash table with duplicate objects in a list
164 * @ht: Underlying rhashtable
165 */
166struct rhltable {
167 struct rhashtable ht;
168};
169
170/**
156 * struct rhashtable_walker - Hash table walker 171 * struct rhashtable_walker - Hash table walker
157 * @list: List entry on list of walkers 172 * @list: List entry on list of walkers
158 * @tbl: The table that we were walking over 173 * @tbl: The table that we were walking over
@@ -163,9 +178,10 @@ struct rhashtable_walker {
163}; 178};
164 179
165/** 180/**
166 * struct rhashtable_iter - Hash table iterator, fits into netlink cb 181 * struct rhashtable_iter - Hash table iterator
167 * @ht: Table to iterate through 182 * @ht: Table to iterate through
168 * @p: Current pointer 183 * @p: Current pointer
184 * @list: Current hash list pointer
169 * @walker: Associated rhashtable walker 185 * @walker: Associated rhashtable walker
170 * @slot: Current slot 186 * @slot: Current slot
171 * @skip: Number of entries to skip in slot 187 * @skip: Number of entries to skip in slot
@@ -173,7 +189,8 @@ struct rhashtable_walker {
173struct rhashtable_iter { 189struct rhashtable_iter {
174 struct rhashtable *ht; 190 struct rhashtable *ht;
175 struct rhash_head *p; 191 struct rhash_head *p;
176 struct rhashtable_walker *walker; 192 struct rhlist_head *list;
193 struct rhashtable_walker walker;
177 unsigned int slot; 194 unsigned int slot;
178 unsigned int skip; 195 unsigned int skip;
179}; 196};
@@ -339,15 +356,14 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
339 356
340int rhashtable_init(struct rhashtable *ht, 357int rhashtable_init(struct rhashtable *ht,
341 const struct rhashtable_params *params); 358 const struct rhashtable_params *params);
359int rhltable_init(struct rhltable *hlt,
360 const struct rhashtable_params *params);
342 361
343struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, 362void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
344 const void *key, 363 struct rhash_head *obj);
345 struct rhash_head *obj,
346 struct bucket_table *old_tbl);
347int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
348 364
349int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter, 365void rhashtable_walk_enter(struct rhashtable *ht,
350 gfp_t gfp); 366 struct rhashtable_iter *iter);
351void rhashtable_walk_exit(struct rhashtable_iter *iter); 367void rhashtable_walk_exit(struct rhashtable_iter *iter);
352int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); 368int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
353void *rhashtable_walk_next(struct rhashtable_iter *iter); 369void *rhashtable_walk_next(struct rhashtable_iter *iter);
@@ -506,6 +522,31 @@ void rhashtable_destroy(struct rhashtable *ht);
506 rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\ 522 rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
507 tbl, hash, member) 523 tbl, hash, member)
508 524
525/**
526 * rhl_for_each_rcu - iterate over rcu hash table list
527 * @pos: the &struct rhlist_head to use as a loop cursor.
528 * @list: the head of the list
529 *
530 * This hash chain list-traversal primitive should be used on the
531 * list returned by rhltable_lookup.
532 */
533#define rhl_for_each_rcu(pos, list) \
534 for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
535
536/**
537 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
538 * @tpos: the type * to use as a loop cursor.
539 * @pos: the &struct rhlist_head to use as a loop cursor.
540 * @list: the head of the list
541 * @member: name of the &struct rhlist_head within the hashable struct.
542 *
543 * This hash chain list-traversal primitive should be used on the
544 * list returned by rhltable_lookup.
545 */
546#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
547 for (pos = list; pos && rht_entry(tpos, pos, member); \
548 pos = rcu_dereference_raw(pos->next))
549
509static inline int rhashtable_compare(struct rhashtable_compare_arg *arg, 550static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
510 const void *obj) 551 const void *obj)
511{ 552{
@@ -515,18 +556,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
515 return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len); 556 return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
516} 557}
517 558
518/** 559/* Internal function, do not use. */
519 * rhashtable_lookup_fast - search hash table, inlined version 560static inline struct rhash_head *__rhashtable_lookup(
520 * @ht: hash table
521 * @key: the pointer to the key
522 * @params: hash table parameters
523 *
524 * Computes the hash value for the key and traverses the bucket chain looking
525 * for a entry with an identical key. The first matching entry is returned.
526 *
527 * Returns the first entry on which the compare function returned true.
528 */
529static inline void *rhashtable_lookup_fast(
530 struct rhashtable *ht, const void *key, 561 struct rhashtable *ht, const void *key,
531 const struct rhashtable_params params) 562 const struct rhashtable_params params)
532{ 563{
@@ -538,8 +569,6 @@ static inline void *rhashtable_lookup_fast(
538 struct rhash_head *he; 569 struct rhash_head *he;
539 unsigned int hash; 570 unsigned int hash;
540 571
541 rcu_read_lock();
542
543 tbl = rht_dereference_rcu(ht->tbl, ht); 572 tbl = rht_dereference_rcu(ht->tbl, ht);
544restart: 573restart:
545 hash = rht_key_hashfn(ht, tbl, key, params); 574 hash = rht_key_hashfn(ht, tbl, key, params);
@@ -548,8 +577,7 @@ restart:
548 params.obj_cmpfn(&arg, rht_obj(ht, he)) : 577 params.obj_cmpfn(&arg, rht_obj(ht, he)) :
549 rhashtable_compare(&arg, rht_obj(ht, he))) 578 rhashtable_compare(&arg, rht_obj(ht, he)))
550 continue; 579 continue;
551 rcu_read_unlock(); 580 return he;
552 return rht_obj(ht, he);
553 } 581 }
554 582
555 /* Ensure we see any new tables. */ 583 /* Ensure we see any new tables. */
@@ -558,89 +586,165 @@ restart:
558 tbl = rht_dereference_rcu(tbl->future_tbl, ht); 586 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
559 if (unlikely(tbl)) 587 if (unlikely(tbl))
560 goto restart; 588 goto restart;
561 rcu_read_unlock();
562 589
563 return NULL; 590 return NULL;
564} 591}
565 592
566/* Internal function, please use rhashtable_insert_fast() instead */ 593/**
567static inline int __rhashtable_insert_fast( 594 * rhashtable_lookup - search hash table
568 struct rhashtable *ht, const void *key, struct rhash_head *obj, 595 * @ht: hash table
596 * @key: the pointer to the key
597 * @params: hash table parameters
598 *
599 * Computes the hash value for the key and traverses the bucket chain looking
600 * for an entry with an identical key. The first matching entry is returned.
601 *
602 * This must only be called under the RCU read lock.
603 *
604 * Returns the first entry on which the compare function returned true.
605 */
606static inline void *rhashtable_lookup(
607 struct rhashtable *ht, const void *key,
569 const struct rhashtable_params params) 608 const struct rhashtable_params params)
570{ 609{
610 struct rhash_head *he = __rhashtable_lookup(ht, key, params);
611
612 return he ? rht_obj(ht, he) : NULL;
613}
614
615/**
616 * rhashtable_lookup_fast - search hash table, without RCU read lock
617 * @ht: hash table
618 * @key: the pointer to the key
619 * @params: hash table parameters
620 *
621 * Computes the hash value for the key and traverses the bucket chain looking
622 * for an entry with an identical key. The first matching entry is returned.
623 *
624 * Only use this function when you have other mechanisms guaranteeing
625 * that the object won't go away after the RCU read lock is released.
626 *
627 * Returns the first entry on which the compare function returned true.
628 */
629static inline void *rhashtable_lookup_fast(
630 struct rhashtable *ht, const void *key,
631 const struct rhashtable_params params)
632{
633 void *obj;
634
635 rcu_read_lock();
636 obj = rhashtable_lookup(ht, key, params);
637 rcu_read_unlock();
638
639 return obj;
640}
641
642/**
643 * rhltable_lookup - search hash list table
644 * @hlt: hash table
645 * @key: the pointer to the key
646 * @params: hash table parameters
647 *
648 * Computes the hash value for the key and traverses the bucket chain looking
649 * for an entry with an identical key. All matching entries are returned
650 * in a list.
651 *
652 * This must only be called under the RCU read lock.
653 *
654 * Returns the list of entries that match the given key.
655 */
656static inline struct rhlist_head *rhltable_lookup(
657 struct rhltable *hlt, const void *key,
658 const struct rhashtable_params params)
659{
660 struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
661
662 return he ? container_of(he, struct rhlist_head, rhead) : NULL;
663}
664
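
Since rhltable_lookup() returns the whole chain of duplicates, it pairs naturally with the rhl_for_each_entry_rcu() macro defined earlier. A hedged usage sketch follows; "struct obj" and its fields are hypothetical and must match the table's rhashtable_params:

#include <linux/rhashtable.h>

struct obj {
	int key;
	struct rhlist_head node;
};

static void example_dump_matches(struct rhltable *hlt, int key,
				 const struct rhashtable_params params)
{
	struct rhlist_head *list, *pos;
	struct obj *o;

	rcu_read_lock();	/* rhltable_lookup() requires the RCU read lock */
	list = rhltable_lookup(hlt, &key, params);
	rhl_for_each_entry_rcu(o, pos, list, node)
		pr_info("match: key=%d\n", o->key);
	rcu_read_unlock();
}
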
665/* Internal function, please use rhashtable_insert_fast() instead. This
666 * function returns the existing element already in the hash table if there is a clash,
667 * otherwise it returns an error via ERR_PTR().
668 */
669static inline void *__rhashtable_insert_fast(
670 struct rhashtable *ht, const void *key, struct rhash_head *obj,
671 const struct rhashtable_params params, bool rhlist)
672{
571 struct rhashtable_compare_arg arg = { 673 struct rhashtable_compare_arg arg = {
572 .ht = ht, 674 .ht = ht,
573 .key = key, 675 .key = key,
574 }; 676 };
575 struct bucket_table *tbl, *new_tbl; 677 struct rhash_head __rcu **pprev;
678 struct bucket_table *tbl;
576 struct rhash_head *head; 679 struct rhash_head *head;
577 spinlock_t *lock; 680 spinlock_t *lock;
578 unsigned int elasticity;
579 unsigned int hash; 681 unsigned int hash;
580 int err; 682 int elasticity;
683 void *data;
581 684
582restart:
583 rcu_read_lock(); 685 rcu_read_lock();
584 686
585 tbl = rht_dereference_rcu(ht->tbl, ht); 687 tbl = rht_dereference_rcu(ht->tbl, ht);
688 hash = rht_head_hashfn(ht, tbl, obj, params);
689 lock = rht_bucket_lock(tbl, hash);
690 spin_lock_bh(lock);
586 691
587 /* All insertions must grab the oldest table containing 692 if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
588 * the hashed bucket that is yet to be rehashed.
589 */
590 for (;;) {
591 hash = rht_head_hashfn(ht, tbl, obj, params);
592 lock = rht_bucket_lock(tbl, hash);
593 spin_lock_bh(lock);
594
595 if (tbl->rehash <= hash)
596 break;
597
598 spin_unlock_bh(lock);
599 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
600 }
601
602 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
603 if (unlikely(new_tbl)) {
604 tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
605 if (!IS_ERR_OR_NULL(tbl))
606 goto slow_path;
607
608 err = PTR_ERR(tbl);
609 goto out;
610 }
611
612 err = -E2BIG;
613 if (unlikely(rht_grow_above_max(ht, tbl)))
614 goto out;
615
616 if (unlikely(rht_grow_above_100(ht, tbl))) {
617slow_path: 693slow_path:
618 spin_unlock_bh(lock); 694 spin_unlock_bh(lock);
619 err = rhashtable_insert_rehash(ht, tbl);
620 rcu_read_unlock(); 695 rcu_read_unlock();
621 if (err) 696 return rhashtable_insert_slow(ht, key, obj);
622 return err;
623
624 goto restart;
625 } 697 }
626 698
627 err = -EEXIST;
628 elasticity = ht->elasticity; 699 elasticity = ht->elasticity;
700 pprev = &tbl->buckets[hash];
629 rht_for_each(head, tbl, hash) { 701 rht_for_each(head, tbl, hash) {
630 if (key && 702 struct rhlist_head *plist;
631 unlikely(!(params.obj_cmpfn ? 703 struct rhlist_head *list;
632 params.obj_cmpfn(&arg, rht_obj(ht, head)) : 704
633 rhashtable_compare(&arg, rht_obj(ht, head))))) 705 elasticity--;
706 if (!key ||
707 (params.obj_cmpfn ?
708 params.obj_cmpfn(&arg, rht_obj(ht, head)) :
709 rhashtable_compare(&arg, rht_obj(ht, head))))
710 continue;
711
712 data = rht_obj(ht, head);
713
714 if (!rhlist)
634 goto out; 715 goto out;
635 if (!--elasticity) 716
636 goto slow_path; 717
718 list = container_of(obj, struct rhlist_head, rhead);
719 plist = container_of(head, struct rhlist_head, rhead);
720
721 RCU_INIT_POINTER(list->next, plist);
722 head = rht_dereference_bucket(head->next, tbl, hash);
723 RCU_INIT_POINTER(list->rhead.next, head);
724 rcu_assign_pointer(*pprev, obj);
725
726 goto good;
637 } 727 }
638 728
639 err = 0; 729 if (elasticity <= 0)
730 goto slow_path;
731
732 data = ERR_PTR(-E2BIG);
733 if (unlikely(rht_grow_above_max(ht, tbl)))
734 goto out;
735
736 if (unlikely(rht_grow_above_100(ht, tbl)))
737 goto slow_path;
640 738
641 head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); 739 head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
642 740
643 RCU_INIT_POINTER(obj->next, head); 741 RCU_INIT_POINTER(obj->next, head);
742 if (rhlist) {
743 struct rhlist_head *list;
744
745 list = container_of(obj, struct rhlist_head, rhead);
746 RCU_INIT_POINTER(list->next, NULL);
747 }
644 748
645 rcu_assign_pointer(tbl->buckets[hash], obj); 749 rcu_assign_pointer(tbl->buckets[hash], obj);
646 750
@@ -648,11 +752,14 @@ slow_path:
648 if (rht_grow_above_75(ht, tbl)) 752 if (rht_grow_above_75(ht, tbl))
649 schedule_work(&ht->run_work); 753 schedule_work(&ht->run_work);
650 754
755good:
756 data = NULL;
757
651out: 758out:
652 spin_unlock_bh(lock); 759 spin_unlock_bh(lock);
653 rcu_read_unlock(); 760 rcu_read_unlock();
654 761
655 return err; 762 return data;
656} 763}
657 764
658/** 765/**
@@ -675,7 +782,65 @@ static inline int rhashtable_insert_fast(
675 struct rhashtable *ht, struct rhash_head *obj, 782 struct rhashtable *ht, struct rhash_head *obj,
676 const struct rhashtable_params params) 783 const struct rhashtable_params params)
677{ 784{
678 return __rhashtable_insert_fast(ht, NULL, obj, params); 785 void *ret;
786
787 ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
788 if (IS_ERR(ret))
789 return PTR_ERR(ret);
790
791 return ret == NULL ? 0 : -EEXIST;
792}
793
794/**
795 * rhltable_insert_key - insert object into hash list table
796 * @hlt: hash list table
797 * @key: the pointer to the key
798 * @list: pointer to hash list head inside object
799 * @params: hash table parameters
800 *
801 * Will take the per-bucket spinlock to protect against concurrent mutations
802 * of the same bucket. Multiple insertions may occur in parallel unless
803 * they map to the same bucket lock.
804 *
805 * It is safe to call this function from atomic context.
806 *
807 * Will trigger an automatic deferred table resizing if the size grows
808 * beyond the watermark indicated by grow_decision() which can be passed
809 * to rhashtable_init().
810 */
811static inline int rhltable_insert_key(
812 struct rhltable *hlt, const void *key, struct rhlist_head *list,
813 const struct rhashtable_params params)
814{
815 return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
816 params, true));
817}
818
819/**
820 * rhltable_insert - insert object into hash list table
821 * @hlt: hash list table
822 * @list: pointer to hash list head inside object
823 * @params: hash table parameters
824 *
825 * Will take the per-bucket spinlock to protect against concurrent mutations
826 * of the same bucket. Multiple insertions may occur in parallel unless
827 * they map to the same bucket lock.
828 *
829 * It is safe to call this function from atomic context.
830 *
831 * Will trigger an automatic deferred table resizing if the size grows
832 * beyond the watermark indicated by grow_decision() which can be passed
833 * to rhashtable_init().
834 */
835static inline int rhltable_insert(
836 struct rhltable *hlt, struct rhlist_head *list,
837 const struct rhashtable_params params)
838{
839 const char *key = rht_obj(&hlt->ht, &list->rhead);
840
841 key += params.key_offset;
842
843 return rhltable_insert_key(hlt, key, list, params);
679} 844}
680 845
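
A hedged setup-and-insert sketch, reusing the hypothetical "struct obj { int key; struct rhlist_head node; }" from the lookup example above: two objects sharing a key, which a plain rhashtable would reject with -EEXIST, both land in the rhltable. The parameter values are illustrative:

static const struct rhashtable_params example_params = {
	.key_len	= sizeof(int),
	.key_offset	= offsetof(struct obj, key),
	.head_offset	= offsetof(struct obj, node),
	.automatic_shrinking = true,
};

static int example_insert_pair(struct rhltable *hlt,
			       struct obj *a, struct obj *b)
{
	int err;

	err = rhltable_init(hlt, &example_params);
	if (err)
		return err;

	a->key = b->key = 42;	/* duplicate keys are fine in an rhltable */
	err = rhltable_insert(hlt, &a->node, example_params);
	if (!err)
		err = rhltable_insert(hlt, &b->node, example_params);
	return err;
}
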
681/** 846/**
@@ -704,11 +869,16 @@ static inline int rhashtable_lookup_insert_fast(
704 const struct rhashtable_params params) 869 const struct rhashtable_params params)
705{ 870{
706 const char *key = rht_obj(ht, obj); 871 const char *key = rht_obj(ht, obj);
872 void *ret;
707 873
708 BUG_ON(ht->p.obj_hashfn); 874 BUG_ON(ht->p.obj_hashfn);
709 875
710 return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, 876 ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
711 params); 877 false);
878 if (IS_ERR(ret))
879 return PTR_ERR(ret);
880
881 return ret == NULL ? 0 : -EEXIST;
712} 882}
713 883
714/** 884/**
@@ -737,15 +907,42 @@ static inline int rhashtable_lookup_insert_key(
737 struct rhashtable *ht, const void *key, struct rhash_head *obj, 907 struct rhashtable *ht, const void *key, struct rhash_head *obj,
738 const struct rhashtable_params params) 908 const struct rhashtable_params params)
739{ 909{
910 void *ret;
911
912 BUG_ON(!ht->p.obj_hashfn || !key);
913
914 ret = __rhashtable_insert_fast(ht, key, obj, params, false);
915 if (IS_ERR(ret))
916 return PTR_ERR(ret);
917
918 return ret == NULL ? 0 : -EEXIST;
919}
920
921/**
922 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
923 * @ht: hash table
924 * @key: the pointer to the key
925 * @obj: pointer to hash head inside object
926 * @params: hash table parameters
927 *
928 * Just like rhashtable_lookup_insert_key(), but this function returns the
929 * object if it exists, NULL if it does not and the insertion was successful,
930 * and an ERR_PTR otherwise.
931 */
932static inline void *rhashtable_lookup_get_insert_key(
933 struct rhashtable *ht, const void *key, struct rhash_head *obj,
934 const struct rhashtable_params params)
935{
740 BUG_ON(!ht->p.obj_hashfn || !key); 936 BUG_ON(!ht->p.obj_hashfn || !key);
741 937
742 return __rhashtable_insert_fast(ht, key, obj, params); 938 return __rhashtable_insert_fast(ht, key, obj, params, false);
743} 939}
744 940
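
The three-way return convention above (existing object, NULL, or ERR_PTR) is easy to mishandle, so here is a hedged sketch of the intended pattern. "struct item" is hypothetical, and the table is assumed to have been initialised with an obj_hashfn, which this variant requires (note the BUG_ON above):

struct item {
	u32 key;
	struct rhash_head node;
};

static struct item *example_get_or_insert(struct rhashtable *ht,
					  struct item *cand,
					  const struct rhashtable_params params)
{
	void *old;

	old = rhashtable_lookup_get_insert_key(ht, &cand->key, &cand->node,
					       params);
	if (IS_ERR(old))
		return old;	/* insertion failed, e.g. ERR_PTR(-E2BIG) */
	if (old)
		return old;	/* key already present: the existing item */
	return cand;		/* NULL: "cand" is now in the table */
}
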
745/* Internal function, please use rhashtable_remove_fast() instead */ 941/* Internal function, please use rhashtable_remove_fast() instead */
746static inline int __rhashtable_remove_fast( 942static inline int __rhashtable_remove_fast_one(
747 struct rhashtable *ht, struct bucket_table *tbl, 943 struct rhashtable *ht, struct bucket_table *tbl,
748 struct rhash_head *obj, const struct rhashtable_params params) 944 struct rhash_head *obj, const struct rhashtable_params params,
945 bool rhlist)
749{ 946{
750 struct rhash_head __rcu **pprev; 947 struct rhash_head __rcu **pprev;
751 struct rhash_head *he; 948 struct rhash_head *he;
@@ -760,39 +957,66 @@ static inline int __rhashtable_remove_fast(
760 957
761 pprev = &tbl->buckets[hash]; 958 pprev = &tbl->buckets[hash];
762 rht_for_each(he, tbl, hash) { 959 rht_for_each(he, tbl, hash) {
960 struct rhlist_head *list;
961
962 list = container_of(he, struct rhlist_head, rhead);
963
763 if (he != obj) { 964 if (he != obj) {
965 struct rhlist_head __rcu **lpprev;
966
764 pprev = &he->next; 967 pprev = &he->next;
765 continue; 968
969 if (!rhlist)
970 continue;
971
972 do {
973 lpprev = &list->next;
974 list = rht_dereference_bucket(list->next,
975 tbl, hash);
976 } while (list && obj != &list->rhead);
977
978 if (!list)
979 continue;
980
981 list = rht_dereference_bucket(list->next, tbl, hash);
982 RCU_INIT_POINTER(*lpprev, list);
983 err = 0;
984 break;
766 } 985 }
767 986
768 rcu_assign_pointer(*pprev, obj->next); 987 obj = rht_dereference_bucket(obj->next, tbl, hash);
769 err = 0; 988 err = 1;
989
990 if (rhlist) {
991 list = rht_dereference_bucket(list->next, tbl, hash);
992 if (list) {
993 RCU_INIT_POINTER(list->rhead.next, obj);
994 obj = &list->rhead;
995 err = 0;
996 }
997 }
998
999 rcu_assign_pointer(*pprev, obj);
770 break; 1000 break;
771 } 1001 }
772 1002
773 spin_unlock_bh(lock); 1003 spin_unlock_bh(lock);
774 1004
1005 if (err > 0) {
1006 atomic_dec(&ht->nelems);
1007 if (unlikely(ht->p.automatic_shrinking &&
1008 rht_shrink_below_30(ht, tbl)))
1009 schedule_work(&ht->run_work);
1010 err = 0;
1011 }
1012
775 return err; 1013 return err;
776} 1014}
777 1015
778/** 1016/* Internal function, please use rhashtable_remove_fast() instead */
779 * rhashtable_remove_fast - remove object from hash table 1017static inline int __rhashtable_remove_fast(
780 * @ht: hash table
781 * @obj: pointer to hash head inside object
782 * @params: hash table parameters
783 *
784 * Since the hash chain is single linked, the removal operation needs to
785 * walk the bucket chain upon removal. The removal operation is thus
786 * considerable slow if the hash table is not correctly sized.
787 *
788 * Will automatically shrink the table via rhashtable_expand() if the
789 * shrink_decision function specified at rhashtable_init() returns true.
790 *
791 * Returns zero on success, -ENOENT if the entry could not be found.
792 */
793static inline int rhashtable_remove_fast(
794 struct rhashtable *ht, struct rhash_head *obj, 1018 struct rhashtable *ht, struct rhash_head *obj,
795 const struct rhashtable_params params) 1019 const struct rhashtable_params params, bool rhlist)
796{ 1020{
797 struct bucket_table *tbl; 1021 struct bucket_table *tbl;
798 int err; 1022 int err;
@@ -806,24 +1030,60 @@ static inline int rhashtable_remove_fast(
806 * visible then that guarantees the entry to still be in 1030 * visible then that guarantees the entry to still be in
807 * the old tbl if it exists. 1031 * the old tbl if it exists.
808 */ 1032 */
809 while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) && 1033 while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
1034 rhlist)) &&
810 (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) 1035 (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
811 ; 1036 ;
812 1037
813 if (err)
814 goto out;
815
816 atomic_dec(&ht->nelems);
817 if (unlikely(ht->p.automatic_shrinking &&
818 rht_shrink_below_30(ht, tbl)))
819 schedule_work(&ht->run_work);
820
821out:
822 rcu_read_unlock(); 1038 rcu_read_unlock();
823 1039
824 return err; 1040 return err;
825} 1041}
826 1042
1043/**
1044 * rhashtable_remove_fast - remove object from hash table
1045 * @ht: hash table
1046 * @obj: pointer to hash head inside object
1047 * @params: hash table parameters
1048 *
1049 * Since the hash chain is singly linked, the removal operation needs to
1050 * walk the bucket chain upon removal. The removal operation is thus
1051 * considerably slow if the hash table is not correctly sized.
1052 *
1053 * Will automatically shrink the table via rhashtable_shrink() if the
1054 * shrink_decision function specified at rhashtable_init() returns true.
1055 *
1056 * Returns zero on success, -ENOENT if the entry could not be found.
1057 */
1058static inline int rhashtable_remove_fast(
1059 struct rhashtable *ht, struct rhash_head *obj,
1060 const struct rhashtable_params params)
1061{
1062 return __rhashtable_remove_fast(ht, obj, params, false);
1063}
1064
1065/**
1066 * rhltable_remove - remove object from hash list table
1067 * @hlt: hash list table
1068 * @list: pointer to hash list head inside object
1069 * @params: hash table parameters
1070 *
1071 * Since the hash chain is singly linked, the removal operation needs to
1072 * walk the bucket chain upon removal. The removal operation is thus
1073 * considerably slow if the hash table is not correctly sized.
1074 *
1075 * Will automatically shrink the table via rhashtable_shrink() if the
1076 * shrink_decision function specified at rhashtable_init() returns true.
1077 *
1078 * Returns zero on success, -ENOENT if the entry could not be found.
1079 */
1080static inline int rhltable_remove(
1081 struct rhltable *hlt, struct rhlist_head *list,
1082 const struct rhashtable_params params)
1083{
1084 return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
1085}
1086
827/* Internal function, please use rhashtable_replace_fast() instead */ 1087/* Internal function, please use rhashtable_replace_fast() instead */
828static inline int __rhashtable_replace_fast( 1088static inline int __rhashtable_replace_fast(
829 struct rhashtable *ht, struct bucket_table *tbl, 1089 struct rhashtable *ht, struct bucket_table *tbl,
@@ -906,4 +1166,59 @@ static inline int rhashtable_replace_fast(
906 return err; 1166 return err;
907} 1167}
908 1168
1169/* Obsolete function, do not use in new code. */
1170static inline int rhashtable_walk_init(struct rhashtable *ht,
1171 struct rhashtable_iter *iter, gfp_t gfp)
1172{
1173 rhashtable_walk_enter(ht, iter);
1174 return 0;
1175}
1176
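
The new walker API splits iterator setup (enter/exit, which cannot fail) from the RCU-protected walk itself (start/next/stop, which report -EAGAIN when a resize interferes). A hedged usage sketch:

static void example_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	void *obj;

	rhashtable_walk_enter(ht, &iter);

	rhashtable_walk_start(&iter);	/* may return -EAGAIN; walk stays valid */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resize: entries may repeat */
			break;
		}
		/* ... process obj ... */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}
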
1177/**
1178 * rhltable_walk_enter - Initialise an iterator
1179 * @hlt: Table to walk over
1180 * @iter: Hash table iterator
1181 *
1182 * This function prepares a hash table walk.
1183 *
1184 * Note that if you restart a walk after rhashtable_walk_stop you
1185 * may see the same object twice. Also, you may miss objects if
1186 * there are removals in between rhashtable_walk_stop and the next
1187 * call to rhashtable_walk_start.
1188 *
1189 * For a completely stable walk you should construct your own data
1190 * structure outside the hash table.
1191 *
1192 * This function may sleep so you must not call it from interrupt
1193 * context or with spin locks held.
1194 *
1195 * You must call rhashtable_walk_exit after this function returns.
1196 */
1197static inline void rhltable_walk_enter(struct rhltable *hlt,
1198 struct rhashtable_iter *iter)
1199{
1200 return rhashtable_walk_enter(&hlt->ht, iter);
1201}
1202
1203/**
1204 * rhltable_free_and_destroy - free elements and destroy hash list table
1205 * @hlt: the hash list table to destroy
1206 * @free_fn: callback to release resources of element
1207 * @arg: pointer passed to free_fn
1208 *
1209 * See documentation for rhashtable_free_and_destroy.
1210 */
1211static inline void rhltable_free_and_destroy(struct rhltable *hlt,
1212 void (*free_fn)(void *ptr,
1213 void *arg),
1214 void *arg)
1215{
1216 return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
1217}
1218
1219static inline void rhltable_destroy(struct rhltable *hlt)
1220{
1221 return rhltable_free_and_destroy(hlt, NULL, NULL);
1222}
1223
909#endif /* _LINUX_RHASHTABLE_H */ 1224#endif /* _LINUX_RHASHTABLE_H */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2daece8979f7..57e54847b0b9 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -105,7 +105,7 @@ extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
105 struct netlink_callback *cb, 105 struct netlink_callback *cb,
106 struct net_device *dev, 106 struct net_device *dev,
107 struct net_device *filter_dev, 107 struct net_device *filter_dev,
108 int idx); 108 int *idx);
109extern int ndo_dflt_fdb_add(struct ndmsg *ndm, 109extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
110 struct nlattr *tb[], 110 struct nlattr *tb[],
111 struct net_device *dev, 111 struct net_device *dev,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0f665cb26b50..9bf60b556bd2 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -612,7 +612,6 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
612 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS 612 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
613 * @napi_id: id of the NAPI struct this skb came from 613 * @napi_id: id of the NAPI struct this skb came from
614 * @secmark: security marking 614 * @secmark: security marking
615 * @offload_fwd_mark: fwding offload mark
616 * @mark: Generic packet mark 615 * @mark: Generic packet mark
617 * @vlan_proto: vlan encapsulation protocol 616 * @vlan_proto: vlan encapsulation protocol
618 * @vlan_tci: vlan tag control information 617 * @vlan_tci: vlan tag control information
@@ -677,13 +676,23 @@ struct sk_buff {
677 */ 676 */
678 kmemcheck_bitfield_begin(flags1); 677 kmemcheck_bitfield_begin(flags1);
679 __u16 queue_mapping; 678 __u16 queue_mapping;
679
680/* if you move cloned around you also must adapt those constants */
681#ifdef __BIG_ENDIAN_BITFIELD
682#define CLONED_MASK (1 << 7)
683#else
684#define CLONED_MASK 1
685#endif
686#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
687
688 __u8 __cloned_offset[0];
680 __u8 cloned:1, 689 __u8 cloned:1,
681 nohdr:1, 690 nohdr:1,
682 fclone:2, 691 fclone:2,
683 peeked:1, 692 peeked:1,
684 head_frag:1, 693 head_frag:1,
685 xmit_more:1; 694 xmit_more:1,
686 /* one bit hole */ 695 __unused:1; /* one bit hole */
687 kmemcheck_bitfield_end(flags1); 696 kmemcheck_bitfield_end(flags1);
688 697
689 /* fields enclosed in headers_start/headers_end are copied 698 /* fields enclosed in headers_start/headers_end are copied
@@ -730,7 +739,10 @@ struct sk_buff {
730 __u8 ipvs_property:1; 739 __u8 ipvs_property:1;
731 __u8 inner_protocol_type:1; 740 __u8 inner_protocol_type:1;
732 __u8 remcsum_offload:1; 741 __u8 remcsum_offload:1;
733 /* 3 or 5 bit hole */ 742#ifdef CONFIG_NET_SWITCHDEV
743 __u8 offload_fwd_mark:1;
744#endif
745 /* 2, 4 or 5 bit hole */
734 746
735#ifdef CONFIG_NET_SCHED 747#ifdef CONFIG_NET_SCHED
736 __u16 tc_index; /* traffic control index */ 748 __u16 tc_index; /* traffic control index */
@@ -757,14 +769,9 @@ struct sk_buff {
757 unsigned int sender_cpu; 769 unsigned int sender_cpu;
758 }; 770 };
759#endif 771#endif
760 union {
761#ifdef CONFIG_NETWORK_SECMARK 772#ifdef CONFIG_NETWORK_SECMARK
762 __u32 secmark; 773 __u32 secmark;
763#endif
764#ifdef CONFIG_NET_SWITCHDEV
765 __u32 offload_fwd_mark;
766#endif 774#endif
767 };
768 775
769 union { 776 union {
770 __u32 mark; 777 __u32 mark;
@@ -2295,7 +2302,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
2295 2302
2296int ___pskb_trim(struct sk_buff *skb, unsigned int len); 2303int ___pskb_trim(struct sk_buff *skb, unsigned int len);
2297 2304
2298static inline void __skb_trim(struct sk_buff *skb, unsigned int len) 2305static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
2299{ 2306{
2300 if (unlikely(skb_is_nonlinear(skb))) { 2307 if (unlikely(skb_is_nonlinear(skb))) {
2301 WARN_ON(1); 2308 WARN_ON(1);
@@ -2305,6 +2312,11 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2305 skb_set_tail_pointer(skb, len); 2312 skb_set_tail_pointer(skb, len);
2306} 2313}
2307 2314
2315static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2316{
2317 __skb_set_length(skb, len);
2318}
2319
2308void skb_trim(struct sk_buff *skb, unsigned int len); 2320void skb_trim(struct sk_buff *skb, unsigned int len);
2309 2321
2310static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) 2322static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
@@ -2335,6 +2347,20 @@ static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
2335 BUG_ON(err); 2347 BUG_ON(err);
2336} 2348}
2337 2349
2350static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
2351{
2352 unsigned int diff = len - skb->len;
2353
2354 if (skb_tailroom(skb) < diff) {
2355 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
2356 GFP_ATOMIC);
2357 if (ret)
2358 return ret;
2359 }
2360 __skb_set_length(skb, len);
2361 return 0;
2362}
2363
2338/** 2364/**
2339 * skb_orphan - orphan a buffer 2365 * skb_orphan - orphan a buffer
2340 * @skb: buffer to orphan 2366 * @skb: buffer to orphan
@@ -2386,6 +2412,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
2386 kfree_skb(skb); 2412 kfree_skb(skb);
2387} 2413}
2388 2414
2415void skb_rbtree_purge(struct rb_root *root);
2416
2389void *netdev_alloc_frag(unsigned int fragsz); 2417void *netdev_alloc_frag(unsigned int fragsz);
2390 2418
2391struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, 2419struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2938,6 +2966,21 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2938 return __pskb_trim(skb, len); 2966 return __pskb_trim(skb, len);
2939} 2967}
2940 2968
2969static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2970{
2971 if (skb->ip_summed == CHECKSUM_COMPLETE)
2972 skb->ip_summed = CHECKSUM_NONE;
2973 __skb_trim(skb, len);
2974 return 0;
2975}
2976
2977static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
2978{
2979 if (skb->ip_summed == CHECKSUM_COMPLETE)
2980 skb->ip_summed = CHECKSUM_NONE;
2981 return __skb_grow(skb, len);
2982}
2983
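
These two helpers keep ip_summed consistent while resizing a linear skb. A hedged sketch of how they might be combined; the helper name is made up and nonlinear skbs are assumed away (note __skb_trim() WARNs on them):

static int example_skb_set_exact_len(struct sk_buff *skb, unsigned int len)
{
	if (len < skb->len)
		return __skb_trim_rcsum(skb, len);	/* shrink the tail */

	return __skb_grow_rcsum(skb, len);	/* may expand tailroom */
}
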
2941#define skb_queue_walk(queue, skb) \ 2984#define skb_queue_walk(queue, skb) \
2942 for (skb = (queue)->next; \ 2985 for (skb = (queue)->next; \
2943 skb != (struct sk_buff *)(queue); \ 2986 skb != (struct sk_buff *)(queue); \
@@ -3042,6 +3085,7 @@ bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
3042struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); 3085struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3043struct sk_buff *skb_vlan_untag(struct sk_buff *skb); 3086struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3044int skb_ensure_writable(struct sk_buff *skb, int write_len); 3087int skb_ensure_writable(struct sk_buff *skb, int write_len);
3088int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3045int skb_vlan_pop(struct sk_buff *skb); 3089int skb_vlan_pop(struct sk_buff *skb);
3046int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); 3090int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3047struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, 3091struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
@@ -3726,6 +3770,13 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb)
3726 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; 3770 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
3727} 3771}
3728 3772
3773static inline void skb_gso_reset(struct sk_buff *skb)
3774{
3775 skb_shinfo(skb)->gso_size = 0;
3776 skb_shinfo(skb)->gso_segs = 0;
3777 skb_shinfo(skb)->gso_type = 0;
3778}
3779
3729void __skb_warn_lro_forwarding(const struct sk_buff *skb); 3780void __skb_warn_lro_forwarding(const struct sk_buff *skb);
3730 3781
3731static inline bool skb_warn_if_lro(const struct sk_buff *skb) 3782static inline bool skb_warn_if_lro(const struct sk_buff *skb)
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index a4f7203a9017..ecc3e07c6e63 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -25,6 +25,7 @@
25#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/wait.h> 26#include <linux/wait.h>
27#include <linux/rbtree.h> 27#include <linux/rbtree.h>
28#include <linux/uidgid.h>
28#include <uapi/linux/sysctl.h> 29#include <uapi/linux/sysctl.h>
29 30
30/* For the /proc/sys support */ 31/* For the /proc/sys support */
@@ -159,6 +160,9 @@ struct ctl_table_root {
159 struct ctl_table_set default_set; 160 struct ctl_table_set default_set;
160 struct ctl_table_set *(*lookup)(struct ctl_table_root *root, 161 struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
161 struct nsproxy *namespaces); 162 struct nsproxy *namespaces);
163 void (*set_ownership)(struct ctl_table_header *head,
164 struct ctl_table *table,
165 kuid_t *uid, kgid_t *gid);
162 int (*permissions)(struct ctl_table_header *head, struct ctl_table *table); 166 int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
163}; 167};
164 168
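
The new set_ownership hook lets a ctl_table_root report who should own a sysctl file, for example to map it into a user namespace. A hedged sketch of a possible implementation; the policy below is a placeholder, and GLOBAL_ROOT_UID/GID come from the <linux/uidgid.h> include added above:

static void example_set_ownership(struct ctl_table_header *head,
				  struct ctl_table *table,
				  kuid_t *uid, kgid_t *gid)
{
	/* a net sysctl could map the owning namespace's ids here instead */
	*uid = GLOBAL_ROOT_UID;
	*gid = GLOBAL_ROOT_GID;
}
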
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 7be9b1242354..a17ae7b85218 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -19,6 +19,7 @@
19 19
20 20
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/win_minmax.h>
22#include <net/sock.h> 23#include <net/sock.h>
23#include <net/inet_connection_sock.h> 24#include <net/inet_connection_sock.h>
24#include <net/inet_timewait_sock.h> 25#include <net/inet_timewait_sock.h>
@@ -212,7 +213,8 @@ struct tcp_sock {
212 u8 reord; /* reordering detected */ 213 u8 reord; /* reordering detected */
213 } rack; 214 } rack;
214 u16 advmss; /* Advertised MSS */ 215 u16 advmss; /* Advertised MSS */
215 u8 unused; 216 u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
217 unused:7;
216 u8 nonagle : 4,/* Disable Nagle algorithm? */ 218 u8 nonagle : 4,/* Disable Nagle algorithm? */
217 thin_lto : 1,/* Use linear timeouts for thin streams */ 219 thin_lto : 1,/* Use linear timeouts for thin streams */
218 thin_dupack : 1,/* Fast retransmit on first dupack */ 220 thin_dupack : 1,/* Fast retransmit on first dupack */
@@ -234,9 +236,7 @@ struct tcp_sock {
234 u32 mdev_max_us; /* maximal mdev for the last rtt period */ 236 u32 mdev_max_us; /* maximal mdev for the last rtt period */
235 u32 rttvar_us; /* smoothed mdev_max */ 237 u32 rttvar_us; /* smoothed mdev_max */
236 u32 rtt_seq; /* sequence number to update rttvar */ 238 u32 rtt_seq; /* sequence number to update rttvar */
237 struct rtt_meas { 239 struct minmax rtt_min;
238 u32 rtt, ts; /* RTT in usec and sampling time in jiffies. */
239 } rtt_min[3];
240 240
241 u32 packets_out; /* Packets which are "in flight" */ 241 u32 packets_out; /* Packets which are "in flight" */
242 u32 retrans_out; /* Retransmitted packets out */ 242 u32 retrans_out; /* Retransmitted packets out */
@@ -268,6 +268,12 @@ struct tcp_sock {
268 * receiver in Recovery. */ 268 * receiver in Recovery. */
269 u32 prr_out; /* Total number of pkts sent during Recovery. */ 269 u32 prr_out; /* Total number of pkts sent during Recovery. */
270 u32 delivered; /* Total data packets delivered incl. rexmits */ 270 u32 delivered; /* Total data packets delivered incl. rexmits */
271 u32 lost; /* Total data packets lost incl. rexmits */
272 u32 app_limited; /* limited until "delivered" reaches this val */
273 struct skb_mstamp first_tx_mstamp; /* start of window send phase */
274 struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */
275 u32 rate_delivered; /* saved rate sample: packets delivered */
276 u32 rate_interval_us; /* saved rate sample: time elapsed */
271 277
272 u32 rcv_wnd; /* Current receiver window */ 278 u32 rcv_wnd; /* Current receiver window */
273 u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ 279 u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
@@ -281,10 +287,9 @@ struct tcp_sock {
281 struct sk_buff* lost_skb_hint; 287 struct sk_buff* lost_skb_hint;
282 struct sk_buff *retransmit_skb_hint; 288 struct sk_buff *retransmit_skb_hint;
283 289
284 /* OOO segments go in this list. Note that socket lock must be held, 290 /* OOO segments go in this rbtree. Socket lock must be held. */
285 * as we do not use sk_buff_head lock. 291 struct rb_root out_of_order_queue;
286 */ 292 struct sk_buff *ooo_last_skb; /* cache rb_last(out_of_order_queue) */
287 struct sk_buff_head out_of_order_queue;
288 293
289 /* SACKs data, these 2 need to be together (see tcp_options_write) */ 294 /* SACKs data, these 2 need to be together (see tcp_options_write) */
290 struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */ 295 struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
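
The saved rate sample above (rate_delivered packets over rate_interval_us microseconds) can be turned into a delivery rate. A hedged sketch, similar in spirit to how such samples get reported; the scaling is illustrative and overflow is ignored:

#include <linux/math64.h>
#include <linux/tcp.h>

static u64 example_delivery_rate(const struct tcp_sock *tp, u32 mss)
{
	u64 rate;

	if (!tp->rate_interval_us)
		return 0;

	rate = (u64)tp->rate_delivered * mss * USEC_PER_SEC;
	return div_u64(rate, tp->rate_interval_us);	/* bytes per second */
}
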
diff --git a/include/linux/win_minmax.h b/include/linux/win_minmax.h
new file mode 100644
index 000000000000..56569604278f
--- /dev/null
+++ b/include/linux/win_minmax.h
@@ -0,0 +1,37 @@
1/**
2 * win_minmax.h: windowed min/max tracker by Kathleen Nichols.
3 *
4 */
5#ifndef MINMAX_H
6#define MINMAX_H
7
8#include <linux/types.h>
9
10/* A single data point for our parameterized min-max tracker */
11struct minmax_sample {
12 u32 t; /* time measurement was taken */
13 u32 v; /* value measured */
14};
15
16/* State for the parameterized min-max tracker */
17struct minmax {
18 struct minmax_sample s[3];
19};
20
21static inline u32 minmax_get(const struct minmax *m)
22{
23 return m->s[0].v;
24}
25
26static inline u32 minmax_reset(struct minmax *m, u32 t, u32 meas)
27{
28 struct minmax_sample val = { .t = t, .v = meas };
29
30 m->s[2] = m->s[1] = m->s[0] = val;
31 return m->s[0].v;
32}
33
34u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas);
35u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas);
36
37#endif
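
The tracker keeps the best sample plus two runners-up so the window can roll forward without rescanning history. A hedged usage sketch for a windowed minimum RTT; the window length is made up, and the units of @now and @win just have to match (e.g. jiffies or microseconds):

static u32 example_update_min_rtt(struct minmax *m, u32 now, u32 rtt_us)
{
	const u32 win = 10000;	/* window length, same units as @now */

	if (!minmax_get(m))	/* zeroed tracker: seed all three samples */
		return minmax_reset(m, now, rtt_us);

	return minmax_running_min(m, win, now, rtt_us);
}
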