aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2018-01-11 13:59:41 -0500
committerDavid S. Miller <davem@davemloft.net>2018-01-11 13:59:41 -0500
commit8c2e6c904fd8701a8d02d2bdb86871dc3ec4e85b (patch)
tree8f6c64f4799f193673c3788b45f3960910d64174
parent3d93e33780b059e7e95d78491692df40b18ceb5c (diff)
parent36e04a2d78d97cc3a02a168541dfa00c8e4b30f2 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says: ==================== pull-request: bpf-next 2018-01-11 The following pull-request contains BPF updates for your *net-next* tree. The main changes are: 1) Various BPF related improvements and fixes to nfp driver: i) do not register XDP RXQ structure to control queues, ii) round up program stack size to word size for nfp, iii) restrict MTU changes when BPF offload is active, iv) add more fully featured relocation support to JIT, v) add support for signed compare instructions to the nfp JIT, vi) export and reuse verifier log routine for nfp, and many more, from Jakub, Quentin and Nic. 2) Fix a syzkaller reported GPF in BPF's copy_verifier_state() when we hit kmalloc failure path, from Alexei. 3) Add two follow-up fixes for the recent XDP RXQ series: i) kvzalloc() allocated memory was only kfree()'ed, and ii) fix a memory leak where RX queue was not freed in netif_free_rx_queues(), from Jakub. 4) Add a sample for transferring XDP meta data into the skb, here it is used for setting skb->mark with the buffer from XDP, from Jesper. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c248
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c38
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h44
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c65
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c30
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h60
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.c30
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.h5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c27
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h111
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.h1
-rw-r--r--include/linux/bpf_verifier.h3
-rw-r--r--kernel/bpf/verifier.c20
-rw-r--r--net/core/dev.c9
-rw-r--r--samples/bpf/Makefile1
-rwxr-xr-xsamples/bpf/xdp2skb_meta.sh220
-rw-r--r--samples/bpf/xdp2skb_meta_kern.c103
18 files changed, 760 insertions, 263 deletions
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 0de59f04da84..47c5224f8d6f 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -85,7 +85,7 @@ static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
85 85
86static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog) 86static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
87{ 87{
88 return nfp_prog->start_off + nfp_prog->prog_len; 88 return nfp_prog->prog_len;
89} 89}
90 90
91static bool 91static bool
@@ -100,12 +100,6 @@ nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
100 return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off); 100 return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
101} 101}
102 102
103static unsigned int
104nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
105{
106 return offset - nfp_prog->start_off;
107}
108
109/* --- Emitters --- */ 103/* --- Emitters --- */
110static void 104static void
111__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, 105__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
@@ -195,22 +189,28 @@ __emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
195 nfp_prog_push(nfp_prog, insn); 189 nfp_prog_push(nfp_prog, insn);
196} 190}
197 191
198static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer) 192static void
193emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
194 enum nfp_relo_type relo)
199{ 195{
200 if (defer > 2) { 196 if (mask == BR_UNC && defer > 2) {
201 pr_err("BUG: branch defer out of bounds %d\n", defer); 197 pr_err("BUG: branch defer out of bounds %d\n", defer);
202 nfp_prog->error = -EFAULT; 198 nfp_prog->error = -EFAULT;
203 return; 199 return;
204 } 200 }
205 __emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer); 201
202 __emit_br(nfp_prog, mask,
203 mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
204 BR_CSS_NONE, addr, defer);
205
206 nfp_prog->prog[nfp_prog->prog_len - 1] |=
207 FIELD_PREP(OP_RELO_TYPE, relo);
206} 208}
207 209
208static void 210static void
209emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer) 211emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
210{ 212{
211 __emit_br(nfp_prog, mask, 213 emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
212 mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
213 BR_CSS_NONE, addr, defer);
214} 214}
215 215
216static void 216static void
@@ -515,16 +515,6 @@ static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
515 emit_nop(nfp_prog); 515 emit_nop(nfp_prog);
516} 516}
517 517
518static void
519wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
520 enum br_special special)
521{
522 emit_br(nfp_prog, mask, 0, 0);
523
524 nfp_prog->prog[nfp_prog->prog_len - 1] |=
525 FIELD_PREP(OP_BR_SPECIAL, special);
526}
527
528static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src) 518static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
529{ 519{
530 emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src); 520 emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
@@ -749,7 +739,7 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
749 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size)); 739 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
750 emit_alu(nfp_prog, reg_none(), 740 emit_alu(nfp_prog, reg_none(),
751 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog)); 741 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
752 wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT); 742 emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
753 743
754 /* Load data */ 744 /* Load data */
755 return data_ld(nfp_prog, imm_b(nfp_prog), 0, size); 745 return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
@@ -762,7 +752,7 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
762 /* Check packet length */ 752 /* Check packet length */
763 tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog)); 753 tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
764 emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg); 754 emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
765 wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT); 755 emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
766 756
767 /* Load data */ 757 /* Load data */
768 tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog)); 758 tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
@@ -1269,7 +1259,7 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1269 emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0); 1259 emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);
1270 1260
1271 /* Skip over the -EINVAL ret code (defer 2) */ 1261 /* Skip over the -EINVAL ret code (defer 2) */
1272 emit_br_def(nfp_prog, end, 2); 1262 emit_br(nfp_prog, BR_UNC, end, 2);
1273 1263
1274 emit_alu(nfp_prog, plen_reg(nfp_prog), 1264 emit_alu(nfp_prog, plen_reg(nfp_prog),
1275 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2)); 1265 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
@@ -1924,6 +1914,26 @@ static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1924 return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true); 1914 return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
1925} 1915}
1926 1916
1917static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1918{
1919 return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
1920}
1921
1922static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1923{
1924 return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
1925}
1926
1927static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1928{
1929 return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
1930}
1931
1932static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1933{
1934 return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
1935}
1936
1927static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 1937static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1928{ 1938{
1929 const struct bpf_insn *insn = &meta->insn; 1939 const struct bpf_insn *insn = &meta->insn;
@@ -2013,6 +2023,26 @@ static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2013 return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true); 2023 return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
2014} 2024}
2015 2025
2026static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2027{
2028 return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
2029}
2030
2031static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2032{
2033 return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
2034}
2035
2036static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2037{
2038 return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
2039}
2040
2041static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2042{
2043 return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
2044}
2045
2016static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2046static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2017{ 2047{
2018 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); 2048 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
@@ -2036,7 +2066,7 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2036 2066
2037static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) 2067static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2038{ 2068{
2039 wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT); 2069 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
2040 2070
2041 return 0; 2071 return 0;
2042} 2072}
@@ -2097,6 +2127,10 @@ static const instr_cb_t instr_cb[256] = {
2097 [BPF_JMP | BPF_JGE | BPF_K] = jge_imm, 2127 [BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
2098 [BPF_JMP | BPF_JLT | BPF_K] = jlt_imm, 2128 [BPF_JMP | BPF_JLT | BPF_K] = jlt_imm,
2099 [BPF_JMP | BPF_JLE | BPF_K] = jle_imm, 2129 [BPF_JMP | BPF_JLE | BPF_K] = jle_imm,
2130 [BPF_JMP | BPF_JSGT | BPF_K] = jsgt_imm,
2131 [BPF_JMP | BPF_JSGE | BPF_K] = jsge_imm,
2132 [BPF_JMP | BPF_JSLT | BPF_K] = jslt_imm,
2133 [BPF_JMP | BPF_JSLE | BPF_K] = jsle_imm,
2100 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, 2134 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
2101 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, 2135 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
2102 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, 2136 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
@@ -2104,24 +2138,16 @@ static const instr_cb_t instr_cb[256] = {
2104 [BPF_JMP | BPF_JGE | BPF_X] = jge_reg, 2138 [BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
2105 [BPF_JMP | BPF_JLT | BPF_X] = jlt_reg, 2139 [BPF_JMP | BPF_JLT | BPF_X] = jlt_reg,
2106 [BPF_JMP | BPF_JLE | BPF_X] = jle_reg, 2140 [BPF_JMP | BPF_JLE | BPF_X] = jle_reg,
2141 [BPF_JMP | BPF_JSGT | BPF_X] = jsgt_reg,
2142 [BPF_JMP | BPF_JSGE | BPF_X] = jsge_reg,
2143 [BPF_JMP | BPF_JSLT | BPF_X] = jslt_reg,
2144 [BPF_JMP | BPF_JSLE | BPF_X] = jsle_reg,
2107 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, 2145 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
2108 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, 2146 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
2109 [BPF_JMP | BPF_CALL] = call, 2147 [BPF_JMP | BPF_CALL] = call,
2110 [BPF_JMP | BPF_EXIT] = goto_out, 2148 [BPF_JMP | BPF_EXIT] = goto_out,
2111}; 2149};
2112 2150
2113/* --- Misc code --- */
2114static void br_set_offset(u64 *instr, u16 offset)
2115{
2116 u16 addr_lo, addr_hi;
2117
2118 addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
2119 addr_hi = offset != addr_lo;
2120 *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
2121 *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
2122 *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
2123}
2124
2125/* --- Assembler logic --- */ 2151/* --- Assembler logic --- */
2126static int nfp_fixup_branches(struct nfp_prog *nfp_prog) 2152static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
2127{ 2153{
@@ -2137,11 +2163,9 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
2137 continue; 2163 continue;
2138 2164
2139 if (list_is_last(&meta->l, &nfp_prog->insns)) 2165 if (list_is_last(&meta->l, &nfp_prog->insns))
2140 idx = nfp_prog->last_bpf_off; 2166 br_idx = nfp_prog->last_bpf_off;
2141 else 2167 else
2142 idx = list_next_entry(meta, l)->off - 1; 2168 br_idx = list_next_entry(meta, l)->off - 1;
2143
2144 br_idx = nfp_prog_offset_to_index(nfp_prog, idx);
2145 2169
2146 if (!nfp_is_br(nfp_prog->prog[br_idx])) { 2170 if (!nfp_is_br(nfp_prog->prog[br_idx])) {
2147 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n", 2171 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
@@ -2149,7 +2173,8 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
2149 return -ELOOP; 2173 return -ELOOP;
2150 } 2174 }
2151 /* Leave special branches for later */ 2175 /* Leave special branches for later */
2152 if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx])) 2176 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
2177 RELO_BR_REL)
2153 continue; 2178 continue;
2154 2179
2155 if (!meta->jmp_dst) { 2180 if (!meta->jmp_dst) {
@@ -2164,38 +2189,13 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
2164 return -ELOOP; 2189 return -ELOOP;
2165 } 2190 }
2166 2191
2167 for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off); 2192 for (idx = meta->off; idx <= br_idx; idx++) {
2168 idx <= br_idx; idx++) {
2169 if (!nfp_is_br(nfp_prog->prog[idx])) 2193 if (!nfp_is_br(nfp_prog->prog[idx]))
2170 continue; 2194 continue;
2171 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off); 2195 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
2172 } 2196 }
2173 } 2197 }
2174 2198
2175 /* Fixup 'goto out's separately, they can be scattered around */
2176 for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
2177 enum br_special special;
2178
2179 if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
2180 continue;
2181
2182 special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
2183 switch (special) {
2184 case OP_BR_NORMAL:
2185 break;
2186 case OP_BR_GO_OUT:
2187 br_set_offset(&nfp_prog->prog[br_idx],
2188 nfp_prog->tgt_out);
2189 break;
2190 case OP_BR_GO_ABORT:
2191 br_set_offset(&nfp_prog->prog[br_idx],
2192 nfp_prog->tgt_abort);
2193 break;
2194 }
2195
2196 nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
2197 }
2198
2199 return 0; 2199 return 0;
2200} 2200}
2201 2201
@@ -2223,7 +2223,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
2223 /* Target for aborts */ 2223 /* Target for aborts */
2224 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 2224 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
2225 2225
2226 emit_br_def(nfp_prog, nfp_prog->tgt_done, 2); 2226 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
2227 2227
2228 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2228 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
2229 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); 2229 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
@@ -2250,7 +2250,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
2250 emit_shf(nfp_prog, reg_b(2), 2250 emit_shf(nfp_prog, reg_b(2),
2251 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0); 2251 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
2252 2252
2253 emit_br_def(nfp_prog, nfp_prog->tgt_done, 2); 2253 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
2254 2254
2255 emit_shf(nfp_prog, reg_b(2), 2255 emit_shf(nfp_prog, reg_b(2),
2256 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4); 2256 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
@@ -2269,7 +2269,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
2269 /* Target for aborts */ 2269 /* Target for aborts */
2270 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); 2270 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
2271 2271
2272 emit_br_def(nfp_prog, nfp_prog->tgt_done, 2); 2272 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
2273 2273
2274 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2274 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
2275 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16); 2275 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
@@ -2290,7 +2290,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
2290 emit_shf(nfp_prog, reg_b(2), 2290 emit_shf(nfp_prog, reg_b(2),
2291 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0); 2291 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
2292 2292
2293 emit_br_def(nfp_prog, nfp_prog->tgt_done, 2); 2293 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
2294 2294
2295 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); 2295 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
2296 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); 2296 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
@@ -2706,25 +2706,38 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
2706 return 0; 2706 return 0;
2707} 2707}
2708 2708
2709static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore) 2709static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
2710{ 2710{
2711 __le64 *ustore = (__force __le64 *)prog;
2711 int i; 2712 int i;
2712 2713
2713 for (i = 0; i < nfp_prog->prog_len; i++) { 2714 for (i = 0; i < len; i++) {
2714 int err; 2715 int err;
2715 2716
2716 err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]); 2717 err = nfp_ustore_check_valid_no_ecc(prog[i]);
2717 if (err) 2718 if (err)
2718 return err; 2719 return err;
2719 2720
2720 nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]); 2721 ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
2721
2722 ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
2723 } 2722 }
2724 2723
2725 return 0; 2724 return 0;
2726} 2725}
2727 2726
2727static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
2728{
2729 void *prog;
2730
2731 prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
2732 if (!prog)
2733 return;
2734
2735 nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
2736 memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
2737 kvfree(nfp_prog->prog);
2738 nfp_prog->prog = prog;
2739}
2740
2728int nfp_bpf_jit(struct nfp_prog *nfp_prog) 2741int nfp_bpf_jit(struct nfp_prog *nfp_prog)
2729{ 2742{
2730 int ret; 2743 int ret;
@@ -2740,5 +2753,78 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog)
2740 return -EINVAL; 2753 return -EINVAL;
2741 } 2754 }
2742 2755
2743 return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog); 2756 nfp_bpf_prog_trim(nfp_prog);
2757
2758 return ret;
2759}
2760
2761void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
2762{
2763 struct nfp_insn_meta *meta;
2764
2765 /* Another pass to record jump information. */
2766 list_for_each_entry(meta, &nfp_prog->insns, l) {
2767 u64 code = meta->insn.code;
2768
2769 if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
2770 BPF_OP(code) != BPF_CALL) {
2771 struct nfp_insn_meta *dst_meta;
2772 unsigned short dst_indx;
2773
2774 dst_indx = meta->n + 1 + meta->insn.off;
2775 dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
2776 cnt);
2777
2778 meta->jmp_dst = dst_meta;
2779 dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
2780 }
2781 }
2782}
2783
2784void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
2785{
2786 unsigned int i;
2787 u64 *prog;
2788 int err;
2789
2790 prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
2791 GFP_KERNEL);
2792 if (!prog)
2793 return ERR_PTR(-ENOMEM);
2794
2795 for (i = 0; i < nfp_prog->prog_len; i++) {
2796 enum nfp_relo_type special;
2797
2798 special = FIELD_GET(OP_RELO_TYPE, prog[i]);
2799 switch (special) {
2800 case RELO_NONE:
2801 continue;
2802 case RELO_BR_REL:
2803 br_add_offset(&prog[i], bv->start_off);
2804 break;
2805 case RELO_BR_GO_OUT:
2806 br_set_offset(&prog[i],
2807 nfp_prog->tgt_out + bv->start_off);
2808 break;
2809 case RELO_BR_GO_ABORT:
2810 br_set_offset(&prog[i],
2811 nfp_prog->tgt_abort + bv->start_off);
2812 break;
2813 case RELO_BR_NEXT_PKT:
2814 br_set_offset(&prog[i], bv->tgt_done);
2815 break;
2816 }
2817
2818 prog[i] &= ~OP_RELO_TYPE;
2819 }
2820
2821 err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
2822 if (err)
2823 goto err_free_prog;
2824
2825 return prog;
2826
2827err_free_prog:
2828 kfree(prog);
2829 return ERR_PTR(err);
2744} 2830}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 4b63167906ca..e8cfe300c8c4 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -87,16 +87,21 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
87static int 87static int
88nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) 88nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
89{ 89{
90 struct nfp_bpf_vnic *bv;
90 int err; 91 int err;
91 92
92 nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL); 93 bv = kzalloc(sizeof(*bv), GFP_KERNEL);
93 if (!nn->app_priv) 94 if (!bv)
94 return -ENOMEM; 95 return -ENOMEM;
96 nn->app_priv = bv;
95 97
96 err = nfp_app_nic_vnic_alloc(app, nn, id); 98 err = nfp_app_nic_vnic_alloc(app, nn, id);
97 if (err) 99 if (err)
98 goto err_free_priv; 100 goto err_free_priv;
99 101
102 bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
103 bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
104
100 return 0; 105 return 0;
101err_free_priv: 106err_free_priv:
102 kfree(nn->app_priv); 107 kfree(nn->app_priv);
@@ -191,7 +196,27 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
191 196
192static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn) 197static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
193{ 198{
194 return nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF; 199 struct nfp_bpf_vnic *bv = nn->app_priv;
200
201 return !!bv->tc_prog;
202}
203
204static int
205nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
206{
207 struct nfp_net *nn = netdev_priv(netdev);
208 unsigned int max_mtu;
209
210 if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
211 return 0;
212
213 max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
214 if (new_mtu > max_mtu) {
215 nn_info(nn, "BPF offload active, MTU over %u not supported\n",
216 max_mtu);
217 return -EBUSY;
218 }
219 return 0;
195} 220}
196 221
197static int 222static int
@@ -311,6 +336,8 @@ const struct nfp_app_type app_bpf = {
311 .init = nfp_bpf_init, 336 .init = nfp_bpf_init,
312 .clean = nfp_bpf_clean, 337 .clean = nfp_bpf_clean,
313 338
339 .change_mtu = nfp_bpf_change_mtu,
340
314 .extra_cap = nfp_bpf_extra_cap, 341 .extra_cap = nfp_bpf_extra_cap,
315 342
316 .vnic_alloc = nfp_bpf_vnic_alloc, 343 .vnic_alloc = nfp_bpf_vnic_alloc,
@@ -318,9 +345,6 @@ const struct nfp_app_type app_bpf = {
318 345
319 .setup_tc = nfp_bpf_setup_tc, 346 .setup_tc = nfp_bpf_setup_tc,
320 .tc_busy = nfp_bpf_tc_busy, 347 .tc_busy = nfp_bpf_tc_busy,
348 .bpf = nfp_ndo_bpf,
321 .xdp_offload = nfp_bpf_xdp_offload, 349 .xdp_offload = nfp_bpf_xdp_offload,
322
323 .bpf_verifier_prep = nfp_bpf_verifier_prep,
324 .bpf_translate = nfp_bpf_translate,
325 .bpf_destroy = nfp_bpf_destroy,
326}; 350};
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 89a9b6393882..66381afee2a9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -42,17 +42,28 @@
42 42
43#include "../nfp_asm.h" 43#include "../nfp_asm.h"
44 44
45/* For branch fixup logic use up-most byte of branch instruction as scratch 45/* For relocation logic use up-most byte of branch instruction as scratch
46 * area. Remember to clear this before sending instructions to HW! 46 * area. Remember to clear this before sending instructions to HW!
47 */ 47 */
48#define OP_BR_SPECIAL 0xff00000000000000ULL 48#define OP_RELO_TYPE 0xff00000000000000ULL
49 49
50enum br_special { 50enum nfp_relo_type {
51 OP_BR_NORMAL = 0, 51 RELO_NONE = 0,
52 OP_BR_GO_OUT, 52 /* standard internal jumps */
53 OP_BR_GO_ABORT, 53 RELO_BR_REL,
54 /* internal jumps to parts of the outro */
55 RELO_BR_GO_OUT,
56 RELO_BR_GO_ABORT,
57 /* external jumps to fixed addresses */
58 RELO_BR_NEXT_PKT,
54}; 59};
55 60
61/* To make absolute relocated branches (branches other than RELO_BR_REL)
62 * distinguishable in user space dumps from normal jumps, add a large offset
63 * to them.
64 */
65#define BR_OFF_RELO 15000
66
56enum static_regs { 67enum static_regs {
57 STATIC_REG_IMM = 21, /* Bank AB */ 68 STATIC_REG_IMM = 21, /* Bank AB */
58 STATIC_REG_STACK = 22, /* Bank A */ 69 STATIC_REG_STACK = 22, /* Bank A */
@@ -191,11 +202,9 @@ static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
191 * @__prog_alloc_len: alloc size of @prog array 202 * @__prog_alloc_len: alloc size of @prog array
192 * @verifier_meta: temporary storage for verifier's insn meta 203 * @verifier_meta: temporary storage for verifier's insn meta
193 * @type: BPF program type 204 * @type: BPF program type
194 * @start_off: address of the first instruction in the memory
195 * @last_bpf_off: address of the last instruction translated from BPF 205 * @last_bpf_off: address of the last instruction translated from BPF
196 * @tgt_out: jump target for normal exit 206 * @tgt_out: jump target for normal exit
197 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer) 207 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
198 * @tgt_done: jump target to get the next packet
199 * @n_translated: number of successfully translated instructions (for errors) 208 * @n_translated: number of successfully translated instructions (for errors)
200 * @error: error code if something went wrong 209 * @error: error code if something went wrong
201 * @stack_depth: max stack depth from the verifier 210 * @stack_depth: max stack depth from the verifier
@@ -213,11 +222,9 @@ struct nfp_prog {
213 222
214 enum bpf_prog_type type; 223 enum bpf_prog_type type;
215 224
216 unsigned int start_off;
217 unsigned int last_bpf_off; 225 unsigned int last_bpf_off;
218 unsigned int tgt_out; 226 unsigned int tgt_out;
219 unsigned int tgt_abort; 227 unsigned int tgt_abort;
220 unsigned int tgt_done;
221 228
222 unsigned int n_translated; 229 unsigned int n_translated;
223 int error; 230 int error;
@@ -231,11 +238,16 @@ struct nfp_prog {
231/** 238/**
232 * struct nfp_bpf_vnic - per-vNIC BPF priv structure 239 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
233 * @tc_prog: currently loaded cls_bpf program 240 * @tc_prog: currently loaded cls_bpf program
241 * @start_off: address of the first instruction in the memory
242 * @tgt_done: jump target to get the next packet
234 */ 243 */
235struct nfp_bpf_vnic { 244struct nfp_bpf_vnic {
236 struct bpf_prog *tc_prog; 245 struct bpf_prog *tc_prog;
246 unsigned int start_off;
247 unsigned int tgt_done;
237}; 248};
238 249
250void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
239int nfp_bpf_jit(struct nfp_prog *prog); 251int nfp_bpf_jit(struct nfp_prog *prog);
240 252
241extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops; 253extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
@@ -244,16 +256,14 @@ struct netdev_bpf;
244struct nfp_app; 256struct nfp_app;
245struct nfp_net; 257struct nfp_net;
246 258
259int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
260 struct netdev_bpf *bpf);
247int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, 261int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
248 bool old_prog); 262 bool old_prog);
249 263
250int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
251 struct netdev_bpf *bpf);
252int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
253 struct bpf_prog *prog);
254int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
255 struct bpf_prog *prog);
256struct nfp_insn_meta * 264struct nfp_insn_meta *
257nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 265nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
258 unsigned int insn_idx, unsigned int n_insns); 266 unsigned int insn_idx, unsigned int n_insns);
267
268void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
259#endif 269#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index fa2905e67b07..320b2250d29a 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -42,6 +42,7 @@
42#include <linux/jiffies.h> 42#include <linux/jiffies.h>
43#include <linux/timer.h> 43#include <linux/timer.h>
44#include <linux/list.h> 44#include <linux/list.h>
45#include <linux/mm.h>
45 46
46#include <net/pkt_cls.h> 47#include <net/pkt_cls.h>
47#include <net/tc_act/tc_gact.h> 48#include <net/tc_act/tc_gact.h>
@@ -70,23 +71,7 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
70 list_add_tail(&meta->l, &nfp_prog->insns); 71 list_add_tail(&meta->l, &nfp_prog->insns);
71 } 72 }
72 73
73 /* Another pass to record jump information. */ 74 nfp_bpf_jit_prepare(nfp_prog, cnt);
74 list_for_each_entry(meta, &nfp_prog->insns, l) {
75 u64 code = meta->insn.code;
76
77 if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
78 BPF_OP(code) != BPF_CALL) {
79 struct nfp_insn_meta *dst_meta;
80 unsigned short dst_indx;
81
82 dst_indx = meta->n + 1 + meta->insn.off;
83 dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
84 cnt);
85
86 meta->jmp_dst = dst_meta;
87 dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
88 }
89 }
90 75
91 return 0; 76 return 0;
92} 77}
@@ -102,8 +87,9 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
102 kfree(nfp_prog); 87 kfree(nfp_prog);
103} 88}
104 89
105int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, 90static int
106 struct netdev_bpf *bpf) 91nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
92 struct netdev_bpf *bpf)
107{ 93{
108 struct bpf_prog *prog = bpf->verifier.prog; 94 struct bpf_prog *prog = bpf->verifier.prog;
109 struct nfp_prog *nfp_prog; 95 struct nfp_prog *nfp_prog;
@@ -133,8 +119,7 @@ err_free:
133 return ret; 119 return ret;
134} 120}
135 121
136int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn, 122static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
137 struct bpf_prog *prog)
138{ 123{
139 struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; 124 struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
140 unsigned int stack_size; 125 unsigned int stack_size;
@@ -146,37 +131,48 @@ int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
146 prog->aux->stack_depth, stack_size); 131 prog->aux->stack_depth, stack_size);
147 return -EOPNOTSUPP; 132 return -EOPNOTSUPP;
148 } 133 }
149 134 nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);
150 nfp_prog->stack_depth = prog->aux->stack_depth;
151 nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
152 nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
153 135
154 max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); 136 max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
155 nfp_prog->__prog_alloc_len = max_instr * sizeof(u64); 137 nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
156 138
157 nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL); 139 nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
158 if (!nfp_prog->prog) 140 if (!nfp_prog->prog)
159 return -ENOMEM; 141 return -ENOMEM;
160 142
161 return nfp_bpf_jit(nfp_prog); 143 return nfp_bpf_jit(nfp_prog);
162} 144}
163 145
164int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn, 146static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
165 struct bpf_prog *prog)
166{ 147{
167 struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; 148 struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
168 149
169 kfree(nfp_prog->prog); 150 kvfree(nfp_prog->prog);
170 nfp_prog_free(nfp_prog); 151 nfp_prog_free(nfp_prog);
171 152
172 return 0; 153 return 0;
173} 154}
174 155
156int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
157{
158 switch (bpf->command) {
159 case BPF_OFFLOAD_VERIFIER_PREP:
160 return nfp_bpf_verifier_prep(app, nn, bpf);
161 case BPF_OFFLOAD_TRANSLATE:
162 return nfp_bpf_translate(nn, bpf->offload.prog);
163 case BPF_OFFLOAD_DESTROY:
164 return nfp_bpf_destroy(nn, bpf->offload.prog);
165 default:
166 return -EINVAL;
167 }
168}
169
175static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog) 170static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
176{ 171{
177 struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; 172 struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
178 unsigned int max_mtu; 173 unsigned int max_mtu;
179 dma_addr_t dma_addr; 174 dma_addr_t dma_addr;
175 void *img;
180 int err; 176 int err;
181 177
182 max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; 178 max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
@@ -185,11 +181,17 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
185 return -EOPNOTSUPP; 181 return -EOPNOTSUPP;
186 } 182 }
187 183
188 dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog, 184 img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
185 if (IS_ERR(img))
186 return PTR_ERR(img);
187
188 dma_addr = dma_map_single(nn->dp.dev, img,
189 nfp_prog->prog_len * sizeof(u64), 189 nfp_prog->prog_len * sizeof(u64),
190 DMA_TO_DEVICE); 190 DMA_TO_DEVICE);
191 if (dma_mapping_error(nn->dp.dev, dma_addr)) 191 if (dma_mapping_error(nn->dp.dev, dma_addr)) {
192 kfree(img);
192 return -ENOMEM; 193 return -ENOMEM;
194 }
193 195
194 nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len); 196 nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
195 nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr); 197 nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
@@ -201,6 +203,7 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
201 203
202 dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64), 204 dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
203 DMA_TO_DEVICE); 205 DMA_TO_DEVICE);
206 kfree(img);
204 207
205 return err; 208 return err;
206} 209}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index d8870c2f11f3..7890d95d4018 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -31,8 +31,6 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#define pr_fmt(fmt) "NFP net bpf: " fmt
35
36#include <linux/bpf.h> 34#include <linux/bpf.h>
37#include <linux/bpf_verifier.h> 35#include <linux/bpf_verifier.h>
38#include <linux/kernel.h> 36#include <linux/kernel.h>
@@ -41,6 +39,9 @@
41#include "fw.h" 39#include "fw.h"
42#include "main.h" 40#include "main.h"
43 41
42#define pr_vlog(env, fmt, ...) \
43 bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)
44
44struct nfp_insn_meta * 45struct nfp_insn_meta *
45nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, 46nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
46 unsigned int insn_idx, unsigned int n_insns) 47 unsigned int insn_idx, unsigned int n_insns)
@@ -116,18 +117,18 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
116 switch (func_id) { 117 switch (func_id) {
117 case BPF_FUNC_xdp_adjust_head: 118 case BPF_FUNC_xdp_adjust_head:
118 if (!bpf->adjust_head.off_max) { 119 if (!bpf->adjust_head.off_max) {
119 pr_warn("adjust_head not supported by FW\n"); 120 pr_vlog(env, "adjust_head not supported by FW\n");
120 return -EOPNOTSUPP; 121 return -EOPNOTSUPP;
121 } 122 }
122 if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) { 123 if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
123 pr_warn("adjust_head: FW requires shifting metadata, not supported by the driver\n"); 124 pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
124 return -EOPNOTSUPP; 125 return -EOPNOTSUPP;
125 } 126 }
126 127
127 nfp_record_adjust_head(bpf, nfp_prog, meta, reg2); 128 nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
128 break; 129 break;
129 default: 130 default:
130 pr_warn("unsupported function id: %d\n", func_id); 131 pr_vlog(env, "unsupported function id: %d\n", func_id);
131 return -EOPNOTSUPP; 132 return -EOPNOTSUPP;
132 } 133 }
133 134
@@ -150,7 +151,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
150 char tn_buf[48]; 151 char tn_buf[48];
151 152
152 tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off); 153 tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
153 pr_info("unsupported exit state: %d, var_off: %s\n", 154 pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
154 reg0->type, tn_buf); 155 reg0->type, tn_buf);
155 return -EINVAL; 156 return -EINVAL;
156 } 157 }
@@ -160,7 +161,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
160 imm <= TC_ACT_REDIRECT && 161 imm <= TC_ACT_REDIRECT &&
161 imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN && 162 imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
162 imm != TC_ACT_QUEUED) { 163 imm != TC_ACT_QUEUED) {
163 pr_info("unsupported exit state: %d, imm: %llx\n", 164 pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
164 reg0->type, imm); 165 reg0->type, imm);
165 return -EINVAL; 166 return -EINVAL;
166 } 167 }
@@ -171,12 +172,13 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
171static int 172static int
172nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog, 173nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
173 struct nfp_insn_meta *meta, 174 struct nfp_insn_meta *meta,
174 const struct bpf_reg_state *reg) 175 const struct bpf_reg_state *reg,
176 struct bpf_verifier_env *env)
175{ 177{
176 s32 old_off, new_off; 178 s32 old_off, new_off;
177 179
178 if (!tnum_is_const(reg->var_off)) { 180 if (!tnum_is_const(reg->var_off)) {
179 pr_info("variable ptr stack access\n"); 181 pr_vlog(env, "variable ptr stack access\n");
180 return -EINVAL; 182 return -EINVAL;
181 } 183 }
182 184
@@ -194,7 +196,7 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
194 if (old_off % 4 == new_off % 4) 196 if (old_off % 4 == new_off % 4)
195 return 0; 197 return 0;
196 198
197 pr_info("stack access changed location was:%d is:%d\n", 199 pr_vlog(env, "stack access changed location was:%d is:%d\n",
198 old_off, new_off); 200 old_off, new_off);
199 return -EINVAL; 201 return -EINVAL;
200} 202}
@@ -209,18 +211,18 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
209 if (reg->type != PTR_TO_CTX && 211 if (reg->type != PTR_TO_CTX &&
210 reg->type != PTR_TO_STACK && 212 reg->type != PTR_TO_STACK &&
211 reg->type != PTR_TO_PACKET) { 213 reg->type != PTR_TO_PACKET) {
212 pr_info("unsupported ptr type: %d\n", reg->type); 214 pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
213 return -EINVAL; 215 return -EINVAL;
214 } 216 }
215 217
216 if (reg->type == PTR_TO_STACK) { 218 if (reg->type == PTR_TO_STACK) {
217 err = nfp_bpf_check_stack_access(nfp_prog, meta, reg); 219 err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
218 if (err) 220 if (err)
219 return err; 221 return err;
220 } 222 }
221 223
222 if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) { 224 if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
223 pr_info("ptr type changed for instruction %d -> %d\n", 225 pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
224 meta->ptr.type, reg->type); 226 meta->ptr.type, reg->type);
225 return -EINVAL; 227 return -EINVAL;
226 } 228 }
@@ -241,7 +243,7 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
241 243
242 if (meta->insn.src_reg >= MAX_BPF_REG || 244 if (meta->insn.src_reg >= MAX_BPF_REG ||
243 meta->insn.dst_reg >= MAX_BPF_REG) { 245 meta->insn.dst_reg >= MAX_BPF_REG) {
244 pr_err("program uses extended registers - jit hardening?\n"); 246 pr_vlog(env, "program uses extended registers - jit hardening?\n");
245 return -EINVAL; 247 return -EINVAL;
246 } 248 }
247 249
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 3af1943a8521..32ff46a00f70 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -82,15 +82,15 @@ extern const struct nfp_app_type app_flower;
82 * @repr_clean: representor about to be unregistered 82 * @repr_clean: representor about to be unregistered
83 * @repr_open: representor netdev open callback 83 * @repr_open: representor netdev open callback
84 * @repr_stop: representor netdev stop callback 84 * @repr_stop: representor netdev stop callback
85 * @change_mtu: MTU change on a netdev has been requested (veto-only, change
86 * is not guaranteed to be committed)
85 * @start: start application logic 87 * @start: start application logic
86 * @stop: stop application logic 88 * @stop: stop application logic
87 * @ctrl_msg_rx: control message handler 89 * @ctrl_msg_rx: control message handler
88 * @setup_tc: setup TC ndo 90 * @setup_tc: setup TC ndo
89 * @tc_busy: TC HW offload busy (rules loaded) 91 * @tc_busy: TC HW offload busy (rules loaded)
92 * @bpf: BPF ndo offload-related calls
90 * @xdp_offload: offload an XDP program 93 * @xdp_offload: offload an XDP program
91 * @bpf_verifier_prep: verifier prep for dev-specific BPF programs
92 * @bpf_translate: translate call for dev-specific BPF programs
93 * @bpf_destroy: destroy for dev-specific BPF programs
94 * @eswitch_mode_get: get SR-IOV eswitch mode 94 * @eswitch_mode_get: get SR-IOV eswitch mode
95 * @sriov_enable: app-specific sriov initialisation 95 * @sriov_enable: app-specific sriov initialisation
96 * @sriov_disable: app-specific sriov clean-up 96 * @sriov_disable: app-specific sriov clean-up
@@ -120,6 +120,9 @@ struct nfp_app_type {
120 int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr); 120 int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
121 int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr); 121 int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
122 122
123 int (*change_mtu)(struct nfp_app *app, struct net_device *netdev,
124 int new_mtu);
125
123 int (*start)(struct nfp_app *app); 126 int (*start)(struct nfp_app *app);
124 void (*stop)(struct nfp_app *app); 127 void (*stop)(struct nfp_app *app);
125 128
@@ -128,14 +131,10 @@ struct nfp_app_type {
128 int (*setup_tc)(struct nfp_app *app, struct net_device *netdev, 131 int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
129 enum tc_setup_type type, void *type_data); 132 enum tc_setup_type type, void *type_data);
130 bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn); 133 bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
134 int (*bpf)(struct nfp_app *app, struct nfp_net *nn,
135 struct netdev_bpf *xdp);
131 int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn, 136 int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
132 struct bpf_prog *prog); 137 struct bpf_prog *prog);
133 int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn,
134 struct netdev_bpf *bpf);
135 int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn,
136 struct bpf_prog *prog);
137 int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn,
138 struct bpf_prog *prog);
139 138
140 int (*sriov_enable)(struct nfp_app *app, int num_vfs); 139 int (*sriov_enable)(struct nfp_app *app, int num_vfs);
141 void (*sriov_disable)(struct nfp_app *app); 140 void (*sriov_disable)(struct nfp_app *app);
@@ -242,6 +241,14 @@ nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
242 app->type->repr_clean(app, netdev); 241 app->type->repr_clean(app, netdev);
243} 242}
244 243
244static inline int
245nfp_app_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
246{
247 if (!app || !app->type->change_mtu)
248 return 0;
249 return app->type->change_mtu(app, netdev, new_mtu);
250}
251
245static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl) 252static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
246{ 253{
247 app->ctrl = ctrl; 254 app->ctrl = ctrl;
@@ -303,6 +310,14 @@ static inline int nfp_app_setup_tc(struct nfp_app *app,
303 return app->type->setup_tc(app, netdev, type, type_data); 310 return app->type->setup_tc(app, netdev, type, type_data);
304} 311}
305 312
313static inline int nfp_app_bpf(struct nfp_app *app, struct nfp_net *nn,
314 struct netdev_bpf *bpf)
315{
316 if (!app || !app->type->bpf)
317 return -EINVAL;
318 return app->type->bpf(app, nn, bpf);
319}
320
306static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn, 321static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
307 struct bpf_prog *prog) 322 struct bpf_prog *prog)
308{ 323{
@@ -311,33 +326,6 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
311 return app->type->xdp_offload(app, nn, prog); 326 return app->type->xdp_offload(app, nn, prog);
312} 327}
313 328
314static inline int
315nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
316 struct netdev_bpf *bpf)
317{
318 if (!app || !app->type->bpf_verifier_prep)
319 return -EOPNOTSUPP;
320 return app->type->bpf_verifier_prep(app, nn, bpf);
321}
322
323static inline int
324nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
325 struct bpf_prog *prog)
326{
327 if (!app || !app->type->bpf_translate)
328 return -EOPNOTSUPP;
329 return app->type->bpf_translate(app, nn, prog);
330}
331
332static inline int
333nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
334 struct bpf_prog *prog)
335{
336 if (!app || !app->type->bpf_destroy)
337 return -EOPNOTSUPP;
338 return app->type->bpf_destroy(app, nn, prog);
339}
340
341static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb) 329static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
342{ 330{
343 trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0, 331 trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index d3610987fb07..9ee3a3f60cc7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -50,6 +50,36 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
50 [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 }, 50 [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
51}; 51};
52 52
53u16 br_get_offset(u64 instr)
54{
55 u16 addr_lo, addr_hi;
56
57 addr_lo = FIELD_GET(OP_BR_ADDR_LO, instr);
58 addr_hi = FIELD_GET(OP_BR_ADDR_HI, instr);
59
60 return (addr_hi * ((OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO)) + 1)) |
61 addr_lo;
62}
63
64void br_set_offset(u64 *instr, u16 offset)
65{
66 u16 addr_lo, addr_hi;
67
68 addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
69 addr_hi = offset != addr_lo;
70 *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
71 *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
72 *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
73}
74
75void br_add_offset(u64 *instr, u16 offset)
76{
77 u16 addr;
78
79 addr = br_get_offset(*instr);
80 br_set_offset(instr, addr + offset);
81}
82
53static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst) 83static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
54{ 84{
55 bool lm_id, lm_dec = false; 85 bool lm_id, lm_dec = false;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index a24daeab1a77..20e51cb60e69 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -81,6 +81,7 @@ enum br_mask {
81 BR_BHS = 0x04, 81 BR_BHS = 0x04,
82 BR_BLO = 0x05, 82 BR_BLO = 0x05,
83 BR_BGE = 0x08, 83 BR_BGE = 0x08,
84 BR_BLT = 0x09,
84 BR_UNC = 0x18, 85 BR_UNC = 0x18,
85}; 86};
86 87
@@ -93,6 +94,10 @@ enum br_ctx_signal_state {
93 BR_CSS_NONE = 2, 94 BR_CSS_NONE = 2,
94}; 95};
95 96
97u16 br_get_offset(u64 instr);
98void br_set_offset(u64 *instr, u16 offset);
99void br_add_offset(u64 *instr, u16 offset);
100
96#define OP_BBYTE_BASE 0x0c800000000ULL 101#define OP_BBYTE_BASE 0x0c800000000ULL
97#define OP_BB_A_SRC 0x000000000ffULL 102#define OP_BB_A_SRC 0x000000000ffULL
98#define OP_BB_BYTE 0x00000000300ULL 103#define OP_BB_BYTE 0x00000000300ULL
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 05e071b3dc5b..caee147fce04 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2253,7 +2253,8 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
2253 struct nfp_net_r_vector *r_vec = rx_ring->r_vec; 2253 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
2254 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; 2254 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
2255 2255
2256 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 2256 if (dp->netdev)
2257 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
2257 kfree(rx_ring->rxbufs); 2258 kfree(rx_ring->rxbufs);
2258 2259
2259 if (rx_ring->rxds) 2260 if (rx_ring->rxds)
@@ -2279,9 +2280,12 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
2279{ 2280{
2280 int sz, err; 2281 int sz, err;
2281 2282
2282 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, rx_ring->idx); 2283 if (dp->netdev) {
2283 if (err < 0) 2284 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
2284 return err; 2285 rx_ring->idx);
2286 if (err < 0)
2287 return err;
2288 }
2285 2289
2286 rx_ring->cnt = dp->rxd_cnt; 2290 rx_ring->cnt = dp->rxd_cnt;
2287 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; 2291 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
@@ -3045,6 +3049,11 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
3045{ 3049{
3046 struct nfp_net *nn = netdev_priv(netdev); 3050 struct nfp_net *nn = netdev_priv(netdev);
3047 struct nfp_net_dp *dp; 3051 struct nfp_net_dp *dp;
3052 int err;
3053
3054 err = nfp_app_change_mtu(nn->app, netdev, new_mtu);
3055 if (err)
3056 return err;
3048 3057
3049 dp = nfp_net_clone_dp(nn); 3058 dp = nfp_net_clone_dp(nn);
3050 if (!dp) 3059 if (!dp)
@@ -3405,16 +3414,8 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
3405 xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0; 3414 xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
3406 xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0; 3415 xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0;
3407 return 0; 3416 return 0;
3408 case BPF_OFFLOAD_VERIFIER_PREP:
3409 return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
3410 case BPF_OFFLOAD_TRANSLATE:
3411 return nfp_app_bpf_translate(nn->app, nn,
3412 xdp->offload.prog);
3413 case BPF_OFFLOAD_DESTROY:
3414 return nfp_app_bpf_destroy(nn->app, nn,
3415 xdp->offload.prog);
3416 default: 3417 default:
3417 return -EINVAL; 3418 return nfp_app_bpf(nn->app, nn, xdp);
3418 } 3419 }
3419} 3420}
3420 3421
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 782d452e0fc2..25c36001bffa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -91,23 +91,24 @@
91#define NFP_NET_RSS_IPV6_EX_UDP 9 91#define NFP_NET_RSS_IPV6_EX_UDP 9
92 92
93/** 93/**
94 * @NFP_NET_TXR_MAX: Maximum number of TX rings 94 * Ring counts
95 * @NFP_NET_RXR_MAX: Maximum number of RX rings 95 * %NFP_NET_TXR_MAX: Maximum number of TX rings
96 * %NFP_NET_RXR_MAX: Maximum number of RX rings
96 */ 97 */
97#define NFP_NET_TXR_MAX 64 98#define NFP_NET_TXR_MAX 64
98#define NFP_NET_RXR_MAX 64 99#define NFP_NET_RXR_MAX 64
99 100
100/** 101/**
101 * Read/Write config words (0x0000 - 0x002c) 102 * Read/Write config words (0x0000 - 0x002c)
102 * @NFP_NET_CFG_CTRL: Global control 103 * %NFP_NET_CFG_CTRL: Global control
103 * @NFP_NET_CFG_UPDATE: Indicate which fields are updated 104 * %NFP_NET_CFG_UPDATE: Indicate which fields are updated
104 * @NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings 105 * %NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
105 * @NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings 106 * %NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
106 * @NFP_NET_CFG_MTU: Set MTU size 107 * %NFP_NET_CFG_MTU: Set MTU size
107 * @NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU) 108 * %NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU)
108 * @NFP_NET_CFG_EXN: MSI-X table entry for exceptions 109 * %NFP_NET_CFG_EXN: MSI-X table entry for exceptions
109 * @NFP_NET_CFG_LSC: MSI-X table entry for link state changes 110 * %NFP_NET_CFG_LSC: MSI-X table entry for link state changes
110 * @NFP_NET_CFG_MACADDR: MAC address 111 * %NFP_NET_CFG_MACADDR: MAC address
111 * 112 *
112 * TODO: 113 * TODO:
113 * - define Error details in UPDATE 114 * - define Error details in UPDATE
@@ -176,14 +177,14 @@
176 177
177/** 178/**
178 * Read-only words (0x0030 - 0x0050): 179 * Read-only words (0x0030 - 0x0050):
179 * @NFP_NET_CFG_VERSION: Firmware version number 180 * %NFP_NET_CFG_VERSION: Firmware version number
180 * @NFP_NET_CFG_STS: Status 181 * %NFP_NET_CFG_STS: Status
181 * @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL) 182 * %NFP_NET_CFG_CAP: Capabilities (same bits as %NFP_NET_CFG_CTRL)
182 * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings 183 * %NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
183 * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings 184 * %NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
184 * @NFP_NET_CFG_MAX_MTU: Maximum support MTU 185 * %NFP_NET_CFG_MAX_MTU: Maximum support MTU
185 * @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only) 186 * %NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
186 * @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only) 187 * %NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
187 * 188 *
188 * TODO: 189 * TODO:
189 * - define more STS bits 190 * - define more STS bits
@@ -228,31 +229,31 @@
228 229
229/** 230/**
230 * RSS capabilities 231 * RSS capabilities
231 * @NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as 232 * %NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
232 * @NFP_NET_CFG_RSS_HFUNC) 233 * %NFP_NET_CFG_RSS_HFUNC)
233 */ 234 */
234#define NFP_NET_CFG_RSS_CAP 0x0054 235#define NFP_NET_CFG_RSS_CAP 0x0054
235#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000 236#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000
236 237
237/** 238/**
238 * VXLAN/UDP encap configuration 239 * VXLAN/UDP encap configuration
239 * @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports 240 * %NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
240 * @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes 241 * %NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
241 */ 242 */
242#define NFP_NET_CFG_VXLAN_PORT 0x0060 243#define NFP_NET_CFG_VXLAN_PORT 0x0060
243#define NFP_NET_CFG_VXLAN_SZ 0x0008 244#define NFP_NET_CFG_VXLAN_SZ 0x0008
244 245
245/** 246/**
246 * BPF section 247 * BPF section
247 * @NFP_NET_CFG_BPF_ABI: BPF ABI version 248 * %NFP_NET_CFG_BPF_ABI: BPF ABI version
248 * @NFP_NET_CFG_BPF_CAP: BPF capabilities 249 * %NFP_NET_CFG_BPF_CAP: BPF capabilities
249 * @NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes 250 * %NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
250 * @NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded 251 * %NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded
251 * @NFP_NET_CFG_BPF_DONE: Offset to jump to on exit 252 * %NFP_NET_CFG_BPF_DONE: Offset to jump to on exit
252 * @NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks 253 * %NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks
253 * @NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks 254 * %NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks
254 * @NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions 255 * %NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions
255 * @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code 256 * %NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
256 */ 257 */
257#define NFP_NET_CFG_BPF_ABI 0x0080 258#define NFP_NET_CFG_BPF_ABI 0x0080
258#define NFP_NET_BPF_ABI 2 259#define NFP_NET_BPF_ABI 2
@@ -278,9 +279,9 @@
278/** 279/**
279 * RSS configuration (0x0100 - 0x01ac): 280 * RSS configuration (0x0100 - 0x01ac):
280 * Used only when NFP_NET_CFG_CTRL_RSS is enabled 281 * Used only when NFP_NET_CFG_CTRL_RSS is enabled
281 * @NFP_NET_CFG_RSS_CFG: RSS configuration word 282 * %NFP_NET_CFG_RSS_CFG: RSS configuration word
282 * @NFP_NET_CFG_RSS_KEY: RSS "secret" key 283 * %NFP_NET_CFG_RSS_KEY: RSS "secret" key
283 * @NFP_NET_CFG_RSS_ITBL: RSS indirection table 284 * %NFP_NET_CFG_RSS_ITBL: RSS indirection table
284 */ 285 */
285#define NFP_NET_CFG_RSS_BASE 0x0100 286#define NFP_NET_CFG_RSS_BASE 0x0100
286#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE 287#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE
@@ -305,13 +306,13 @@
305 306
306/** 307/**
307 * TX ring configuration (0x200 - 0x800) 308 * TX ring configuration (0x200 - 0x800)
308 * @NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration 309 * %NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
309 * @NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries) 310 * %NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
310 * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries) 311 * %NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
311 * @NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries) 312 * %NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries)
312 * @NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries) 313 * %NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries)
313 * @NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries) 314 * %NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries)
314 * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet 315 * %NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
315 */ 316 */
316#define NFP_NET_CFG_TXR_BASE 0x0200 317#define NFP_NET_CFG_TXR_BASE 0x0200
317#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8)) 318#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8))
@@ -325,12 +326,12 @@
325 326
326/** 327/**
327 * RX ring configuration (0x0800 - 0x0c00) 328 * RX ring configuration (0x0800 - 0x0c00)
328 * @NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration 329 * %NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
329 * @NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries) 330 * %NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries)
330 * @NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries) 331 * %NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries)
331 * @NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries) 332 * %NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries)
332 * @NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries) 333 * %NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries)
333 * @NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries) 334 * %NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
334 */ 335 */
335#define NFP_NET_CFG_RXR_BASE 0x0800 336#define NFP_NET_CFG_RXR_BASE 0x0800
336#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8)) 337#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8))
@@ -343,7 +344,7 @@
343/** 344/**
344 * Interrupt Control/Cause registers (0x0c00 - 0x0d00) 345 * Interrupt Control/Cause registers (0x0c00 - 0x0d00)
345 * These registers are only used when MSI-X auto-masking is not 346 * These registers are only used when MSI-X auto-masking is not
346 * enabled (@NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index 347 * enabled (%NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index
347 * by MSI-X entry and are 1B in size. If an entry is zero, the 348 * by MSI-X entry and are 1B in size. If an entry is zero, the
348 * corresponding entry is enabled. If the FW generates an interrupt, 349 * corresponding entry is enabled. If the FW generates an interrupt,
349 * it writes a cause into the corresponding field. This also masks 350 * it writes a cause into the corresponding field. This also masks
@@ -393,8 +394,8 @@
393/** 394/**
394 * Per ring stats (0x1000 - 0x1800) 395 * Per ring stats (0x1000 - 0x1800)
395 * options, 64bit per entry 396 * options, 64bit per entry
396 * @NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count) 397 * %NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
397 * @NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count) 398 * %NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
398 */ 399 */
399#define NFP_NET_CFG_TXR_STATS_BASE 0x1000 400#define NFP_NET_CFG_TXR_STATS_BASE 0x1000
400#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \ 401#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \
@@ -418,10 +419,10 @@
418 419
419/** 420/**
420 * VLAN filtering using general use mailbox 421 * VLAN filtering using general use mailbox
421 * @NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox 422 * %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
422 * @NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter 423 * %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
423 * @NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter 424 * %NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
424 * @NFP_NET_CFG_VXLAN_SZ: Size of the VLAN filter mailbox in bytes 425 * %NFP_NET_CFG_VXLAN_SZ: Size of the VLAN filter mailbox in bytes
425 */ 426 */
426#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_VAL 427#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_VAL
427#define NFP_NET_CFG_VLAN_FILTER_VID NFP_NET_CFG_VLAN_FILTER 428#define NFP_NET_CFG_VLAN_FILTER_VID NFP_NET_CFG_VLAN_FILTER
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index f50aa119570a..317f87cc3cc6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -186,6 +186,13 @@ nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
186 return -EINVAL; 186 return -EINVAL;
187} 187}
188 188
189static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
190{
191 struct nfp_repr *repr = netdev_priv(netdev);
192
193 return nfp_app_change_mtu(repr->app, netdev, new_mtu);
194}
195
189static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev) 196static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
190{ 197{
191 struct nfp_repr *repr = netdev_priv(netdev); 198 struct nfp_repr *repr = netdev_priv(netdev);
@@ -240,6 +247,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
240 .ndo_open = nfp_repr_open, 247 .ndo_open = nfp_repr_open,
241 .ndo_stop = nfp_repr_stop, 248 .ndo_stop = nfp_repr_stop,
242 .ndo_start_xmit = nfp_repr_xmit, 249 .ndo_start_xmit = nfp_repr_xmit,
250 .ndo_change_mtu = nfp_repr_change_mtu,
243 .ndo_get_stats64 = nfp_repr_get_stats64, 251 .ndo_get_stats64 = nfp_repr_get_stats64,
244 .ndo_has_offload_stats = nfp_repr_has_offload_stats, 252 .ndo_has_offload_stats = nfp_repr_has_offload_stats,
245 .ndo_get_offload_stats = nfp_repr_get_offload_stats, 253 .ndo_get_offload_stats = nfp_repr_get_offload_stats,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index 5d4d897bc9c6..cbc7badf40a0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -89,6 +89,7 @@ struct nfp_repr {
89 * @NFP_REPR_TYPE_PHYS_PORT: external NIC port 89 * @NFP_REPR_TYPE_PHYS_PORT: external NIC port
90 * @NFP_REPR_TYPE_PF: physical function 90 * @NFP_REPR_TYPE_PF: physical function
91 * @NFP_REPR_TYPE_VF: virtual function 91 * @NFP_REPR_TYPE_VF: virtual function
92 * @__NFP_REPR_TYPE_MAX: number of representor types
92 */ 93 */
93enum nfp_repr_type { 94enum nfp_repr_type {
94 NFP_REPR_TYPE_PHYS_PORT, 95 NFP_REPR_TYPE_PHYS_PORT,
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 2feb218c001d..6b66cd1aa0b9 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -192,6 +192,9 @@ struct bpf_verifier_env {
192 u32 subprog_cnt; 192 u32 subprog_cnt;
193}; 193};
194 194
195__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
196 const char *fmt, ...);
197
195static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) 198static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
196{ 199{
197 struct bpf_verifier_state *cur = env->cur_state; 200 struct bpf_verifier_state *cur = env->cur_state;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a2b211262c25..3b2b47666180 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -169,11 +169,11 @@ struct bpf_call_arg_meta {
169static DEFINE_MUTEX(bpf_verifier_lock); 169static DEFINE_MUTEX(bpf_verifier_lock);
170 170
171/* log_level controls verbosity level of eBPF verifier. 171/* log_level controls verbosity level of eBPF verifier.
172 * verbose() is used to dump the verification trace to the log, so the user 172 * bpf_verifier_log_write() is used to dump the verification trace to the log,
173 * can figure out what's wrong with the program 173 * so the user can figure out what's wrong with the program
174 */ 174 */
175static __printf(2, 3) void verbose(struct bpf_verifier_env *env, 175__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
176 const char *fmt, ...) 176 const char *fmt, ...)
177{ 177{
178 struct bpf_verifer_log *log = &env->log; 178 struct bpf_verifer_log *log = &env->log;
179 unsigned int n; 179 unsigned int n;
@@ -197,6 +197,14 @@ static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
197 else 197 else
198 log->ubuf = NULL; 198 log->ubuf = NULL;
199} 199}
200EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
201/* Historically bpf_verifier_log_write was called verbose, but the name was too
202 * generic for symbol export. The function was renamed, but not the calls in
203 * the verifier to avoid complicating backports. Hence the alias below.
204 */
205static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
206 const char *fmt, ...)
207 __attribute__((alias("bpf_verifier_log_write")));
200 208
201static bool type_is_pkt_pointer(enum bpf_reg_type type) 209static bool type_is_pkt_pointer(enum bpf_reg_type type)
202{ 210{
@@ -375,6 +383,8 @@ static int realloc_func_state(struct bpf_func_state *state, int size,
375 383
376static void free_func_state(struct bpf_func_state *state) 384static void free_func_state(struct bpf_func_state *state)
377{ 385{
386 if (!state)
387 return;
378 kfree(state->stack); 388 kfree(state->stack);
379 kfree(state); 389 kfree(state);
380} 390}
@@ -487,6 +497,8 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
487 } 497 }
488 return &elem->st; 498 return &elem->st;
489err: 499err:
500 free_verifier_state(env->cur_state, true);
501 env->cur_state = NULL;
490 /* pop all elements and return */ 502 /* pop all elements and return */
491 while (!pop_stack(env, NULL, NULL)); 503 while (!pop_stack(env, NULL, NULL));
492 return NULL; 504 return NULL;
diff --git a/net/core/dev.c b/net/core/dev.c
index 5cb782f074d7..3d24d9a59086 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7657,7 +7657,7 @@ err_rxq_info:
7657 /* Rollback successful reg's and free other resources */ 7657 /* Rollback successful reg's and free other resources */
7658 while (i--) 7658 while (i--)
7659 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 7659 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
7660 kfree(dev->_rx); 7660 kvfree(dev->_rx);
7661 dev->_rx = NULL; 7661 dev->_rx = NULL;
7662 return err; 7662 return err;
7663} 7663}
@@ -7665,16 +7665,15 @@ err_rxq_info:
7665static void netif_free_rx_queues(struct net_device *dev) 7665static void netif_free_rx_queues(struct net_device *dev)
7666{ 7666{
7667 unsigned int i, count = dev->num_rx_queues; 7667 unsigned int i, count = dev->num_rx_queues;
7668 struct netdev_rx_queue *rx;
7669 7668
7670 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ 7669 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
7671 if (!dev->_rx) 7670 if (!dev->_rx)
7672 return; 7671 return;
7673 7672
7674 rx = dev->_rx;
7675
7676 for (i = 0; i < count; i++) 7673 for (i = 0; i < count; i++)
7677 xdp_rxq_info_unreg(&rx[i].xdp_rxq); 7674 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
7675
7676 kvfree(dev->_rx);
7678} 7677}
7679 7678
7680static void netdev_init_one_queue(struct net_device *dev, 7679static void netdev_init_one_queue(struct net_device *dev,
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 3ff7a05bea9a..7f61a3d57fa7 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -142,6 +142,7 @@ always += xdp_redirect_map_kern.o
142always += xdp_redirect_cpu_kern.o 142always += xdp_redirect_cpu_kern.o
143always += xdp_monitor_kern.o 143always += xdp_monitor_kern.o
144always += xdp_rxq_info_kern.o 144always += xdp_rxq_info_kern.o
145always += xdp2skb_meta_kern.o
145always += syscall_tp_kern.o 146always += syscall_tp_kern.o
146 147
147HOSTCFLAGS += -I$(objtree)/usr/include 148HOSTCFLAGS += -I$(objtree)/usr/include
diff --git a/samples/bpf/xdp2skb_meta.sh b/samples/bpf/xdp2skb_meta.sh
new file mode 100755
index 000000000000..b9c9549c4c27
--- /dev/null
+++ b/samples/bpf/xdp2skb_meta.sh
@@ -0,0 +1,220 @@
1#!/bin/bash
2#
3# SPDX-License-Identifier: GPL-2.0
4# Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
5#
6# Bash-shell example on using iproute2 tools 'tc' and 'ip' to load
7# eBPF programs, both for XDP and clsbpf. Shell script function
8# wrappers and even long options parsing is illustrated, for ease of
9# use.
10#
11# Related to sample/bpf/xdp2skb_meta_kern.c, which contains BPF-progs
12# that need to collaborate between XDP and TC hooks. Thus, it is
13# convenient that the same tool load both programs that need to work
14# together.
15#
16BPF_FILE=xdp2skb_meta_kern.o
17DIR=$(dirname $0)
18
19export TC=/usr/sbin/tc
20export IP=/usr/sbin/ip
21
22function usage() {
23 echo ""
24 echo "Usage: $0 [-vfh] --dev ethX"
25 echo " -d | --dev : Network device (required)"
26 echo " --flush : Cleanup flush TC and XDP progs"
27 echo " --list : (\$LIST) List TC and XDP progs"
28 echo " -v | --verbose : (\$VERBOSE) Verbose"
29 echo " --dry-run : (\$DRYRUN) Dry-run only (echo commands)"
30 echo ""
31}
32
33## -- General shell logging cmds --
34function err() {
35 local exitcode=$1
36 shift
37 echo "ERROR: $@" >&2
38 exit $exitcode
39}
40
41function info() {
42 if [[ -n "$VERBOSE" ]]; then
43 echo "# $@"
44 fi
45}
46
47## -- Helper function calls --
48
49# Wrapper call for TC and IP
50# - Will display the offending command on failure
51function _call_cmd() {
52 local cmd="$1"
53 local allow_fail="$2"
54 shift 2
55 if [[ -n "$VERBOSE" ]]; then
56 echo "$(basename $cmd) $@"
57 fi
58 if [[ -n "$DRYRUN" ]]; then
59 return
60 fi
61 $cmd "$@"
62 local status=$?
63 if (( $status != 0 )); then
64 if [[ "$allow_fail" == "" ]]; then
65 err 2 "Exec error($status) occurred cmd: \"$cmd $@\""
66 fi
67 fi
68}
69function call_tc() {
70 _call_cmd "$TC" "" "$@"
71}
72function call_tc_allow_fail() {
73 _call_cmd "$TC" "allow_fail" "$@"
74}
75function call_ip() {
76 _call_cmd "$IP" "" "$@"
77}
78
79## --- Parse command line arguments / parameters ---
80# Using external program "getopt" to get --long-options
81OPTIONS=$(getopt -o vfhd: \
82 --long verbose,flush,help,list,dev:,dry-run -- "$@")
83if (( $? != 0 )); then
84 err 4 "Error calling getopt"
85fi
86eval set -- "$OPTIONS"
87
88unset DEV
89unset FLUSH
90while true; do
91 case "$1" in
92 -d | --dev ) # device
93 DEV=$2
94 info "Device set to: DEV=$DEV" >&2
95 shift 2
96 ;;
97 -v | --verbose)
98 VERBOSE=yes
99 # info "Verbose mode: VERBOSE=$VERBOSE" >&2
100 shift
101 ;;
102 --dry-run )
103 DRYRUN=yes
104 VERBOSE=yes
105 info "Dry-run mode: enable VERBOSE and don't call TC+IP" >&2
106 shift
107 ;;
108 -f | --flush )
109 FLUSH=yes
110 shift
111 ;;
112 --list )
113 LIST=yes
114 shift
115 ;;
116 -- )
117 shift
118 break
119 ;;
120 -h | --help )
121 usage;
122 exit 0
123 ;;
124 * )
125 shift
126 break
127 ;;
128 esac
129done
130
131FILE="$DIR/$BPF_FILE"
132if [[ ! -e $FILE ]]; then
133 err 3 "Missing BPF object file ($FILE)"
134fi
135
136if [[ -z $DEV ]]; then
137 usage
138 err 2 "Please specify network device -- required option --dev"
139fi
140
141## -- Function calls --
142
143function list_tc()
144{
145 local device="$1"
146 shift
147 info "Listing current TC ingress rules"
148 call_tc filter show dev $device ingress
149}
150
151function list_xdp()
152{
153 local device="$1"
154 shift
155 info "Listing current XDP device($device) setting"
156 call_ip link show dev $device | grep --color=auto xdp
157}
158
159function flush_tc()
160{
161 local device="$1"
162 shift
163 info "Flush TC on device: $device"
164 call_tc_allow_fail filter del dev $device ingress
165 call_tc_allow_fail qdisc del dev $device clsact
166}
167
168function flush_xdp()
169{
170 local device="$1"
171 shift
172 info "Flush XDP on device: $device"
173 call_ip link set dev $device xdp off
174}
175
176function attach_tc_mark()
177{
178 local device="$1"
179 local file="$2"
180 local prog="tc_mark"
181 shift 2
182
183 # Re-attach clsact to clear/flush existing role
184 call_tc_allow_fail qdisc del dev $device clsact 2> /dev/null
185 call_tc qdisc add dev $device clsact
186
187 # Attach BPF prog
188 call_tc filter add dev $device ingress \
189 prio 1 handle 1 bpf da obj $file sec $prog
190}
191
192function attach_xdp_mark()
193{
194 local device="$1"
195 local file="$2"
196 local prog="xdp_mark"
197 shift 2
198
199 # Remove XDP prog in-case it's already loaded
200 # TODO: Need ip-link option to override/replace existing XDP prog
201 flush_xdp $device
202
203 # Attach XDP/BPF prog
204 call_ip link set dev $device xdp obj $file sec $prog
205}
206
207if [[ -n $FLUSH ]]; then
208 flush_tc $DEV
209 flush_xdp $DEV
210 exit 0
211fi
212
213if [[ -n $LIST ]]; then
214 list_tc $DEV
215 list_xdp $DEV
216 exit 0
217fi
218
219attach_tc_mark $DEV $FILE
220attach_xdp_mark $DEV $FILE
diff --git a/samples/bpf/xdp2skb_meta_kern.c b/samples/bpf/xdp2skb_meta_kern.c
new file mode 100644
index 000000000000..12e1024069c2
--- /dev/null
+++ b/samples/bpf/xdp2skb_meta_kern.c
@@ -0,0 +1,103 @@
1/* SPDX-License-Identifier: GPL-2.0
2 * Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
3 *
 4 * Example how to transfer info from XDP to SKB, e.g. skb->mark
5 * -----------------------------------------------------------
6 * This uses the XDP data_meta infrastructure, and is a cooperation
7 * between two bpf-programs (1) XDP and (2) clsact at TC-ingress hook.
8 *
9 * Notice: This example does not use the BPF C-loader (bpf_load.c),
 10 * but instead relies on the iproute2 TC tool for loading BPF-objects.
11 */
12#include <uapi/linux/bpf.h>
13#include <uapi/linux/pkt_cls.h>
14
15#include "bpf_helpers.h"
16
17/*
18 * This struct is stored in the XDP 'data_meta' area, which is located
19 * just in-front-of the raw packet payload data. The meaning is
20 * specific to these two BPF programs that use it as a communication
21 * channel. XDP adjust/increase the area via a bpf-helper, and TC use
22 * boundary checks to see if data have been provided.
23 *
24 * The struct must be 4 byte aligned, which here is enforced by the
25 * struct __attribute__((aligned(4))).
26 */
27struct meta_info {
28 __u32 mark;
29} __attribute__((aligned(4)));
30
31SEC("xdp_mark")
32int _xdp_mark(struct xdp_md *ctx)
33{
34 struct meta_info *meta;
35 void *data, *data_end;
36 int ret;
37
38 /* Reserve space in-front data pointer for our meta info.
39 * (Notice drivers not supporting data_meta will fail here!)
40 */
41 ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta));
42 if (ret < 0)
43 return XDP_ABORTED;
44
 45 * The ctx pointers must be (re-)read after bpf_xdp_adjust_meta: the
 46 * helper may move the packet buffer, so the verifier invalidates
47 */
48 data = (void *)(unsigned long)ctx->data;
49
50 /* Check data_meta have room for meta_info struct */
51 meta = (void *)(unsigned long)ctx->data_meta;
52 if (meta + 1 > data)
53 return XDP_ABORTED;
54
55 meta->mark = 42;
56
57 return XDP_PASS;
58}
59
60SEC("tc_mark")
61int _tc_mark(struct __sk_buff *ctx)
62{
63 void *data = (void *)(unsigned long)ctx->data;
64 void *data_end = (void *)(unsigned long)ctx->data_end;
65 void *data_meta = (void *)(unsigned long)ctx->data_meta;
66 struct meta_info *meta = data_meta;
67
68 /* Check XDP gave us some data_meta */
69 if (meta + 1 > data) {
70 ctx->mark = 41;
71 /* Skip "accept" if no data_meta is avail */
72 return TC_ACT_OK;
73 }
74
75 /* Hint: See func tc_cls_act_is_valid_access() for BPF_WRITE access */
76 ctx->mark = meta->mark; /* Transfer XDP-mark to SKB-mark */
77
78 return TC_ACT_OK;
79}
80
81/* Manually attaching these programs:
82export DEV=ixgbe2
83export FILE=xdp2skb_meta_kern.o
84
85# via TC command
86tc qdisc del dev $DEV clsact 2> /dev/null
87tc qdisc add dev $DEV clsact
88tc filter add dev $DEV ingress prio 1 handle 1 bpf da obj $FILE sec tc_mark
89tc filter show dev $DEV ingress
90
91# XDP via IP command:
92ip link set dev $DEV xdp off
93ip link set dev $DEV xdp obj $FILE sec xdp_mark
94
95# Use iptable to "see" if SKBs are marked
96iptables -I INPUT -p icmp -m mark --mark 41 # == 0x29
97iptables -I INPUT -p icmp -m mark --mark 42 # == 0x2a
98
99# Hint: catch XDP_ABORTED errors via
100perf record -e xdp:*
101perf script
102
103*/