Diffstat (limited to 'lib')
 -rw-r--r--  lib/842/842.h            |  127
 -rw-r--r--  lib/842/842_compress.c   |  626
 -rw-r--r--  lib/842/842_debugfs.h    |   52
 -rw-r--r--  lib/842/842_decompress.c |  405
 -rw-r--r--  lib/842/Makefile         |    2
 -rw-r--r--  lib/Kconfig              |    9
 -rw-r--r--  lib/Kconfig.debug        |   72
 -rw-r--r--  lib/Kconfig.kasan        |   12
 -rw-r--r--  lib/Makefile             |    5
 -rw-r--r--  lib/bitmap.c             |   32
 -rw-r--r--  lib/bug.c                |    7
 -rw-r--r--  lib/cpu_rmap.c           |    2
 -rw-r--r--  lib/cpumask.c            |   83
 -rw-r--r--  lib/crc-itu-t.c          |    2
 -rw-r--r--  lib/crc-t10dif.c         |   12
 -rw-r--r--  lib/debug_info.c         |   27
 -rw-r--r--  lib/decompress.c         |    5
 -rw-r--r--  lib/dma-debug.c          |    3
 -rw-r--r--  lib/dynamic_debug.c      |    4
 -rw-r--r--  lib/find_last_bit.c      |   41
 -rw-r--r--  lib/genalloc.c           |   14
 -rw-r--r--  lib/hexdump.c            |    7
 -rw-r--r--  lib/kobject.c            |   19
 -rw-r--r--  lib/list_sort.c          |    2
 -rw-r--r--  lib/lz4/lz4_decompress.c |   12
 -rw-r--r--  lib/mpi/longlong.h       |    4
 -rw-r--r--  lib/mpi/mpicoder.c       |   87
 -rw-r--r--  lib/mpi/mpiutil.c        |    6
 -rw-r--r--  lib/percpu_counter.c     |    6
 -rw-r--r--  lib/radix-tree.c         |   30
 -rw-r--r--  lib/raid6/Makefile       |    2
 -rw-r--r--  lib/raid6/x86.h          |    2
 -rw-r--r--  lib/rbtree.c             |   76
 -rw-r--r--  lib/rhashtable.c         |   35
 -rw-r--r--  lib/scatterlist.c        |   50
 -rw-r--r--  lib/sort.c               |   23
 -rw-r--r--  lib/string.c             |   19
 -rw-r--r--  lib/strnlen_user.c       |   18
 -rw-r--r--  lib/swiotlb.c            |   18
 -rw-r--r--  lib/test-hexdump.c       |    6
 -rw-r--r--  lib/test_bpf.c           | 2664
 -rw-r--r--  lib/test_rhashtable.c    |  215
 -rw-r--r--  lib/timerqueue.c         |   10
 43 files changed, 4501 insertions(+), 352 deletions(-)
diff --git a/lib/842/842.h b/lib/842/842.h
new file mode 100644
index 000000000000..7c200030acf7
--- /dev/null
+++ b/lib/842/842.h
@@ -0,0 +1,127 @@
+
+#ifndef __842_H__
+#define __842_H__
+
+/* The 842 compressed format is made up of multiple blocks, each of
+ * which has the format:
+ *
+ * <template>[arg1][arg2][arg3][arg4]
+ *
+ * where there are between 0 and 4 template args, depending on the specific
+ * template operation. For normal operations, each arg is either a specific
+ * number of data bytes to add to the output buffer, or an index pointing
+ * to a previously-written number of data bytes to copy to the output buffer.
+ *
+ * The template code is a 5-bit value. This code indicates what to do with
+ * the following data. Template codes from 0 to 0x19 should use the template
+ * table, the static "decomp_ops" table used in decompress. For each template
+ * (table row), there are between 1 and 4 actions; each action corresponds to
+ * an arg following the template code bits. Each action is either a "data"
+ * type action or an "index" type action, and each action results in 2, 4, or 8
+ * bytes being written to the output buffer. Each template (i.e. all actions
+ * in the table row) will add up to 8 bytes being written to the output buffer.
+ * Any row with fewer than 4 actions is padded with noop actions, indicated by
+ * N0 (for which there is no corresponding arg in the compressed data buffer).
+ *
+ * "Data" actions, indicated in the table by D2, D4, and D8, mean that the
+ * corresponding arg of 2, 4, or 8 bytes, respectively, in the compressed data
+ * buffer should be copied directly to the output buffer.
+ *
+ * "Index" actions, indicated in the table by I2, I4, and I8, mean the
+ * corresponding arg is an index parameter that points to, respectively, a 2,
+ * 4, or 8 byte value already in the output buffer, which should be copied to
+ * the end of the output buffer. Essentially, the index points to a position
+ * in a ring buffer that contains the last N bytes of output buffer data.
+ * The number of bits for each index's arg is: 8 bits for I2, 9 bits for I4,
+ * and 8 bits for I8. Since each index points to a 2, 4, or 8 byte section,
+ * this means that I2 can reference 512 bytes ((2^8 bits = 256) * 2 bytes), I4
+ * can reference 2048 bytes ((2^9 = 512) * 4 bytes), and I8 can reference 2048
+ * bytes ((2^8 = 256) * 8 bytes). Think of it as a kind of ring buffer for
+ * each of I2, I4, and I8, updated for each byte written to the output
+ * buffer. In this implementation, the output buffer is directly used for each
+ * index; there is no additional memory required. Note that the index is into
+ * a ring buffer, not a sliding window; for example, if there have been 260
+ * bytes written to the output buffer, an I2 index of 0 would index to byte 256
+ * in the output buffer, while an I2 index of 16 would index to byte 16 in the
+ * output buffer.
+ *
+ * There are also 3 special template codes: 0x1b for "repeat", 0x1c for
+ * "zeros", and 0x1e for "end". The "repeat" operation is followed by a 6 bit
+ * arg N indicating how many times to repeat. The last 8 bytes written to the
+ * output buffer are written again to the output buffer, N + 1 times. The
+ * "zeros" operation, which has no arg bits, writes 8 zeros to the output
+ * buffer. The "end" operation, which also has no arg bits, signals the end
+ * of the compressed data. There may be some number of padding (don't care,
+ * but usually 0) bits after the "end" operation bits, to fill the buffer
+ * length to a specific byte multiple (usually a multiple of 8, 16, or 32
+ * bytes).
+ *
+ * This software implementation also uses one of the undefined template values,
+ * 0x1d, as a special "short data" template code, to represent less than 8 bytes
+ * of uncompressed data. It is followed by a 3 bit arg N indicating how many
+ * data bytes will follow, and then N bytes of data, which should be copied to
+ * the output buffer. This allows the software 842 compressor to accept input
+ * buffers that are not an exact multiple of 8 bytes long. However, those
+ * compressed buffers containing this sw-only template will be rejected by
+ * the 842 hardware decompressor, and must be decompressed with this software
+ * library. The 842 software compression module includes a parameter to
+ * disable using this sw-only "short data" template, and instead simply
+ * reject any input buffer that is not a multiple of 8 bytes long.
+ *
+ * After all actions for each operation code are processed, another template
+ * code is in the next 5 bits. The decompression ends once the "end" template
+ * code is detected.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <asm/unaligned.h>
+
+#include <linux/sw842.h>
+
+/* special templates */
+#define OP_REPEAT	(0x1B)
+#define OP_ZEROS	(0x1C)
+#define OP_END		(0x1E)
+
+/* sw only template - this is not in the hw design; it's used only by this
+ * software compressor and decompressor, to allow input buffers that aren't
+ * a multiple of 8.
+ */
+#define OP_SHORT_DATA	(0x1D)
+
+/* additional bits of each op param */
+#define OP_BITS		(5)
+#define REPEAT_BITS	(6)
+#define SHORT_DATA_BITS	(3)
+#define I2_BITS		(8)
+#define I4_BITS		(9)
+#define I8_BITS		(8)
+
+#define REPEAT_BITS_MAX		(0x3f)
+#define SHORT_DATA_BITS_MAX	(0x7)
+
+/* Arbitrary values used to indicate action */
+#define OP_ACTION	(0x70)
+#define OP_ACTION_INDEX	(0x10)
+#define OP_ACTION_DATA	(0x20)
+#define OP_ACTION_NOOP	(0x40)
+#define OP_AMOUNT	(0x0f)
+#define OP_AMOUNT_0	(0x00)
+#define OP_AMOUNT_2	(0x02)
+#define OP_AMOUNT_4	(0x04)
+#define OP_AMOUNT_8	(0x08)
+
+#define D2	(OP_ACTION_DATA | OP_AMOUNT_2)
+#define D4	(OP_ACTION_DATA | OP_AMOUNT_4)
+#define D8	(OP_ACTION_DATA | OP_AMOUNT_8)
+#define I2	(OP_ACTION_INDEX | OP_AMOUNT_2)
+#define I4	(OP_ACTION_INDEX | OP_AMOUNT_4)
+#define I8	(OP_ACTION_INDEX | OP_AMOUNT_8)
+#define N0	(OP_ACTION_NOOP | OP_AMOUNT_0)
+
+/* the max of the regular templates - not including the special templates */
+#define OPS_MAX		(0x1a)
+
+#endif
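
[Editor's note: the per-block bit cost implied by this encoding can be computed directly from the action macros: every block spends OP_BITS (5) bits on the template code plus the arg bits of its actions (index args cost I2_BITS/I4_BITS/I8_BITS, data args cost 16/32/64 bits, noops cost nothing). The sketch below is illustrative only and is not part of the patch; it assumes the macros from 842.h above, and its result matches the "params size in bits" column of the compressor's comp_ops table in the next file.]

#include "842.h"	/* OP_BITS, I2_BITS/I4_BITS/I8_BITS, D/I/N action macros */

/* Illustrative only: arg bits consumed by one template row (4 actions),
 * e.g. { I4, I2, D2, N0 } -> 9 + 8 + 16 = 33 bits.
 */
static int template_arg_bits(const u8 t[4])
{
	int i, bits = 0;

	for (i = 0; i < 4; i++) {
		switch (t[i] & OP_AMOUNT) {
		case OP_AMOUNT_2:
			bits += (t[i] & OP_ACTION_INDEX) ? I2_BITS : 16;
			break;
		case OP_AMOUNT_4:
			bits += (t[i] & OP_ACTION_INDEX) ? I4_BITS : 32;
			break;
		case OP_AMOUNT_8:
			bits += (t[i] & OP_ACTION_INDEX) ? I8_BITS : 64;
			break;
		default:
			/* N0 noop: no corresponding arg */
			break;
		}
	}
	return bits;	/* total block cost is OP_BITS + this */
}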
diff --git a/lib/842/842_compress.c b/lib/842/842_compress.c
new file mode 100644
index 000000000000..7ce68948e68c
--- /dev/null
+++ b/lib/842/842_compress.c
@@ -0,0 +1,626 @@
+/*
+ * 842 Software Compression
+ *
+ * Copyright (C) 2015 Dan Streetman, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * See 842.h for details of the 842 compressed format.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define MODULE_NAME "842_compress"
+
+#include <linux/hashtable.h>
+
+#include "842.h"
+#include "842_debugfs.h"
+
+#define SW842_HASHTABLE8_BITS	(10)
+#define SW842_HASHTABLE4_BITS	(11)
+#define SW842_HASHTABLE2_BITS	(10)
+
+/* By default, we allow compressing input buffers of any length, but we must
+ * use the non-standard "short data" template so the decompressor can correctly
+ * reproduce the uncompressed data buffer at the right length. However the
+ * hardware 842 compressor will not recognize the "short data" template, and
+ * will fail to decompress any compressed buffer containing it (I have no idea
+ * why anyone would want to use software to compress and hardware to decompress
+ * but that's beside the point). This parameter forces the compression
+ * function to simply reject any input buffer that isn't a multiple of 8 bytes
+ * long, instead of using the "short data" template, so that all compressed
+ * buffers produced by this function will be decompressible by the 842 hardware
+ * decompressor. Unless you have a specific need for that, leave this disabled
+ * so that any length buffer can be compressed.
+ */
+static bool sw842_strict;
+module_param_named(strict, sw842_strict, bool, 0644);
+
+static u8 comp_ops[OPS_MAX][5] = { /* params size in bits */
+	{ I8, N0, N0, N0, 0x19 }, /* 8 */
+	{ I4, I4, N0, N0, 0x18 }, /* 18 */
+	{ I4, I2, I2, N0, 0x17 }, /* 25 */
+	{ I2, I2, I4, N0, 0x13 }, /* 25 */
+	{ I2, I2, I2, I2, 0x12 }, /* 32 */
+	{ I4, I2, D2, N0, 0x16 }, /* 33 */
+	{ I4, D2, I2, N0, 0x15 }, /* 33 */
+	{ I2, D2, I4, N0, 0x0e }, /* 33 */
+	{ D2, I2, I4, N0, 0x09 }, /* 33 */
+	{ I2, I2, I2, D2, 0x11 }, /* 40 */
+	{ I2, I2, D2, I2, 0x10 }, /* 40 */
+	{ I2, D2, I2, I2, 0x0d }, /* 40 */
+	{ D2, I2, I2, I2, 0x08 }, /* 40 */
+	{ I4, D4, N0, N0, 0x14 }, /* 41 */
+	{ D4, I4, N0, N0, 0x04 }, /* 41 */
+	{ I2, I2, D4, N0, 0x0f }, /* 48 */
+	{ I2, D2, I2, D2, 0x0c }, /* 48 */
+	{ I2, D4, I2, N0, 0x0b }, /* 48 */
+	{ D2, I2, I2, D2, 0x07 }, /* 48 */
+	{ D2, I2, D2, I2, 0x06 }, /* 48 */
+	{ D4, I2, I2, N0, 0x03 }, /* 48 */
+	{ I2, D2, D4, N0, 0x0a }, /* 56 */
+	{ D2, I2, D4, N0, 0x05 }, /* 56 */
+	{ D4, I2, D2, N0, 0x02 }, /* 56 */
+	{ D4, D2, I2, N0, 0x01 }, /* 56 */
+	{ D8, N0, N0, N0, 0x00 }, /* 64 */
+};
+
+struct sw842_hlist_node8 {
+	struct hlist_node node;
+	u64 data;
+	u8 index;
+};
+
+struct sw842_hlist_node4 {
+	struct hlist_node node;
+	u32 data;
+	u16 index;
+};
+
+struct sw842_hlist_node2 {
+	struct hlist_node node;
+	u16 data;
+	u8 index;
+};
+
+#define INDEX_NOT_FOUND		(-1)
+#define INDEX_NOT_CHECKED	(-2)
+
+struct sw842_param {
+	u8 *in;
+	u8 *instart;
+	u64 ilen;
+	u8 *out;
+	u64 olen;
+	u8 bit;
+	u64 data8[1];
+	u32 data4[2];
+	u16 data2[4];
+	int index8[1];
+	int index4[2];
+	int index2[4];
+	DECLARE_HASHTABLE(htable8, SW842_HASHTABLE8_BITS);
+	DECLARE_HASHTABLE(htable4, SW842_HASHTABLE4_BITS);
+	DECLARE_HASHTABLE(htable2, SW842_HASHTABLE2_BITS);
+	struct sw842_hlist_node8 node8[1 << I8_BITS];
+	struct sw842_hlist_node4 node4[1 << I4_BITS];
+	struct sw842_hlist_node2 node2[1 << I2_BITS];
+};
+
+#define get_input_data(p, o, b) \
+	be##b##_to_cpu(get_unaligned((__be##b *)((p)->in + (o))))
+
+#define init_hashtable_nodes(p, b) do { \
+	int _i; \
+	hash_init((p)->htable##b); \
+	for (_i = 0; _i < ARRAY_SIZE((p)->node##b); _i++) { \
+		(p)->node##b[_i].index = _i; \
+		(p)->node##b[_i].data = 0; \
+		INIT_HLIST_NODE(&(p)->node##b[_i].node); \
+	} \
+} while (0)
+
+#define find_index(p, b, n) ({ \
+	struct sw842_hlist_node##b *_n; \
+	p->index##b[n] = INDEX_NOT_FOUND; \
+	hash_for_each_possible(p->htable##b, _n, node, p->data##b[n]) { \
+		if (p->data##b[n] == _n->data) { \
+			p->index##b[n] = _n->index; \
+			break; \
+		} \
+	} \
+	p->index##b[n] >= 0; \
+})
+
+#define check_index(p, b, n) \
+	((p)->index##b[n] == INDEX_NOT_CHECKED \
+	 ? find_index(p, b, n) \
+	 : (p)->index##b[n] >= 0)
+
+#define replace_hash(p, b, i, d) do { \
+	struct sw842_hlist_node##b *_n = &(p)->node##b[(i)+(d)]; \
+	hash_del(&_n->node); \
+	_n->data = (p)->data##b[d]; \
+	pr_debug("add hash index%x %x pos %x data %lx\n", b, \
+		 (unsigned int)_n->index, \
+		 (unsigned int)((p)->in - (p)->instart), \
+		 (unsigned long)_n->data); \
+	hash_add((p)->htable##b, &_n->node, _n->data); \
+} while (0)
+
+static u8 bmask[8] = { 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe };
+
+static int add_bits(struct sw842_param *p, u64 d, u8 n);
+
+static int __split_add_bits(struct sw842_param *p, u64 d, u8 n, u8 s)
+{
+	int ret;
+
+	if (n <= s)
+		return -EINVAL;
+
+	ret = add_bits(p, d >> s, n - s);
+	if (ret)
+		return ret;
+	return add_bits(p, d & GENMASK_ULL(s - 1, 0), s);
+}
+
+static int add_bits(struct sw842_param *p, u64 d, u8 n)
+{
+	int b = p->bit, bits = b + n, s = round_up(bits, 8) - bits;
+	u64 o;
+	u8 *out = p->out;
+
+	pr_debug("add %u bits %lx\n", (unsigned char)n, (unsigned long)d);
+
+	if (n > 64)
+		return -EINVAL;
+
+	/* split this up if writing to > 8 bytes (i.e. n == 64 && p->bit > 0),
+	 * or if we're at the end of the output buffer and would write past end
+	 */
+	if (bits > 64)
+		return __split_add_bits(p, d, n, 32);
+	else if (p->olen < 8 && bits > 32 && bits <= 56)
+		return __split_add_bits(p, d, n, 16);
+	else if (p->olen < 4 && bits > 16 && bits <= 24)
+		return __split_add_bits(p, d, n, 8);
+
+	if (DIV_ROUND_UP(bits, 8) > p->olen)
+		return -ENOSPC;
+
+	o = *out & bmask[b];
+	d <<= s;
+
+	if (bits <= 8)
+		*out = o | d;
+	else if (bits <= 16)
+		put_unaligned(cpu_to_be16(o << 8 | d), (__be16 *)out);
+	else if (bits <= 24)
+		put_unaligned(cpu_to_be32(o << 24 | d << 8), (__be32 *)out);
+	else if (bits <= 32)
+		put_unaligned(cpu_to_be32(o << 24 | d), (__be32 *)out);
+	else if (bits <= 40)
+		put_unaligned(cpu_to_be64(o << 56 | d << 24), (__be64 *)out);
+	else if (bits <= 48)
+		put_unaligned(cpu_to_be64(o << 56 | d << 16), (__be64 *)out);
+	else if (bits <= 56)
+		put_unaligned(cpu_to_be64(o << 56 | d << 8), (__be64 *)out);
+	else
+		put_unaligned(cpu_to_be64(o << 56 | d), (__be64 *)out);
+
+	p->bit += n;
+
+	if (p->bit > 7) {
+		p->out += p->bit / 8;
+		p->olen -= p->bit / 8;
+		p->bit %= 8;
+	}
+
+	return 0;
+}
+
+static int add_template(struct sw842_param *p, u8 c)
+{
+	int ret, i, b = 0;
+	u8 *t = comp_ops[c];
+	bool inv = false;
+
+	if (c >= OPS_MAX)
+		return -EINVAL;
+
+	pr_debug("template %x\n", t[4]);
+
+	ret = add_bits(p, t[4], OP_BITS);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < 4; i++) {
+		pr_debug("op %x\n", t[i]);
+
+		switch (t[i] & OP_AMOUNT) {
+		case OP_AMOUNT_8:
+			if (b)
+				inv = true;
+			else if (t[i] & OP_ACTION_INDEX)
+				ret = add_bits(p, p->index8[0], I8_BITS);
+			else if (t[i] & OP_ACTION_DATA)
+				ret = add_bits(p, p->data8[0], 64);
+			else
+				inv = true;
+			break;
+		case OP_AMOUNT_4:
+			if (b == 2 && t[i] & OP_ACTION_DATA)
+				ret = add_bits(p, get_input_data(p, 2, 32), 32);
+			else if (b != 0 && b != 4)
+				inv = true;
+			else if (t[i] & OP_ACTION_INDEX)
+				ret = add_bits(p, p->index4[b >> 2], I4_BITS);
+			else if (t[i] & OP_ACTION_DATA)
+				ret = add_bits(p, p->data4[b >> 2], 32);
+			else
+				inv = true;
+			break;
+		case OP_AMOUNT_2:
+			if (b != 0 && b != 2 && b != 4 && b != 6)
+				inv = true;
+			if (t[i] & OP_ACTION_INDEX)
+				ret = add_bits(p, p->index2[b >> 1], I2_BITS);
+			else if (t[i] & OP_ACTION_DATA)
+				ret = add_bits(p, p->data2[b >> 1], 16);
+			else
+				inv = true;
+			break;
+		case OP_AMOUNT_0:
+			inv = (b != 8) || !(t[i] & OP_ACTION_NOOP);
+			break;
+		default:
+			inv = true;
+			break;
+		}
+
+		if (ret)
+			return ret;
+
+		if (inv) {
+			pr_err("Invalid templ %x op %d : %x %x %x %x\n",
+			       c, i, t[0], t[1], t[2], t[3]);
+			return -EINVAL;
+		}
+
+		b += t[i] & OP_AMOUNT;
+	}
+
+	if (b != 8) {
+		pr_err("Invalid template %x len %x : %x %x %x %x\n",
+		       c, b, t[0], t[1], t[2], t[3]);
+		return -EINVAL;
+	}
+
+	if (sw842_template_counts)
+		atomic_inc(&template_count[t[4]]);
+
+	return 0;
+}
+
+static int add_repeat_template(struct sw842_param *p, u8 r)
+{
+	int ret;
+
+	/* repeat param is 0-based */
+	if (!r || --r > REPEAT_BITS_MAX)
+		return -EINVAL;
+
+	ret = add_bits(p, OP_REPEAT, OP_BITS);
+	if (ret)
+		return ret;
+
+	ret = add_bits(p, r, REPEAT_BITS);
+	if (ret)
+		return ret;
+
+	if (sw842_template_counts)
+		atomic_inc(&template_repeat_count);
+
+	return 0;
+}
+
+static int add_short_data_template(struct sw842_param *p, u8 b)
+{
+	int ret, i;
+
+	if (!b || b > SHORT_DATA_BITS_MAX)
+		return -EINVAL;
+
+	ret = add_bits(p, OP_SHORT_DATA, OP_BITS);
+	if (ret)
+		return ret;
+
+	ret = add_bits(p, b, SHORT_DATA_BITS);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < b; i++) {
+		ret = add_bits(p, p->in[i], 8);
+		if (ret)
+			return ret;
+	}
+
+	if (sw842_template_counts)
+		atomic_inc(&template_short_data_count);
+
+	return 0;
+}
+
+static int add_zeros_template(struct sw842_param *p)
+{
+	int ret = add_bits(p, OP_ZEROS, OP_BITS);
+
+	if (ret)
+		return ret;
+
+	if (sw842_template_counts)
+		atomic_inc(&template_zeros_count);
+
+	return 0;
+}
+
+static int add_end_template(struct sw842_param *p)
+{
+	int ret = add_bits(p, OP_END, OP_BITS);
+
+	if (ret)
+		return ret;
+
+	if (sw842_template_counts)
+		atomic_inc(&template_end_count);
+
+	return 0;
+}
+
+static bool check_template(struct sw842_param *p, u8 c)
+{
+	u8 *t = comp_ops[c];
+	int i, match, b = 0;
+
+	if (c >= OPS_MAX)
+		return false;
+
+	for (i = 0; i < 4; i++) {
+		if (t[i] & OP_ACTION_INDEX) {
+			if (t[i] & OP_AMOUNT_2)
+				match = check_index(p, 2, b >> 1);
+			else if (t[i] & OP_AMOUNT_4)
+				match = check_index(p, 4, b >> 2);
+			else if (t[i] & OP_AMOUNT_8)
+				match = check_index(p, 8, 0);
+			else
+				return false;
+			if (!match)
+				return false;
+		}
+
+		b += t[i] & OP_AMOUNT;
+	}
+
+	return true;
+}
+
+static void get_next_data(struct sw842_param *p)
+{
+	p->data8[0] = get_input_data(p, 0, 64);
+	p->data4[0] = get_input_data(p, 0, 32);
+	p->data4[1] = get_input_data(p, 4, 32);
+	p->data2[0] = get_input_data(p, 0, 16);
+	p->data2[1] = get_input_data(p, 2, 16);
+	p->data2[2] = get_input_data(p, 4, 16);
+	p->data2[3] = get_input_data(p, 6, 16);
+}
+
+/* update the hashtable entries.
+ * only call this after finding/adding the current template;
+ * the dataN fields for the current 8 byte block must be already updated.
+ */
+static void update_hashtables(struct sw842_param *p)
+{
+	u64 pos = p->in - p->instart;
+	u64 n8 = (pos >> 3) % (1 << I8_BITS);
+	u64 n4 = (pos >> 2) % (1 << I4_BITS);
+	u64 n2 = (pos >> 1) % (1 << I2_BITS);
+
+	replace_hash(p, 8, n8, 0);
+	replace_hash(p, 4, n4, 0);
+	replace_hash(p, 4, n4, 1);
+	replace_hash(p, 2, n2, 0);
+	replace_hash(p, 2, n2, 1);
+	replace_hash(p, 2, n2, 2);
+	replace_hash(p, 2, n2, 3);
+}
+
+/* find the next template to use, and add it;
+ * the p->dataN fields must already be set for the current 8 byte block
+ */
+static int process_next(struct sw842_param *p)
+{
+	int ret, i;
+
+	p->index8[0] = INDEX_NOT_CHECKED;
+	p->index4[0] = INDEX_NOT_CHECKED;
+	p->index4[1] = INDEX_NOT_CHECKED;
+	p->index2[0] = INDEX_NOT_CHECKED;
+	p->index2[1] = INDEX_NOT_CHECKED;
+	p->index2[2] = INDEX_NOT_CHECKED;
+	p->index2[3] = INDEX_NOT_CHECKED;
+
+	/* check up to OPS_MAX - 1; last op is our fallback */
+	for (i = 0; i < OPS_MAX - 1; i++) {
+		if (check_template(p, i))
+			break;
+	}
+
+	ret = add_template(p, i);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * sw842_compress
+ *
+ * Compress the uncompressed buffer of length @ilen at @in to the output buffer
+ * @out, using no more than @olen bytes, using the 842 compression format.
+ *
+ * Returns: 0 on success, error on failure. The @olen parameter
+ * will contain the number of output bytes written on success, or
+ * 0 on error.
+ */
+int sw842_compress(const u8 *in, unsigned int ilen,
+		   u8 *out, unsigned int *olen, void *wmem)
+{
+	struct sw842_param *p = (struct sw842_param *)wmem;
+	int ret;
+	u64 last, next, pad, total;
+	u8 repeat_count = 0;
+
+	BUILD_BUG_ON(sizeof(*p) > SW842_MEM_COMPRESS);
+
+	init_hashtable_nodes(p, 8);
+	init_hashtable_nodes(p, 4);
+	init_hashtable_nodes(p, 2);
+
+	p->in = (u8 *)in;
+	p->instart = p->in;
+	p->ilen = ilen;
+	p->out = out;
+	p->olen = *olen;
+	p->bit = 0;
+
+	total = p->olen;
+
+	*olen = 0;
+
+	/* if using strict mode, we can only compress a multiple of 8 */
+	if (sw842_strict && (ilen % 8)) {
+		pr_err("Using strict mode, can't compress len %d\n", ilen);
+		return -EINVAL;
+	}
+
+	/* let's compress at least 8 bytes, mkay? */
+	if (unlikely(ilen < 8))
+		goto skip_comp;
+
+	/* make initial 'last' different so we don't match the first time */
+	last = ~get_unaligned((u64 *)p->in);
+
+	while (p->ilen > 7) {
+		next = get_unaligned((u64 *)p->in);
+
+		/* must get the next data, as we need to update the hashtable
+		 * entries with the new data every time
+		 */
+		get_next_data(p);
+
+		/* we don't care about endianness in last or next;
+		 * we're just comparing 8 bytes to another 8 bytes,
+		 * they're both the same endianness
+		 */
+		if (next == last) {
+			/* repeat count bits are 0-based, so we stop at +1 */
+			if (++repeat_count <= REPEAT_BITS_MAX)
+				goto repeat;
+		}
+		if (repeat_count) {
+			ret = add_repeat_template(p, repeat_count);
+			repeat_count = 0;
+			if (next == last) /* reached max repeat bits */
+				goto repeat;
+		}
+
+		if (next == 0)
+			ret = add_zeros_template(p);
+		else
+			ret = process_next(p);
+
+		if (ret)
+			return ret;
+
+repeat:
+		last = next;
+		update_hashtables(p);
+		p->in += 8;
+		p->ilen -= 8;
+	}
+
+	if (repeat_count) {
+		ret = add_repeat_template(p, repeat_count);
+		if (ret)
+			return ret;
+	}
+
+skip_comp:
+	if (p->ilen > 0) {
+		ret = add_short_data_template(p, p->ilen);
+		if (ret)
+			return ret;
+
+		p->in += p->ilen;
+		p->ilen = 0;
+	}
+
+	ret = add_end_template(p);
+	if (ret)
+		return ret;
+
+	if (p->bit) {
+		p->out++;
+		p->olen--;
+		p->bit = 0;
+	}
+
+	/* pad compressed length to multiple of 8 */
+	pad = (8 - ((total - p->olen) % 8)) % 8;
+	if (pad) {
+		if (pad > p->olen) /* we were so close! */
+			return -ENOSPC;
+		memset(p->out, 0, pad);
+		p->out += pad;
+		p->olen -= pad;
+	}
+
+	if (unlikely((total - p->olen) > UINT_MAX))
+		return -ENOSPC;
+
+	*olen = total - p->olen;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sw842_compress);
+
+static int __init sw842_init(void)
+{
+	if (sw842_template_counts)
+		sw842_debugfs_create();
+
+	return 0;
+}
+module_init(sw842_init);
+
+static void __exit sw842_exit(void)
+{
+	if (sw842_template_counts)
+		sw842_debugfs_remove();
+}
+module_exit(sw842_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Software 842 Compressor");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
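
[Editor's note: a minimal usage sketch, not from the patch itself. The wmem scratch argument must be at least SW842_MEM_COMPRESS bytes (from linux/sw842.h), per the BUILD_BUG_ON() above; the wrapper function name and error handling here are hypothetical.]

#include <linux/slab.h>
#include <linux/sw842.h>

/* Hypothetical caller: compress src into dst, returning 0 or -errno.
 * *dlen holds the dst capacity on entry, the compressed length on return.
 */
static int example_sw842_compress(const u8 *src, unsigned int slen,
				  u8 *dst, unsigned int *dlen)
{
	void *wmem = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
	int ret;

	if (!wmem)
		return -ENOMEM;

	ret = sw842_compress(src, slen, dst, dlen, wmem);

	kfree(wmem);
	return ret;
}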
diff --git a/lib/842/842_debugfs.h b/lib/842/842_debugfs.h
new file mode 100644
index 000000000000..e7f3bffaf255
--- /dev/null
+++ b/lib/842/842_debugfs.h
@@ -0,0 +1,52 @@
+
+#ifndef __842_DEBUGFS_H__
+#define __842_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+
+static bool sw842_template_counts;
+module_param_named(template_counts, sw842_template_counts, bool, 0444);
+
+static atomic_t template_count[OPS_MAX], template_repeat_count,
+	template_zeros_count, template_short_data_count, template_end_count;
+
+static struct dentry *sw842_debugfs_root;
+
+static int __init sw842_debugfs_create(void)
+{
+	umode_t m = S_IRUGO | S_IWUSR;
+	int i;
+
+	if (!debugfs_initialized())
+		return -ENODEV;
+
+	sw842_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL);
+	if (IS_ERR(sw842_debugfs_root))
+		return PTR_ERR(sw842_debugfs_root);
+
+	for (i = 0; i < ARRAY_SIZE(template_count); i++) {
+		char name[32];
+
+		snprintf(name, 32, "template_%02x", i);
+		debugfs_create_atomic_t(name, m, sw842_debugfs_root,
+					&template_count[i]);
+	}
+	debugfs_create_atomic_t("template_repeat", m, sw842_debugfs_root,
+				&template_repeat_count);
+	debugfs_create_atomic_t("template_zeros", m, sw842_debugfs_root,
+				&template_zeros_count);
+	debugfs_create_atomic_t("template_short_data", m, sw842_debugfs_root,
+				&template_short_data_count);
+	debugfs_create_atomic_t("template_end", m, sw842_debugfs_root,
+				&template_end_count);
+
+	return 0;
+}
+
+static void __exit sw842_debugfs_remove(void)
+{
+	if (sw842_debugfs_root && !IS_ERR(sw842_debugfs_root))
+		debugfs_remove_recursive(sw842_debugfs_root);
+}
+
+#endif
diff --git a/lib/842/842_decompress.c b/lib/842/842_decompress.c
new file mode 100644
index 000000000000..5446ff0c9ba0
--- /dev/null
+++ b/lib/842/842_decompress.c
@@ -0,0 +1,405 @@
+/*
+ * 842 Software Decompression
+ *
+ * Copyright (C) 2015 Dan Streetman, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * See 842.h for details of the 842 compressed format.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define MODULE_NAME "842_decompress"
+
+#include "842.h"
+#include "842_debugfs.h"
+
+/* rolling fifo sizes */
+#define I2_FIFO_SIZE	(2 * (1 << I2_BITS))
+#define I4_FIFO_SIZE	(4 * (1 << I4_BITS))
+#define I8_FIFO_SIZE	(8 * (1 << I8_BITS))
+
+static u8 decomp_ops[OPS_MAX][4] = {
+	{ D8, N0, N0, N0 },
+	{ D4, D2, I2, N0 },
+	{ D4, I2, D2, N0 },
+	{ D4, I2, I2, N0 },
+	{ D4, I4, N0, N0 },
+	{ D2, I2, D4, N0 },
+	{ D2, I2, D2, I2 },
+	{ D2, I2, I2, D2 },
+	{ D2, I2, I2, I2 },
+	{ D2, I2, I4, N0 },
+	{ I2, D2, D4, N0 },
+	{ I2, D4, I2, N0 },
+	{ I2, D2, I2, D2 },
+	{ I2, D2, I2, I2 },
+	{ I2, D2, I4, N0 },
+	{ I2, I2, D4, N0 },
+	{ I2, I2, D2, I2 },
+	{ I2, I2, I2, D2 },
+	{ I2, I2, I2, I2 },
+	{ I2, I2, I4, N0 },
+	{ I4, D4, N0, N0 },
+	{ I4, D2, I2, N0 },
+	{ I4, I2, D2, N0 },
+	{ I4, I2, I2, N0 },
+	{ I4, I4, N0, N0 },
+	{ I8, N0, N0, N0 }
+};
+
+struct sw842_param {
+	u8 *in;
+	u8 bit;
+	u64 ilen;
+	u8 *out;
+	u8 *ostart;
+	u64 olen;
+};
+
+#define beN_to_cpu(d, s) \
+	((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \
+	 (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \
+	 (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \
+	 WARN(1, "pr_debug param err invalid size %x\n", s))
+
+static int next_bits(struct sw842_param *p, u64 *d, u8 n);
+
+static int __split_next_bits(struct sw842_param *p, u64 *d, u8 n, u8 s)
+{
+	u64 tmp = 0;
+	int ret;
+
+	if (n <= s) {
+		pr_debug("split_next_bits invalid n %u s %u\n", n, s);
+		return -EINVAL;
+	}
+
+	ret = next_bits(p, &tmp, n - s);
+	if (ret)
+		return ret;
+	ret = next_bits(p, d, s);
+	if (ret)
+		return ret;
+	*d |= tmp << s;
+	return 0;
+}
+
+static int next_bits(struct sw842_param *p, u64 *d, u8 n)
+{
+	u8 *in = p->in, b = p->bit, bits = b + n;
+
+	if (n > 64) {
+		pr_debug("next_bits invalid n %u\n", n);
+		return -EINVAL;
+	}
+
+	/* split this up if reading > 8 bytes, or if we're at the end of
+	 * the input buffer and would read past the end
+	 */
+	if (bits > 64)
+		return __split_next_bits(p, d, n, 32);
+	else if (p->ilen < 8 && bits > 32 && bits <= 56)
+		return __split_next_bits(p, d, n, 16);
+	else if (p->ilen < 4 && bits > 16 && bits <= 24)
+		return __split_next_bits(p, d, n, 8);
+
+	if (DIV_ROUND_UP(bits, 8) > p->ilen)
+		return -EOVERFLOW;
+
+	if (bits <= 8)
+		*d = *in >> (8 - bits);
+	else if (bits <= 16)
+		*d = be16_to_cpu(get_unaligned((__be16 *)in)) >> (16 - bits);
+	else if (bits <= 32)
+		*d = be32_to_cpu(get_unaligned((__be32 *)in)) >> (32 - bits);
+	else
+		*d = be64_to_cpu(get_unaligned((__be64 *)in)) >> (64 - bits);
+
+	*d &= GENMASK_ULL(n - 1, 0);
+
+	p->bit += n;
+
+	if (p->bit > 7) {
+		p->in += p->bit / 8;
+		p->ilen -= p->bit / 8;
+		p->bit %= 8;
+	}
+
+	return 0;
+}
+
+static int do_data(struct sw842_param *p, u8 n)
+{
+	u64 v;
+	int ret;
+
+	if (n > p->olen)
+		return -ENOSPC;
+
+	ret = next_bits(p, &v, n * 8);
+	if (ret)
+		return ret;
+
+	switch (n) {
+	case 2:
+		put_unaligned(cpu_to_be16((u16)v), (__be16 *)p->out);
+		break;
+	case 4:
+		put_unaligned(cpu_to_be32((u32)v), (__be32 *)p->out);
+		break;
+	case 8:
+		put_unaligned(cpu_to_be64((u64)v), (__be64 *)p->out);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	p->out += n;
+	p->olen -= n;
+
+	return 0;
+}
+
+static int __do_index(struct sw842_param *p, u8 size, u8 bits, u64 fsize)
+{
+	u64 index, offset, total = round_down(p->out - p->ostart, 8);
+	int ret;
+
+	ret = next_bits(p, &index, bits);
+	if (ret)
+		return ret;
+
+	offset = index * size;
+
+	/* a ring buffer of fsize is used; correct the offset */
+	if (total > fsize) {
+		/* this is where the current fifo is */
+		u64 section = round_down(total, fsize);
+		/* the current pos in the fifo */
+		u64 pos = total - section;
+
+		/* if the offset is past/at the pos, we need to
+		 * go back to the last fifo section
+		 */
+		if (offset >= pos)
+			section -= fsize;
+
+		offset += section;
+	}
+
+	if (offset + size > total) {
+		pr_debug("index%x %lx points past end %lx\n", size,
+			 (unsigned long)offset, (unsigned long)total);
+		return -EINVAL;
+	}
+
+	pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n",
+		 size, (unsigned long)index, (unsigned long)(index * size),
+		 (unsigned long)offset, (unsigned long)total,
+		 (unsigned long)beN_to_cpu(&p->ostart[offset], size));
+
+	memcpy(p->out, &p->ostart[offset], size);
+	p->out += size;
+	p->olen -= size;
+
+	return 0;
+}
+
+static int do_index(struct sw842_param *p, u8 n)
+{
+	switch (n) {
+	case 2:
+		return __do_index(p, 2, I2_BITS, I2_FIFO_SIZE);
+	case 4:
+		return __do_index(p, 4, I4_BITS, I4_FIFO_SIZE);
+	case 8:
+		return __do_index(p, 8, I8_BITS, I8_FIFO_SIZE);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int do_op(struct sw842_param *p, u8 o)
+{
+	int i, ret = 0;
+
+	if (o >= OPS_MAX)
+		return -EINVAL;
+
+	for (i = 0; i < 4; i++) {
+		u8 op = decomp_ops[o][i];
+
+		pr_debug("op is %x\n", op);
+
+		switch (op & OP_ACTION) {
+		case OP_ACTION_DATA:
+			ret = do_data(p, op & OP_AMOUNT);
+			break;
+		case OP_ACTION_INDEX:
+			ret = do_index(p, op & OP_AMOUNT);
+			break;
+		case OP_ACTION_NOOP:
+			break;
+		default:
+			pr_err("Internal error, invalid op %x\n", op);
+			return -EINVAL;
+		}
+
+		if (ret)
+			return ret;
+	}
+
+	if (sw842_template_counts)
+		atomic_inc(&template_count[o]);
+
+	return 0;
+}
+
+/**
+ * sw842_decompress
+ *
+ * Decompress the 842-compressed buffer of length @ilen at @in
+ * to the output buffer @out, using no more than @olen bytes.
+ *
+ * The compressed buffer must be only a single 842-compressed buffer,
+ * with the standard format described in the comments in 842.h.
+ * Processing will stop when the 842 "END" template is detected,
+ * not the end of the buffer.
+ *
+ * Returns: 0 on success, error on failure. The @olen parameter
+ * will contain the number of output bytes written on success, or
+ * 0 on error.
+ */
+int sw842_decompress(const u8 *in, unsigned int ilen,
+		     u8 *out, unsigned int *olen)
+{
+	struct sw842_param p;
+	int ret;
+	u64 op, rep, tmp, bytes, total;
+
+	p.in = (u8 *)in;
+	p.bit = 0;
+	p.ilen = ilen;
+	p.out = out;
+	p.ostart = out;
+	p.olen = *olen;
+
+	total = p.olen;
+
+	*olen = 0;
+
+	do {
+		ret = next_bits(&p, &op, OP_BITS);
+		if (ret)
+			return ret;
+
+		pr_debug("template is %lx\n", (unsigned long)op);
+
+		switch (op) {
+		case OP_REPEAT:
+			ret = next_bits(&p, &rep, REPEAT_BITS);
+			if (ret)
+				return ret;
+
+			if (p.out == out) /* no previous bytes */
+				return -EINVAL;
+
+			/* copy rep + 1 */
+			rep++;
+
+			if (rep * 8 > p.olen)
+				return -ENOSPC;
+
+			while (rep-- > 0) {
+				memcpy(p.out, p.out - 8, 8);
+				p.out += 8;
+				p.olen -= 8;
+			}
+
+			if (sw842_template_counts)
+				atomic_inc(&template_repeat_count);
+
+			break;
+		case OP_ZEROS:
+			if (8 > p.olen)
+				return -ENOSPC;
+
+			memset(p.out, 0, 8);
+			p.out += 8;
+			p.olen -= 8;
+
+			if (sw842_template_counts)
+				atomic_inc(&template_zeros_count);
+
+			break;
+		case OP_SHORT_DATA:
+			ret = next_bits(&p, &bytes, SHORT_DATA_BITS);
+			if (ret)
+				return ret;
+
+			if (!bytes || bytes > SHORT_DATA_BITS_MAX)
+				return -EINVAL;
+
+			while (bytes-- > 0) {
+				ret = next_bits(&p, &tmp, 8);
+				if (ret)
+					return ret;
+				*p.out = (u8)tmp;
+				p.out++;
+				p.olen--;
+			}
+
+			if (sw842_template_counts)
+				atomic_inc(&template_short_data_count);
+
+			break;
+		case OP_END:
+			if (sw842_template_counts)
+				atomic_inc(&template_end_count);
+
+			break;
+		default: /* use template */
+			ret = do_op(&p, op);
+			if (ret)
+				return ret;
+			break;
+		}
+	} while (op != OP_END);
+
+	if (unlikely((total - p.olen) > UINT_MAX))
+		return -ENOSPC;
+
+	*olen = total - p.olen;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sw842_decompress);
+
+static int __init sw842_init(void)
+{
+	if (sw842_template_counts)
+		sw842_debugfs_create();
+
+	return 0;
+}
+module_init(sw842_init);
+
+static void __exit sw842_exit(void)
+{
+	if (sw842_template_counts)
+		sw842_debugfs_remove();
+}
+module_exit(sw842_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Software 842 Decompressor");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
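
[Editor's note: the ring-buffer correction in __do_index() above is the subtle part of decompression; below is the same arithmetic as a standalone userspace sketch, with illustrative names and types, not kernel code. For example, with the I2 fifo (fsize 512) after 1000 output bytes (total rounds down to 992, so pos is 480): index 10 gives offset 20, which is before pos, so it lands in the current section at 20 + 512 = 532; index 250 gives offset 500, which is at/past pos, so it stays in the previous section at 500.]

#include <stdint.h>

/* Userspace sketch of __do_index()'s offset math (illustrative only). */
static uint64_t index_to_offset(uint64_t index, uint64_t size,
				uint64_t fsize, uint64_t written)
{
	uint64_t offset = index * size;
	uint64_t total = written & ~(uint64_t)7; /* round_down(written, 8) */

	if (total > fsize) {
		uint64_t section = total - (total % fsize); /* current fifo */
		uint64_t pos = total - section;		    /* pos in fifo */

		/* at/past the current pos: data is in the previous section */
		if (offset >= pos)
			section -= fsize;

		offset += section;
	}
	return offset;	/* caller must still verify offset + size <= total */
}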
diff --git a/lib/842/Makefile b/lib/842/Makefile
new file mode 100644
index 000000000000..5d24c0baff2e
--- /dev/null
+++ b/lib/842/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_842_COMPRESS) += 842_compress.o
+obj-$(CONFIG_842_DECOMPRESS) += 842_decompress.o
diff --git a/lib/Kconfig b/lib/Kconfig
index 601965a948e8..3a2ef67db6c7 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -212,6 +212,12 @@ config RANDOM32_SELFTEST
 #
 # compression support is select'ed if needed
 #
+config 842_COMPRESS
+	tristate
+
+config 842_DECOMPRESS
+	tristate
+
 config ZLIB_INFLATE
 	tristate
 
@@ -522,4 +528,7 @@ source "lib/fonts/Kconfig"
 config ARCH_HAS_SG_CHAIN
 	def_bool n
 
+config ARCH_HAS_PMEM_API
+	bool
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 17670573dda8..e2894b23efb6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -841,9 +841,14 @@ config SCHED_DEBUG
 	  that can help debug the scheduler. The runtime overhead of this
 	  option is minimal.
 
+config SCHED_INFO
+	bool
+	default n
+
 config SCHEDSTATS
 	bool "Collect scheduler statistics"
 	depends on DEBUG_KERNEL && PROC_FS
+	select SCHED_INFO
 	help
 	  If you say Y here, additional code will be inserted into the
 	  scheduler and related routines to collect statistics about
@@ -1233,6 +1238,7 @@ config RCU_TORTURE_TEST
 	depends on DEBUG_KERNEL
 	select TORTURE_TEST
 	select SRCU
+	select TASKS_RCU
 	default n
 	help
 	  This option provides a kernel module that runs torture tests
@@ -1261,12 +1267,38 @@ config RCU_TORTURE_TEST_RUNNABLE
 	  Say N here if you want the RCU torture tests to start only
 	  after being manually enabled via /proc.
 
+config RCU_TORTURE_TEST_SLOW_PREINIT
+	bool "Slow down RCU grace-period pre-initialization to expose races"
+	depends on RCU_TORTURE_TEST
+	help
+	  This option delays grace-period pre-initialization (the
+	  propagation of CPU-hotplug changes up the rcu_node combining
+	  tree) for a few jiffies between initializing each pair of
+	  consecutive rcu_node structures. This helps to expose races
+	  involving grace-period pre-initialization, in other words, it
+	  makes your kernel less stable. It can also greatly increase
+	  grace-period latency, especially on systems with large numbers
+	  of CPUs. This is useful when torture-testing RCU, but in
+	  almost no other circumstance.
+
+	  Say Y here if you want your system to crash and hang more often.
+	  Say N if you want a sane system.
+
+config RCU_TORTURE_TEST_SLOW_PREINIT_DELAY
+	int "How much to slow down RCU grace-period pre-initialization"
+	range 0 5
+	default 3
+	depends on RCU_TORTURE_TEST_SLOW_PREINIT
+	help
+	  This option specifies the number of jiffies to wait between
+	  each rcu_node structure pre-initialization step.
+
 config RCU_TORTURE_TEST_SLOW_INIT
 	bool "Slow down RCU grace-period initialization to expose races"
 	depends on RCU_TORTURE_TEST
 	help
-	  This option makes grace-period initialization block for a
-	  few jiffies between initializing each pair of consecutive
+	  This option delays grace-period initialization for a few
+	  jiffies between initializing each pair of consecutive
 	  rcu_node structures. This helps to expose races involving
 	  grace-period initialization, in other words, it makes your
 	  kernel less stable. It can also greatly increase grace-period
@@ -1281,10 +1313,35 @@ config RCU_TORTURE_TEST_SLOW_INIT_DELAY
 	int "How much to slow down RCU grace-period initialization"
 	range 0 5
 	default 3
+	depends on RCU_TORTURE_TEST_SLOW_INIT
 	help
 	  This option specifies the number of jiffies to wait between
 	  each rcu_node structure initialization.
 
+config RCU_TORTURE_TEST_SLOW_CLEANUP
+	bool "Slow down RCU grace-period cleanup to expose races"
+	depends on RCU_TORTURE_TEST
+	help
+	  This option delays grace-period cleanup for a few jiffies
+	  between cleaning up each pair of consecutive rcu_node
+	  structures. This helps to expose races involving grace-period
+	  cleanup, in other words, it makes your kernel less stable.
+	  It can also greatly increase grace-period latency, especially
+	  on systems with large numbers of CPUs. This is useful when
+	  torture-testing RCU, but in almost no other circumstance.
+
+	  Say Y here if you want your system to crash and hang more often.
+	  Say N if you want a sane system.
+
+config RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY
+	int "How much to slow down RCU grace-period cleanup"
+	range 0 5
+	default 3
+	depends on RCU_TORTURE_TEST_SLOW_CLEANUP
+	help
+	  This option specifies the number of jiffies to wait between
+	  each rcu_node structure cleanup operation.
+
 config RCU_CPU_STALL_TIMEOUT
 	int "RCU CPU stall timeout in seconds"
 	depends on RCU_STALL_COMMON
@@ -1321,6 +1378,17 @@ config RCU_TRACE
 	  Say Y here if you want to enable RCU tracing
 	  Say N if you are unsure.
 
+config RCU_EQS_DEBUG
+	bool "Use this when adding any sort of NO_HZ support to your arch"
+	depends on DEBUG_KERNEL
+	help
+	  This option provides consistency checks in RCU's handling of
+	  NO_HZ. These checks have proven quite helpful in detecting
+	  bugs in arch-specific NO_HZ code.
+
+	  Say N here if you need ultimate kernel/user switch latencies
+	  Say Y if you are unsure
+
 endmenu # "RCU Debugging"
 
 config DEBUG_BLOCK_EXT_DEVT
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4fecaedc80a2..39f24d6721e5 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -10,15 +10,14 @@ config KASAN
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
-	  This is strictly debugging feature. It consumes about 1/8
-	  of available memory and brings about ~x3 performance slowdown.
+	  This is strictly a debugging feature and it requires a gcc version
+	  of 4.9.2 or later. Detection of out of bounds accesses to stack or
+	  global variables requires gcc 5.0 or later.
+	  This feature consumes about 1/8 of available memory and brings about
+	  ~x3 performance slowdown.
 	  For better error detection enable CONFIG_STACKTRACE,
 	  and add slub_debug=U to boot cmdline.
 
-config KASAN_SHADOW_OFFSET
-	hex
-	default 0xdffffc0000000000 if X86_64
-
 choice
 	prompt "Instrumentation type"
 	depends on KASAN
@@ -40,6 +39,7 @@ config KASAN_INLINE
 	  memory accesses. This is faster than outline (in some workloads
 	  it gives about x2 boost over outline instrumentation), but
 	  make kernel's .text size much bigger.
+	  This requires a gcc version of 5.0 or later.
 
 endchoice
 
diff --git a/lib/Makefile b/lib/Makefile
index 6c37933336a0..6897b527581a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -45,6 +45,9 @@ CFLAGS_kobject.o += -DDEBUG
 CFLAGS_kobject_uevent.o += -DDEBUG
 endif
 
+obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
+CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
+
 obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
 obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
 obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
@@ -78,6 +81,8 @@ obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
 obj-$(CONFIG_CRC8) += crc8.o
 obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
 
+obj-$(CONFIG_842_COMPRESS) += 842/
+obj-$(CONFIG_842_DECOMPRESS) += 842/
 obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
 obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
 obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 64c0926f5dd8..a578a0189199 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -462,19 +462,20 @@ EXPORT_SYMBOL(bitmap_parse_user);
  * Output format is a comma-separated list of decimal numbers and
  * ranges if list is specified or hex digits grouped into comma-separated
  * sets of 8 digits/set. Returns the number of characters written to buf.
+ *
+ * It is assumed that @buf is a pointer into a PAGE_SIZE area and that
+ * sufficient storage remains at @buf to accommodate the
+ * bitmap_print_to_pagebuf() output.
  */
 int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
 			    int nmaskbits)
 {
-	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2;
+	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
 	int n = 0;
 
-	if (len > 1) {
-		n = list ? scnprintf(buf, len, "%*pbl", nmaskbits, maskp) :
-			   scnprintf(buf, len, "%*pb", nmaskbits, maskp);
-		buf[n++] = '\n';
-		buf[n] = '\0';
-	}
+	if (len > 1)
+		n = list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
+			   scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
 	return n;
 }
 EXPORT_SYMBOL(bitmap_print_to_pagebuf);
@@ -506,12 +507,12 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
 	unsigned a, b;
 	int c, old_c, totaldigits;
 	const char __user __force *ubuf = (const char __user __force *)buf;
-	int exp_digit, in_range;
+	int at_start, in_range;
 
 	totaldigits = c = 0;
 	bitmap_zero(maskp, nmaskbits);
 	do {
-		exp_digit = 1;
+		at_start = 1;
 		in_range = 0;
 		a = b = 0;
 
@@ -540,11 +541,10 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
 			break;
 
 		if (c == '-') {
-			if (exp_digit || in_range)
+			if (at_start || in_range)
 				return -EINVAL;
 			b = 0;
 			in_range = 1;
-			exp_digit = 1;
 			continue;
 		}
 
@@ -554,16 +554,18 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
 			b = b * 10 + (c - '0');
 			if (!in_range)
 				a = b;
-			exp_digit = 0;
+			at_start = 0;
 			totaldigits++;
 		}
 		if (!(a <= b))
 			return -EINVAL;
 		if (b >= nmaskbits)
 			return -ERANGE;
-		while (a <= b) {
-			set_bit(a, maskp);
-			a++;
+		if (!at_start) {
+			while (a <= b) {
+				set_bit(a, maskp);
+				a++;
+			}
 		}
 	} while (buflen && c == ',');
 	return 0;
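
[Editor's note: for context, __bitmap_parselist() backs bitmap_parselist(), which accepts comma-separated decimal numbers and a-b ranges. The new at_start guard also means an empty list now parses to an empty mask instead of spuriously setting bit 0, as the old unconditional set_bit() loop did. A short usage sketch, with illustrative function name and error handling:]

#include <linux/bitmap.h>

/* Illustrative: parse a cpu-list style string into a bitmap. */
static int example_parselist(void)
{
	DECLARE_BITMAP(mask, 16);
	int err;

	err = bitmap_parselist("1,3-5,8", mask, 16);
	if (err)
		return err;	/* -EINVAL on bad syntax, -ERANGE if >= 16 */

	/* bits 1, 3, 4, 5 and 8 are now set in mask */
	return 0;
}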
diff --git a/lib/bug.c b/lib/bug.c
index 0c3bd9552b6f..cff145f032a5 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -66,7 +66,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
 	struct module *mod;
 	const struct bug_entry *bug = NULL;
 
-	rcu_read_lock();
+	rcu_read_lock_sched();
 	list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
 		unsigned i;
 
@@ -77,7 +77,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
 	}
 	bug = NULL;
 out:
-	rcu_read_unlock();
+	rcu_read_unlock_sched();
 
 	return bug;
 }
@@ -88,6 +88,8 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 	char *secstrings;
 	unsigned int i;
 
+	lockdep_assert_held(&module_mutex);
+
 	mod->bug_table = NULL;
 	mod->num_bugs = 0;
 
@@ -113,6 +115,7 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 
 void module_bug_cleanup(struct module *mod)
 {
+	lockdep_assert_held(&module_mutex);
 	list_del_rcu(&mod->bug_list);
 }
 
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index 4f134d8907a7..f610b2a10b3e 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -191,7 +191,7 @@ int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
 	/* Update distances based on topology */
 	for_each_cpu(cpu, update_mask) {
 		if (cpu_rmap_copy_neigh(rmap, cpu,
-					topology_thread_cpumask(cpu), 1))
+					topology_sibling_cpumask(cpu), 1))
 			continue;
 		if (cpu_rmap_copy_neigh(rmap, cpu,
 					topology_core_cpumask(cpu), 2))
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 830dd5dec40f..5a70f6196f57 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -16,11 +16,10 @@
 int cpumask_next_and(int n, const struct cpumask *src1p,
 		     const struct cpumask *src2p)
 {
-	struct cpumask tmp;
-
-	if (cpumask_and(&tmp, src1p, src2p))
-		return cpumask_next(n, &tmp);
-	return nr_cpu_ids;
+	while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
+		if (cpumask_test_cpu(n, src2p))
+			break;
+	return n;
 }
 EXPORT_SYMBOL(cpumask_next_and);
 
@@ -139,64 +138,42 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 #endif
 
 /**
- * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
- *
+ * cpumask_local_spread - select the i'th cpu with local numa cpu's first
  * @i: index number
- * @numa_node: local numa_node
- * @dstp: cpumask with the relevant cpu bit set according to the policy
+ * @node: local numa_node
  *
- * This function sets the cpumask according to a numa aware policy.
- * cpumask could be used as an affinity hint for the IRQ related to a
- * queue. When the policy is to spread queues across cores - local cores
- * first.
+ * This function selects an online CPU according to a numa aware policy;
+ * local cpus are returned first, followed by non-local ones, then it
+ * wraps around.
  *
- * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
- * the cpu bit and need to re-call the function.
+ * It's not very efficient, but useful for setup.
  */
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	cpumask_var_t mask;
 	int cpu;
-	int ret = 0;
-
-	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
-		return -ENOMEM;
 
+	/* Wrap: we always want a cpu. */
 	i %= num_online_cpus();
 
-	if (numa_node == -1 || !cpumask_of_node(numa_node)) {
-		/* Use all online cpu's for non numa aware system */
-		cpumask_copy(mask, cpu_online_mask);
+	if (node == -1) {
+		for_each_cpu(cpu, cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
 	} else {
-		int n;
-
-		cpumask_and(mask,
-			    cpumask_of_node(numa_node), cpu_online_mask);
-
-		n = cpumask_weight(mask);
-		if (i >= n) {
-			i -= n;
-
-			/* If index > number of local cpu's, mask out local
-			 * cpu's
-			 */
-			cpumask_andnot(mask, cpu_online_mask, mask);
+		/* NUMA first. */
+		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
+
+		for_each_cpu(cpu, cpu_online_mask) {
+			/* Skip NUMA nodes, done above. */
+			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
+				continue;
+
+			if (i-- == 0)
+				return cpu;
 		}
 	}
-
-	for_each_cpu(cpu, mask) {
-		if (--i < 0)
-			goto out;
-	}
-
-	ret = -EAGAIN;
-
-out:
-	free_cpumask_var(mask);
-
-	if (!ret)
-		cpumask_set_cpu(cpu, dstp);
-
-	return ret;
+	BUG();
 }
-EXPORT_SYMBOL(cpumask_set_cpu_local_first);
+EXPORT_SYMBOL(cpumask_local_spread);
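
[Editor's note: a sketch of the intended use, as hypothetical driver code rather than anything in this patch. The old kerneldoc suggested the result as an IRQ affinity hint for per-queue vectors; the new function returns the CPU directly instead of filling a cpumask. struct my_dev and the function name are illustrative.]

/* Hypothetical: pick a CPU per queue, preferring the device's NUMA node. */
static void example_set_queue_affinity(struct my_dev *dev, int nqueues)
{
	int q;

	for (q = 0; q < nqueues; q++) {
		unsigned int cpu = cpumask_local_spread(q, dev->numa_node);

		/* e.g. feed cpumask_of(cpu) to irq_set_affinity_hint() */
		pr_debug("queue %d -> cpu %u\n", q, cpu);
	}
}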
diff --git a/lib/crc-itu-t.c b/lib/crc-itu-t.c
index a63472b82416..b3219d0abfb4 100644
--- a/lib/crc-itu-t.c
+++ b/lib/crc-itu-t.c
@@ -9,7 +9,7 @@
 #include <linux/module.h>
 #include <linux/crc-itu-t.h>
 
-/** CRC table for the CRC ITU-T V.41 0x0x1021 (x^16 + x^12 + x^15 + 1) */
+/** CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^15 + 1) */
 const u16 crc_itu_t_table[256] = {
 	0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
 	0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index dfe6ec17c0a5..1ad33e555805 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -19,7 +19,7 @@
 static struct crypto_shash *crct10dif_tfm;
 static struct static_key crct10dif_fallback __read_mostly;
 
-__u16 crc_t10dif(const unsigned char *buffer, size_t len)
+__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
 {
 	struct {
 		struct shash_desc shash;
@@ -28,17 +28,23 @@ __u16 crc_t10dif(const unsigned char *buffer, size_t len)
 	int err;
 
 	if (static_key_false(&crct10dif_fallback))
-		return crc_t10dif_generic(0, buffer, len);
+		return crc_t10dif_generic(crc, buffer, len);
 
 	desc.shash.tfm = crct10dif_tfm;
 	desc.shash.flags = 0;
-	*(__u16 *)desc.ctx = 0;
+	*(__u16 *)desc.ctx = crc;
 
 	err = crypto_shash_update(&desc.shash, buffer, len);
 	BUG_ON(err);
 
 	return *(__u16 *)desc.ctx;
 }
+EXPORT_SYMBOL(crc_t10dif_update);
+
+__u16 crc_t10dif(const unsigned char *buffer, size_t len)
+{
+	return crc_t10dif_update(0, buffer, len);
+}
 EXPORT_SYMBOL(crc_t10dif);
 
 static int __init crc_t10dif_mod_init(void)
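
[Editor's note: with the new crc_t10dif_update(), a buffer can now be checksummed in pieces; seeding with 0 is equivalent to the one-shot crc_t10dif(), as the wrapper above shows. A minimal sketch, with an illustrative function name:]

/* Illustrative: chunked CRC equals the one-shot result. */
static __u16 example_crc_t10dif_chunked(const u8 *buf, size_t len)
{
	size_t half = len / 2;
	__u16 crc;

	crc = crc_t10dif_update(0, buf, half);
	crc = crc_t10dif_update(crc, buf + half, len - half);

	return crc;	/* same value as crc_t10dif(buf, len) */
}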
diff --git a/lib/debug_info.c b/lib/debug_info.c
new file mode 100644
index 000000000000..2edbe27517ed
--- /dev/null
+++ b/lib/debug_info.c
@@ -0,0 +1,27 @@
+/*
+ * This file exists solely to ensure debug information for some core
+ * data structures is included in the final image even for
+ * CONFIG_DEBUG_INFO_REDUCED. Please do not add actual code. However,
+ * adding appropriate #includes is fine.
+ */
+#include <stdarg.h>
+
+#include <linux/cred.h>
+#include <linux/crypto.h>
+#include <linux/dcache.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/fscache-cache.h>
+#include <linux/io.h>
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <net/addrconf.h>
+#include <net/sock.h>
+#include <net/tcp.h>
diff --git a/lib/decompress.c b/lib/decompress.c
index 528ff932d8e4..62696dff5730 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -59,8 +59,11 @@ decompress_fn __init decompress_method(const unsigned char *inbuf, long len,
59{ 59{
60 const struct compress_format *cf; 60 const struct compress_format *cf;
61 61
62 if (len < 2) 62 if (len < 2) {
63 if (name)
64 *name = NULL;
63 return NULL; /* Need at least this much... */ 65 return NULL; /* Need at least this much... */
66 }
64 67
65 pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]); 68 pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]);
66 69
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ae4b65e17e64..dace71fe41f7 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -574,6 +574,9 @@ void debug_dma_assert_idle(struct page *page)
574 unsigned long flags; 574 unsigned long flags;
575 phys_addr_t cln; 575 phys_addr_t cln;
576 576
577 if (dma_debug_disabled())
578 return;
579
577 if (!page) 580 if (!page)
578 return; 581 return;
579 582
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index d8f3d3150603..e491e02eff54 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -887,7 +887,7 @@ static int ddebug_dyndbg_param_cb(char *param, char *val,
887 887
888/* handle both dyndbg and $module.dyndbg params at boot */ 888/* handle both dyndbg and $module.dyndbg params at boot */
889static int ddebug_dyndbg_boot_param_cb(char *param, char *val, 889static int ddebug_dyndbg_boot_param_cb(char *param, char *val,
890 const char *unused) 890 const char *unused, void *arg)
891{ 891{
892 vpr_info("%s=\"%s\"\n", param, val); 892 vpr_info("%s=\"%s\"\n", param, val);
893 return ddebug_dyndbg_param_cb(param, val, NULL, 0); 893 return ddebug_dyndbg_param_cb(param, val, NULL, 0);
@@ -1028,7 +1028,7 @@ static int __init dynamic_debug_init(void)
1028 */ 1028 */
1029 cmdline = kstrdup(saved_command_line, GFP_KERNEL); 1029 cmdline = kstrdup(saved_command_line, GFP_KERNEL);
1030 parse_args("dyndbg params", cmdline, NULL, 1030 parse_args("dyndbg params", cmdline, NULL,
1031 0, 0, 0, &ddebug_dyndbg_boot_param_cb); 1031 0, 0, 0, NULL, &ddebug_dyndbg_boot_param_cb);
1032 kfree(cmdline); 1032 kfree(cmdline);
1033 return 0; 1033 return 0;
1034 1034
diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c
deleted file mode 100644
index 3e3be40c6a6e..000000000000
--- a/lib/find_last_bit.c
+++ /dev/null
@@ -1,41 +0,0 @@
1/* find_last_bit.c: fallback find next bit implementation
2 *
3 * Copyright (C) 2008 IBM Corporation
4 * Written by Rusty Russell <rusty@rustcorp.com.au>
5 * (Inspired by David Howell's find_next_bit implementation)
6 *
7 * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
8 * size and improve performance, 2015.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/bitops.h>
17#include <linux/bitmap.h>
18#include <linux/export.h>
19#include <linux/kernel.h>
20
21#ifndef find_last_bit
22
23unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
24{
25 if (size) {
26 unsigned long val = BITMAP_LAST_WORD_MASK(size);
27 unsigned long idx = (size-1) / BITS_PER_LONG;
28
29 do {
30 val &= addr[idx];
31 if (val)
32 return idx * BITS_PER_LONG + __fls(val);
33
34 val = ~0ul;
35 } while (idx--);
36 }
37 return size;
38}
39EXPORT_SYMBOL(find_last_bit);
40
41#endif
diff --git a/lib/genalloc.c b/lib/genalloc.c
index d214866eeea2..daf0afb6d979 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -602,12 +602,12 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
602EXPORT_SYMBOL(devm_gen_pool_create); 602EXPORT_SYMBOL(devm_gen_pool_create);
603 603
604/** 604/**
605 * dev_get_gen_pool - Obtain the gen_pool (if any) for a device 605 * gen_pool_get - Obtain the gen_pool (if any) for a device
606 * @dev: device to retrieve the gen_pool from 606 * @dev: device to retrieve the gen_pool from
607 * 607 *
608 * Returns the gen_pool for the device if one is present, or NULL. 608 * Returns the gen_pool for the device if one is present, or NULL.
609 */ 609 */
610struct gen_pool *dev_get_gen_pool(struct device *dev) 610struct gen_pool *gen_pool_get(struct device *dev)
611{ 611{
612 struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL, 612 struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
613 NULL); 613 NULL);
@@ -616,11 +616,11 @@ struct gen_pool *dev_get_gen_pool(struct device *dev)
616 return NULL; 616 return NULL;
617 return *p; 617 return *p;
618} 618}
619EXPORT_SYMBOL_GPL(dev_get_gen_pool); 619EXPORT_SYMBOL_GPL(gen_pool_get);
620 620
621#ifdef CONFIG_OF 621#ifdef CONFIG_OF
622/** 622/**
623 * of_get_named_gen_pool - find a pool by phandle property 623 * of_gen_pool_get - find a pool by phandle property
624 * @np: device node 624 * @np: device node
625 * @propname: property name containing phandle(s) 625 * @propname: property name containing phandle(s)
626 * @index: index into the phandle array 626 * @index: index into the phandle array
@@ -629,7 +629,7 @@ EXPORT_SYMBOL_GPL(dev_get_gen_pool);
629 * address of the device tree node pointed at by the phandle property, 629 * address of the device tree node pointed at by the phandle property,
630 * or NULL if not found. 630 * or NULL if not found.
631 */ 631 */
632struct gen_pool *of_get_named_gen_pool(struct device_node *np, 632struct gen_pool *of_gen_pool_get(struct device_node *np,
633 const char *propname, int index) 633 const char *propname, int index)
634{ 634{
635 struct platform_device *pdev; 635 struct platform_device *pdev;
@@ -642,7 +642,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np,
642 of_node_put(np_pool); 642 of_node_put(np_pool);
643 if (!pdev) 643 if (!pdev)
644 return NULL; 644 return NULL;
645 return dev_get_gen_pool(&pdev->dev); 645 return gen_pool_get(&pdev->dev);
646} 646}
647EXPORT_SYMBOL_GPL(of_get_named_gen_pool); 647EXPORT_SYMBOL_GPL(of_gen_pool_get);
648#endif /* CONFIG_OF */ 648#endif /* CONFIG_OF */
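After the rename, callers fetch a device's pool with gen_pool_get() and resolve a DT phandle with of_gen_pool_get(). A hedged probe sketch (the driver and the "sram" property name are illustrative):

    #include <linux/genalloc.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            /* was of_get_named_gen_pool() before this rename */
            struct gen_pool *pool = of_gen_pool_get(pdev->dev.of_node,
                                                    "sram", 0);

            if (!pool)
                    return -ENODEV;
            return 0;
    }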
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 7ea09699855d..8d74c20d8595 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -11,6 +11,7 @@
11#include <linux/ctype.h> 11#include <linux/ctype.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/export.h> 13#include <linux/export.h>
14#include <asm/unaligned.h>
14 15
15const char hex_asc[] = "0123456789abcdef"; 16const char hex_asc[] = "0123456789abcdef";
16EXPORT_SYMBOL(hex_asc); 17EXPORT_SYMBOL(hex_asc);
@@ -139,7 +140,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
139 for (j = 0; j < ngroups; j++) { 140 for (j = 0; j < ngroups; j++) {
140 ret = snprintf(linebuf + lx, linebuflen - lx, 141 ret = snprintf(linebuf + lx, linebuflen - lx,
141 "%s%16.16llx", j ? " " : "", 142 "%s%16.16llx", j ? " " : "",
142 (unsigned long long)*(ptr8 + j)); 143 get_unaligned(ptr8 + j));
143 if (ret >= linebuflen - lx) 144 if (ret >= linebuflen - lx)
144 goto overflow1; 145 goto overflow1;
145 lx += ret; 146 lx += ret;
@@ -150,7 +151,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
150 for (j = 0; j < ngroups; j++) { 151 for (j = 0; j < ngroups; j++) {
151 ret = snprintf(linebuf + lx, linebuflen - lx, 152 ret = snprintf(linebuf + lx, linebuflen - lx,
152 "%s%8.8x", j ? " " : "", 153 "%s%8.8x", j ? " " : "",
153 *(ptr4 + j)); 154 get_unaligned(ptr4 + j));
154 if (ret >= linebuflen - lx) 155 if (ret >= linebuflen - lx)
155 goto overflow1; 156 goto overflow1;
156 lx += ret; 157 lx += ret;
@@ -161,7 +162,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
161 for (j = 0; j < ngroups; j++) { 162 for (j = 0; j < ngroups; j++) {
162 ret = snprintf(linebuf + lx, linebuflen - lx, 163 ret = snprintf(linebuf + lx, linebuflen - lx,
163 "%s%4.4x", j ? " " : "", 164 "%s%4.4x", j ? " " : "",
164 *(ptr2 + j)); 165 get_unaligned(ptr2 + j));
165 if (ret >= linebuflen - lx) 166 if (ret >= linebuflen - lx)
166 goto overflow1; 167 goto overflow1;
167 lx += ret; 168 lx += ret;
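With get_unaligned(), hex_dump_to_buffer() no longer requires the source to be naturally aligned for 2-, 4- or 8-byte group sizes. A hedged sketch dumping from a deliberately odd offset:

    #include <linux/kernel.h>
    #include <linux/printk.h>

    static void dump_at_odd_offset(const u8 *buf, size_t len)
    {
            char line[200];

            if (len < 2)
                    return;
            /* buf + 1 is misaligned for 8-byte groups; now safe */
            hex_dump_to_buffer(buf + 1, min(len - 1, (size_t)16), 16, 8,
                               line, sizeof(line), false);
            pr_debug("%s\n", line);
    }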
diff --git a/lib/kobject.c b/lib/kobject.c
index 3b841b97fccd..3e3a5c3cb330 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -257,23 +257,20 @@ static int kobject_add_internal(struct kobject *kobj)
257int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, 257int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
258 va_list vargs) 258 va_list vargs)
259{ 259{
260 const char *old_name = kobj->name;
261 char *s; 260 char *s;
262 261
263 if (kobj->name && !fmt) 262 if (kobj->name && !fmt)
264 return 0; 263 return 0;
265 264
266 kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); 265 s = kvasprintf(GFP_KERNEL, fmt, vargs);
267 if (!kobj->name) { 266 if (!s)
268 kobj->name = old_name;
269 return -ENOMEM; 267 return -ENOMEM;
270 }
271 268
272 /* ewww... some of these buggers have '/' in the name ... */ 269 /* ewww... some of these buggers have '/' in the name ... */
273 while ((s = strchr(kobj->name, '/'))) 270 strreplace(s, '/', '!');
274 s[0] = '!'; 271 kfree(kobj->name);
272 kobj->name = s;
275 273
276 kfree(old_name);
277 return 0; 274 return 0;
278} 275}
279 276
@@ -340,8 +337,9 @@ error:
340} 337}
341EXPORT_SYMBOL(kobject_init); 338EXPORT_SYMBOL(kobject_init);
342 339
343static int kobject_add_varg(struct kobject *kobj, struct kobject *parent, 340static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
344 const char *fmt, va_list vargs) 341 struct kobject *parent,
342 const char *fmt, va_list vargs)
345{ 343{
346 int retval; 344 int retval;
347 345
@@ -548,6 +546,7 @@ out:
548 kfree(devpath); 546 kfree(devpath);
549 return error; 547 return error;
550} 548}
549EXPORT_SYMBOL_GPL(kobject_move);
551 550
552/** 551/**
553 * kobject_del - unlink kobject from hierarchy. 552 * kobject_del - unlink kobject from hierarchy.
diff --git a/lib/list_sort.c b/lib/list_sort.c
index b29015102698..3fe401067e20 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -289,5 +289,5 @@ exit:
289 kfree(elts); 289 kfree(elts);
290 return err; 290 return err;
291} 291}
292module_init(list_sort_test); 292late_initcall(list_sort_test);
293#endif /* CONFIG_TEST_LIST_SORT */ 293#endif /* CONFIG_TEST_LIST_SORT */
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 26cc6029b280..6d940c72b5fc 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -140,8 +140,12 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
140 /* Error: request to write beyond destination buffer */ 140 /* Error: request to write beyond destination buffer */
141 if (cpy > oend) 141 if (cpy > oend)
142 goto _output_error; 142 goto _output_error;
143#if LZ4_ARCH64
144 if ((ref + COPYLENGTH) > oend)
145#else
143 if ((ref + COPYLENGTH) > oend || 146 if ((ref + COPYLENGTH) > oend ||
144 (op + COPYLENGTH) > oend) 147 (op + COPYLENGTH) > oend)
148#endif
145 goto _output_error; 149 goto _output_error;
146 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); 150 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
147 while (op < cpy) 151 while (op < cpy)
@@ -266,7 +270,13 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
266 if (cpy > oend - COPYLENGTH) { 270 if (cpy > oend - COPYLENGTH) {
267 if (cpy > oend) 271 if (cpy > oend)
268 goto _output_error; /* write outside of buf */ 272 goto _output_error; /* write outside of buf */
269 273#if LZ4_ARCH64
274 if ((ref + COPYLENGTH) > oend)
275#else
276 if ((ref + COPYLENGTH) > oend ||
277 (op + COPYLENGTH) > oend)
278#endif
279 goto _output_error;
270 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); 280 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
271 while (op < cpy) 281 while (op < cpy)
272 *op++ = *ref++; 282 *op++ = *ref++;
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index aac511417ad1..a89d041592c8 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -639,7 +639,7 @@ do { \
639 ************** MIPS ***************** 639 ************** MIPS *****************
640 ***************************************/ 640 ***************************************/
641#if defined(__mips__) && W_TYPE_SIZE == 32 641#if defined(__mips__) && W_TYPE_SIZE == 32
642#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 642#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
643#define umul_ppmm(w1, w0, u, v) \ 643#define umul_ppmm(w1, w0, u, v) \
644do { \ 644do { \
645 UDItype __ll = (UDItype)(u) * (v); \ 645 UDItype __ll = (UDItype)(u) * (v); \
@@ -671,7 +671,7 @@ do { \
671 ************** MIPS/64 ************** 671 ************** MIPS/64 **************
672 ***************************************/ 672 ***************************************/
673#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 673#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
674#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 674#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
675#define umul_ppmm(w1, w0, u, v) \ 675#define umul_ppmm(w1, w0, u, v) \
676do { \ 676do { \
677 typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ 677 typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 4cc6442733f4..bc0a1da8afba 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -128,28 +128,36 @@ leave:
128} 128}
129EXPORT_SYMBOL_GPL(mpi_read_from_buffer); 129EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
130 130
131/**************** 131/**
132 * Return an allocated buffer with the MPI (msb first). 132 * mpi_read_buffer() - read MPI to a buffer provided by user (msb first)
133 * NBYTES receives the length of this buffer. Caller must free the 133 *
134 * return string (This function does return a 0 byte buffer with NBYTES 134 * @a: a multi precision integer
135 * set to zero if the value of A is zero. If sign is not NULL, it will 135 * @buf: buffer to which the output will be written. Needs to be at
136 * be set to the sign of the A. 136 * least mpi_get_size(a) long.
137 * @buf_len: size of the buf.
138 * @nbytes: receives the actual length of the data written.
139 * @sign: if not NULL, it will be set to the sign of a.
140 *
137 */ 141 * Return: 0 on success or a negative error code on failure
137 */ 142 */
138void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) 143int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
144 int *sign)
139{ 145{
140 uint8_t *p, *buffer; 146 uint8_t *p;
141 mpi_limb_t alimb; 147 mpi_limb_t alimb;
148 unsigned int n = mpi_get_size(a);
142 int i; 149 int i;
143 unsigned int n; 150
151 if (buf_len < n || !buf)
152 return -EINVAL;
144 153
145 if (sign) 154 if (sign)
146 *sign = a->sign; 155 *sign = a->sign;
147 *nbytes = n = a->nlimbs * BYTES_PER_MPI_LIMB; 156
148 if (!n) 157 if (nbytes)
149 n++; /* avoid zero length allocation */ 158 *nbytes = n;
150 p = buffer = kmalloc(n, GFP_KERNEL); 159
151 if (!p) 160 p = buf;
152 return NULL;
153 161
154 for (i = a->nlimbs - 1; i >= 0; i--) { 162 for (i = a->nlimbs - 1; i >= 0; i--) {
155 alimb = a->d[i]; 163 alimb = a->d[i];
@@ -171,15 +179,56 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
171#error please implement for this limb size. 179#error please implement for this limb size.
172#endif 180#endif
173 } 181 }
182 return 0;
183}
184EXPORT_SYMBOL_GPL(mpi_read_buffer);
185
186/*
187 * mpi_get_buffer() - Returns an allocated buffer with the MPI (msb first).
188 * Caller must free the return string.
189 * This function does return a 0 byte buffer with nbytes set to zero if the
190 * value of A is zero.
191 *
192 * @a: a multi precision integer.
193 * @nbytes: receives the length of this buffer.
194 * @sign: if not NULL, it will be set to the sign of a.
195 *
196 * Return: Pointer to MPI buffer or NULL on error
197 */
198void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
199{
200 uint8_t *buf, *p;
201 unsigned int n;
202 int ret;
203
204 if (!nbytes)
205 return NULL;
206
207 n = mpi_get_size(a);
208
209 if (!n)
210 n++;
211
212 buf = kmalloc(n, GFP_KERNEL);
213
214 if (!buf)
215 return NULL;
216
217 ret = mpi_read_buffer(a, buf, n, nbytes, sign);
218
219 if (ret) {
220 kfree(buf);
221 return NULL;
222 }
174 223
175 /* this is sub-optimal but we need to do the shift operation 224 /* this is sub-optimal but we need to do the shift operation
176 * because the caller has to free the returned buffer */ 225 * because the caller has to free the returned buffer */
177 for (p = buffer; !*p && *nbytes; p++, --*nbytes) 226 for (p = buf; !*p && *nbytes; p++, --*nbytes)
178 ; 227 ;
179 if (p != buffer) 228 if (p != buf)
180 memmove(buffer, p, *nbytes); 229 memmove(buf, p, *nbytes);
181 230
182 return buffer; 231 return buf;
183} 232}
184EXPORT_SYMBOL_GPL(mpi_get_buffer); 233EXPORT_SYMBOL_GPL(mpi_get_buffer);
185 234
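mpi_read_buffer() writes into caller-owned storage instead of allocating; a hedged usage sketch:

    #include <linux/mpi.h>
    #include <linux/printk.h>
    #include <linux/slab.h>

    static int dump_mpi(MPI a)
    {
            unsigned int n = mpi_get_size(a), written;
            u8 *buf = kmalloc(n, GFP_KERNEL);
            int ret;

            if (!buf)
                    return -ENOMEM;
            ret = mpi_read_buffer(a, buf, n, &written, NULL);
            if (!ret)
                    print_hex_dump_bytes("mpi: ", DUMP_PREFIX_OFFSET,
                                         buf, written);
            kfree(buf);
            return ret;
    }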
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index bf076d281d40..314f4dfa603e 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -69,7 +69,7 @@ void mpi_free_limb_space(mpi_ptr_t a)
69 if (!a) 69 if (!a)
70 return; 70 return;
71 71
72 kfree(a); 72 kzfree(a);
73} 73}
74 74
75void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs) 75void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs)
@@ -95,7 +95,7 @@ int mpi_resize(MPI a, unsigned nlimbs)
95 if (!p) 95 if (!p)
96 return -ENOMEM; 96 return -ENOMEM;
97 memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t)); 97 memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
98 kfree(a->d); 98 kzfree(a->d);
99 a->d = p; 99 a->d = p;
100 } else { 100 } else {
101 a->d = kzalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL); 101 a->d = kzalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL);
@@ -112,7 +112,7 @@ void mpi_free(MPI a)
112 return; 112 return;
113 113
114 if (a->flags & 4) 114 if (a->flags & 4)
115 kfree(a->d); 115 kzfree(a->d);
116 else 116 else
117 mpi_free_limb_space(a->d); 117 mpi_free_limb_space(a->d);
118 118
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 48144cdae819..f051d69f0910 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -197,13 +197,13 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
197 * Compare counter against given value. 197 * Compare counter against given value.
198 * Return 1 if greater, 0 if equal and -1 if less 198 * Return 1 if greater, 0 if equal and -1 if less
199 */ 199 */
200int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) 200int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
201{ 201{
202 s64 count; 202 s64 count;
203 203
204 count = percpu_counter_read(fbc); 204 count = percpu_counter_read(fbc);
205 /* Check to see if rough count will be sufficient for comparison */ 205 /* Check to see if rough count will be sufficient for comparison */
206 if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) { 206 if (abs(count - rhs) > (batch * num_online_cpus())) {
207 if (count > rhs) 207 if (count > rhs)
208 return 1; 208 return 1;
209 else 209 else
@@ -218,7 +218,7 @@ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
218 else 218 else
219 return 0; 219 return 0;
220} 220}
221EXPORT_SYMBOL(percpu_counter_compare); 221EXPORT_SYMBOL(__percpu_counter_compare);
222 222
223static int __init percpu_counter_startup(void) 223static int __init percpu_counter_startup(void)
224{ 224{
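__percpu_counter_compare() lets callers trade accuracy for fewer exact sums by passing their own batch. A hedged sketch:

    #include <linux/percpu_counter.h>

    /* Wider batch: tolerate more per-CPU drift before the exact compare. */
    static bool over_limit(struct percpu_counter *fbc, s64 limit)
    {
            return __percpu_counter_compare(fbc, limit,
                                            percpu_counter_batch * 8) > 0;
    }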
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 3d2aa27b845b..f9ebe1c82060 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -33,7 +33,7 @@
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/bitops.h> 34#include <linux/bitops.h>
35#include <linux/rcupdate.h> 35#include <linux/rcupdate.h>
36#include <linux/preempt_mask.h> /* in_interrupt() */ 36#include <linux/preempt.h> /* in_interrupt() */
37 37
38 38
39/* 39/*
@@ -65,7 +65,8 @@ static struct kmem_cache *radix_tree_node_cachep;
65 */ 65 */
66struct radix_tree_preload { 66struct radix_tree_preload {
67 int nr; 67 int nr;
68 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE]; 68 /* nodes->private_data points to next preallocated node */
69 struct radix_tree_node *nodes;
69}; 70};
70static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; 71static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
71 72
@@ -197,8 +198,9 @@ radix_tree_node_alloc(struct radix_tree_root *root)
197 */ 198 */
198 rtp = this_cpu_ptr(&radix_tree_preloads); 199 rtp = this_cpu_ptr(&radix_tree_preloads);
199 if (rtp->nr) { 200 if (rtp->nr) {
200 ret = rtp->nodes[rtp->nr - 1]; 201 ret = rtp->nodes;
201 rtp->nodes[rtp->nr - 1] = NULL; 202 rtp->nodes = ret->private_data;
203 ret->private_data = NULL;
202 rtp->nr--; 204 rtp->nr--;
203 } 205 }
204 /* 206 /*
@@ -257,17 +259,20 @@ static int __radix_tree_preload(gfp_t gfp_mask)
257 259
258 preempt_disable(); 260 preempt_disable();
259 rtp = this_cpu_ptr(&radix_tree_preloads); 261 rtp = this_cpu_ptr(&radix_tree_preloads);
260 while (rtp->nr < ARRAY_SIZE(rtp->nodes)) { 262 while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
261 preempt_enable(); 263 preempt_enable();
262 node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); 264 node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
263 if (node == NULL) 265 if (node == NULL)
264 goto out; 266 goto out;
265 preempt_disable(); 267 preempt_disable();
266 rtp = this_cpu_ptr(&radix_tree_preloads); 268 rtp = this_cpu_ptr(&radix_tree_preloads);
267 if (rtp->nr < ARRAY_SIZE(rtp->nodes)) 269 if (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
268 rtp->nodes[rtp->nr++] = node; 270 node->private_data = rtp->nodes;
269 else 271 rtp->nodes = node;
272 rtp->nr++;
273 } else {
270 kmem_cache_free(radix_tree_node_cachep, node); 274 kmem_cache_free(radix_tree_node_cachep, node);
275 }
271 } 276 }
272 ret = 0; 277 ret = 0;
273out: 278out:
@@ -1463,15 +1468,16 @@ static int radix_tree_callback(struct notifier_block *nfb,
1463{ 1468{
1464 int cpu = (long)hcpu; 1469 int cpu = (long)hcpu;
1465 struct radix_tree_preload *rtp; 1470 struct radix_tree_preload *rtp;
1471 struct radix_tree_node *node;
1466 1472
1467 /* Free per-cpu pool of preloaded nodes */ 1473 /* Free per-cpu pool of preloaded nodes */
1468 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 1474 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
1469 rtp = &per_cpu(radix_tree_preloads, cpu); 1475 rtp = &per_cpu(radix_tree_preloads, cpu);
1470 while (rtp->nr) { 1476 while (rtp->nr) {
1471 kmem_cache_free(radix_tree_node_cachep, 1477 node = rtp->nodes;
1472 rtp->nodes[rtp->nr-1]); 1478 rtp->nodes = node->private_data;
1473 rtp->nodes[rtp->nr-1] = NULL; 1479 kmem_cache_free(radix_tree_node_cachep, node);
1474 rtp->nr--; 1480 rtp->nr--;
1475 } 1481 }
1476 } 1482 }
1477 return NOTIFY_OK; 1483 return NOTIFY_OK;
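The preload pool is now an intrusive singly linked list threaded through node->private_data rather than a fixed array. The push/pop idiom in isolation, with stand-in types (pre_node and pool are illustrative, not kernel structs):

    struct pre_node { void *private_data; };
    struct pool { struct pre_node *nodes; int nr; };

    static void pool_push(struct pool *p, struct pre_node *n)
    {
            n->private_data = p->nodes;     /* chain through the node itself */
            p->nodes = n;
            p->nr++;
    }

    static struct pre_node *pool_pop(struct pool *p)
    {
            struct pre_node *n = p->nodes;

            if (!n)
                    return NULL;
            p->nodes = n->private_data;
            n->private_data = NULL;         /* hand out a clean node */
            p->nr--;
            return n;
    }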
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index c7dab0645554..3b10a48fa040 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -15,7 +15,7 @@ quiet_cmd_unroll = UNROLL $@
15 < $< > $@ || ( rm -f $@ && exit 1 ) 15 < $< > $@ || ( rm -f $@ && exit 1 )
16 16
17ifeq ($(CONFIG_ALTIVEC),y) 17ifeq ($(CONFIG_ALTIVEC),y)
18altivec_flags := -maltivec -mabi=altivec 18altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
19endif 19endif
20 20
21# The GCC option -ffreestanding is required in order to compile code containing 21# The GCC option -ffreestanding is required in order to compile code containing
diff --git a/lib/raid6/x86.h b/lib/raid6/x86.h
index b7595484a815..8fe9d9662abb 100644
--- a/lib/raid6/x86.h
+++ b/lib/raid6/x86.h
@@ -23,7 +23,7 @@
23 23
24#ifdef __KERNEL__ /* Real code */ 24#ifdef __KERNEL__ /* Real code */
25 25
26#include <asm/i387.h> 26#include <asm/fpu/api.h>
27 27
28#else /* Dummy code for user space testing */ 28#else /* Dummy code for user space testing */
29 29
diff --git a/lib/rbtree.c b/lib/rbtree.c
index c16c81a3d430..1356454e36de 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -44,6 +44,30 @@
44 * parentheses and have some accompanying text comment. 44 * parentheses and have some accompanying text comment.
45 */ 45 */
46 46
47/*
48 * Notes on lockless lookups:
49 *
50 * All stores to the tree structure (rb_left and rb_right) must be done using
51 * WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the
52 * tree structure as seen in program order.
53 *
54 * These two requirements will allow lockless iteration of the tree -- not
55 * correct iteration mind you, tree rotations are not atomic so a lookup might
56 * miss entire subtrees.
57 *
58 * But they do guarantee that any such traversal will only see valid elements
59 * and that it will indeed complete -- does not get stuck in a loop.
60 *
61 * It also guarantees that if the lookup returns an element it is the 'correct'
62 * one. But not returning an element does _NOT_ mean it's not present.
63 *
64 * NOTE:
65 *
66 * Stores to __rb_parent_color are not important for simple lookups so those
67 * are left undone as of now. Nor did I check for loops involving parent
68 * pointers.
69 */
70
47static inline void rb_set_black(struct rb_node *rb) 71static inline void rb_set_black(struct rb_node *rb)
48{ 72{
49 rb->__rb_parent_color |= RB_BLACK; 73 rb->__rb_parent_color |= RB_BLACK;
@@ -129,8 +153,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
129 * This still leaves us in violation of 4), the 153 * This still leaves us in violation of 4), the
130 * continuation into Case 3 will fix that. 154 * continuation into Case 3 will fix that.
131 */ 155 */
132 parent->rb_right = tmp = node->rb_left; 156 tmp = node->rb_left;
133 node->rb_left = parent; 157 WRITE_ONCE(parent->rb_right, tmp);
158 WRITE_ONCE(node->rb_left, parent);
134 if (tmp) 159 if (tmp)
135 rb_set_parent_color(tmp, parent, 160 rb_set_parent_color(tmp, parent,
136 RB_BLACK); 161 RB_BLACK);
@@ -149,8 +174,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
149 * / \ 174 * / \
150 * n U 175 * n U
151 */ 176 */
152 gparent->rb_left = tmp; /* == parent->rb_right */ 177 WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */
153 parent->rb_right = gparent; 178 WRITE_ONCE(parent->rb_right, gparent);
154 if (tmp) 179 if (tmp)
155 rb_set_parent_color(tmp, gparent, RB_BLACK); 180 rb_set_parent_color(tmp, gparent, RB_BLACK);
156 __rb_rotate_set_parents(gparent, parent, root, RB_RED); 181 __rb_rotate_set_parents(gparent, parent, root, RB_RED);
@@ -171,8 +196,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
171 tmp = parent->rb_left; 196 tmp = parent->rb_left;
172 if (node == tmp) { 197 if (node == tmp) {
173 /* Case 2 - right rotate at parent */ 198 /* Case 2 - right rotate at parent */
174 parent->rb_left = tmp = node->rb_right; 199 tmp = node->rb_right;
175 node->rb_right = parent; 200 WRITE_ONCE(parent->rb_left, tmp);
201 WRITE_ONCE(node->rb_right, parent);
176 if (tmp) 202 if (tmp)
177 rb_set_parent_color(tmp, parent, 203 rb_set_parent_color(tmp, parent,
178 RB_BLACK); 204 RB_BLACK);
@@ -183,8 +209,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
183 } 209 }
184 210
185 /* Case 3 - left rotate at gparent */ 211 /* Case 3 - left rotate at gparent */
186 gparent->rb_right = tmp; /* == parent->rb_left */ 212 WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */
187 parent->rb_left = gparent; 213 WRITE_ONCE(parent->rb_left, gparent);
188 if (tmp) 214 if (tmp)
189 rb_set_parent_color(tmp, gparent, RB_BLACK); 215 rb_set_parent_color(tmp, gparent, RB_BLACK);
190 __rb_rotate_set_parents(gparent, parent, root, RB_RED); 216 __rb_rotate_set_parents(gparent, parent, root, RB_RED);
@@ -224,8 +250,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
224 * / \ / \ 250 * / \ / \
225 * Sl Sr N Sl 251 * Sl Sr N Sl
226 */ 252 */
227 parent->rb_right = tmp1 = sibling->rb_left; 253 tmp1 = sibling->rb_left;
228 sibling->rb_left = parent; 254 WRITE_ONCE(parent->rb_right, tmp1);
255 WRITE_ONCE(sibling->rb_left, parent);
229 rb_set_parent_color(tmp1, parent, RB_BLACK); 256 rb_set_parent_color(tmp1, parent, RB_BLACK);
230 __rb_rotate_set_parents(parent, sibling, root, 257 __rb_rotate_set_parents(parent, sibling, root,
231 RB_RED); 258 RB_RED);
@@ -275,9 +302,10 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
275 * \ 302 * \
276 * Sr 303 * Sr
277 */ 304 */
278 sibling->rb_left = tmp1 = tmp2->rb_right; 305 tmp1 = tmp2->rb_right;
279 tmp2->rb_right = sibling; 306 WRITE_ONCE(sibling->rb_left, tmp1);
280 parent->rb_right = tmp2; 307 WRITE_ONCE(tmp2->rb_right, sibling);
308 WRITE_ONCE(parent->rb_right, tmp2);
281 if (tmp1) 309 if (tmp1)
282 rb_set_parent_color(tmp1, sibling, 310 rb_set_parent_color(tmp1, sibling,
283 RB_BLACK); 311 RB_BLACK);
@@ -297,8 +325,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
297 * / \ / \ 325 * / \ / \
298 * (sl) sr N (sl) 326 * (sl) sr N (sl)
299 */ 327 */
300 parent->rb_right = tmp2 = sibling->rb_left; 328 tmp2 = sibling->rb_left;
301 sibling->rb_left = parent; 329 WRITE_ONCE(parent->rb_right, tmp2);
330 WRITE_ONCE(sibling->rb_left, parent);
302 rb_set_parent_color(tmp1, sibling, RB_BLACK); 331 rb_set_parent_color(tmp1, sibling, RB_BLACK);
303 if (tmp2) 332 if (tmp2)
304 rb_set_parent(tmp2, parent); 333 rb_set_parent(tmp2, parent);
@@ -310,8 +339,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
310 sibling = parent->rb_left; 339 sibling = parent->rb_left;
311 if (rb_is_red(sibling)) { 340 if (rb_is_red(sibling)) {
312 /* Case 1 - right rotate at parent */ 341 /* Case 1 - right rotate at parent */
313 parent->rb_left = tmp1 = sibling->rb_right; 342 tmp1 = sibling->rb_right;
314 sibling->rb_right = parent; 343 WRITE_ONCE(parent->rb_left, tmp1);
344 WRITE_ONCE(sibling->rb_right, parent);
315 rb_set_parent_color(tmp1, parent, RB_BLACK); 345 rb_set_parent_color(tmp1, parent, RB_BLACK);
316 __rb_rotate_set_parents(parent, sibling, root, 346 __rb_rotate_set_parents(parent, sibling, root,
317 RB_RED); 347 RB_RED);
@@ -336,9 +366,10 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
336 break; 366 break;
337 } 367 }
338 /* Case 3 - right rotate at sibling */ 368 /* Case 3 - right rotate at sibling */
339 sibling->rb_right = tmp1 = tmp2->rb_left; 369 tmp1 = tmp2->rb_left;
340 tmp2->rb_left = sibling; 370 WRITE_ONCE(sibling->rb_right, tmp1);
341 parent->rb_left = tmp2; 371 WRITE_ONCE(tmp2->rb_left, sibling);
372 WRITE_ONCE(parent->rb_left, tmp2);
342 if (tmp1) 373 if (tmp1)
343 rb_set_parent_color(tmp1, sibling, 374 rb_set_parent_color(tmp1, sibling,
344 RB_BLACK); 375 RB_BLACK);
@@ -347,8 +378,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
347 sibling = tmp2; 378 sibling = tmp2;
348 } 379 }
349 /* Case 4 - left rotate at parent + color flips */ 380 /* Case 4 - left rotate at parent + color flips */
350 parent->rb_left = tmp2 = sibling->rb_right; 381 tmp2 = sibling->rb_right;
351 sibling->rb_right = parent; 382 WRITE_ONCE(parent->rb_left, tmp2);
383 WRITE_ONCE(sibling->rb_right, parent);
352 rb_set_parent_color(tmp1, sibling, RB_BLACK); 384 rb_set_parent_color(tmp1, sibling, RB_BLACK);
353 if (tmp2) 385 if (tmp2)
354 rb_set_parent(tmp2, parent); 386 rb_set_parent(tmp2, parent);
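Per the notes above, rotations now publish child pointers with WRITE_ONCE() so a lockless reader never observes a torn store or a compiler-reordered window. A minimal sketch of the idiom (publish_left is illustrative):

    #include <linux/compiler.h>
    #include <linux/rbtree.h>

    /* Writer side: one non-torn store the compiler may not split or
     * reorder against surrounding stores to the same location. */
    static inline void publish_left(struct rb_node *parent,
                                    struct rb_node *child)
    {
            WRITE_ONCE(parent->rb_left, child);
    }

    /* A lockless reader pairs this with READ_ONCE() when walking. */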
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 4898442b837f..cc0c69710dcf 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -14,6 +14,7 @@
14 * published by the Free Software Foundation. 14 * published by the Free Software Foundation.
15 */ 15 */
16 16
17#include <linux/atomic.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/log2.h> 20#include <linux/log2.h>
@@ -25,6 +26,7 @@
25#include <linux/random.h> 26#include <linux/random.h>
26#include <linux/rhashtable.h> 27#include <linux/rhashtable.h>
27#include <linux/err.h> 28#include <linux/err.h>
29#include <linux/export.h>
28 30
29#define HASH_DEFAULT_SIZE 64UL 31#define HASH_DEFAULT_SIZE 64UL
30#define HASH_MIN_SIZE 4U 32#define HASH_MIN_SIZE 4U
@@ -405,13 +407,18 @@ int rhashtable_insert_rehash(struct rhashtable *ht)
405 407
406 if (rht_grow_above_75(ht, tbl)) 408 if (rht_grow_above_75(ht, tbl))
407 size *= 2; 409 size *= 2;
408 /* More than two rehashes (not resizes) detected. */ 410 /* Do not schedule more than one rehash */
409 else if (WARN_ON(old_tbl != tbl && old_tbl->size == size)) 411 else if (old_tbl != tbl)
410 return -EBUSY; 412 return -EBUSY;
411 413
412 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); 414 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
413 if (new_tbl == NULL) 415 if (new_tbl == NULL) {
416 /* Schedule async resize/rehash to retry the allocation
417 * in non-atomic context.
418 */
419 schedule_work(&ht->run_work);
414 return -ENOMEM; 420 return -ENOMEM;
421 }
415 422
416 err = rhashtable_rehash_attach(ht, tbl, new_tbl); 423 err = rhashtable_rehash_attach(ht, tbl, new_tbl);
417 if (err) { 424 if (err) {
@@ -441,6 +448,10 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
441 if (key && rhashtable_lookup_fast(ht, key, ht->p)) 448 if (key && rhashtable_lookup_fast(ht, key, ht->p))
442 goto exit; 449 goto exit;
443 450
451 err = -E2BIG;
452 if (unlikely(rht_grow_above_max(ht, tbl)))
453 goto exit;
454
444 err = -EAGAIN; 455 err = -EAGAIN;
445 if (rhashtable_check_elasticity(ht, tbl, hash) || 456 if (rhashtable_check_elasticity(ht, tbl, hash) ||
446 rht_grow_above_100(ht, tbl)) 457 rht_grow_above_100(ht, tbl))
@@ -574,7 +585,6 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
574 struct bucket_table *tbl = iter->walker->tbl; 585 struct bucket_table *tbl = iter->walker->tbl;
575 struct rhashtable *ht = iter->ht; 586 struct rhashtable *ht = iter->ht;
576 struct rhash_head *p = iter->p; 587 struct rhash_head *p = iter->p;
577 void *obj = NULL;
578 588
579 if (p) { 589 if (p) {
580 p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot); 590 p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
@@ -594,13 +604,14 @@ next:
594 if (!rht_is_a_nulls(p)) { 604 if (!rht_is_a_nulls(p)) {
595 iter->skip++; 605 iter->skip++;
596 iter->p = p; 606 iter->p = p;
597 obj = rht_obj(ht, p); 607 return rht_obj(ht, p);
598 goto out;
599 } 608 }
600 609
601 iter->skip = 0; 610 iter->skip = 0;
602 } 611 }
603 612
613 iter->p = NULL;
614
604 /* Ensure we see any new tables. */ 615 /* Ensure we see any new tables. */
605 smp_rmb(); 616 smp_rmb();
606 617
@@ -611,11 +622,7 @@ next:
611 return ERR_PTR(-EAGAIN); 622 return ERR_PTR(-EAGAIN);
612 } 623 }
613 624
614 iter->p = NULL; 625 return NULL;
615
616out:
617
618 return obj;
619} 626}
620EXPORT_SYMBOL_GPL(rhashtable_walk_next); 627EXPORT_SYMBOL_GPL(rhashtable_walk_next);
621 628
@@ -733,6 +740,12 @@ int rhashtable_init(struct rhashtable *ht,
733 if (params->max_size) 740 if (params->max_size)
734 ht->p.max_size = rounddown_pow_of_two(params->max_size); 741 ht->p.max_size = rounddown_pow_of_two(params->max_size);
735 742
743 if (params->insecure_max_entries)
744 ht->p.insecure_max_entries =
745 rounddown_pow_of_two(params->insecure_max_entries);
746 else
747 ht->p.insecure_max_entries = ht->p.max_size * 2;
748
736 ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); 749 ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
737 750
738 /* The maximum (not average) chain length grows with the 751 /* The maximum (not average) chain length grows with the
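insecure_max_entries gives a table a hard entry cap (defaulting to 2 * max_size) so slow-path inserts fail with -E2BIG instead of growing without bound. A hedged params sketch (struct my_obj is illustrative):

    #include <linux/rhashtable.h>
    #include <linux/stddef.h>

    struct my_obj {
            u32 key;
            struct rhash_head node;
    };

    static const struct rhashtable_params my_params = {
            .key_len                = sizeof(u32),
            .key_offset             = offsetof(struct my_obj, key),
            .head_offset            = offsetof(struct my_obj, node),
            .max_size               = 1 << 16,
            .insecure_max_entries   = 1 << 17, /* hard cap, -E2BIG past it */
    };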
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index c9f2e8c6ccc9..d105a9f56878 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -56,6 +56,38 @@ int sg_nents(struct scatterlist *sg)
56} 56}
57EXPORT_SYMBOL(sg_nents); 57EXPORT_SYMBOL(sg_nents);
58 58
59/**
60 * sg_nents_for_len - return total count of entries in scatterlist
61 * needed to satisfy the supplied length
62 * @sg: The scatterlist
63 * @len: The total required length
64 *
65 * Description:
66 * Determines the number of entries in sg that are required to meet
67 * the supplied length, taking into account chaining as well
68 *
69 * Returns:
70 * the number of sg entries needed, negative error on failure
71 *
72 **/
73int sg_nents_for_len(struct scatterlist *sg, u64 len)
74{
75 int nents;
76 u64 total;
77
78 if (!len)
79 return 0;
80
81 for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
82 nents++;
83 total += sg->length;
84 if (total >= len)
85 return nents;
86 }
87
88 return -EINVAL;
89}
90EXPORT_SYMBOL(sg_nents_for_len);
59 91
60/** 92/**
61 * sg_last - return the last scatterlist entry in a list 93 * sg_last - return the last scatterlist entry in a list
@@ -618,9 +650,8 @@ EXPORT_SYMBOL(sg_miter_stop);
618 * Returns the number of copied bytes. 650 * Returns the number of copied bytes.
619 * 651 *
620 **/ 652 **/
621static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, 653size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
622 void *buf, size_t buflen, off_t skip, 654 size_t buflen, off_t skip, bool to_buffer)
623 bool to_buffer)
624{ 655{
625 unsigned int offset = 0; 656 unsigned int offset = 0;
626 struct sg_mapping_iter miter; 657 struct sg_mapping_iter miter;
@@ -657,6 +688,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
657 local_irq_restore(flags); 688 local_irq_restore(flags);
658 return offset; 689 return offset;
659} 690}
691EXPORT_SYMBOL(sg_copy_buffer);
660 692
661/** 693/**
662 * sg_copy_from_buffer - Copy from a linear buffer to an SG list 694 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
@@ -669,9 +701,9 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
669 * 701 *
670 **/ 702 **/
671size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, 703size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
672 void *buf, size_t buflen) 704 const void *buf, size_t buflen)
673{ 705{
674 return sg_copy_buffer(sgl, nents, buf, buflen, 0, false); 706 return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
675} 707}
676EXPORT_SYMBOL(sg_copy_from_buffer); 708EXPORT_SYMBOL(sg_copy_from_buffer);
677 709
@@ -697,16 +729,16 @@ EXPORT_SYMBOL(sg_copy_to_buffer);
697 * @sgl: The SG list 729 * @sgl: The SG list
698 * @nents: Number of SG entries 730 * @nents: Number of SG entries
699 * @buf: Where to copy from 731 * @buf: Where to copy from
700 * @skip: Number of bytes to skip before copying
701 * @buflen: The number of bytes to copy 732 * @buflen: The number of bytes to copy
733 * @skip: Number of bytes to skip before copying
702 * 734 *
703 * Returns the number of copied bytes. 735 * Returns the number of copied bytes.
704 * 736 *
705 **/ 737 **/
706size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, 738size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
707 void *buf, size_t buflen, off_t skip) 739 const void *buf, size_t buflen, off_t skip)
708{ 740{
709 return sg_copy_buffer(sgl, nents, buf, buflen, skip, false); 741 return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
710} 742}
711EXPORT_SYMBOL(sg_pcopy_from_buffer); 743EXPORT_SYMBOL(sg_pcopy_from_buffer);
712 744
@@ -715,8 +747,8 @@ EXPORT_SYMBOL(sg_pcopy_from_buffer);
715 * @sgl: The SG list 747 * @sgl: The SG list
716 * @nents: Number of SG entries 748 * @nents: Number of SG entries
717 * @buf: Where to copy to 749 * @buf: Where to copy to
718 * @skip: Number of bytes to skip before copying
719 * @buflen: The number of bytes to copy 750 * @buflen: The number of bytes to copy
751 * @skip: Number of bytes to skip before copying
720 * 752 *
721 * Returns the number of copied bytes. 753 * Returns the number of copied bytes.
722 * 754 *
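sg_nents_for_len() sizes a mapping to the bytes actually needed rather than the whole list. A hedged sketch:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int map_prefix(struct device *dev, struct scatterlist *sgl, u64 len)
    {
            int nents = sg_nents_for_len(sgl, len);

            if (nents < 0)
                    return nents;   /* list holds fewer than len bytes */
            return dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
    }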
diff --git a/lib/sort.c b/lib/sort.c
index 43c9fe73ae2e..fc20df42aa6f 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -8,6 +8,12 @@
8#include <linux/export.h> 8#include <linux/export.h>
9#include <linux/sort.h> 9#include <linux/sort.h>
10 10
11static int alignment_ok(const void *base, int align)
12{
13 return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
14 ((unsigned long)base & (align - 1)) == 0;
15}
16
11static void u32_swap(void *a, void *b, int size) 17static void u32_swap(void *a, void *b, int size)
12{ 18{
13 u32 t = *(u32 *)a; 19 u32 t = *(u32 *)a;
@@ -15,6 +21,13 @@ static void u32_swap(void *a, void *b, int size)
15 *(u32 *)b = t; 21 *(u32 *)b = t;
16} 22}
17 23
24static void u64_swap(void *a, void *b, int size)
25{
26 u64 t = *(u64 *)a;
27 *(u64 *)a = *(u64 *)b;
28 *(u64 *)b = t;
29}
30
18static void generic_swap(void *a, void *b, int size) 31static void generic_swap(void *a, void *b, int size)
19{ 32{
20 char t; 33 char t;
@@ -50,8 +63,14 @@ void sort(void *base, size_t num, size_t size,
50 /* pre-scale counters for performance */ 63 /* pre-scale counters for performance */
51 int i = (num/2 - 1) * size, n = num * size, c, r; 64 int i = (num/2 - 1) * size, n = num * size, c, r;
52 65
53 if (!swap_func) 66 if (!swap_func) {
54 swap_func = (size == 4 ? u32_swap : generic_swap); 67 if (size == 4 && alignment_ok(base, 4))
68 swap_func = u32_swap;
69 else if (size == 8 && alignment_ok(base, 8))
70 swap_func = u64_swap;
71 else
72 swap_func = generic_swap;
73 }
55 74
56 /* heapify */ 75 /* heapify */
57 for ( ; i >= 0; i -= size) { 76 for ( ; i >= 0; i -= size) {
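With alignment_ok() and u64_swap(), an 8-byte-element sort on aligned data avoids the byte-at-a-time generic_swap(). A hedged sketch:

    #include <linux/sort.h>
    #include <linux/types.h>

    static int cmp_u64(const void *a, const void *b)
    {
            u64 x = *(const u64 *)a, y = *(const u64 *)b;

            return x < y ? -1 : x > y;
    }

    static void sort_u64s(u64 *vals, size_t n)
    {
            /* NULL swap_func: sort() now picks u64_swap for aligned size 8 */
            sort(vals, n, sizeof(*vals), cmp_u64, NULL);
    }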
diff --git a/lib/string.c b/lib/string.c
index a5792019193c..13d1e84ddb80 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset);
607void memzero_explicit(void *s, size_t count) 607void memzero_explicit(void *s, size_t count)
608{ 608{
609 memset(s, 0, count); 609 memset(s, 0, count);
610 barrier(); 610 barrier_data(s);
611} 611}
612EXPORT_SYMBOL(memzero_explicit); 612EXPORT_SYMBOL(memzero_explicit);
613 613
@@ -849,3 +849,20 @@ void *memchr_inv(const void *start, int c, size_t bytes)
849 return check_bytes8(start, value, bytes % 8); 849 return check_bytes8(start, value, bytes % 8);
850} 850}
851EXPORT_SYMBOL(memchr_inv); 851EXPORT_SYMBOL(memchr_inv);
852
853/**
854 * strreplace - Replace all occurrences of character in string.
855 * @s: The string to operate on.
856 * @old: The character being replaced.
857 * @new: The character @old is replaced with.
858 *
859 * Returns pointer to the nul byte at the end of @s.
860 */
861char *strreplace(char *s, char old, char new)
862{
863 for (; *s; ++s)
864 if (*s == old)
865 *s = new;
866 return s;
867}
868EXPORT_SYMBOL(strreplace);
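strreplace() returns a pointer to the terminating NUL, which makes sanitize-then-measure patterns cheap. A hedged sketch:

    #include <linux/string.h>

    static size_t sanitize_name(char *name)
    {
            /* e.g. "usb/1-1:1.0" becomes "usb!1-1:1.0" */
            char *end = strreplace(name, '/', '!');

            return end - name;      /* strlen() for free */
    }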
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index a28df5206d95..3a5f2b366d84 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
57 return res + find_zero(data) + 1 - align; 57 return res + find_zero(data) + 1 - align;
58 } 58 }
59 res += sizeof(unsigned long); 59 res += sizeof(unsigned long);
60 if (unlikely(max < sizeof(unsigned long))) 60 /* We already handled 'unsigned long' bytes. Did we do it all? */
61 if (unlikely(max <= sizeof(unsigned long)))
61 break; 62 break;
62 max -= sizeof(unsigned long); 63 max -= sizeof(unsigned long);
63 if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) 64 if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
@@ -84,13 +85,21 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
84 * @str: The string to measure. 85 * @str: The string to measure.
85 * @count: Maximum count (including NUL character) 86 * @count: Maximum count (including NUL character)
86 * 87 *
87 * Context: User context only. This function may sleep. 88 * Context: User context only. This function may sleep if pagefaults are
89 * enabled.
88 * 90 *
89 * Get the size of a NUL-terminated string in user space. 91 * Get the size of a NUL-terminated string in user space.
90 * 92 *
91 * Returns the size of the string INCLUDING the terminating NUL. 93 * Returns the size of the string INCLUDING the terminating NUL.
92 * If the string is too long, returns 'count+1'. 94 * If the string is too long, returns a number larger than @count. The
95 * caller has to check the return value against "> count".
93 * On exception (or invalid count), returns 0. 96 * On exception (or invalid count), returns 0.
97 *
98 * NOTE! You should basically never use this function. There is
99 * almost never any valid case for using the length of a user space
100 * string, since the string can be changed at any time by other
101 * threads. Use "strncpy_from_user()" instead to get a stable copy
102 * of the string.
94 */ 103 */
95long strnlen_user(const char __user *str, long count) 104long strnlen_user(const char __user *str, long count)
96{ 105{
@@ -113,7 +122,8 @@ EXPORT_SYMBOL(strnlen_user);
113 * strlen_user: - Get the size of a user string INCLUDING final NUL. 122 * strlen_user: - Get the size of a user string INCLUDING final NUL.
114 * @str: The string to measure. 123 * @str: The string to measure.
115 * 124 *
116 * Context: User context only. This function may sleep. 125 * Context: User context only. This function may sleep if pagefaults are
126 * enabled.
117 * 127 *
118 * Get the size of a NUL-terminated string in user space. 128 * Get the size of a NUL-terminated string in user space.
119 * 129 *
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 4abda074ea45..76f29ecba8f4 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -29,10 +29,10 @@
29#include <linux/ctype.h> 29#include <linux/ctype.h>
30#include <linux/highmem.h> 30#include <linux/highmem.h>
31#include <linux/gfp.h> 31#include <linux/gfp.h>
32#include <linux/scatterlist.h>
32 33
33#include <asm/io.h> 34#include <asm/io.h>
34#include <asm/dma.h> 35#include <asm/dma.h>
35#include <asm/scatterlist.h>
36 36
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/bootmem.h> 38#include <linux/bootmem.h>
@@ -537,8 +537,9 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
537 * Allocates bounce buffer and returns its kernel virtual address. 537 * Allocates bounce buffer and returns its kernel virtual address.
538 */ 538 */
539 539
540phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size, 540static phys_addr_t
541 enum dma_data_direction dir) 541map_single(struct device *hwdev, phys_addr_t phys, size_t size,
542 enum dma_data_direction dir)
542{ 543{
543 dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start); 544 dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
544 545
@@ -655,7 +656,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
655 */ 656 */
656 phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE); 657 phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
657 if (paddr == SWIOTLB_MAP_ERROR) 658 if (paddr == SWIOTLB_MAP_ERROR)
658 return NULL; 659 goto err_warn;
659 660
660 ret = phys_to_virt(paddr); 661 ret = phys_to_virt(paddr);
661 dev_addr = phys_to_dma(hwdev, paddr); 662 dev_addr = phys_to_dma(hwdev, paddr);
@@ -669,7 +670,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
669 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 670 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
670 swiotlb_tbl_unmap_single(hwdev, paddr, 671 swiotlb_tbl_unmap_single(hwdev, paddr,
671 size, DMA_TO_DEVICE); 672 size, DMA_TO_DEVICE);
672 return NULL; 673 goto err_warn;
673 } 674 }
674 } 675 }
675 676
@@ -677,6 +678,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
677 memset(ret, 0, size); 678 memset(ret, 0, size);
678 679
679 return ret; 680 return ret;
681
682err_warn:
683 pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
684 dev_name(hwdev), size);
685 dump_stack();
686
687 return NULL;
680} 688}
681EXPORT_SYMBOL(swiotlb_alloc_coherent); 689EXPORT_SYMBOL(swiotlb_alloc_coherent);
682 690
diff --git a/lib/test-hexdump.c b/lib/test-hexdump.c
index c227cc43ec0a..5241df36eedf 100644
--- a/lib/test-hexdump.c
+++ b/lib/test-hexdump.c
@@ -25,19 +25,19 @@ static const char * const test_data_1_le[] __initconst = {
25 "4c", "d1", "19", "99", "43", "b1", "af", "0c", 25 "4c", "d1", "19", "99", "43", "b1", "af", "0c",
26}; 26};
27 27
28static const char *test_data_2_le[] __initdata = { 28static const char * const test_data_2_le[] __initconst = {
29 "32be", "7bdb", "180a", "b293", 29 "32be", "7bdb", "180a", "b293",
30 "ba70", "24c4", "837d", "9b34", 30 "ba70", "24c4", "837d", "9b34",
31 "9ca6", "ad31", "0f9c", "e9ac", 31 "9ca6", "ad31", "0f9c", "e9ac",
32 "d14c", "9919", "b143", "0caf", 32 "d14c", "9919", "b143", "0caf",
33}; 33};
34 34
35static const char *test_data_4_le[] __initdata = { 35static const char * const test_data_4_le[] __initconst = {
36 "7bdb32be", "b293180a", "24c4ba70", "9b34837d", 36 "7bdb32be", "b293180a", "24c4ba70", "9b34837d",
37 "ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143", 37 "ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143",
38}; 38};
39 39
40static const char *test_data_8_le[] __initdata = { 40static const char * const test_data_8_le[] __initconst = {
41 "b293180a7bdb32be", "9b34837d24c4ba70", 41 "b293180a7bdb32be", "9b34837d24c4ba70",
42 "e9ac0f9cad319ca6", "0cafb1439919d14c", 42 "e9ac0f9cad319ca6", "0cafb1439919d14c",
43}; 43};
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 80d78c51f65f..7f58c735d745 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -21,6 +21,7 @@
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/if_vlan.h> 23#include <linux/if_vlan.h>
24#include <linux/random.h>
24 25
25/* General test specific settings */ 26/* General test specific settings */
26#define MAX_SUBTESTS 3 27#define MAX_SUBTESTS 3
@@ -67,6 +68,10 @@ struct bpf_test {
67 union { 68 union {
68 struct sock_filter insns[MAX_INSNS]; 69 struct sock_filter insns[MAX_INSNS];
69 struct bpf_insn insns_int[MAX_INSNS]; 70 struct bpf_insn insns_int[MAX_INSNS];
71 struct {
72 void *insns;
73 unsigned int len;
74 } ptr;
70 } u; 75 } u;
71 __u8 aux; 76 __u8 aux;
72 __u8 data[MAX_DATA]; 77 __u8 data[MAX_DATA];
@@ -74,8 +79,282 @@ struct bpf_test {
74 int data_size; 79 int data_size;
75 __u32 result; 80 __u32 result;
76 } test[MAX_SUBTESTS]; 81 } test[MAX_SUBTESTS];
82 int (*fill_helper)(struct bpf_test *self);
77}; 83};
78 84
85/* Large test cases need separate allocation and fill handler. */
86
87static int bpf_fill_maxinsns1(struct bpf_test *self)
88{
89 unsigned int len = BPF_MAXINSNS;
90 struct sock_filter *insn;
91 __u32 k = ~0;
92 int i;
93
94 insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
95 if (!insn)
96 return -ENOMEM;
97
98 for (i = 0; i < len; i++, k--)
99 insn[i] = __BPF_STMT(BPF_RET | BPF_K, k);
100
101 self->u.ptr.insns = insn;
102 self->u.ptr.len = len;
103
104 return 0;
105}
106
107static int bpf_fill_maxinsns2(struct bpf_test *self)
108{
109 unsigned int len = BPF_MAXINSNS;
110 struct sock_filter *insn;
111 int i;
112
113 insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
114 if (!insn)
115 return -ENOMEM;
116
117 for (i = 0; i < len; i++)
118 insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
119
120 self->u.ptr.insns = insn;
121 self->u.ptr.len = len;
122
123 return 0;
124}
125
126static int bpf_fill_maxinsns3(struct bpf_test *self)
127{
128 unsigned int len = BPF_MAXINSNS;
129 struct sock_filter *insn;
130 struct rnd_state rnd;
131 int i;
132
133 insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
134 if (!insn)
135 return -ENOMEM;
136
137 prandom_seed_state(&rnd, 3141592653589793238ULL);
138
139 for (i = 0; i < len - 1; i++) {
140 __u32 k = prandom_u32_state(&rnd);
141
142 insn[i] = __BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, k);
143 }
144
145 insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
146
147 self->u.ptr.insns = insn;
148 self->u.ptr.len = len;
149
150 return 0;
151}
152
153static int bpf_fill_maxinsns4(struct bpf_test *self)
154{
155 unsigned int len = BPF_MAXINSNS + 1;
156 struct sock_filter *insn;
157 int i;
158
159 insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
160 if (!insn)
161 return -ENOMEM;
162
163 for (i = 0; i < len; i++)
164 insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
165
166 self->u.ptr.insns = insn;
167 self->u.ptr.len = len;
168
169 return 0;
170}
171
172static int bpf_fill_maxinsns5(struct bpf_test *self)
173{
174 unsigned int len = BPF_MAXINSNS;
175 struct sock_filter *insn;
176 int i;
177
178 insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
179 if (!insn)
180 return -ENOMEM;
181
182 insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);
183
184 for (i = 1; i < len - 1; i++)
185 insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
186
187 insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);
188
189 self->u.ptr.insns = insn;
190 self->u.ptr.len = len;
191
192 return 0;
193}
194
195static int bpf_fill_maxinsns6(struct bpf_test *self)
196{
197 unsigned int len = BPF_MAXINSNS;
198 struct sock_filter *insn;
199 int i;
200
201 insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
202 if (!insn)
203 return -ENOMEM;
204
205 for (i = 0; i < len - 1; i++)
206 insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
207 SKF_AD_VLAN_TAG_PRESENT);
208
209 insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
210
211 self->u.ptr.insns = insn;
212 self->u.ptr.len = len;
213
214 return 0;
215}
216
217static int bpf_fill_maxinsns7(struct bpf_test *self)
218{
219 unsigned int len = BPF_MAXINSNS;
220 struct sock_filter *insn;
221 int i;
222
223 insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
224 if (!insn)
225 return -ENOMEM;
226
227 for (i = 0; i < len - 4; i++)
228 insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
229 SKF_AD_CPU);
230
231 insn[len - 4] = __BPF_STMT(BPF_MISC | BPF_TAX, 0);
232 insn[len - 3] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
233 SKF_AD_CPU);
234 insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0);
235 insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
236
237 self->u.ptr.insns = insn;
238 self->u.ptr.len = len;
239
240 return 0;
241}
242
243static int bpf_fill_maxinsns8(struct bpf_test *self)
244{
245 unsigned int len = BPF_MAXINSNS;
246 struct sock_filter *insn;
247 int i, jmp_off = len - 3;
248
249 insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
250 if (!insn)
251 return -ENOMEM;
252
253 insn[0] = __BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff);
254
255 for (i = 1; i < len - 1; i++)
256 insn[i] = __BPF_JUMP(BPF_JMP | BPF_JGT, 0xffffffff, jmp_off--, 0);
257
258 insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
259
260 self->u.ptr.insns = insn;
261 self->u.ptr.len = len;
262
263 return 0;
264}
265
266static int bpf_fill_maxinsns9(struct bpf_test *self)
267{
268 unsigned int len = BPF_MAXINSNS;
269 struct bpf_insn *insn;
270 int i;
271
272 insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
273 if (!insn)
274 return -ENOMEM;
275
276 insn[0] = BPF_JMP_IMM(BPF_JA, 0, 0, len - 2);
277 insn[1] = BPF_ALU32_IMM(BPF_MOV, R0, 0xcbababab);
278 insn[2] = BPF_EXIT_INSN();
279
280 for (i = 3; i < len - 2; i++)
281 insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xfefefefe);
282
283 insn[len - 2] = BPF_EXIT_INSN();
284 insn[len - 1] = BPF_JMP_IMM(BPF_JA, 0, 0, -(len - 1));
285
286 self->u.ptr.insns = insn;
287 self->u.ptr.len = len;
288
289 return 0;
290}
291
292static int bpf_fill_maxinsns10(struct bpf_test *self)
293{
294 unsigned int len = BPF_MAXINSNS, hlen = len - 2;
295 struct bpf_insn *insn;
296 int i;
297
298 insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
299 if (!insn)
300 return -ENOMEM;
301
302 for (i = 0; i < hlen / 2; i++)
303 insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 2 - 2 * i);
304 for (i = hlen - 1; i > hlen / 2; i--)
305 insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 1 - 2 * i);
306
307 insn[hlen / 2] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen / 2 - 1);
308 insn[hlen] = BPF_ALU32_IMM(BPF_MOV, R0, 0xabababac);
309 insn[hlen + 1] = BPF_EXIT_INSN();
310
311 self->u.ptr.insns = insn;
312 self->u.ptr.len = len;
313
314 return 0;
315}
316
317static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
318 unsigned int plen)
319{
320 struct sock_filter *insn;
321 unsigned int rlen;
322 int i, j;
323
324 insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
325 if (!insn)
326 return -ENOMEM;
327
328 rlen = (len % plen) - 1;
329
330 for (i = 0; i + plen < len; i += plen)
331 for (j = 0; j < plen; j++)
332 insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA,
333 plen - 1 - j, 0, 0);
334 for (j = 0; j < rlen; j++)
335 insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, rlen - 1 - j,
336 0, 0);
337
338 insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xababcbac);
339
340 self->u.ptr.insns = insn;
341 self->u.ptr.len = len;
342
343 return 0;
344}
345
346static int bpf_fill_maxinsns11(struct bpf_test *self)
347{
348 /* Hits 70 passes on x86_64, so cannot get JITed there. */
349 return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
350}
351
352static int bpf_fill_ja(struct bpf_test *self)
353{
354 /* Hits exactly 11 passes on x86_64 JIT. */
355 return __bpf_fill_ja(self, 12, 9);
356}
357
79static struct bpf_test tests[] = { 358static struct bpf_test tests[] = {
80 { 359 {
81 "TAX", 360 "TAX",
@@ -1755,7 +2034,8 @@ static struct bpf_test tests[] = {
1755 BPF_EXIT_INSN(), 2034 BPF_EXIT_INSN(),
1756 BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1), 2035 BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
1757 BPF_EXIT_INSN(), 2036 BPF_EXIT_INSN(),
1758 BPF_ALU64_IMM(BPF_MOV, R0, 1), 2037 BPF_LD_IMM64(R0, 0x1ffffffffLL),
2038 BPF_ALU64_IMM(BPF_RSH, R0, 32), /* R0 = 1 */
1759 BPF_EXIT_INSN(), 2039 BPF_EXIT_INSN(),
1760 }, 2040 },
1761 INTERNAL, 2041 INTERNAL,
@@ -1805,6 +2085,2313 @@ static struct bpf_test tests[] = {
1805 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6}, 2085 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
1806 { { 38, 256 } } 2086 { { 38, 256 } }
1807 }, 2087 },
2088 /* BPF_ALU | BPF_MOV | BPF_X */
2089 {
2090 "ALU_MOV_X: dst = 2",
2091 .u.insns_int = {
2092 BPF_ALU32_IMM(BPF_MOV, R1, 2),
2093 BPF_ALU32_REG(BPF_MOV, R0, R1),
2094 BPF_EXIT_INSN(),
2095 },
2096 INTERNAL,
2097 { },
2098 { { 0, 2 } },
2099 },
2100 {
2101 "ALU_MOV_X: dst = 4294967295",
2102 .u.insns_int = {
2103 BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
2104 BPF_ALU32_REG(BPF_MOV, R0, R1),
2105 BPF_EXIT_INSN(),
2106 },
2107 INTERNAL,
2108 { },
2109 { { 0, 4294967295U } },
2110 },
2111 {
2112 "ALU64_MOV_X: dst = 2",
2113 .u.insns_int = {
2114 BPF_ALU32_IMM(BPF_MOV, R1, 2),
2115 BPF_ALU64_REG(BPF_MOV, R0, R1),
2116 BPF_EXIT_INSN(),
2117 },
2118 INTERNAL,
2119 { },
2120 { { 0, 2 } },
2121 },
2122 {
2123 "ALU64_MOV_X: dst = 4294967295",
2124 .u.insns_int = {
2125 BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
2126 BPF_ALU64_REG(BPF_MOV, R0, R1),
2127 BPF_EXIT_INSN(),
2128 },
2129 INTERNAL,
2130 { },
2131 { { 0, 4294967295U } },
2132 },
2133 /* BPF_ALU | BPF_MOV | BPF_K */
2134 {
2135 "ALU_MOV_K: dst = 2",
2136 .u.insns_int = {
2137 BPF_ALU32_IMM(BPF_MOV, R0, 2),
2138 BPF_EXIT_INSN(),
2139 },
2140 INTERNAL,
2141 { },
2142 { { 0, 2 } },
2143 },
2144 {
2145 "ALU_MOV_K: dst = 4294967295",
2146 .u.insns_int = {
2147 BPF_ALU32_IMM(BPF_MOV, R0, 4294967295U),
2148 BPF_EXIT_INSN(),
2149 },
2150 INTERNAL,
2151 { },
2152 { { 0, 4294967295U } },
2153 },
2154 {
2155 "ALU_MOV_K: 0x0000ffffffff0000 = 0x00000000ffffffff",
2156 .u.insns_int = {
2157 BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
2158 BPF_LD_IMM64(R3, 0x00000000ffffffffLL),
2159 BPF_ALU32_IMM(BPF_MOV, R2, 0xffffffff),
2160 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2161 BPF_MOV32_IMM(R0, 2),
2162 BPF_EXIT_INSN(),
2163 BPF_MOV32_IMM(R0, 1),
2164 BPF_EXIT_INSN(),
2165 },
2166 INTERNAL,
2167 { },
2168 { { 0, 0x1 } },
2169 },
2170 {
2171 "ALU64_MOV_K: dst = 2",
2172 .u.insns_int = {
2173 BPF_ALU64_IMM(BPF_MOV, R0, 2),
2174 BPF_EXIT_INSN(),
2175 },
2176 INTERNAL,
2177 { },
2178 { { 0, 2 } },
2179 },
2180 {
2181 "ALU64_MOV_K: dst = 2147483647",
2182 .u.insns_int = {
2183 BPF_ALU64_IMM(BPF_MOV, R0, 2147483647),
2184 BPF_EXIT_INSN(),
2185 },
2186 INTERNAL,
2187 { },
2188 { { 0, 2147483647 } },
2189 },
2190 {
2191 "ALU64_OR_K: dst = 0x0",
2192 .u.insns_int = {
2193 BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
2194 BPF_LD_IMM64(R3, 0x0),
2195 BPF_ALU64_IMM(BPF_MOV, R2, 0x0),
2196 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2197 BPF_MOV32_IMM(R0, 2),
2198 BPF_EXIT_INSN(),
2199 BPF_MOV32_IMM(R0, 1),
2200 BPF_EXIT_INSN(),
2201 },
2202 INTERNAL,
2203 { },
2204 { { 0, 0x1 } },
2205 },
2206 {
2207 "ALU64_MOV_K: dst = -1",
2208 .u.insns_int = {
2209 BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
2210 BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
2211 BPF_ALU64_IMM(BPF_MOV, R2, 0xffffffff),
2212 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2213 BPF_MOV32_IMM(R0, 2),
2214 BPF_EXIT_INSN(),
2215 BPF_MOV32_IMM(R0, 1),
2216 BPF_EXIT_INSN(),
2217 },
2218 INTERNAL,
2219 { },
2220 { { 0, 0x1 } },
2221 },
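	/* The four MOV groups above pin down eBPF's sub-register rules:
	 * BPF_ALU32 ops truncate and zero-extend their result into the
	 * 64-bit register (mov32 dst, 0xffffffff leaves
	 * 0x00000000ffffffff), while BPF_ALU64 ops sign-extend the
	 * 32-bit immediate (mov64 dst, 0xffffffff leaves
	 * 0xffffffffffffffff). The ALU groups below rely on these same
	 * two rules for their 0xffffffff corner cases.
	 */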
2222 /* BPF_ALU | BPF_ADD | BPF_X */
2223 {
2224 "ALU_ADD_X: 1 + 2 = 3",
2225 .u.insns_int = {
2226 BPF_LD_IMM64(R0, 1),
2227 BPF_ALU32_IMM(BPF_MOV, R1, 2),
2228 BPF_ALU32_REG(BPF_ADD, R0, R1),
2229 BPF_EXIT_INSN(),
2230 },
2231 INTERNAL,
2232 { },
2233 { { 0, 3 } },
2234 },
2235 {
2236 "ALU_ADD_X: 1 + 4294967294 = 4294967295",
2237 .u.insns_int = {
2238 BPF_LD_IMM64(R0, 1),
2239 BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
2240 BPF_ALU32_REG(BPF_ADD, R0, R1),
2241 BPF_EXIT_INSN(),
2242 },
2243 INTERNAL,
2244 { },
2245 { { 0, 4294967295U } },
2246 },
2247 {
2248 "ALU64_ADD_X: 1 + 2 = 3",
2249 .u.insns_int = {
2250 BPF_LD_IMM64(R0, 1),
2251 BPF_ALU32_IMM(BPF_MOV, R1, 2),
2252 BPF_ALU64_REG(BPF_ADD, R0, R1),
2253 BPF_EXIT_INSN(),
2254 },
2255 INTERNAL,
2256 { },
2257 { { 0, 3 } },
2258 },
2259 {
2260 "ALU64_ADD_X: 1 + 4294967294 = 4294967295",
2261 .u.insns_int = {
2262 BPF_LD_IMM64(R0, 1),
2263 BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
2264 BPF_ALU64_REG(BPF_ADD, R0, R1),
2265 BPF_EXIT_INSN(),
2266 },
2267 INTERNAL,
2268 { },
2269 { { 0, 4294967295U } },
2270 },
2271 /* BPF_ALU | BPF_ADD | BPF_K */
2272 {
2273 "ALU_ADD_K: 1 + 2 = 3",
2274 .u.insns_int = {
2275 BPF_LD_IMM64(R0, 1),
2276 BPF_ALU32_IMM(BPF_ADD, R0, 2),
2277 BPF_EXIT_INSN(),
2278 },
2279 INTERNAL,
2280 { },
2281 { { 0, 3 } },
2282 },
2283 {
2284 "ALU_ADD_K: 3 + 0 = 3",
2285 .u.insns_int = {
2286 BPF_LD_IMM64(R0, 3),
2287 BPF_ALU32_IMM(BPF_ADD, R0, 0),
2288 BPF_EXIT_INSN(),
2289 },
2290 INTERNAL,
2291 { },
2292 { { 0, 3 } },
2293 },
2294 {
2295 "ALU_ADD_K: 1 + 4294967294 = 4294967295",
2296 .u.insns_int = {
2297 BPF_LD_IMM64(R0, 1),
2298 BPF_ALU32_IMM(BPF_ADD, R0, 4294967294U),
2299 BPF_EXIT_INSN(),
2300 },
2301 INTERNAL,
2302 { },
2303 { { 0, 4294967295U } },
2304 },
2305 {
2306 "ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
2307 .u.insns_int = {
2308 BPF_LD_IMM64(R2, 0x0),
2309 BPF_LD_IMM64(R3, 0x00000000ffffffff),
2310 BPF_ALU32_IMM(BPF_ADD, R2, 0xffffffff),
2311 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2312 BPF_MOV32_IMM(R0, 2),
2313 BPF_EXIT_INSN(),
2314 BPF_MOV32_IMM(R0, 1),
2315 BPF_EXIT_INSN(),
2316 },
2317 INTERNAL,
2318 { },
2319 { { 0, 0x1 } },
2320 },
2321 {
2322 "ALU64_ADD_K: 1 + 2 = 3",
2323 .u.insns_int = {
2324 BPF_LD_IMM64(R0, 1),
2325 BPF_ALU64_IMM(BPF_ADD, R0, 2),
2326 BPF_EXIT_INSN(),
2327 },
2328 INTERNAL,
2329 { },
2330 { { 0, 3 } },
2331 },
2332 {
2333 "ALU64_ADD_K: 3 + 0 = 3",
2334 .u.insns_int = {
2335 BPF_LD_IMM64(R0, 3),
2336 BPF_ALU64_IMM(BPF_ADD, R0, 0),
2337 BPF_EXIT_INSN(),
2338 },
2339 INTERNAL,
2340 { },
2341 { { 0, 3 } },
2342 },
2343 {
2344 "ALU64_ADD_K: 1 + 2147483646 = 2147483647",
2345 .u.insns_int = {
2346 BPF_LD_IMM64(R0, 1),
2347 BPF_ALU64_IMM(BPF_ADD, R0, 2147483646),
2348 BPF_EXIT_INSN(),
2349 },
2350 INTERNAL,
2351 { },
2352 { { 0, 2147483647 } },
2353 },
2354 {
2355 "ALU64_ADD_K: 2147483646 + -2147483647 = -1",
2356 .u.insns_int = {
2357 BPF_LD_IMM64(R0, 2147483646),
2358 BPF_ALU64_IMM(BPF_ADD, R0, -2147483647),
2359 BPF_EXIT_INSN(),
2360 },
2361 INTERNAL,
2362 { },
2363 { { 0, -1 } },
2364 },
2365 {
2366 "ALU64_ADD_K: 1 + 0 = 1",
2367 .u.insns_int = {
2368 BPF_LD_IMM64(R2, 0x1),
2369 BPF_LD_IMM64(R3, 0x1),
2370 BPF_ALU64_IMM(BPF_ADD, R2, 0x0),
2371 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2372 BPF_MOV32_IMM(R0, 2),
2373 BPF_EXIT_INSN(),
2374 BPF_MOV32_IMM(R0, 1),
2375 BPF_EXIT_INSN(),
2376 },
2377 INTERNAL,
2378 { },
2379 { { 0, 0x1 } },
2380 },
2381 {
2382 "ALU64_ADD_K: 0 + (-1) = 0xffffffffffffffff",
2383 .u.insns_int = {
2384 BPF_LD_IMM64(R2, 0x0),
2385 BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
2386 BPF_ALU64_IMM(BPF_ADD, R2, 0xffffffff),
2387 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2388 BPF_MOV32_IMM(R0, 2),
2389 BPF_EXIT_INSN(),
2390 BPF_MOV32_IMM(R0, 1),
2391 BPF_EXIT_INSN(),
2392 },
2393 INTERNAL,
2394 { },
2395 { { 0, 0x1 } },
2396 },
2397 /* BPF_ALU | BPF_SUB | BPF_X */
2398 {
2399 "ALU_SUB_X: 3 - 1 = 2",
2400 .u.insns_int = {
2401 BPF_LD_IMM64(R0, 3),
2402 BPF_ALU32_IMM(BPF_MOV, R1, 1),
2403 BPF_ALU32_REG(BPF_SUB, R0, R1),
2404 BPF_EXIT_INSN(),
2405 },
2406 INTERNAL,
2407 { },
2408 { { 0, 2 } },
2409 },
2410 {
2411 "ALU_SUB_X: 4294967295 - 4294967294 = 1",
2412 .u.insns_int = {
2413 BPF_LD_IMM64(R0, 4294967295U),
2414 BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
2415 BPF_ALU32_REG(BPF_SUB, R0, R1),
2416 BPF_EXIT_INSN(),
2417 },
2418 INTERNAL,
2419 { },
2420 { { 0, 1 } },
2421 },
2422 {
2423 "ALU64_SUB_X: 3 - 1 = 2",
2424 .u.insns_int = {
2425 BPF_LD_IMM64(R0, 3),
2426 BPF_ALU32_IMM(BPF_MOV, R1, 1),
2427 BPF_ALU64_REG(BPF_SUB, R0, R1),
2428 BPF_EXIT_INSN(),
2429 },
2430 INTERNAL,
2431 { },
2432 { { 0, 2 } },
2433 },
2434 {
2435 "ALU64_SUB_X: 4294967295 - 4294967294 = 1",
2436 .u.insns_int = {
2437 BPF_LD_IMM64(R0, 4294967295U),
2438 BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
2439 BPF_ALU64_REG(BPF_SUB, R0, R1),
2440 BPF_EXIT_INSN(),
2441 },
2442 INTERNAL,
2443 { },
2444 { { 0, 1 } },
2445 },
2446 /* BPF_ALU | BPF_SUB | BPF_K */
2447 {
2448 "ALU_SUB_K: 3 - 1 = 2",
2449 .u.insns_int = {
2450 BPF_LD_IMM64(R0, 3),
2451 BPF_ALU32_IMM(BPF_SUB, R0, 1),
2452 BPF_EXIT_INSN(),
2453 },
2454 INTERNAL,
2455 { },
2456 { { 0, 2 } },
2457 },
2458 {
2459 "ALU_SUB_K: 3 - 0 = 3",
2460 .u.insns_int = {
2461 BPF_LD_IMM64(R0, 3),
2462 BPF_ALU32_IMM(BPF_SUB, R0, 0),
2463 BPF_EXIT_INSN(),
2464 },
2465 INTERNAL,
2466 { },
2467 { { 0, 3 } },
2468 },
2469 {
2470 "ALU_SUB_K: 4294967295 - 4294967294 = 1",
2471 .u.insns_int = {
2472 BPF_LD_IMM64(R0, 4294967295U),
2473 BPF_ALU32_IMM(BPF_SUB, R0, 4294967294U),
2474 BPF_EXIT_INSN(),
2475 },
2476 INTERNAL,
2477 { },
2478 { { 0, 1 } },
2479 },
2480 {
2481 "ALU64_SUB_K: 3 - 1 = 2",
2482 .u.insns_int = {
2483 BPF_LD_IMM64(R0, 3),
2484 BPF_ALU64_IMM(BPF_SUB, R0, 1),
2485 BPF_EXIT_INSN(),
2486 },
2487 INTERNAL,
2488 { },
2489 { { 0, 2 } },
2490 },
2491 {
2492 "ALU64_SUB_K: 3 - 0 = 3",
2493 .u.insns_int = {
2494 BPF_LD_IMM64(R0, 3),
2495 BPF_ALU64_IMM(BPF_SUB, R0, 0),
2496 BPF_EXIT_INSN(),
2497 },
2498 INTERNAL,
2499 { },
2500 { { 0, 3 } },
2501 },
2502 {
2503 "ALU64_SUB_K: 4294967294 - 4294967295 = -1",
2504 .u.insns_int = {
2505 BPF_LD_IMM64(R0, 4294967294U),
2506 BPF_ALU64_IMM(BPF_SUB, R0, 4294967295U),
2507 BPF_EXIT_INSN(),
2508 },
2509 INTERNAL,
2510 { },
2511 { { 0, -1 } },
2512 },
2513 {
2514 "ALU64_ADD_K: 2147483646 - 2147483647 = -1",
2515 .u.insns_int = {
2516 BPF_LD_IMM64(R0, 2147483646),
2517 BPF_ALU64_IMM(BPF_SUB, R0, 2147483647),
2518 BPF_EXIT_INSN(),
2519 },
2520 INTERNAL,
2521 { },
2522 { { 0, -1 } },
2523 },
2524 /* BPF_ALU | BPF_MUL | BPF_X */
2525 {
2526 "ALU_MUL_X: 2 * 3 = 6",
2527 .u.insns_int = {
2528 BPF_LD_IMM64(R0, 2),
2529 BPF_ALU32_IMM(BPF_MOV, R1, 3),
2530 BPF_ALU32_REG(BPF_MUL, R0, R1),
2531 BPF_EXIT_INSN(),
2532 },
2533 INTERNAL,
2534 { },
2535 { { 0, 6 } },
2536 },
2537 {
2538 "ALU_MUL_X: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
2539 .u.insns_int = {
2540 BPF_LD_IMM64(R0, 2),
2541 BPF_ALU32_IMM(BPF_MOV, R1, 0x7FFFFFF8),
2542 BPF_ALU32_REG(BPF_MUL, R0, R1),
2543 BPF_EXIT_INSN(),
2544 },
2545 INTERNAL,
2546 { },
2547 { { 0, 0xFFFFFFF0 } },
2548 },
2549 {
2550 "ALU_MUL_X: -1 * -1 = 1",
2551 .u.insns_int = {
2552 BPF_LD_IMM64(R0, -1),
2553 BPF_ALU32_IMM(BPF_MOV, R1, -1),
2554 BPF_ALU32_REG(BPF_MUL, R0, R1),
2555 BPF_EXIT_INSN(),
2556 },
2557 INTERNAL,
2558 { },
2559 { { 0, 1 } },
2560 },
2561 {
2562 "ALU64_MUL_X: 2 * 3 = 6",
2563 .u.insns_int = {
2564 BPF_LD_IMM64(R0, 2),
2565 BPF_ALU32_IMM(BPF_MOV, R1, 3),
2566 BPF_ALU64_REG(BPF_MUL, R0, R1),
2567 BPF_EXIT_INSN(),
2568 },
2569 INTERNAL,
2570 { },
2571 { { 0, 6 } },
2572 },
2573 {
2574 "ALU64_MUL_X: 1 * 2147483647 = 2147483647",
2575 .u.insns_int = {
2576 BPF_LD_IMM64(R0, 1),
2577 BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
2578 BPF_ALU64_REG(BPF_MUL, R0, R1),
2579 BPF_EXIT_INSN(),
2580 },
2581 INTERNAL,
2582 { },
2583 { { 0, 2147483647 } },
2584 },
2585 /* BPF_ALU | BPF_MUL | BPF_K */
2586 {
2587 "ALU_MUL_K: 2 * 3 = 6",
2588 .u.insns_int = {
2589 BPF_LD_IMM64(R0, 2),
2590 BPF_ALU32_IMM(BPF_MUL, R0, 3),
2591 BPF_EXIT_INSN(),
2592 },
2593 INTERNAL,
2594 { },
2595 { { 0, 6 } },
2596 },
2597 {
2598 "ALU_MUL_K: 3 * 1 = 3",
2599 .u.insns_int = {
2600 BPF_LD_IMM64(R0, 3),
2601 BPF_ALU32_IMM(BPF_MUL, R0, 1),
2602 BPF_EXIT_INSN(),
2603 },
2604 INTERNAL,
2605 { },
2606 { { 0, 3 } },
2607 },
2608 {
2609 "ALU_MUL_K: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
2610 .u.insns_int = {
2611 BPF_LD_IMM64(R0, 2),
2612 BPF_ALU32_IMM(BPF_MUL, R0, 0x7FFFFFF8),
2613 BPF_EXIT_INSN(),
2614 },
2615 INTERNAL,
2616 { },
2617 { { 0, 0xFFFFFFF0 } },
2618 },
2619 {
2620 "ALU_MUL_K: 1 * (-1) = 0x00000000ffffffff",
2621 .u.insns_int = {
2622 BPF_LD_IMM64(R2, 0x1),
2623 BPF_LD_IMM64(R3, 0x00000000ffffffff),
2624 BPF_ALU32_IMM(BPF_MUL, R2, 0xffffffff),
2625 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2626 BPF_MOV32_IMM(R0, 2),
2627 BPF_EXIT_INSN(),
2628 BPF_MOV32_IMM(R0, 1),
2629 BPF_EXIT_INSN(),
2630 },
2631 INTERNAL,
2632 { },
2633 { { 0, 0x1 } },
2634 },
2635 {
2636 "ALU64_MUL_K: 2 * 3 = 6",
2637 .u.insns_int = {
2638 BPF_LD_IMM64(R0, 2),
2639 BPF_ALU64_IMM(BPF_MUL, R0, 3),
2640 BPF_EXIT_INSN(),
2641 },
2642 INTERNAL,
2643 { },
2644 { { 0, 6 } },
2645 },
2646 {
2647 "ALU64_MUL_K: 3 * 1 = 3",
2648 .u.insns_int = {
2649 BPF_LD_IMM64(R0, 3),
2650 BPF_ALU64_IMM(BPF_MUL, R0, 1),
2651 BPF_EXIT_INSN(),
2652 },
2653 INTERNAL,
2654 { },
2655 { { 0, 3 } },
2656 },
2657 {
2658 "ALU64_MUL_K: 1 * 2147483647 = 2147483647",
2659 .u.insns_int = {
2660 BPF_LD_IMM64(R0, 1),
2661 BPF_ALU64_IMM(BPF_MUL, R0, 2147483647),
2662 BPF_EXIT_INSN(),
2663 },
2664 INTERNAL,
2665 { },
2666 { { 0, 2147483647 } },
2667 },
2668 {
2669 "ALU64_MUL_K: 1 * -2147483647 = -2147483647",
2670 .u.insns_int = {
2671 BPF_LD_IMM64(R0, 1),
2672 BPF_ALU64_IMM(BPF_MUL, R0, -2147483647),
2673 BPF_EXIT_INSN(),
2674 },
2675 INTERNAL,
2676 { },
2677 { { 0, -2147483647 } },
2678 },
2679 {
2680 "ALU64_MUL_K: 1 * (-1) = 0xffffffffffffffff",
2681 .u.insns_int = {
2682 BPF_LD_IMM64(R2, 0x1),
2683 BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
2684 BPF_ALU64_IMM(BPF_MUL, R2, 0xffffffff),
2685 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2686 BPF_MOV32_IMM(R0, 2),
2687 BPF_EXIT_INSN(),
2688 BPF_MOV32_IMM(R0, 1),
2689 BPF_EXIT_INSN(),
2690 },
2691 INTERNAL,
2692 { },
2693 { { 0, 0x1 } },
2694 },
2695 /* BPF_ALU | BPF_DIV | BPF_X */
2696 {
2697 "ALU_DIV_X: 6 / 2 = 3",
2698 .u.insns_int = {
2699 BPF_LD_IMM64(R0, 6),
2700 BPF_ALU32_IMM(BPF_MOV, R1, 2),
2701 BPF_ALU32_REG(BPF_DIV, R0, R1),
2702 BPF_EXIT_INSN(),
2703 },
2704 INTERNAL,
2705 { },
2706 { { 0, 3 } },
2707 },
2708 {
2709 "ALU_DIV_X: 4294967295 / 4294967295 = 1",
2710 .u.insns_int = {
2711 BPF_LD_IMM64(R0, 4294967295U),
2712 BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
2713 BPF_ALU32_REG(BPF_DIV, R0, R1),
2714 BPF_EXIT_INSN(),
2715 },
2716 INTERNAL,
2717 { },
2718 { { 0, 1 } },
2719 },
2720 {
2721 "ALU64_DIV_X: 6 / 2 = 3",
2722 .u.insns_int = {
2723 BPF_LD_IMM64(R0, 6),
2724 BPF_ALU32_IMM(BPF_MOV, R1, 2),
2725 BPF_ALU64_REG(BPF_DIV, R0, R1),
2726 BPF_EXIT_INSN(),
2727 },
2728 INTERNAL,
2729 { },
2730 { { 0, 3 } },
2731 },
2732 {
2733 "ALU64_DIV_X: 2147483647 / 2147483647 = 1",
2734 .u.insns_int = {
2735 BPF_LD_IMM64(R0, 2147483647),
2736 BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
2737 BPF_ALU64_REG(BPF_DIV, R0, R1),
2738 BPF_EXIT_INSN(),
2739 },
2740 INTERNAL,
2741 { },
2742 { { 0, 1 } },
2743 },
2744 {
2745 "ALU64_DIV_X: 0xffffffffffffffff / (-1) = 0x0000000000000001",
2746 .u.insns_int = {
2747 BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
2748 BPF_LD_IMM64(R4, 0xffffffffffffffffLL),
2749 BPF_LD_IMM64(R3, 0x0000000000000001LL),
2750 BPF_ALU64_REG(BPF_DIV, R2, R4),
2751 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2752 BPF_MOV32_IMM(R0, 2),
2753 BPF_EXIT_INSN(),
2754 BPF_MOV32_IMM(R0, 1),
2755 BPF_EXIT_INSN(),
2756 },
2757 INTERNAL,
2758 { },
2759 { { 0, 0x1 } },
2760 },
2761 /* BPF_ALU | BPF_DIV | BPF_K */
2762 {
2763 "ALU_DIV_K: 6 / 2 = 3",
2764 .u.insns_int = {
2765 BPF_LD_IMM64(R0, 6),
2766 BPF_ALU32_IMM(BPF_DIV, R0, 2),
2767 BPF_EXIT_INSN(),
2768 },
2769 INTERNAL,
2770 { },
2771 { { 0, 3 } },
2772 },
2773 {
2774 "ALU_DIV_K: 3 / 1 = 3",
2775 .u.insns_int = {
2776 BPF_LD_IMM64(R0, 3),
2777 BPF_ALU32_IMM(BPF_DIV, R0, 1),
2778 BPF_EXIT_INSN(),
2779 },
2780 INTERNAL,
2781 { },
2782 { { 0, 3 } },
2783 },
2784 {
2785 "ALU_DIV_K: 4294967295 / 4294967295 = 1",
2786 .u.insns_int = {
2787 BPF_LD_IMM64(R0, 4294967295U),
2788 BPF_ALU32_IMM(BPF_DIV, R0, 4294967295U),
2789 BPF_EXIT_INSN(),
2790 },
2791 INTERNAL,
2792 { },
2793 { { 0, 1 } },
2794 },
2795 {
2796 "ALU_DIV_K: 0xffffffffffffffff / (-1) = 0x1",
2797 .u.insns_int = {
2798 BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
2799 BPF_LD_IMM64(R3, 0x1UL),
2800 BPF_ALU32_IMM(BPF_DIV, R2, 0xffffffff),
2801 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2802 BPF_MOV32_IMM(R0, 2),
2803 BPF_EXIT_INSN(),
2804 BPF_MOV32_IMM(R0, 1),
2805 BPF_EXIT_INSN(),
2806 },
2807 INTERNAL,
2808 { },
2809 { { 0, 0x1 } },
2810 },
2811 {
2812 "ALU64_DIV_K: 6 / 2 = 3",
2813 .u.insns_int = {
2814 BPF_LD_IMM64(R0, 6),
2815 BPF_ALU64_IMM(BPF_DIV, R0, 2),
2816 BPF_EXIT_INSN(),
2817 },
2818 INTERNAL,
2819 { },
2820 { { 0, 3 } },
2821 },
2822 {
2823 "ALU64_DIV_K: 3 / 1 = 3",
2824 .u.insns_int = {
2825 BPF_LD_IMM64(R0, 3),
2826 BPF_ALU64_IMM(BPF_DIV, R0, 1),
2827 BPF_EXIT_INSN(),
2828 },
2829 INTERNAL,
2830 { },
2831 { { 0, 3 } },
2832 },
2833 {
2834 "ALU64_DIV_K: 2147483647 / 2147483647 = 1",
2835 .u.insns_int = {
2836 BPF_LD_IMM64(R0, 2147483647),
2837 BPF_ALU64_IMM(BPF_DIV, R0, 2147483647),
2838 BPF_EXIT_INSN(),
2839 },
2840 INTERNAL,
2841 { },
2842 { { 0, 1 } },
2843 },
2844 {
2845 "ALU64_DIV_K: 0xffffffffffffffff / (-1) = 0x0000000000000001",
2846 .u.insns_int = {
2847 BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
2848 BPF_LD_IMM64(R3, 0x0000000000000001LL),
2849 BPF_ALU64_IMM(BPF_DIV, R2, 0xffffffff),
2850 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
2851 BPF_MOV32_IMM(R0, 2),
2852 BPF_EXIT_INSN(),
2853 BPF_MOV32_IMM(R0, 1),
2854 BPF_EXIT_INSN(),
2855 },
2856 INTERNAL,
2857 { },
2858 { { 0, 0x1 } },
2859 },
2860 /* BPF_ALU | BPF_MOD | BPF_X */
2861 {
2862 "ALU_MOD_X: 3 % 2 = 1",
2863 .u.insns_int = {
2864 BPF_LD_IMM64(R0, 3),
2865 BPF_ALU32_IMM(BPF_MOV, R1, 2),
2866 BPF_ALU32_REG(BPF_MOD, R0, R1),
2867 BPF_EXIT_INSN(),
2868 },
2869 INTERNAL,
2870 { },
2871 { { 0, 1 } },
2872 },
2873 {
2874 "ALU_MOD_X: 4294967295 % 4294967293 = 2",
2875 .u.insns_int = {
2876 BPF_LD_IMM64(R0, 4294967295U),
2877 BPF_ALU32_IMM(BPF_MOV, R1, 4294967293U),
2878 BPF_ALU32_REG(BPF_MOD, R0, R1),
2879 BPF_EXIT_INSN(),
2880 },
2881 INTERNAL,
2882 { },
2883 { { 0, 2 } },
2884 },
2885 {
2886 "ALU64_MOD_X: 3 % 2 = 1",
2887 .u.insns_int = {
2888 BPF_LD_IMM64(R0, 3),
2889 BPF_ALU32_IMM(BPF_MOV, R1, 2),
2890 BPF_ALU64_REG(BPF_MOD, R0, R1),
2891 BPF_EXIT_INSN(),
2892 },
2893 INTERNAL,
2894 { },
2895 { { 0, 1 } },
2896 },
2897 {
2898 "ALU64_MOD_X: 2147483647 % 2147483645 = 2",
2899 .u.insns_int = {
2900 BPF_LD_IMM64(R0, 2147483647),
2901 BPF_ALU32_IMM(BPF_MOV, R1, 2147483645),
2902 BPF_ALU64_REG(BPF_MOD, R0, R1),
2903 BPF_EXIT_INSN(),
2904 },
2905 INTERNAL,
2906 { },
2907 { { 0, 2 } },
2908 },
2909 /* BPF_ALU | BPF_MOD | BPF_K */
2910 {
2911 "ALU_MOD_K: 3 % 2 = 1",
2912 .u.insns_int = {
2913 BPF_LD_IMM64(R0, 3),
2914 BPF_ALU32_IMM(BPF_MOD, R0, 2),
2915 BPF_EXIT_INSN(),
2916 },
2917 INTERNAL,
2918 { },
2919 { { 0, 1 } },
2920 },
2921 {
2922 "ALU_MOD_K: 3 % 1 = 0",
2923 .u.insns_int = {
2924 BPF_LD_IMM64(R0, 3),
2925 BPF_ALU32_IMM(BPF_MOD, R0, 1),
2926 BPF_EXIT_INSN(),
2927 },
2928 INTERNAL,
2929 { },
2930 { { 0, 0 } },
2931 },
2932 {
2933 "ALU_MOD_K: 4294967295 % 4294967293 = 2",
2934 .u.insns_int = {
2935 BPF_LD_IMM64(R0, 4294967295U),
2936 BPF_ALU32_IMM(BPF_MOD, R0, 4294967293U),
2937 BPF_EXIT_INSN(),
2938 },
2939 INTERNAL,
2940 { },
2941 { { 0, 2 } },
2942 },
2943 {
2944 "ALU64_MOD_K: 3 % 2 = 1",
2945 .u.insns_int = {
2946 BPF_LD_IMM64(R0, 3),
2947 BPF_ALU64_IMM(BPF_MOD, R0, 2),
2948 BPF_EXIT_INSN(),
2949 },
2950 INTERNAL,
2951 { },
2952 { { 0, 1 } },
2953 },
2954 {
2955 "ALU64_MOD_K: 3 % 1 = 0",
2956 .u.insns_int = {
2957 BPF_LD_IMM64(R0, 3),
2958 BPF_ALU64_IMM(BPF_MOD, R0, 1),
2959 BPF_EXIT_INSN(),
2960 },
2961 INTERNAL,
2962 { },
2963 { { 0, 0 } },
2964 },
2965 {
2966 "ALU64_MOD_K: 2147483647 % 2147483645 = 2",
2967 .u.insns_int = {
2968 BPF_LD_IMM64(R0, 2147483647),
2969 BPF_ALU64_IMM(BPF_MOD, R0, 2147483645),
2970 BPF_EXIT_INSN(),
2971 },
2972 INTERNAL,
2973 { },
2974 { { 0, 2 } },
2975 },
2976 /* BPF_ALU | BPF_AND | BPF_X */
2977 {
2978 "ALU_AND_X: 3 & 2 = 2",
2979 .u.insns_int = {
2980 BPF_LD_IMM64(R0, 3),
2981 BPF_ALU32_IMM(BPF_MOV, R1, 2),
2982 BPF_ALU32_REG(BPF_AND, R0, R1),
2983 BPF_EXIT_INSN(),
2984 },
2985 INTERNAL,
2986 { },
2987 { { 0, 2 } },
2988 },
2989 {
2990 "ALU_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
2991 .u.insns_int = {
2992 BPF_LD_IMM64(R0, 0xffffffff),
2993 BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
2994 BPF_ALU32_REG(BPF_AND, R0, R1),
2995 BPF_EXIT_INSN(),
2996 },
2997 INTERNAL,
2998 { },
2999 { { 0, 0xffffffff } },
3000 },
3001 {
3002 "ALU64_AND_X: 3 & 2 = 2",
3003 .u.insns_int = {
3004 BPF_LD_IMM64(R0, 3),
3005 BPF_ALU32_IMM(BPF_MOV, R1, 2),
3006 BPF_ALU64_REG(BPF_AND, R0, R1),
3007 BPF_EXIT_INSN(),
3008 },
3009 INTERNAL,
3010 { },
3011 { { 0, 2 } },
3012 },
3013 {
3014 "ALU64_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
3015 .u.insns_int = {
3016 BPF_LD_IMM64(R0, 0xffffffff),
3017 BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
3018 BPF_ALU64_REG(BPF_AND, R0, R1),
3019 BPF_EXIT_INSN(),
3020 },
3021 INTERNAL,
3022 { },
3023 { { 0, 0xffffffff } },
3024 },
3025 /* BPF_ALU | BPF_AND | BPF_K */
3026 {
3027 "ALU_AND_K: 3 & 2 = 2",
3028 .u.insns_int = {
3029 BPF_LD_IMM64(R0, 3),
3030 BPF_ALU32_IMM(BPF_AND, R0, 2),
3031 BPF_EXIT_INSN(),
3032 },
3033 INTERNAL,
3034 { },
3035 { { 0, 2 } },
3036 },
3037 {
3038 "ALU_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
3039 .u.insns_int = {
3040 BPF_LD_IMM64(R0, 0xffffffff),
3041 BPF_ALU32_IMM(BPF_AND, R0, 0xffffffff),
3042 BPF_EXIT_INSN(),
3043 },
3044 INTERNAL,
3045 { },
3046 { { 0, 0xffffffff } },
3047 },
3048 {
3049 "ALU64_AND_K: 3 & 2 = 2",
3050 .u.insns_int = {
3051 BPF_LD_IMM64(R0, 3),
3052 BPF_ALU64_IMM(BPF_AND, R0, 2),
3053 BPF_EXIT_INSN(),
3054 },
3055 INTERNAL,
3056 { },
3057 { { 0, 2 } },
3058 },
3059 {
3060 "ALU64_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
3061 .u.insns_int = {
3062 BPF_LD_IMM64(R0, 0xffffffff),
3063 BPF_ALU64_IMM(BPF_AND, R0, 0xffffffff),
3064 BPF_EXIT_INSN(),
3065 },
3066 INTERNAL,
3067 { },
3068 { { 0, 0xffffffff } },
3069 },
3070 {
3071 "ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000ffff00000000",
3072 .u.insns_int = {
3073 BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
3074 BPF_LD_IMM64(R3, 0x0000000000000000LL),
3075 BPF_ALU64_IMM(BPF_AND, R2, 0x0),
3076 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
3077 BPF_MOV32_IMM(R0, 2),
3078 BPF_EXIT_INSN(),
3079 BPF_MOV32_IMM(R0, 1),
3080 BPF_EXIT_INSN(),
3081 },
3082 INTERNAL,
3083 { },
3084 { { 0, 0x1 } },
3085 },
3086 {
3087 "ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffffffff",
3088 .u.insns_int = {
3089 BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
3090 BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
3091 BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
3092 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
3093 BPF_MOV32_IMM(R0, 2),
3094 BPF_EXIT_INSN(),
3095 BPF_MOV32_IMM(R0, 1),
3096 BPF_EXIT_INSN(),
3097 },
3098 INTERNAL,
3099 { },
3100 { { 0, 0x1 } },
3101 },
3102 {
3103 "ALU64_AND_K: 0xffffffffffffffff & -1 = 0xffffffffffffffff",
3104 .u.insns_int = {
3105 BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
3106 BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
3107 BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
3108 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
3109 BPF_MOV32_IMM(R0, 2),
3110 BPF_EXIT_INSN(),
3111 BPF_MOV32_IMM(R0, 1),
3112 BPF_EXIT_INSN(),
3113 },
3114 INTERNAL,
3115 { },
3116 { { 0, 0x1 } },
3117 },
3118 /* BPF_ALU | BPF_OR | BPF_X */
3119 {
3120 "ALU_OR_X: 1 | 2 = 3",
3121 .u.insns_int = {
3122 BPF_LD_IMM64(R0, 1),
3123 BPF_ALU32_IMM(BPF_MOV, R1, 2),
3124 BPF_ALU32_REG(BPF_OR, R0, R1),
3125 BPF_EXIT_INSN(),
3126 },
3127 INTERNAL,
3128 { },
3129 { { 0, 3 } },
3130 },
3131 {
3132 "ALU_OR_X: 0x0 | 0xffffffff = 0xffffffff",
3133 .u.insns_int = {
3134 BPF_LD_IMM64(R0, 0),
3135 BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
3136 BPF_ALU32_REG(BPF_OR, R0, R1),
3137 BPF_EXIT_INSN(),
3138 },
3139 INTERNAL,
3140 { },
3141 { { 0, 0xffffffff } },
3142 },
3143 {
3144 "ALU64_OR_X: 1 | 2 = 3",
3145 .u.insns_int = {
3146 BPF_LD_IMM64(R0, 1),
3147 BPF_ALU32_IMM(BPF_MOV, R1, 2),
3148 BPF_ALU64_REG(BPF_OR, R0, R1),
3149 BPF_EXIT_INSN(),
3150 },
3151 INTERNAL,
3152 { },
3153 { { 0, 3 } },
3154 },
3155 {
3156 "ALU64_OR_X: 0 | 0xffffffff = 0xffffffff",
3157 .u.insns_int = {
3158 BPF_LD_IMM64(R0, 0),
3159 BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
3160 BPF_ALU64_REG(BPF_OR, R0, R1),
3161 BPF_EXIT_INSN(),
3162 },
3163 INTERNAL,
3164 { },
3165 { { 0, 0xffffffff } },
3166 },
3167 /* BPF_ALU | BPF_OR | BPF_K */
3168 {
3169 "ALU_OR_K: 1 | 2 = 3",
3170 .u.insns_int = {
3171 BPF_LD_IMM64(R0, 1),
3172 BPF_ALU32_IMM(BPF_OR, R0, 2),
3173 BPF_EXIT_INSN(),
3174 },
3175 INTERNAL,
3176 { },
3177 { { 0, 3 } },
3178 },
3179 {
3180 "ALU_OR_K: 0 & 0xffffffff = 0xffffffff",
3181 .u.insns_int = {
3182 BPF_LD_IMM64(R0, 0),
3183 BPF_ALU32_IMM(BPF_OR, R0, 0xffffffff),
3184 BPF_EXIT_INSN(),
3185 },
3186 INTERNAL,
3187 { },
3188 { { 0, 0xffffffff } },
3189 },
3190 {
3191 "ALU64_OR_K: 1 | 2 = 3",
3192 .u.insns_int = {
3193 BPF_LD_IMM64(R0, 1),
3194 BPF_ALU64_IMM(BPF_OR, R0, 2),
3195 BPF_EXIT_INSN(),
3196 },
3197 INTERNAL,
3198 { },
3199 { { 0, 3 } },
3200 },
3201 {
3202 "ALU64_OR_K: 0 & 0xffffffff = 0xffffffff",
3203 .u.insns_int = {
3204 BPF_LD_IMM64(R0, 0),
3205 BPF_ALU64_IMM(BPF_OR, R0, 0xffffffff),
3206 BPF_EXIT_INSN(),
3207 },
3208 INTERNAL,
3209 { },
3210 { { 0, 0xffffffff } },
3211 },
3212 {
3213 "ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffff00000000",
3214 .u.insns_int = {
3215 BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
3216 BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
3217 BPF_ALU64_IMM(BPF_OR, R2, 0x0),
3218 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
3219 BPF_MOV32_IMM(R0, 2),
3220 BPF_EXIT_INSN(),
3221 BPF_MOV32_IMM(R0, 1),
3222 BPF_EXIT_INSN(),
3223 },
3224 INTERNAL,
3225 { },
3226 { { 0, 0x1 } },
3227 },
3228 {
3229 "ALU64_OR_K: 0x0000ffffffff0000 | -1 = 0xffffffffffffffff",
3230 .u.insns_int = {
3231 BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
3232 BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
3233 BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
3234 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
3235 BPF_MOV32_IMM(R0, 2),
3236 BPF_EXIT_INSN(),
3237 BPF_MOV32_IMM(R0, 1),
3238 BPF_EXIT_INSN(),
3239 },
3240 INTERNAL,
3241 { },
3242 { { 0, 0x1 } },
3243 },
3244 {
3245 "ALU64_OR_K: 0x000000000000000 | -1 = 0xffffffffffffffff",
3246 .u.insns_int = {
3247 BPF_LD_IMM64(R2, 0x0000000000000000LL),
3248 BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
3249 BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
3250 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
3251 BPF_MOV32_IMM(R0, 2),
3252 BPF_EXIT_INSN(),
3253 BPF_MOV32_IMM(R0, 1),
3254 BPF_EXIT_INSN(),
3255 },
3256 INTERNAL,
3257 { },
3258 { { 0, 0x1 } },
3259 },
3260 /* BPF_ALU | BPF_XOR | BPF_X */
3261 {
3262 "ALU_XOR_X: 5 ^ 6 = 3",
3263 .u.insns_int = {
3264 BPF_LD_IMM64(R0, 5),
3265 BPF_ALU32_IMM(BPF_MOV, R1, 6),
3266 BPF_ALU32_REG(BPF_XOR, R0, R1),
3267 BPF_EXIT_INSN(),
3268 },
3269 INTERNAL,
3270 { },
3271 { { 0, 3 } },
3272 },
3273 {
3274 "ALU_XOR_X: 0x1 ^ 0xffffffff = 0xfffffffe",
3275 .u.insns_int = {
3276 BPF_LD_IMM64(R0, 1),
3277 BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
3278 BPF_ALU32_REG(BPF_XOR, R0, R1),
3279 BPF_EXIT_INSN(),
3280 },
3281 INTERNAL,
3282 { },
3283 { { 0, 0xfffffffe } },
3284 },
3285 {
3286 "ALU64_XOR_X: 5 ^ 6 = 3",
3287 .u.insns_int = {
3288 BPF_LD_IMM64(R0, 5),
3289 BPF_ALU32_IMM(BPF_MOV, R1, 6),
3290 BPF_ALU64_REG(BPF_XOR, R0, R1),
3291 BPF_EXIT_INSN(),
3292 },
3293 INTERNAL,
3294 { },
3295 { { 0, 3 } },
3296 },
3297 {
3298 "ALU64_XOR_X: 1 ^ 0xffffffff = 0xfffffffe",
3299 .u.insns_int = {
3300 BPF_LD_IMM64(R0, 1),
3301 BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
3302 BPF_ALU64_REG(BPF_XOR, R0, R1),
3303 BPF_EXIT_INSN(),
3304 },
3305 INTERNAL,
3306 { },
3307 { { 0, 0xfffffffe } },
3308 },
3309 /* BPF_ALU | BPF_XOR | BPF_K */
3310 {
3311 "ALU_XOR_K: 5 ^ 6 = 3",
3312 .u.insns_int = {
3313 BPF_LD_IMM64(R0, 5),
3314 BPF_ALU32_IMM(BPF_XOR, R0, 6),
3315 BPF_EXIT_INSN(),
3316 },
3317 INTERNAL,
3318 { },
3319 { { 0, 3 } },
3320 },
3321 {
3322 "ALU_XOR_K: 1 ^ 0xffffffff = 0xfffffffe",
3323 .u.insns_int = {
3324 BPF_LD_IMM64(R0, 1),
3325 BPF_ALU32_IMM(BPF_XOR, R0, 0xffffffff),
3326 BPF_EXIT_INSN(),
3327 },
3328 INTERNAL,
3329 { },
3330 { { 0, 0xfffffffe } },
3331 },
3332 {
3333 "ALU64_XOR_K: 5 ^ 6 = 3",
3334 .u.insns_int = {
3335 BPF_LD_IMM64(R0, 5),
3336 BPF_ALU64_IMM(BPF_XOR, R0, 6),
3337 BPF_EXIT_INSN(),
3338 },
3339 INTERNAL,
3340 { },
3341 { { 0, 3 } },
3342 },
3343 {
3344 "ALU64_XOR_K: 1 & 0xffffffff = 0xfffffffe",
3345 .u.insns_int = {
3346 BPF_LD_IMM64(R0, 1),
3347 BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff),
3348 BPF_EXIT_INSN(),
3349 },
3350 INTERNAL,
3351 { },
3352 { { 0, 0xfffffffe } },
3353 },
3354 {
3355 "ALU64_XOR_K: 0x0000ffffffff0000 ^ 0x0 = 0x0000ffffffff0000",
3356 .u.insns_int = {
3357 BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
3358 BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
3359 BPF_ALU64_IMM(BPF_XOR, R2, 0x0),
3360 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
3361 BPF_MOV32_IMM(R0, 2),
3362 BPF_EXIT_INSN(),
3363 BPF_MOV32_IMM(R0, 1),
3364 BPF_EXIT_INSN(),
3365 },
3366 INTERNAL,
3367 { },
3368 { { 0, 0x1 } },
3369 },
3370 {
3371 "ALU64_XOR_K: 0x0000ffffffff0000 ^ -1 = 0xffff00000000ffff",
3372 .u.insns_int = {
3373 BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
3374 BPF_LD_IMM64(R3, 0xffff00000000ffffLL),
3375 BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
3376 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
3377 BPF_MOV32_IMM(R0, 2),
3378 BPF_EXIT_INSN(),
3379 BPF_MOV32_IMM(R0, 1),
3380 BPF_EXIT_INSN(),
3381 },
3382 INTERNAL,
3383 { },
3384 { { 0, 0x1 } },
3385 },
3386 {
3387 "ALU64_XOR_K: 0x000000000000000 ^ -1 = 0xffffffffffffffff",
3388 .u.insns_int = {
3389 BPF_LD_IMM64(R2, 0x0000000000000000LL),
3390 BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
3391 BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
3392 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
3393 BPF_MOV32_IMM(R0, 2),
3394 BPF_EXIT_INSN(),
3395 BPF_MOV32_IMM(R0, 1),
3396 BPF_EXIT_INSN(),
3397 },
3398 INTERNAL,
3399 { },
3400 { { 0, 0x1 } },
3401 },
3402 /* BPF_ALU | BPF_LSH | BPF_X */
3403 {
3404 "ALU_LSH_X: 1 << 1 = 2",
3405 .u.insns_int = {
3406 BPF_LD_IMM64(R0, 1),
3407 BPF_ALU32_IMM(BPF_MOV, R1, 1),
3408 BPF_ALU32_REG(BPF_LSH, R0, R1),
3409 BPF_EXIT_INSN(),
3410 },
3411 INTERNAL,
3412 { },
3413 { { 0, 2 } },
3414 },
3415 {
3416 "ALU_LSH_X: 1 << 31 = 0x80000000",
3417 .u.insns_int = {
3418 BPF_LD_IMM64(R0, 1),
3419 BPF_ALU32_IMM(BPF_MOV, R1, 31),
3420 BPF_ALU32_REG(BPF_LSH, R0, R1),
3421 BPF_EXIT_INSN(),
3422 },
3423 INTERNAL,
3424 { },
3425 { { 0, 0x80000000 } },
3426 },
3427 {
3428 "ALU64_LSH_X: 1 << 1 = 2",
3429 .u.insns_int = {
3430 BPF_LD_IMM64(R0, 1),
3431 BPF_ALU32_IMM(BPF_MOV, R1, 1),
3432 BPF_ALU64_REG(BPF_LSH, R0, R1),
3433 BPF_EXIT_INSN(),
3434 },
3435 INTERNAL,
3436 { },
3437 { { 0, 2 } },
3438 },
3439 {
3440 "ALU64_LSH_X: 1 << 31 = 0x80000000",
3441 .u.insns_int = {
3442 BPF_LD_IMM64(R0, 1),
3443 BPF_ALU32_IMM(BPF_MOV, R1, 31),
3444 BPF_ALU64_REG(BPF_LSH, R0, R1),
3445 BPF_EXIT_INSN(),
3446 },
3447 INTERNAL,
3448 { },
3449 { { 0, 0x80000000 } },
3450 },
3451 /* BPF_ALU | BPF_LSH | BPF_K */
3452 {
3453 "ALU_LSH_K: 1 << 1 = 2",
3454 .u.insns_int = {
3455 BPF_LD_IMM64(R0, 1),
3456 BPF_ALU32_IMM(BPF_LSH, R0, 1),
3457 BPF_EXIT_INSN(),
3458 },
3459 INTERNAL,
3460 { },
3461 { { 0, 2 } },
3462 },
3463 {
3464 "ALU_LSH_K: 1 << 31 = 0x80000000",
3465 .u.insns_int = {
3466 BPF_LD_IMM64(R0, 1),
3467 BPF_ALU32_IMM(BPF_LSH, R0, 31),
3468 BPF_EXIT_INSN(),
3469 },
3470 INTERNAL,
3471 { },
3472 { { 0, 0x80000000 } },
3473 },
3474 {
3475 "ALU64_LSH_K: 1 << 1 = 2",
3476 .u.insns_int = {
3477 BPF_LD_IMM64(R0, 1),
3478 BPF_ALU64_IMM(BPF_LSH, R0, 1),
3479 BPF_EXIT_INSN(),
3480 },
3481 INTERNAL,
3482 { },
3483 { { 0, 2 } },
3484 },
3485 {
3486 "ALU64_LSH_K: 1 << 31 = 0x80000000",
3487 .u.insns_int = {
3488 BPF_LD_IMM64(R0, 1),
3489 BPF_ALU64_IMM(BPF_LSH, R0, 31),
3490 BPF_EXIT_INSN(),
3491 },
3492 INTERNAL,
3493 { },
3494 { { 0, 0x80000000 } },
3495 },
3496 /* BPF_ALU | BPF_RSH | BPF_X */
3497 {
3498 "ALU_RSH_X: 2 >> 1 = 1",
3499 .u.insns_int = {
3500 BPF_LD_IMM64(R0, 2),
3501 BPF_ALU32_IMM(BPF_MOV, R1, 1),
3502 BPF_ALU32_REG(BPF_RSH, R0, R1),
3503 BPF_EXIT_INSN(),
3504 },
3505 INTERNAL,
3506 { },
3507 { { 0, 1 } },
3508 },
3509 {
3510 "ALU_RSH_X: 0x80000000 >> 31 = 1",
3511 .u.insns_int = {
3512 BPF_LD_IMM64(R0, 0x80000000),
3513 BPF_ALU32_IMM(BPF_MOV, R1, 31),
3514 BPF_ALU32_REG(BPF_RSH, R0, R1),
3515 BPF_EXIT_INSN(),
3516 },
3517 INTERNAL,
3518 { },
3519 { { 0, 1 } },
3520 },
3521 {
3522 "ALU64_RSH_X: 2 >> 1 = 1",
3523 .u.insns_int = {
3524 BPF_LD_IMM64(R0, 2),
3525 BPF_ALU32_IMM(BPF_MOV, R1, 1),
3526 BPF_ALU64_REG(BPF_RSH, R0, R1),
3527 BPF_EXIT_INSN(),
3528 },
3529 INTERNAL,
3530 { },
3531 { { 0, 1 } },
3532 },
3533 {
3534 "ALU64_RSH_X: 0x80000000 >> 31 = 1",
3535 .u.insns_int = {
3536 BPF_LD_IMM64(R0, 0x80000000),
3537 BPF_ALU32_IMM(BPF_MOV, R1, 31),
3538 BPF_ALU64_REG(BPF_RSH, R0, R1),
3539 BPF_EXIT_INSN(),
3540 },
3541 INTERNAL,
3542 { },
3543 { { 0, 1 } },
3544 },
3545 /* BPF_ALU | BPF_RSH | BPF_K */
3546 {
3547 "ALU_RSH_K: 2 >> 1 = 1",
3548 .u.insns_int = {
3549 BPF_LD_IMM64(R0, 2),
3550 BPF_ALU32_IMM(BPF_RSH, R0, 1),
3551 BPF_EXIT_INSN(),
3552 },
3553 INTERNAL,
3554 { },
3555 { { 0, 1 } },
3556 },
3557 {
3558 "ALU_RSH_K: 0x80000000 >> 31 = 1",
3559 .u.insns_int = {
3560 BPF_LD_IMM64(R0, 0x80000000),
3561 BPF_ALU32_IMM(BPF_RSH, R0, 31),
3562 BPF_EXIT_INSN(),
3563 },
3564 INTERNAL,
3565 { },
3566 { { 0, 1 } },
3567 },
3568 {
3569 "ALU64_RSH_K: 2 >> 1 = 1",
3570 .u.insns_int = {
3571 BPF_LD_IMM64(R0, 2),
3572 BPF_ALU64_IMM(BPF_RSH, R0, 1),
3573 BPF_EXIT_INSN(),
3574 },
3575 INTERNAL,
3576 { },
3577 { { 0, 1 } },
3578 },
3579 {
3580 "ALU64_RSH_K: 0x80000000 >> 31 = 1",
3581 .u.insns_int = {
3582 BPF_LD_IMM64(R0, 0x80000000),
3583 BPF_ALU64_IMM(BPF_RSH, R0, 31),
3584 BPF_EXIT_INSN(),
3585 },
3586 INTERNAL,
3587 { },
3588 { { 0, 1 } },
3589 },
3590 /* BPF_ALU | BPF_ARSH | BPF_X */
3591 {
3592 "ALU_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
3593 .u.insns_int = {
3594 BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
3595 BPF_ALU32_IMM(BPF_MOV, R1, 40),
3596 BPF_ALU64_REG(BPF_ARSH, R0, R1),
3597 BPF_EXIT_INSN(),
3598 },
3599 INTERNAL,
3600 { },
3601 { { 0, 0xffff00ff } },
3602 },
3603 /* BPF_ALU | BPF_ARSH | BPF_K */
3604 {
3605 "ALU_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
3606 .u.insns_int = {
3607 BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
3608 BPF_ALU64_IMM(BPF_ARSH, R0, 40),
3609 BPF_EXIT_INSN(),
3610 },
3611 INTERNAL,
3612 { },
3613 { { 0, 0xffff00ff } },
3614 },
3615 /* BPF_ALU | BPF_NEG */
3616 {
3617 "ALU_NEG: -(3) = -3",
3618 .u.insns_int = {
3619 BPF_ALU32_IMM(BPF_MOV, R0, 3),
3620 BPF_ALU32_IMM(BPF_NEG, R0, 0),
3621 BPF_EXIT_INSN(),
3622 },
3623 INTERNAL,
3624 { },
3625 { { 0, -3 } },
3626 },
3627 {
3628 "ALU_NEG: -(-3) = 3",
3629 .u.insns_int = {
3630 BPF_ALU32_IMM(BPF_MOV, R0, -3),
3631 BPF_ALU32_IMM(BPF_NEG, R0, 0),
3632 BPF_EXIT_INSN(),
3633 },
3634 INTERNAL,
3635 { },
3636 { { 0, 3 } },
3637 },
3638 {
3639 "ALU64_NEG: -(3) = -3",
3640 .u.insns_int = {
3641 BPF_LD_IMM64(R0, 3),
3642 BPF_ALU64_IMM(BPF_NEG, R0, 0),
3643 BPF_EXIT_INSN(),
3644 },
3645 INTERNAL,
3646 { },
3647 { { 0, -3 } },
3648 },
3649 {
3650 "ALU64_NEG: -(-3) = 3",
3651 .u.insns_int = {
3652 BPF_LD_IMM64(R0, -3),
3653 BPF_ALU64_IMM(BPF_NEG, R0, 0),
3654 BPF_EXIT_INSN(),
3655 },
3656 INTERNAL,
3657 { },
3658 { { 0, 3 } },
3659 },
3660 /* BPF_ALU | BPF_END | BPF_FROM_BE */
3661 {
3662 "ALU_END_FROM_BE 16: 0x0123456789abcdef -> 0xcdef",
3663 .u.insns_int = {
3664 BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
3665 BPF_ENDIAN(BPF_FROM_BE, R0, 16),
3666 BPF_EXIT_INSN(),
3667 },
3668 INTERNAL,
3669 { },
3670 { { 0, cpu_to_be16(0xcdef) } },
3671 },
3672 {
3673 "ALU_END_FROM_BE 32: 0x0123456789abcdef -> 0x89abcdef",
3674 .u.insns_int = {
3675 BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
3676 BPF_ENDIAN(BPF_FROM_BE, R0, 32),
3677 BPF_EXIT_INSN(),
3678 },
3679 INTERNAL,
3680 { },
3681 { { 0, cpu_to_be32(0x89abcdef) } },
3682 },
3683 {
3684 "ALU_END_FROM_BE 64: 0x0123456789abcdef -> 0x89abcdef",
3685 .u.insns_int = {
3686 BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
3687 BPF_ENDIAN(BPF_FROM_BE, R0, 64),
3688 BPF_EXIT_INSN(),
3689 },
3690 INTERNAL,
3691 { },
3692 { { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } },
3693 },
3694 /* BPF_ALU | BPF_END | BPF_FROM_LE */
3695 {
3696 "ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd",
3697 .u.insns_int = {
3698 BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
3699 BPF_ENDIAN(BPF_FROM_LE, R0, 16),
3700 BPF_EXIT_INSN(),
3701 },
3702 INTERNAL,
3703 { },
3704 { { 0, cpu_to_le16(0xcdef) } },
3705 },
3706 {
3707 "ALU_END_FROM_LE 32: 0x0123456789abcdef -> 0xefcdab89",
3708 .u.insns_int = {
3709 BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
3710 BPF_ENDIAN(BPF_FROM_LE, R0, 32),
3711 BPF_EXIT_INSN(),
3712 },
3713 INTERNAL,
3714 { },
3715 { { 0, cpu_to_le32(0x89abcdef) } },
3716 },
3717 {
3718 "ALU_END_FROM_LE 64: 0x0123456789abcdef -> 0x67452301",
3719 .u.insns_int = {
3720 BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
3721 BPF_ENDIAN(BPF_FROM_LE, R0, 64),
3722 BPF_EXIT_INSN(),
3723 },
3724 INTERNAL,
3725 { },
3726 { { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } },
3727 },
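	/* The BPF_ENDIAN expectations are wrapped in cpu_to_be*() /
	 * cpu_to_le*() so the tests pass on either host endianness: on
	 * a little-endian machine FROM_BE byte-swaps the low 16/32/64
	 * bits and FROM_LE merely truncates, while on big-endian the
	 * roles are reversed. E.g. FROM_BE 32 of 0x0123456789abcdef
	 * reads back as 0x89abcdef on big-endian and 0xefcdab89 on
	 * little-endian -- both equal cpu_to_be32(0x89abcdef) as a u32.
	 */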
3728 /* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */
3729 {
3730 "ST_MEM_B: Store/Load byte: max negative",
3731 .u.insns_int = {
3732 BPF_ALU32_IMM(BPF_MOV, R0, 1),
3733 BPF_ST_MEM(BPF_B, R10, -40, 0xff),
3734 BPF_LDX_MEM(BPF_B, R0, R10, -40),
3735 BPF_EXIT_INSN(),
3736 },
3737 INTERNAL,
3738 { },
3739 { { 0, 0xff } },
3740 },
3741 {
3742 "ST_MEM_B: Store/Load byte: max positive",
3743 .u.insns_int = {
3744 BPF_ALU32_IMM(BPF_MOV, R0, 1),
3745			BPF_ST_MEM(BPF_B, R10, -40, 0x7f),
3746			BPF_LDX_MEM(BPF_B, R0, R10, -40),
3747 BPF_EXIT_INSN(),
3748 },
3749 INTERNAL,
3750 { },
3751 { { 0, 0x7f } },
3752 },
3753 {
3754 "STX_MEM_B: Store/Load byte: max negative",
3755 .u.insns_int = {
3756 BPF_LD_IMM64(R0, 0),
3757 BPF_LD_IMM64(R1, 0xffLL),
3758 BPF_STX_MEM(BPF_B, R10, R1, -40),
3759 BPF_LDX_MEM(BPF_B, R0, R10, -40),
3760 BPF_EXIT_INSN(),
3761 },
3762 INTERNAL,
3763 { },
3764 { { 0, 0xff } },
3765 },
3766 {
3767 "ST_MEM_H: Store/Load half word: max negative",
3768 .u.insns_int = {
3769 BPF_ALU32_IMM(BPF_MOV, R0, 1),
3770 BPF_ST_MEM(BPF_H, R10, -40, 0xffff),
3771 BPF_LDX_MEM(BPF_H, R0, R10, -40),
3772 BPF_EXIT_INSN(),
3773 },
3774 INTERNAL,
3775 { },
3776 { { 0, 0xffff } },
3777 },
3778 {
3779 "ST_MEM_H: Store/Load half word: max positive",
3780 .u.insns_int = {
3781 BPF_ALU32_IMM(BPF_MOV, R0, 1),
3782 BPF_ST_MEM(BPF_H, R10, -40, 0x7fff),
3783 BPF_LDX_MEM(BPF_H, R0, R10, -40),
3784 BPF_EXIT_INSN(),
3785 },
3786 INTERNAL,
3787 { },
3788 { { 0, 0x7fff } },
3789 },
3790 {
3791 "STX_MEM_H: Store/Load half word: max negative",
3792 .u.insns_int = {
3793 BPF_LD_IMM64(R0, 0),
3794 BPF_LD_IMM64(R1, 0xffffLL),
3795 BPF_STX_MEM(BPF_H, R10, R1, -40),
3796 BPF_LDX_MEM(BPF_H, R0, R10, -40),
3797 BPF_EXIT_INSN(),
3798 },
3799 INTERNAL,
3800 { },
3801 { { 0, 0xffff } },
3802 },
3803 {
3804 "ST_MEM_W: Store/Load word: max negative",
3805 .u.insns_int = {
3806 BPF_ALU32_IMM(BPF_MOV, R0, 1),
3807 BPF_ST_MEM(BPF_W, R10, -40, 0xffffffff),
3808 BPF_LDX_MEM(BPF_W, R0, R10, -40),
3809 BPF_EXIT_INSN(),
3810 },
3811 INTERNAL,
3812 { },
3813 { { 0, 0xffffffff } },
3814 },
3815 {
3816 "ST_MEM_W: Store/Load word: max positive",
3817 .u.insns_int = {
3818 BPF_ALU32_IMM(BPF_MOV, R0, 1),
3819 BPF_ST_MEM(BPF_W, R10, -40, 0x7fffffff),
3820 BPF_LDX_MEM(BPF_W, R0, R10, -40),
3821 BPF_EXIT_INSN(),
3822 },
3823 INTERNAL,
3824 { },
3825 { { 0, 0x7fffffff } },
3826 },
3827 {
3828 "STX_MEM_W: Store/Load word: max negative",
3829 .u.insns_int = {
3830 BPF_LD_IMM64(R0, 0),
3831 BPF_LD_IMM64(R1, 0xffffffffLL),
3832 BPF_STX_MEM(BPF_W, R10, R1, -40),
3833 BPF_LDX_MEM(BPF_W, R0, R10, -40),
3834 BPF_EXIT_INSN(),
3835 },
3836 INTERNAL,
3837 { },
3838 { { 0, 0xffffffff } },
3839 },
3840 {
3841 "ST_MEM_DW: Store/Load double word: max negative",
3842 .u.insns_int = {
3843 BPF_ALU32_IMM(BPF_MOV, R0, 1),
3844 BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
3845 BPF_LDX_MEM(BPF_DW, R0, R10, -40),
3846 BPF_EXIT_INSN(),
3847 },
3848 INTERNAL,
3849 { },
3850 { { 0, 0xffffffff } },
3851 },
3852 {
3853 "ST_MEM_DW: Store/Load double word: max negative 2",
3854 .u.insns_int = {
3855 BPF_LD_IMM64(R2, 0xffff00000000ffffLL),
3856 BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
3857 BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
3858 BPF_LDX_MEM(BPF_DW, R2, R10, -40),
3859 BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
3860 BPF_MOV32_IMM(R0, 2),
3861 BPF_EXIT_INSN(),
3862 BPF_MOV32_IMM(R0, 1),
3863 BPF_EXIT_INSN(),
3864 },
3865 INTERNAL,
3866 { },
3867 { { 0, 0x1 } },
3868 },
3869 {
3870 "ST_MEM_DW: Store/Load double word: max positive",
3871 .u.insns_int = {
3872 BPF_ALU32_IMM(BPF_MOV, R0, 1),
3873 BPF_ST_MEM(BPF_DW, R10, -40, 0x7fffffff),
3874 BPF_LDX_MEM(BPF_DW, R0, R10, -40),
3875 BPF_EXIT_INSN(),
3876 },
3877 INTERNAL,
3878 { },
3879 { { 0, 0x7fffffff } },
3880 },
3881 {
3882 "STX_MEM_DW: Store/Load double word: max negative",
3883 .u.insns_int = {
3884 BPF_LD_IMM64(R0, 0),
3885 BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
3886			BPF_STX_MEM(BPF_DW, R10, R1, -40),
3887			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
3888 BPF_EXIT_INSN(),
3889 },
3890 INTERNAL,
3891 { },
3892 { { 0, 0xffffffff } },
3893 },
3894 /* BPF_STX | BPF_XADD | BPF_W/DW */
3895 {
3896 "STX_XADD_W: Test: 0x12 + 0x10 = 0x22",
3897 .u.insns_int = {
3898 BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
3899 BPF_ST_MEM(BPF_W, R10, -40, 0x10),
3900 BPF_STX_XADD(BPF_W, R10, R0, -40),
3901 BPF_LDX_MEM(BPF_W, R0, R10, -40),
3902 BPF_EXIT_INSN(),
3903 },
3904 INTERNAL,
3905 { },
3906 { { 0, 0x22 } },
3907 },
3908 {
3909 "STX_XADD_DW: Test: 0x12 + 0x10 = 0x22",
3910 .u.insns_int = {
3911 BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
3912 BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
3913 BPF_STX_XADD(BPF_DW, R10, R0, -40),
3914 BPF_LDX_MEM(BPF_DW, R0, R10, -40),
3915 BPF_EXIT_INSN(),
3916 },
3917 INTERNAL,
3918 { },
3919 { { 0, 0x22 } },
3920 },
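	/* BPF_STX | BPF_XADD is an atomic read-modify-write of memory;
	 * the BPF_W case above is roughly equivalent to
	 * __sync_fetch_and_add((u32 *)(R10 - 40), R0), so the plain
	 * BPF_LDX_MEM afterwards observes 0x10 + 0x12 = 0x22.
	 */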
3921 /* BPF_JMP | BPF_EXIT */
3922 {
3923 "JMP_EXIT",
3924 .u.insns_int = {
3925 BPF_ALU32_IMM(BPF_MOV, R0, 0x4711),
3926 BPF_EXIT_INSN(),
3927 BPF_ALU32_IMM(BPF_MOV, R0, 0x4712),
3928 },
3929 INTERNAL,
3930 { },
3931 { { 0, 0x4711 } },
3932 },
3933 /* BPF_JMP | BPF_JA */
3934 {
3935 "JMP_JA: Unconditional jump: if (true) return 1",
3936 .u.insns_int = {
3937 BPF_ALU32_IMM(BPF_MOV, R0, 0),
3938 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3939 BPF_EXIT_INSN(),
3940 BPF_ALU32_IMM(BPF_MOV, R0, 1),
3941 BPF_EXIT_INSN(),
3942 },
3943 INTERNAL,
3944 { },
3945 { { 0, 1 } },
3946 },
3947 /* BPF_JMP | BPF_JSGT | BPF_K */
3948 {
3949 "JMP_JSGT_K: Signed jump: if (-1 > -2) return 1",
3950 .u.insns_int = {
3951 BPF_ALU32_IMM(BPF_MOV, R0, 0),
3952 BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
3953 BPF_JMP_IMM(BPF_JSGT, R1, -2, 1),
3954 BPF_EXIT_INSN(),
3955 BPF_ALU32_IMM(BPF_MOV, R0, 1),
3956 BPF_EXIT_INSN(),
3957 },
3958 INTERNAL,
3959 { },
3960 { { 0, 1 } },
3961 },
3962 {
3963 "JMP_JSGT_K: Signed jump: if (-1 > -1) return 0",
3964 .u.insns_int = {
3965 BPF_ALU32_IMM(BPF_MOV, R0, 1),
3966 BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
3967 BPF_JMP_IMM(BPF_JSGT, R1, -1, 1),
3968 BPF_EXIT_INSN(),
3969 BPF_ALU32_IMM(BPF_MOV, R0, 0),
3970 BPF_EXIT_INSN(),
3971 },
3972 INTERNAL,
3973 { },
3974 { { 0, 1 } },
3975 },
3976 /* BPF_JMP | BPF_JSGE | BPF_K */
3977 {
3978 "JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1",
3979 .u.insns_int = {
3980 BPF_ALU32_IMM(BPF_MOV, R0, 0),
3981 BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
3982 BPF_JMP_IMM(BPF_JSGE, R1, -2, 1),
3983 BPF_EXIT_INSN(),
3984 BPF_ALU32_IMM(BPF_MOV, R0, 1),
3985 BPF_EXIT_INSN(),
3986 },
3987 INTERNAL,
3988 { },
3989 { { 0, 1 } },
3990 },
3991 {
3992 "JMP_JSGE_K: Signed jump: if (-1 >= -1) return 1",
3993 .u.insns_int = {
3994 BPF_ALU32_IMM(BPF_MOV, R0, 0),
3995 BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
3996 BPF_JMP_IMM(BPF_JSGE, R1, -1, 1),
3997 BPF_EXIT_INSN(),
3998 BPF_ALU32_IMM(BPF_MOV, R0, 1),
3999 BPF_EXIT_INSN(),
4000 },
4001 INTERNAL,
4002 { },
4003 { { 0, 1 } },
4004 },
4005 /* BPF_JMP | BPF_JGT | BPF_K */
4006 {
4007 "JMP_JGT_K: if (3 > 2) return 1",
4008 .u.insns_int = {
4009 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4010 BPF_LD_IMM64(R1, 3),
4011 BPF_JMP_IMM(BPF_JGT, R1, 2, 1),
4012 BPF_EXIT_INSN(),
4013 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4014 BPF_EXIT_INSN(),
4015 },
4016 INTERNAL,
4017 { },
4018 { { 0, 1 } },
4019 },
4020 /* BPF_JMP | BPF_JGE | BPF_K */
4021 {
4022 "JMP_JGE_K: if (3 >= 2) return 1",
4023 .u.insns_int = {
4024 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4025 BPF_LD_IMM64(R1, 3),
4026 BPF_JMP_IMM(BPF_JGE, R1, 2, 1),
4027 BPF_EXIT_INSN(),
4028 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4029 BPF_EXIT_INSN(),
4030 },
4031 INTERNAL,
4032 { },
4033 { { 0, 1 } },
4034 },
4035 /* BPF_JMP | BPF_JGT | BPF_K jump backwards */
4036 {
4037 "JMP_JGT_K: if (3 > 2) return 1 (jump backwards)",
4038 .u.insns_int = {
4039 BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
4040 BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
4041 BPF_EXIT_INSN(),
4042 BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
4043 BPF_LD_IMM64(R1, 3), /* note: this takes 2 insns */
4044 BPF_JMP_IMM(BPF_JGT, R1, 2, -6), /* goto out */
4045 BPF_EXIT_INSN(),
4046 },
4047 INTERNAL,
4048 { },
4049 { { 0, 1 } },
4050 },
4051 {
4052 "JMP_JGE_K: if (3 >= 3) return 1",
4053 .u.insns_int = {
4054 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4055 BPF_LD_IMM64(R1, 3),
4056 BPF_JMP_IMM(BPF_JGE, R1, 3, 1),
4057 BPF_EXIT_INSN(),
4058 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4059 BPF_EXIT_INSN(),
4060 },
4061 INTERNAL,
4062 { },
4063 { { 0, 1 } },
4064 },
4065 /* BPF_JMP | BPF_JNE | BPF_K */
4066 {
4067 "JMP_JNE_K: if (3 != 2) return 1",
4068 .u.insns_int = {
4069 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4070 BPF_LD_IMM64(R1, 3),
4071 BPF_JMP_IMM(BPF_JNE, R1, 2, 1),
4072 BPF_EXIT_INSN(),
4073 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4074 BPF_EXIT_INSN(),
4075 },
4076 INTERNAL,
4077 { },
4078 { { 0, 1 } },
4079 },
4080 /* BPF_JMP | BPF_JEQ | BPF_K */
4081 {
4082 "JMP_JEQ_K: if (3 == 3) return 1",
4083 .u.insns_int = {
4084 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4085 BPF_LD_IMM64(R1, 3),
4086 BPF_JMP_IMM(BPF_JEQ, R1, 3, 1),
4087 BPF_EXIT_INSN(),
4088 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4089 BPF_EXIT_INSN(),
4090 },
4091 INTERNAL,
4092 { },
4093 { { 0, 1 } },
4094 },
4095 /* BPF_JMP | BPF_JSET | BPF_K */
4096 {
4097 "JMP_JSET_K: if (0x3 & 0x2) return 1",
4098 .u.insns_int = {
4099 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4100 BPF_LD_IMM64(R1, 3),
4101			BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
4102 BPF_EXIT_INSN(),
4103 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4104 BPF_EXIT_INSN(),
4105 },
4106 INTERNAL,
4107 { },
4108 { { 0, 1 } },
4109 },
4110 {
4111 "JMP_JSET_K: if (0x3 & 0xffffffff) return 1",
4112 .u.insns_int = {
4113 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4114 BPF_LD_IMM64(R1, 3),
4115			BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
4116 BPF_EXIT_INSN(),
4117 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4118 BPF_EXIT_INSN(),
4119 },
4120 INTERNAL,
4121 { },
4122 { { 0, 1 } },
4123 },
4124 /* BPF_JMP | BPF_JSGT | BPF_X */
4125 {
4126 "JMP_JSGT_X: Signed jump: if (-1 > -2) return 1",
4127 .u.insns_int = {
4128 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4129 BPF_LD_IMM64(R1, -1),
4130 BPF_LD_IMM64(R2, -2),
4131 BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
4132 BPF_EXIT_INSN(),
4133 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4134 BPF_EXIT_INSN(),
4135 },
4136 INTERNAL,
4137 { },
4138 { { 0, 1 } },
4139 },
4140 {
4141 "JMP_JSGT_X: Signed jump: if (-1 > -1) return 0",
4142 .u.insns_int = {
4143 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4144 BPF_LD_IMM64(R1, -1),
4145 BPF_LD_IMM64(R2, -1),
4146 BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
4147 BPF_EXIT_INSN(),
4148 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4149 BPF_EXIT_INSN(),
4150 },
4151 INTERNAL,
4152 { },
4153 { { 0, 1 } },
4154 },
4155 /* BPF_JMP | BPF_JSGE | BPF_X */
4156 {
4157 "JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1",
4158 .u.insns_int = {
4159 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4160 BPF_LD_IMM64(R1, -1),
4161 BPF_LD_IMM64(R2, -2),
4162 BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
4163 BPF_EXIT_INSN(),
4164 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4165 BPF_EXIT_INSN(),
4166 },
4167 INTERNAL,
4168 { },
4169 { { 0, 1 } },
4170 },
4171 {
4172 "JMP_JSGE_X: Signed jump: if (-1 >= -1) return 1",
4173 .u.insns_int = {
4174 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4175 BPF_LD_IMM64(R1, -1),
4176 BPF_LD_IMM64(R2, -1),
4177 BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
4178 BPF_EXIT_INSN(),
4179 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4180 BPF_EXIT_INSN(),
4181 },
4182 INTERNAL,
4183 { },
4184 { { 0, 1 } },
4185 },
4186 /* BPF_JMP | BPF_JGT | BPF_X */
4187 {
4188 "JMP_JGT_X: if (3 > 2) return 1",
4189 .u.insns_int = {
4190 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4191 BPF_LD_IMM64(R1, 3),
4192 BPF_LD_IMM64(R2, 2),
4193 BPF_JMP_REG(BPF_JGT, R1, R2, 1),
4194 BPF_EXIT_INSN(),
4195 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4196 BPF_EXIT_INSN(),
4197 },
4198 INTERNAL,
4199 { },
4200 { { 0, 1 } },
4201 },
4202 /* BPF_JMP | BPF_JGE | BPF_X */
4203 {
4204 "JMP_JGE_X: if (3 >= 2) return 1",
4205 .u.insns_int = {
4206 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4207 BPF_LD_IMM64(R1, 3),
4208 BPF_LD_IMM64(R2, 2),
4209 BPF_JMP_REG(BPF_JGE, R1, R2, 1),
4210 BPF_EXIT_INSN(),
4211 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4212 BPF_EXIT_INSN(),
4213 },
4214 INTERNAL,
4215 { },
4216 { { 0, 1 } },
4217 },
4218 {
4219 "JMP_JGE_X: if (3 >= 3) return 1",
4220 .u.insns_int = {
4221 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4222 BPF_LD_IMM64(R1, 3),
4223 BPF_LD_IMM64(R2, 3),
4224 BPF_JMP_REG(BPF_JGE, R1, R2, 1),
4225 BPF_EXIT_INSN(),
4226 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4227 BPF_EXIT_INSN(),
4228 },
4229 INTERNAL,
4230 { },
4231 { { 0, 1 } },
4232 },
4233 /* BPF_JMP | BPF_JNE | BPF_X */
4234 {
4235 "JMP_JNE_X: if (3 != 2) return 1",
4236 .u.insns_int = {
4237 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4238 BPF_LD_IMM64(R1, 3),
4239 BPF_LD_IMM64(R2, 2),
4240 BPF_JMP_REG(BPF_JNE, R1, R2, 1),
4241 BPF_EXIT_INSN(),
4242 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4243 BPF_EXIT_INSN(),
4244 },
4245 INTERNAL,
4246 { },
4247 { { 0, 1 } },
4248 },
4249 /* BPF_JMP | BPF_JEQ | BPF_X */
4250 {
4251 "JMP_JEQ_X: if (3 == 3) return 1",
4252 .u.insns_int = {
4253 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4254 BPF_LD_IMM64(R1, 3),
4255 BPF_LD_IMM64(R2, 3),
4256 BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
4257 BPF_EXIT_INSN(),
4258 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4259 BPF_EXIT_INSN(),
4260 },
4261 INTERNAL,
4262 { },
4263 { { 0, 1 } },
4264 },
4265 /* BPF_JMP | BPF_JSET | BPF_X */
4266 {
4267 "JMP_JSET_X: if (0x3 & 0x2) return 1",
4268 .u.insns_int = {
4269 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4270 BPF_LD_IMM64(R1, 3),
4271 BPF_LD_IMM64(R2, 2),
4272			BPF_JMP_REG(BPF_JSET, R1, R2, 1),
4273 BPF_EXIT_INSN(),
4274 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4275 BPF_EXIT_INSN(),
4276 },
4277 INTERNAL,
4278 { },
4279 { { 0, 1 } },
4280 },
4281 {
4282 "JMP_JSET_X: if (0x3 & 0xffffffff) return 1",
4283 .u.insns_int = {
4284 BPF_ALU32_IMM(BPF_MOV, R0, 0),
4285 BPF_LD_IMM64(R1, 3),
4286 BPF_LD_IMM64(R2, 0xffffffff),
4287			BPF_JMP_REG(BPF_JSET, R1, R2, 1),
4288 BPF_EXIT_INSN(),
4289 BPF_ALU32_IMM(BPF_MOV, R0, 1),
4290 BPF_EXIT_INSN(),
4291 },
4292 INTERNAL,
4293 { },
4294 { { 0, 1 } },
4295 },
4296 {
4297 "JMP_JA: Jump, gap, jump, ...",
4298 { },
4299 CLASSIC | FLAG_NO_DATA,
4300 { },
4301 { { 0, 0xababcbac } },
4302 .fill_helper = bpf_fill_ja,
4303 },
4304 { /* Mainly checking JIT here. */
4305 "BPF_MAXINSNS: Maximum possible literals",
4306 { },
4307 CLASSIC | FLAG_NO_DATA,
4308 { },
4309 { { 0, 0xffffffff } },
4310 .fill_helper = bpf_fill_maxinsns1,
4311 },
4312 { /* Mainly checking JIT here. */
4313 "BPF_MAXINSNS: Single literal",
4314 { },
4315 CLASSIC | FLAG_NO_DATA,
4316 { },
4317 { { 0, 0xfefefefe } },
4318 .fill_helper = bpf_fill_maxinsns2,
4319 },
4320 { /* Mainly checking JIT here. */
4321 "BPF_MAXINSNS: Run/add until end",
4322 { },
4323 CLASSIC | FLAG_NO_DATA,
4324 { },
4325 { { 0, 0x947bf368 } },
4326 .fill_helper = bpf_fill_maxinsns3,
4327 },
4328 {
4329 "BPF_MAXINSNS: Too many instructions",
4330 { },
4331 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
4332 { },
4333 { },
4334 .fill_helper = bpf_fill_maxinsns4,
4335 },
4336 { /* Mainly checking JIT here. */
4337 "BPF_MAXINSNS: Very long jump",
4338 { },
4339 CLASSIC | FLAG_NO_DATA,
4340 { },
4341 { { 0, 0xabababab } },
4342 .fill_helper = bpf_fill_maxinsns5,
4343 },
4344 { /* Mainly checking JIT here. */
4345 "BPF_MAXINSNS: Ctx heavy transformations",
4346 { },
4347 CLASSIC,
4348 { },
4349 {
4350 { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
4351 { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
4352 },
4353 .fill_helper = bpf_fill_maxinsns6,
4354 },
4355 { /* Mainly checking JIT here. */
4356 "BPF_MAXINSNS: Call heavy transformations",
4357 { },
4358 CLASSIC | FLAG_NO_DATA,
4359 { },
4360 { { 1, 0 }, { 10, 0 } },
4361 .fill_helper = bpf_fill_maxinsns7,
4362 },
4363 { /* Mainly checking JIT here. */
4364 "BPF_MAXINSNS: Jump heavy test",
4365 { },
4366 CLASSIC | FLAG_NO_DATA,
4367 { },
4368 { { 0, 0xffffffff } },
4369 .fill_helper = bpf_fill_maxinsns8,
4370 },
4371 { /* Mainly checking JIT here. */
4372 "BPF_MAXINSNS: Very long jump backwards",
4373 { },
4374 INTERNAL | FLAG_NO_DATA,
4375 { },
4376 { { 0, 0xcbababab } },
4377 .fill_helper = bpf_fill_maxinsns9,
4378 },
4379 { /* Mainly checking JIT here. */
4380 "BPF_MAXINSNS: Edge hopping nuthouse",
4381 { },
4382 INTERNAL | FLAG_NO_DATA,
4383 { },
4384 { { 0, 0xabababac } },
4385 .fill_helper = bpf_fill_maxinsns10,
4386 },
4387 {
4388 "BPF_MAXINSNS: Jump, gap, jump, ...",
4389 { },
4390 CLASSIC | FLAG_NO_DATA,
4391 { },
4392 { { 0, 0xababcbac } },
4393 .fill_helper = bpf_fill_maxinsns11,
4394 },
1808}; 4395};
1809 4396
1810static struct net_device dev; 4397static struct net_device dev;
@@ -1858,10 +4445,15 @@ static void release_test_data(const struct bpf_test *test, void *data)
1858 kfree_skb(data); 4445 kfree_skb(data);
1859} 4446}
1860 4447
1861static int probe_filter_length(struct sock_filter *fp) 4448static int filter_length(int which)
1862{ 4449{
1863 int len = 0; 4450 struct sock_filter *fp;
4451 int len;
1864 4452
4453 if (tests[which].fill_helper)
4454 return tests[which].u.ptr.len;
4455
4456 fp = tests[which].u.insns;
1865 for (len = MAX_INSNS - 1; len > 0; --len) 4457 for (len = MAX_INSNS - 1; len > 0; --len)
1866 if (fp[len].code != 0 || fp[len].k != 0) 4458 if (fp[len].code != 0 || fp[len].k != 0)
1867 break; 4459 break;
@@ -1869,16 +4461,25 @@ static int probe_filter_length(struct sock_filter *fp)
1869 return len + 1; 4461 return len + 1;
1870} 4462}
1871 4463
4464static void *filter_pointer(int which)
4465{
4466 if (tests[which].fill_helper)
4467 return tests[which].u.ptr.insns;
4468 else
4469 return tests[which].u.insns;
4470}
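A fill helper's whole contract with the harness is what filter_length() and filter_pointer() read back: allocate the program, publish it through u.ptr, and return 0 or -ENOMEM. A minimal sketch of such a helper (bpf_fill_example and its two-instruction program are hypothetical, for illustration only):

static int bpf_fill_example(struct bpf_test *self)
{
	struct sock_filter *insn;

	insn = kmalloc_array(2, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	/* Classic two-insn program: load a literal into A, return A. */
	insn[0] = __BPF_STMT(BPF_LD | BPF_IMM, 0x42);
	insn[1] = __BPF_STMT(BPF_RET | BPF_A, 0);

	self->u.ptr.insns = insn;
	self->u.ptr.len = 2;

	return 0;
}

The buffer is owned by the harness from then on: destroy_bpf_tests() below kfree()s u.ptr.insns for every test that set a fill_helper, so the helper itself must not free it.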
4471
1872static struct bpf_prog *generate_filter(int which, int *err) 4472static struct bpf_prog *generate_filter(int which, int *err)
1873{ 4473{
1874 struct bpf_prog *fp;
1875 struct sock_fprog_kern fprog;
1876 unsigned int flen = probe_filter_length(tests[which].u.insns);
1877 __u8 test_type = tests[which].aux & TEST_TYPE_MASK; 4474 __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
4475 unsigned int flen = filter_length(which);
4476 void *fptr = filter_pointer(which);
4477 struct sock_fprog_kern fprog;
4478 struct bpf_prog *fp;
1878 4479
1879 switch (test_type) { 4480 switch (test_type) {
1880 case CLASSIC: 4481 case CLASSIC:
1881 fprog.filter = tests[which].u.insns; 4482 fprog.filter = fptr;
1882 fprog.len = flen; 4483 fprog.len = flen;
1883 4484
1884 *err = bpf_prog_create(&fp, &fprog); 4485 *err = bpf_prog_create(&fp, &fprog);
@@ -1914,8 +4515,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
1914 } 4515 }
1915 4516
1916 fp->len = flen; 4517 fp->len = flen;
1917 memcpy(fp->insnsi, tests[which].u.insns_int, 4518 memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
1918 fp->len * sizeof(struct bpf_insn));
1919 4519
1920 bpf_prog_select_runtime(fp); 4520 bpf_prog_select_runtime(fp);
1921 break; 4521 break;
@@ -1987,9 +4587,33 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
1987 return err_cnt; 4587 return err_cnt;
1988} 4588}
1989 4589
4590static __init int prepare_bpf_tests(void)
4591{
4592 int i;
4593
4594 for (i = 0; i < ARRAY_SIZE(tests); i++) {
4595 if (tests[i].fill_helper &&
4596 tests[i].fill_helper(&tests[i]) < 0)
4597 return -ENOMEM;
4598 }
4599
4600 return 0;
4601}
4602
4603static __init void destroy_bpf_tests(void)
4604{
4605 int i;
4606
4607 for (i = 0; i < ARRAY_SIZE(tests); i++) {
4608 if (tests[i].fill_helper)
4609 kfree(tests[i].u.ptr.insns);
4610 }
4611}
4612
1990static __init int test_bpf(void) 4613static __init int test_bpf(void)
1991{ 4614{
1992 int i, err_cnt = 0, pass_cnt = 0; 4615 int i, err_cnt = 0, pass_cnt = 0;
4616 int jit_cnt = 0, run_cnt = 0;
1993 4617
1994 for (i = 0; i < ARRAY_SIZE(tests); i++) { 4618 for (i = 0; i < ARRAY_SIZE(tests); i++) {
1995 struct bpf_prog *fp; 4619 struct bpf_prog *fp;
@@ -2006,6 +4630,13 @@ static __init int test_bpf(void)
2006 4630
2007 return err; 4631 return err;
2008 } 4632 }
4633
4634 pr_cont("jited:%u ", fp->jited);
4635
4636 run_cnt++;
4637 if (fp->jited)
4638 jit_cnt++;
4639
2009 err = run_one(fp, &tests[i]); 4640 err = run_one(fp, &tests[i]);
2010 release_filter(fp, i); 4641 release_filter(fp, i);
2011 4642
@@ -2018,13 +4649,24 @@ static __init int test_bpf(void)
2018 } 4649 }
2019 } 4650 }
2020 4651
2021 pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt); 4652 pr_info("Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n",
4653 pass_cnt, err_cnt, jit_cnt, run_cnt);
4654
2022 return err_cnt ? -EINVAL : 0; 4655 return err_cnt ? -EINVAL : 0;
2023} 4656}
2024 4657
2025static int __init test_bpf_init(void) 4658static int __init test_bpf_init(void)
2026{ 4659{
2027 return test_bpf(); 4660 int ret;
4661
4662 ret = prepare_bpf_tests();
4663 if (ret < 0)
4664 return ret;
4665
4666 ret = test_bpf();
4667
4668 destroy_bpf_tests();
4669 return ret;
2028} 4670}
2029 4671
2030static void __exit test_bpf_exit(void) 4672static void __exit test_bpf_exit(void)
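
The fill_helper hook added above lets a test case generate its program at
module load time instead of embedding it in the tests[] array:
prepare_bpf_tests() invokes each helper before the run and
destroy_bpf_tests() frees the generated buffers afterwards, while
filter_length()/filter_pointer() transparently prefer the generated
u.ptr buffer over the static u.insns array. A minimal sketch of such a
helper, assuming only the struct bpf_test layout implied by the calls in
this diff; the program size and opcode choices are illustrative:

#include <linux/filter.h>
#include <linux/slab.h>

static int bpf_fill_example(struct bpf_test *self)
{
        unsigned int len = 64;                  /* illustrative size */
        struct sock_filter *insn;
        unsigned int i;

        insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
        if (!insn)
                return -ENOMEM;         /* prepare_bpf_tests() aborts */

        /* Load immediates, then accept; any valid classic BPF works. */
        for (i = 0; i < len - 1; i++)
                insn[i] = (struct sock_filter) BPF_STMT(BPF_LD | BPF_IMM, i);
        insn[len - 1] = (struct sock_filter) BPF_STMT(BPF_RET | BPF_K, 1);

        self->u.ptr.insns = insn;       /* kfree()d by destroy_bpf_tests() */
        self->u.ptr.len = len;
        return 0;
}
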
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index b2957540d3c7..c90777eae1f8 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -1,14 +1,9 @@
1/* 1/*
2 * Resizable, Scalable, Concurrent Hash Table 2 * Resizable, Scalable, Concurrent Hash Table
3 * 3 *
4 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch> 4 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
5 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> 5 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
6 * 6 *
7 * Based on the following paper:
8 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
9 *
10 * Code partially derived from nft_hash
11 *
12 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
@@ -26,20 +21,37 @@
26#include <linux/rhashtable.h> 21#include <linux/rhashtable.h>
27#include <linux/slab.h> 22#include <linux/slab.h>
28 23
24#define MAX_ENTRIES 1000000
25#define TEST_INSERT_FAIL INT_MAX
26
27static int entries = 50000;
28module_param(entries, int, 0);
29MODULE_PARM_DESC(entries, "Number of entries to add (default: 50000)");
30
31static int runs = 4;
32module_param(runs, int, 0);
33MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
34
35static int max_size = 65536;
36module_param(max_size, int, 0);
37MODULE_PARM_DESC(max_size, "Maximum table size (default: 65536)");
29 38
30#define TEST_HT_SIZE 8 39static bool shrinking = false;
31#define TEST_ENTRIES 2048 40module_param(shrinking, bool, 0);
32#define TEST_PTR ((void *) 0xdeadbeef) 41MODULE_PARM_DESC(shrinking, "Enable automatic shrinking (default: off)");
33#define TEST_NEXPANDS 4 42
43static int size = 8;
44module_param(size, int, 0);
45MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");
34 46
35struct test_obj { 47struct test_obj {
36 void *ptr;
37 int value; 48 int value;
38 struct rhash_head node; 49 struct rhash_head node;
39}; 50};
40 51
41static const struct rhashtable_params test_rht_params = { 52static struct test_obj array[MAX_ENTRIES];
42 .nelem_hint = TEST_HT_SIZE, 53
54static struct rhashtable_params test_rht_params = {
43 .head_offset = offsetof(struct test_obj, node), 55 .head_offset = offsetof(struct test_obj, node),
44 .key_offset = offsetof(struct test_obj, value), 56 .key_offset = offsetof(struct test_obj, value),
45 .key_len = sizeof(int), 57 .key_len = sizeof(int),
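
With the test parameterized as above, a run can be shaped from the
command line; the module name is lib/test_rhashtable and the values
below are illustrative:

        insmod test_rhashtable.ko entries=100000 runs=8 max_size=131072 shrinking=1 size=16

Any parameter left unset falls back to the default noted in its
MODULE_PARM_DESC.
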
@@ -51,11 +63,14 @@ static int __init test_rht_lookup(struct rhashtable *ht)
51{ 63{
52 unsigned int i; 64 unsigned int i;
53 65
54 for (i = 0; i < TEST_ENTRIES * 2; i++) { 66 for (i = 0; i < entries * 2; i++) {
55 struct test_obj *obj; 67 struct test_obj *obj;
56 bool expected = !(i % 2); 68 bool expected = !(i % 2);
57 u32 key = i; 69 u32 key = i;
58 70
71 if (array[i / 2].value == TEST_INSERT_FAIL)
72 expected = false;
73
59 obj = rhashtable_lookup_fast(ht, &key, test_rht_params); 74 obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
60 75
61 if (expected && !obj) { 76 if (expected && !obj) {
@@ -66,9 +81,9 @@ static int __init test_rht_lookup(struct rhashtable *ht)
66 key); 81 key);
67 return -EEXIST; 82 return -EEXIST;
68 } else if (expected && obj) { 83 } else if (expected && obj) {
69 if (obj->ptr != TEST_PTR || obj->value != i) { 84 if (obj->value != i) {
70 pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n", 85 pr_warn("Test failed: Lookup value mismatch %u!=%u\n",
71 obj->ptr, TEST_PTR, obj->value, i); 86 obj->value, i);
72 return -EINVAL; 87 return -EINVAL;
73 } 88 }
74 } 89 }
@@ -77,129 +92,147 @@ static int __init test_rht_lookup(struct rhashtable *ht)
77 return 0; 92 return 0;
78} 93}
79 94
80static void test_bucket_stats(struct rhashtable *ht, bool quiet) 95static void test_bucket_stats(struct rhashtable *ht)
81{ 96{
82 unsigned int cnt, rcu_cnt, i, total = 0; 97 unsigned int err, total = 0, chain_len = 0;
98 struct rhashtable_iter hti;
83 struct rhash_head *pos; 99 struct rhash_head *pos;
84 struct test_obj *obj;
85 struct bucket_table *tbl;
86 100
87 tbl = rht_dereference_rcu(ht->tbl, ht); 101 err = rhashtable_walk_init(ht, &hti);
88 for (i = 0; i < tbl->size; i++) { 102 if (err) {
89 rcu_cnt = cnt = 0; 103 pr_warn("Test failed: allocation error");
104 return;
105 }
90 106
91 if (!quiet) 107 err = rhashtable_walk_start(&hti);
92 pr_info(" [%#4x/%u]", i, tbl->size); 108 if (err && err != -EAGAIN) {
109 pr_warn("Test failed: iterator failed: %d\n", err);
110 return;
111 }
93 112
94 rht_for_each_entry_rcu(obj, pos, tbl, i, node) { 113 while ((pos = rhashtable_walk_next(&hti))) {
95 cnt++; 114 if (PTR_ERR(pos) == -EAGAIN) {
96 total++; 115 pr_info("Info: encountered resize\n");
97 if (!quiet) 116 chain_len++;
98 pr_cont(" [%p],", obj); 117 continue;
118 } else if (IS_ERR(pos)) {
119 pr_warn("Test failed: rhashtable_walk_next() error: %ld\n",
120 PTR_ERR(pos));
121 break;
99 } 122 }
100 123
101 rht_for_each_entry_rcu(obj, pos, tbl, i, node) 124 total++;
102 rcu_cnt++;
103
104 if (rcu_cnt != cnt)
105 pr_warn("Test failed: Chain count mismach %d != %d",
106 cnt, rcu_cnt);
107
108 if (!quiet)
109 pr_cont("\n [%#x] first element: %p, chain length: %u\n",
110 i, tbl->buckets[i], cnt);
111 } 125 }
112 126
113 pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d\n", 127 rhashtable_walk_stop(&hti);
114 total, atomic_read(&ht->nelems), TEST_ENTRIES); 128 rhashtable_walk_exit(&hti);
129
130 pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n",
131 total, atomic_read(&ht->nelems), entries, chain_len);
115 132
116 if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES) 133 if (total != atomic_read(&ht->nelems) || total != entries)
117 pr_warn("Test failed: Total count mismatch ^^^"); 134 pr_warn("Test failed: Total count mismatch ^^^");
118} 135}
119 136
120static int __init test_rhashtable(struct rhashtable *ht) 137static s64 __init test_rhashtable(struct rhashtable *ht)
121{ 138{
122 struct bucket_table *tbl;
123 struct test_obj *obj; 139 struct test_obj *obj;
124 struct rhash_head *pos, *next;
125 int err; 140 int err;
126 unsigned int i; 141 unsigned int i, insert_fails = 0;
142 s64 start, end;
127 143
128 /* 144 /*
129 * Insertion Test: 145 * Insertion Test:
130 * Insert TEST_ENTRIES into table with all keys even numbers 146 * Insert entries into table with all keys even numbers
131 */ 147 */
132 pr_info(" Adding %d keys\n", TEST_ENTRIES); 148 pr_info(" Adding %d keys\n", entries);
133 for (i = 0; i < TEST_ENTRIES; i++) { 149 start = ktime_get_ns();
134 struct test_obj *obj; 150 for (i = 0; i < entries; i++) {
135 151 struct test_obj *obj = &array[i];
136 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
137 if (!obj) {
138 err = -ENOMEM;
139 goto error;
140 }
141 152
142 obj->ptr = TEST_PTR;
143 obj->value = i * 2; 153 obj->value = i * 2;
144 154
145 err = rhashtable_insert_fast(ht, &obj->node, test_rht_params); 155 err = rhashtable_insert_fast(ht, &obj->node, test_rht_params);
146 if (err) { 156 if (err == -ENOMEM || err == -EBUSY) {
147 kfree(obj); 157 /* Mark failed inserts but continue */
148 goto error; 158 obj->value = TEST_INSERT_FAIL;
159 insert_fails++;
160 } else if (err) {
161 return err;
149 } 162 }
150 } 163 }
151 164
165 if (insert_fails)
166 pr_info(" %u insertions failed due to memory pressure\n",
167 insert_fails);
168
169 test_bucket_stats(ht);
152 rcu_read_lock(); 170 rcu_read_lock();
153 test_bucket_stats(ht, true);
154 test_rht_lookup(ht); 171 test_rht_lookup(ht);
155 rcu_read_unlock(); 172 rcu_read_unlock();
156 173
157 rcu_read_lock(); 174 test_bucket_stats(ht);
158 test_bucket_stats(ht, true);
159 rcu_read_unlock();
160 175
161 pr_info(" Deleting %d keys\n", TEST_ENTRIES); 176 pr_info(" Deleting %d keys\n", entries);
162 for (i = 0; i < TEST_ENTRIES; i++) { 177 for (i = 0; i < entries; i++) {
163 u32 key = i * 2; 178 u32 key = i * 2;
164 179
165 obj = rhashtable_lookup_fast(ht, &key, test_rht_params); 180 if (array[i].value != TEST_INSERT_FAIL) {
166 BUG_ON(!obj); 181 obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
182 BUG_ON(!obj);
167 183
168 rhashtable_remove_fast(ht, &obj->node, test_rht_params); 184 rhashtable_remove_fast(ht, &obj->node, test_rht_params);
169 kfree(obj); 185 }
170 } 186 }
171 187
172 return 0; 188 end = ktime_get_ns();
173 189 pr_info(" Duration of test: %lld ns\n", end - start);
174error:
175 tbl = rht_dereference_rcu(ht->tbl, ht);
176 for (i = 0; i < tbl->size; i++)
177 rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
178 kfree(obj);
179 190
180 return err; 191 return end - start;
181} 192}
182 193
183static struct rhashtable ht; 194static struct rhashtable ht;
184 195
185static int __init test_rht_init(void) 196static int __init test_rht_init(void)
186{ 197{
187 int err; 198 int i, err;
199 u64 total_time = 0;
188 200
189 pr_info("Running resizable hashtable tests...\n"); 201 entries = min(entries, MAX_ENTRIES);
190 202
191 err = rhashtable_init(&ht, &test_rht_params); 203 test_rht_params.automatic_shrinking = shrinking;
192 if (err < 0) { 204 test_rht_params.max_size = max_size;
193 pr_warn("Test failed: Unable to initialize hashtable: %d\n", 205 test_rht_params.nelem_hint = size;
194 err);
195 return err;
196 }
197 206
198 err = test_rhashtable(&ht); 207 pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n",
208 size, max_size, shrinking);
199 209
200 rhashtable_destroy(&ht); 210 for (i = 0; i < runs; i++) {
211 s64 time;
201 212
202 return err; 213 pr_info("Test %02d:\n", i);
214 memset(&array, 0, sizeof(array));
215 err = rhashtable_init(&ht, &test_rht_params);
216 if (err < 0) {
217 pr_warn("Test failed: Unable to initialize hashtable: %d\n",
218 err);
219 continue;
220 }
221
222 time = test_rhashtable(&ht);
223 rhashtable_destroy(&ht);
224 if (time < 0) {
225 pr_warn("Test failed: return code %lld\n", time);
226 return -EINVAL;
227 }
228
229 total_time += time;
230 }
231
232 do_div(total_time, runs);
233 pr_info("Average test time: %llu\n", total_time);
234
235 return 0;
203} 236}
204 237
205static void __exit test_rht_exit(void) 238static void __exit test_rht_exit(void)
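
test_bucket_stats() now counts entries through the rhashtable walk
interface instead of dereferencing bucket_table internals directly. A
minimal sketch of that iterator pattern, using only the walk calls that
appear in this diff; the function name is an invented placeholder:

#include <linux/err.h>
#include <linux/rhashtable.h>

static int count_entries(struct rhashtable *ht)
{
        struct rhashtable_iter hti;
        void *pos;
        int total = 0;
        int err;

        err = rhashtable_walk_init(ht, &hti);   /* allocates walker state */
        if (err)
                return err;

        err = rhashtable_walk_start(&hti);      /* enters RCU read section */
        if (err && err != -EAGAIN) {
                rhashtable_walk_exit(&hti);
                return err;
        }

        while ((pos = rhashtable_walk_next(&hti))) {
                if (PTR_ERR(pos) == -EAGAIN)
                        continue;       /* walk moved to a resized table */
                if (IS_ERR(pos)) {
                        total = PTR_ERR(pos);
                        break;
                }
                total++;
        }

        rhashtable_walk_stop(&hti);             /* leaves RCU read section */
        rhashtable_walk_exit(&hti);
        return total;
}
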
diff --git a/lib/timerqueue.c b/lib/timerqueue.c
index a382e4a32609..782ae8ca2c06 100644
--- a/lib/timerqueue.c
+++ b/lib/timerqueue.c
@@ -36,7 +36,7 @@
36 * Adds the timer node to the timerqueue, sorted by the 36 * Adds the timer node to the timerqueue, sorted by the
37 * node's expires value. 37 * node's expires value.
38 */ 38 */
39void timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) 39bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
40{ 40{
41 struct rb_node **p = &head->head.rb_node; 41 struct rb_node **p = &head->head.rb_node;
42 struct rb_node *parent = NULL; 42 struct rb_node *parent = NULL;
@@ -56,8 +56,11 @@ void timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
56 rb_link_node(&node->node, parent, p); 56 rb_link_node(&node->node, parent, p);
57 rb_insert_color(&node->node, &head->head); 57 rb_insert_color(&node->node, &head->head);
58 58
59 if (!head->next || node->expires.tv64 < head->next->expires.tv64) 59 if (!head->next || node->expires.tv64 < head->next->expires.tv64) {
60 head->next = node; 60 head->next = node;
61 return true;
62 }
63 return false;
61} 64}
62EXPORT_SYMBOL_GPL(timerqueue_add); 65EXPORT_SYMBOL_GPL(timerqueue_add);
63 66
@@ -69,7 +72,7 @@ EXPORT_SYMBOL_GPL(timerqueue_add);
69 * 72 *
70 * Removes the timer node from the timerqueue. 73 * Removes the timer node from the timerqueue.
71 */ 74 */
72void timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) 75bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
73{ 76{
74 WARN_ON_ONCE(RB_EMPTY_NODE(&node->node)); 77 WARN_ON_ONCE(RB_EMPTY_NODE(&node->node));
75 78
@@ -82,6 +85,7 @@ void timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
82 } 85 }
83 rb_erase(&node->node, &head->head); 86 rb_erase(&node->node, &head->head);
84 RB_CLEAR_NODE(&node->node); 87 RB_CLEAR_NODE(&node->node);
88 return head->next != NULL;
85} 89}
86EXPORT_SYMBOL_GPL(timerqueue_del); 90EXPORT_SYMBOL_GPL(timerqueue_del);
87 91
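
The new bool return values let callers decide when to touch hardware:
timerqueue_add() reports whether the inserted node became the new
earliest deadline, and timerqueue_del() reports whether any nodes
remain. A sketch of a hypothetical consumer; reprogram_hw_timer() and
stop_hw_timer() are invented placeholders:

#include <linux/ktime.h>
#include <linux/timerqueue.h>

extern void reprogram_hw_timer(ktime_t expires);        /* placeholder */
extern void stop_hw_timer(void);                        /* placeholder */

static void my_timer_enqueue(struct timerqueue_head *head,
                             struct timerqueue_node *node, ktime_t expires)
{
        node->expires = expires;
        if (timerqueue_add(head, node))
                reprogram_hw_timer(expires);    /* node is the new head */
}

static void my_timer_cancel(struct timerqueue_head *head,
                            struct timerqueue_node *node)
{
        if (!timerqueue_del(head, node))
                stop_hw_timer();                /* queue ran empty */
}
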