aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig14
-rw-r--r--lib/Kconfig.debug47
-rw-r--r--lib/Makefile7
-rw-r--r--lib/asn1_decoder.c2
-rw-r--r--lib/atomic64_test.c13
-rw-r--r--lib/btree.c1
-rw-r--r--lib/bug.c21
-rw-r--r--lib/cpumask.c63
-rw-r--r--lib/crc32.c4
-rw-r--r--lib/crc7.c84
-rw-r--r--lib/debugobjects.c19
-rw-r--r--lib/devres.c10
-rw-r--r--lib/digsig.c5
-rw-r--r--lib/fdt_empty_tree.c2
-rw-r--r--lib/idr.c40
-rw-r--r--lib/interval_tree.c6
-rw-r--r--lib/interval_tree_test.c (renamed from lib/interval_tree_test_main.c)0
-rw-r--r--lib/iovec.c55
-rw-r--r--lib/kobject_uevent.c6
-rw-r--r--lib/libcrc32c.c5
-rw-r--r--lib/lz4/lz4_decompress.c12
-rw-r--r--lib/lzo/lzo1x_decompress_safe.c62
-rw-r--r--lib/nlattr.c17
-rw-r--r--lib/plist.c56
-rw-r--r--lib/radix-tree.c13
-rw-r--r--lib/string.c26
-rw-r--r--lib/swiotlb.c30
-rw-r--r--lib/test_bpf.c1929
-rw-r--r--lib/textsearch.c9
-rw-r--r--lib/vsprintf.c4
-rw-r--r--lib/xz/Kconfig24
-rw-r--r--lib/xz/xz_dec_lzma2.c4
32 files changed, 2404 insertions, 186 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 4771fb3f4da4..334f7722a999 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -331,6 +331,20 @@ config TEXTSEARCH_FSM
331config BTREE 331config BTREE
332 boolean 332 boolean
333 333
334config INTERVAL_TREE
335 boolean
336 help
337 Simple, embeddable, interval-tree. Can find the start of an
338 overlapping range in log(n) time and then iterate over all
339 overlapping nodes. The algorithm is implemented as an
340 augmented rbtree.
341
342 See:
343
344 Documentation/rbtree.txt
345
346 for more information.
347
334config ASSOCIATIVE_ARRAY 348config ASSOCIATIVE_ARRAY
335 bool 349 bool
336 help 350 help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 819ac51202c0..7a638aa3545b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -501,6 +501,16 @@ config DEBUG_VM
501 501
502 If unsure, say N. 502 If unsure, say N.
503 503
504config DEBUG_VM_VMACACHE
505 bool "Debug VMA caching"
506 depends on DEBUG_VM
507 help
508 Enable this to turn on VMA caching debug information. Doing so
509 can cause significant overhead, so only enable it in non-production
510 environments.
511
512 If unsure, say N.
513
504config DEBUG_VM_RB 514config DEBUG_VM_RB
505 bool "Debug VM red-black trees" 515 bool "Debug VM red-black trees"
506 depends on DEBUG_VM 516 depends on DEBUG_VM
@@ -575,8 +585,8 @@ config DEBUG_HIGHMEM
575 bool "Highmem debugging" 585 bool "Highmem debugging"
576 depends on DEBUG_KERNEL && HIGHMEM 586 depends on DEBUG_KERNEL && HIGHMEM
577 help 587 help
578 This options enables addition error checking for high memory systems. 588 This option enables additional error checking for high memory
579 Disable for production systems. 589 systems. Disable for production systems.
580 590
581config HAVE_DEBUG_STACKOVERFLOW 591config HAVE_DEBUG_STACKOVERFLOW
582 bool 592 bool
@@ -823,11 +833,6 @@ config DEBUG_RT_MUTEXES
823 This allows rt mutex semantics violations and rt mutex related 833 This allows rt mutex semantics violations and rt mutex related
824 deadlocks (lockups) to be detected and reported automatically. 834 deadlocks (lockups) to be detected and reported automatically.
825 835
826config DEBUG_PI_LIST
827 bool
828 default y
829 depends on DEBUG_RT_MUTEXES
830
831config RT_MUTEX_TESTER 836config RT_MUTEX_TESTER
832 bool "Built-in scriptable tester for rt-mutexes" 837 bool "Built-in scriptable tester for rt-mutexes"
833 depends on DEBUG_KERNEL && RT_MUTEXES 838 depends on DEBUG_KERNEL && RT_MUTEXES
@@ -925,7 +930,7 @@ config LOCKDEP
925 bool 930 bool
926 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 931 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
927 select STACKTRACE 932 select STACKTRACE
928 select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC 933 select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
929 select KALLSYMS 934 select KALLSYMS
930 select KALLSYMS_ALL 935 select KALLSYMS_ALL
931 936
@@ -1053,6 +1058,16 @@ config DEBUG_LIST
1053 1058
1054 If unsure, say N. 1059 If unsure, say N.
1055 1060
1061config DEBUG_PI_LIST
1062 bool "Debug priority linked list manipulation"
1063 depends on DEBUG_KERNEL
1064 help
1065 Enable this to turn on extended checks in the priority-ordered
1066 linked-list (plist) walking routines. This checks the entire
1067 list multiple times during each manipulation.
1068
1069 If unsure, say N.
1070
1056config DEBUG_SG 1071config DEBUG_SG
1057 bool "Debug SG table operations" 1072 bool "Debug SG table operations"
1058 depends on DEBUG_KERNEL 1073 depends on DEBUG_KERNEL
@@ -1393,7 +1408,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
1393 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT 1408 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
1394 depends on !X86_64 1409 depends on !X86_64
1395 select STACKTRACE 1410 select STACKTRACE
1396 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC 1411 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
1397 help 1412 help
1398 Provide stacktrace filter for fault-injection capabilities 1413 Provide stacktrace filter for fault-injection capabilities
1399 1414
@@ -1496,6 +1511,7 @@ config RBTREE_TEST
1496config INTERVAL_TREE_TEST 1511config INTERVAL_TREE_TEST
1497 tristate "Interval tree test" 1512 tristate "Interval tree test"
1498 depends on m && DEBUG_KERNEL 1513 depends on m && DEBUG_KERNEL
1514 select INTERVAL_TREE
1499 help 1515 help
1500 A benchmark measuring the performance of the interval tree library 1516 A benchmark measuring the performance of the interval tree library
1501 1517
@@ -1620,6 +1636,19 @@ config TEST_USER_COPY
1620 1636
1621 If unsure, say N. 1637 If unsure, say N.
1622 1638
1639config TEST_BPF
1640 tristate "Test BPF filter functionality"
1641 default n
1642 depends on m && NET
1643 help
1644 This builds the "test_bpf" module that runs various test vectors
1645 against the BPF interpreter or BPF JIT compiler depending on the
1646 current setting. This is in particular useful for BPF JIT compiler
1647 development, but also to run regression tests against changes in
1648 the interpreter code.
1649
1650 If unsure, say N.
1651
1623source "samples/Kconfig" 1652source "samples/Kconfig"
1624 1653
1625source "lib/Kconfig.kgdb" 1654source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index 0cd7b68e1382..ba967a19edba 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -33,6 +33,7 @@ obj-y += kstrtox.o
33obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o 33obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
34obj-$(CONFIG_TEST_MODULE) += test_module.o 34obj-$(CONFIG_TEST_MODULE) += test_module.o
35obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o 35obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
36obj-$(CONFIG_TEST_BPF) += test_bpf.o
36 37
37ifeq ($(CONFIG_DEBUG_KOBJECT),y) 38ifeq ($(CONFIG_DEBUG_KOBJECT),y)
38CFLAGS_kobject.o += -DDEBUG 39CFLAGS_kobject.o += -DDEBUG
@@ -50,6 +51,7 @@ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
50obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 51obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
51 52
52obj-$(CONFIG_BTREE) += btree.o 53obj-$(CONFIG_BTREE) += btree.o
54obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
53obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o 55obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
54obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o 56obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
55obj-$(CONFIG_DEBUG_LIST) += list_debug.o 57obj-$(CONFIG_DEBUG_LIST) += list_debug.o
@@ -148,7 +150,8 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
148 150
149obj-$(CONFIG_STMP_DEVICE) += stmp_device.o 151obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
150 152
151libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o 153libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
154 fdt_empty_tree.o
152$(foreach file, $(libfdt_files), \ 155$(foreach file, $(libfdt_files), \
153 $(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt)) 156 $(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
154lib-$(CONFIG_LIBFDT) += $(libfdt_files) 157lib-$(CONFIG_LIBFDT) += $(libfdt_files)
@@ -156,8 +159,6 @@ lib-$(CONFIG_LIBFDT) += $(libfdt_files)
156obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o 159obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
157obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o 160obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
158 161
159interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
160
161obj-$(CONFIG_PERCPU_TEST) += percpu_test.o 162obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
162 163
163obj-$(CONFIG_ASN1) += asn1_decoder.o 164obj-$(CONFIG_ASN1) += asn1_decoder.o
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 11b9b01fda6b..1a000bb050f9 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -140,7 +140,7 @@ error:
140 * @decoder: The decoder definition (produced by asn1_compiler) 140 * @decoder: The decoder definition (produced by asn1_compiler)
141 * @context: The caller's context (to be passed to the action functions) 141 * @context: The caller's context (to be passed to the action functions)
142 * @data: The encoded data 142 * @data: The encoded data
143 * @datasize: The size of the encoded data 143 * @datalen: The size of the encoded data
144 * 144 *
145 * Decode BER/DER/CER encoded ASN.1 data according to a bytecode pattern 145 * Decode BER/DER/CER encoded ASN.1 data according to a bytecode pattern
146 * produced by asn1_compiler. Action functions are called on marked tags to 146 * produced by asn1_compiler. Action functions are called on marked tags to
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 00bca223d1e1..0211d30d8c39 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -8,6 +8,9 @@
8 * the Free Software Foundation; either version 2 of the License, or 8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version. 9 * (at your option) any later version.
10 */ 10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
11#include <linux/init.h> 14#include <linux/init.h>
12#include <linux/bug.h> 15#include <linux/bug.h>
13#include <linux/kernel.h> 16#include <linux/kernel.h>
@@ -146,18 +149,18 @@ static __init int test_atomic64(void)
146 BUG_ON(v.counter != r); 149 BUG_ON(v.counter != r);
147 150
148#ifdef CONFIG_X86 151#ifdef CONFIG_X86
149 printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n", 152 pr_info("passed for %s platform %s CX8 and %s SSE\n",
150#ifdef CONFIG_X86_64 153#ifdef CONFIG_X86_64
151 "x86-64", 154 "x86-64",
152#elif defined(CONFIG_X86_CMPXCHG64) 155#elif defined(CONFIG_X86_CMPXCHG64)
153 "i586+", 156 "i586+",
154#else 157#else
155 "i386+", 158 "i386+",
156#endif 159#endif
157 boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without", 160 boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
158 boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without"); 161 boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
159#else 162#else
160 printk(KERN_INFO "atomic64 test passed\n"); 163 pr_info("passed\n");
161#endif 164#endif
162 165
163 return 0; 166 return 0;
diff --git a/lib/btree.c b/lib/btree.c
index f9a484676cb6..4264871ea1a0 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -198,6 +198,7 @@ EXPORT_SYMBOL_GPL(btree_init);
198 198
199void btree_destroy(struct btree_head *head) 199void btree_destroy(struct btree_head *head)
200{ 200{
201 mempool_free(head->node, head->mempool);
201 mempool_destroy(head->mempool); 202 mempool_destroy(head->mempool);
202 head->mempool = NULL; 203 head->mempool = NULL;
203} 204}
diff --git a/lib/bug.c b/lib/bug.c
index 168603477f02..d1d7c7878900 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -37,6 +37,9 @@
37 37
38 Jeremy Fitzhardinge <jeremy@goop.org> 2006 38 Jeremy Fitzhardinge <jeremy@goop.org> 2006
39 */ 39 */
40
41#define pr_fmt(fmt) fmt
42
40#include <linux/list.h> 43#include <linux/list.h>
41#include <linux/module.h> 44#include <linux/module.h>
42#include <linux/kernel.h> 45#include <linux/kernel.h>
@@ -153,15 +156,13 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
153 156
154 if (warning) { 157 if (warning) {
155 /* this is a WARN_ON rather than BUG/BUG_ON */ 158 /* this is a WARN_ON rather than BUG/BUG_ON */
156 printk(KERN_WARNING "------------[ cut here ]------------\n"); 159 pr_warn("------------[ cut here ]------------\n");
157 160
158 if (file) 161 if (file)
159 printk(KERN_WARNING "WARNING: at %s:%u\n", 162 pr_warn("WARNING: at %s:%u\n", file, line);
160 file, line);
161 else 163 else
162 printk(KERN_WARNING "WARNING: at %p " 164 pr_warn("WARNING: at %p [verbose debug info unavailable]\n",
163 "[verbose debug info unavailable]\n", 165 (void *)bugaddr);
164 (void *)bugaddr);
165 166
166 print_modules(); 167 print_modules();
167 show_regs(regs); 168 show_regs(regs);
@@ -174,12 +175,10 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
174 printk(KERN_DEFAULT "------------[ cut here ]------------\n"); 175 printk(KERN_DEFAULT "------------[ cut here ]------------\n");
175 176
176 if (file) 177 if (file)
177 printk(KERN_CRIT "kernel BUG at %s:%u!\n", 178 pr_crit("kernel BUG at %s:%u!\n", file, line);
178 file, line);
179 else 179 else
180 printk(KERN_CRIT "Kernel BUG at %p " 180 pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
181 "[verbose debug info unavailable]\n", 181 (void *)bugaddr);
182 (void *)bugaddr);
183 182
184 return BUG_TRAP_TYPE_BUG; 183 return BUG_TRAP_TYPE_BUG;
185} 184}
diff --git a/lib/cpumask.c b/lib/cpumask.c
index b810b753c607..b6513a9f2892 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -164,3 +164,66 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
164 memblock_free_early(__pa(mask), cpumask_size()); 164 memblock_free_early(__pa(mask), cpumask_size());
165} 165}
166#endif 166#endif
167
168/**
169 * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
170 *
171 * @i: index number
172 * @numa_node: local numa_node
173 * @dstp: cpumask with the relevant cpu bit set according to the policy
174 *
175 * This function sets the cpumask according to a numa aware policy.
176 * cpumask could be used as an affinity hint for the IRQ related to a
177 * queue. When the policy is to spread queues across cores - local cores
178 * first.
179 *
180 * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
181 * the cpu bit and need to re-call the function.
182 */
183int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
184{
185 cpumask_var_t mask;
186 int cpu;
187 int ret = 0;
188
189 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
190 return -ENOMEM;
191
192 i %= num_online_cpus();
193
194 if (numa_node == -1 || !cpumask_of_node(numa_node)) {
195 /* Use all online cpu's for non numa aware system */
196 cpumask_copy(mask, cpu_online_mask);
197 } else {
198 int n;
199
200 cpumask_and(mask,
201 cpumask_of_node(numa_node), cpu_online_mask);
202
203 n = cpumask_weight(mask);
204 if (i >= n) {
205 i -= n;
206
207 /* If index > number of local cpu's, mask out local
208 * cpu's
209 */
210 cpumask_andnot(mask, cpu_online_mask, mask);
211 }
212 }
213
214 for_each_cpu(cpu, mask) {
215 if (--i < 0)
216 goto out;
217 }
218
219 ret = -EAGAIN;
220
221out:
222 free_cpumask_var(mask);
223
224 if (!ret)
225 cpumask_set_cpu(cpu, dstp);
226
227 return ret;
228}
229EXPORT_SYMBOL(cpumask_set_cpu_local_first);
diff --git a/lib/crc32.c b/lib/crc32.c
index 70f00ca5ef1e..21a7b2135af6 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -33,13 +33,13 @@
33#include "crc32defs.h" 33#include "crc32defs.h"
34 34
35#if CRC_LE_BITS > 8 35#if CRC_LE_BITS > 8
36# define tole(x) ((__force u32) __constant_cpu_to_le32(x)) 36# define tole(x) ((__force u32) cpu_to_le32(x))
37#else 37#else
38# define tole(x) (x) 38# define tole(x) (x)
39#endif 39#endif
40 40
41#if CRC_BE_BITS > 8 41#if CRC_BE_BITS > 8
42# define tobe(x) ((__force u32) __constant_cpu_to_be32(x)) 42# define tobe(x) ((__force u32) cpu_to_be32(x))
43#else 43#else
44# define tobe(x) (x) 44# define tobe(x) (x)
45#endif 45#endif
diff --git a/lib/crc7.c b/lib/crc7.c
index f1c3a144cec1..bf6255e23919 100644
--- a/lib/crc7.c
+++ b/lib/crc7.c
@@ -10,42 +10,47 @@
10#include <linux/crc7.h> 10#include <linux/crc7.h>
11 11
12 12
13/* Table for CRC-7 (polynomial x^7 + x^3 + 1) */ 13/*
14const u8 crc7_syndrome_table[256] = { 14 * Table for CRC-7 (polynomial x^7 + x^3 + 1).
15 0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f, 15 * This is a big-endian CRC (msbit is highest power of x),
16 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77, 16 * aligned so the msbit of the byte is the x^6 coefficient
17 0x19, 0x10, 0x0b, 0x02, 0x3d, 0x34, 0x2f, 0x26, 17 * and the lsbit is not used.
18 0x51, 0x58, 0x43, 0x4a, 0x75, 0x7c, 0x67, 0x6e, 18 */
19 0x32, 0x3b, 0x20, 0x29, 0x16, 0x1f, 0x04, 0x0d, 19const u8 crc7_be_syndrome_table[256] = {
20 0x7a, 0x73, 0x68, 0x61, 0x5e, 0x57, 0x4c, 0x45, 20 0x00, 0x12, 0x24, 0x36, 0x48, 0x5a, 0x6c, 0x7e,
21 0x2b, 0x22, 0x39, 0x30, 0x0f, 0x06, 0x1d, 0x14, 21 0x90, 0x82, 0xb4, 0xa6, 0xd8, 0xca, 0xfc, 0xee,
22 0x63, 0x6a, 0x71, 0x78, 0x47, 0x4e, 0x55, 0x5c, 22 0x32, 0x20, 0x16, 0x04, 0x7a, 0x68, 0x5e, 0x4c,
23 0x64, 0x6d, 0x76, 0x7f, 0x40, 0x49, 0x52, 0x5b, 23 0xa2, 0xb0, 0x86, 0x94, 0xea, 0xf8, 0xce, 0xdc,
24 0x2c, 0x25, 0x3e, 0x37, 0x08, 0x01, 0x1a, 0x13, 24 0x64, 0x76, 0x40, 0x52, 0x2c, 0x3e, 0x08, 0x1a,
25 0x7d, 0x74, 0x6f, 0x66, 0x59, 0x50, 0x4b, 0x42, 25 0xf4, 0xe6, 0xd0, 0xc2, 0xbc, 0xae, 0x98, 0x8a,
26 0x35, 0x3c, 0x27, 0x2e, 0x11, 0x18, 0x03, 0x0a, 26 0x56, 0x44, 0x72, 0x60, 0x1e, 0x0c, 0x3a, 0x28,
27 0x56, 0x5f, 0x44, 0x4d, 0x72, 0x7b, 0x60, 0x69, 27 0xc6, 0xd4, 0xe2, 0xf0, 0x8e, 0x9c, 0xaa, 0xb8,
28 0x1e, 0x17, 0x0c, 0x05, 0x3a, 0x33, 0x28, 0x21, 28 0xc8, 0xda, 0xec, 0xfe, 0x80, 0x92, 0xa4, 0xb6,
29 0x4f, 0x46, 0x5d, 0x54, 0x6b, 0x62, 0x79, 0x70, 29 0x58, 0x4a, 0x7c, 0x6e, 0x10, 0x02, 0x34, 0x26,
30 0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31, 0x38, 30 0xfa, 0xe8, 0xde, 0xcc, 0xb2, 0xa0, 0x96, 0x84,
31 0x41, 0x48, 0x53, 0x5a, 0x65, 0x6c, 0x77, 0x7e, 31 0x6a, 0x78, 0x4e, 0x5c, 0x22, 0x30, 0x06, 0x14,
32 0x09, 0x00, 0x1b, 0x12, 0x2d, 0x24, 0x3f, 0x36, 32 0xac, 0xbe, 0x88, 0x9a, 0xe4, 0xf6, 0xc0, 0xd2,
33 0x58, 0x51, 0x4a, 0x43, 0x7c, 0x75, 0x6e, 0x67, 33 0x3c, 0x2e, 0x18, 0x0a, 0x74, 0x66, 0x50, 0x42,
34 0x10, 0x19, 0x02, 0x0b, 0x34, 0x3d, 0x26, 0x2f, 34 0x9e, 0x8c, 0xba, 0xa8, 0xd6, 0xc4, 0xf2, 0xe0,
35 0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c, 35 0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62, 0x70,
36 0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04, 36 0x82, 0x90, 0xa6, 0xb4, 0xca, 0xd8, 0xee, 0xfc,
37 0x6a, 0x63, 0x78, 0x71, 0x4e, 0x47, 0x5c, 0x55, 37 0x12, 0x00, 0x36, 0x24, 0x5a, 0x48, 0x7e, 0x6c,
38 0x22, 0x2b, 0x30, 0x39, 0x06, 0x0f, 0x14, 0x1d, 38 0xb0, 0xa2, 0x94, 0x86, 0xf8, 0xea, 0xdc, 0xce,
39 0x25, 0x2c, 0x37, 0x3e, 0x01, 0x08, 0x13, 0x1a, 39 0x20, 0x32, 0x04, 0x16, 0x68, 0x7a, 0x4c, 0x5e,
40 0x6d, 0x64, 0x7f, 0x76, 0x49, 0x40, 0x5b, 0x52, 40 0xe6, 0xf4, 0xc2, 0xd0, 0xae, 0xbc, 0x8a, 0x98,
41 0x3c, 0x35, 0x2e, 0x27, 0x18, 0x11, 0x0a, 0x03, 41 0x76, 0x64, 0x52, 0x40, 0x3e, 0x2c, 0x1a, 0x08,
42 0x74, 0x7d, 0x66, 0x6f, 0x50, 0x59, 0x42, 0x4b, 42 0xd4, 0xc6, 0xf0, 0xe2, 0x9c, 0x8e, 0xb8, 0xaa,
43 0x17, 0x1e, 0x05, 0x0c, 0x33, 0x3a, 0x21, 0x28, 43 0x44, 0x56, 0x60, 0x72, 0x0c, 0x1e, 0x28, 0x3a,
44 0x5f, 0x56, 0x4d, 0x44, 0x7b, 0x72, 0x69, 0x60, 44 0x4a, 0x58, 0x6e, 0x7c, 0x02, 0x10, 0x26, 0x34,
45 0x0e, 0x07, 0x1c, 0x15, 0x2a, 0x23, 0x38, 0x31, 45 0xda, 0xc8, 0xfe, 0xec, 0x92, 0x80, 0xb6, 0xa4,
46 0x46, 0x4f, 0x54, 0x5d, 0x62, 0x6b, 0x70, 0x79 46 0x78, 0x6a, 0x5c, 0x4e, 0x30, 0x22, 0x14, 0x06,
47 0xe8, 0xfa, 0xcc, 0xde, 0xa0, 0xb2, 0x84, 0x96,
48 0x2e, 0x3c, 0x0a, 0x18, 0x66, 0x74, 0x42, 0x50,
49 0xbe, 0xac, 0x9a, 0x88, 0xf6, 0xe4, 0xd2, 0xc0,
50 0x1c, 0x0e, 0x38, 0x2a, 0x54, 0x46, 0x70, 0x62,
51 0x8c, 0x9e, 0xa8, 0xba, 0xc4, 0xd6, 0xe0, 0xf2
47}; 52};
48EXPORT_SYMBOL(crc7_syndrome_table); 53EXPORT_SYMBOL(crc7_be_syndrome_table);
49 54
50/** 55/**
51 * crc7 - update the CRC7 for the data buffer 56 * crc7 - update the CRC7 for the data buffer
@@ -55,14 +60,17 @@ EXPORT_SYMBOL(crc7_syndrome_table);
55 * Context: any 60 * Context: any
56 * 61 *
57 * Returns the updated CRC7 value. 62 * Returns the updated CRC7 value.
63 * The CRC7 is left-aligned in the byte (the lsbit is always 0), as that
64 * makes the computation easier, and all callers want it in that form.
65 *
58 */ 66 */
59u8 crc7(u8 crc, const u8 *buffer, size_t len) 67u8 crc7_be(u8 crc, const u8 *buffer, size_t len)
60{ 68{
61 while (len--) 69 while (len--)
62 crc = crc7_byte(crc, *buffer++); 70 crc = crc7_be_byte(crc, *buffer++);
63 return crc; 71 return crc;
64} 72}
65EXPORT_SYMBOL(crc7); 73EXPORT_SYMBOL(crc7_be);
66 74
67MODULE_DESCRIPTION("CRC7 calculations"); 75MODULE_DESCRIPTION("CRC7 calculations");
68MODULE_LICENSE("GPL"); 76MODULE_LICENSE("GPL");
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index e0731c3db706..547f7f923dbc 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -7,6 +7,9 @@
7 * 7 *
8 * For licencing details see kernel-base/COPYING 8 * For licencing details see kernel-base/COPYING
9 */ 9 */
10
11#define pr_fmt(fmt) "ODEBUG: " fmt
12
10#include <linux/debugobjects.h> 13#include <linux/debugobjects.h>
11#include <linux/interrupt.h> 14#include <linux/interrupt.h>
12#include <linux/sched.h> 15#include <linux/sched.h>
@@ -218,7 +221,7 @@ static void debug_objects_oom(void)
218 unsigned long flags; 221 unsigned long flags;
219 int i; 222 int i;
220 223
221 printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); 224 pr_warn("Out of memory. ODEBUG disabled\n");
222 225
223 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { 226 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
224 raw_spin_lock_irqsave(&db->lock, flags); 227 raw_spin_lock_irqsave(&db->lock, flags);
@@ -292,11 +295,9 @@ static void debug_object_is_on_stack(void *addr, int onstack)
292 295
293 limit++; 296 limit++;
294 if (is_on_stack) 297 if (is_on_stack)
295 printk(KERN_WARNING 298 pr_warn("object is on stack, but not annotated\n");
296 "ODEBUG: object is on stack, but not annotated\n");
297 else 299 else
298 printk(KERN_WARNING 300 pr_warn("object is not on stack, but annotated\n");
299 "ODEBUG: object is not on stack, but annotated\n");
300 WARN_ON(1); 301 WARN_ON(1);
301} 302}
302 303
@@ -985,7 +986,7 @@ static void __init debug_objects_selftest(void)
985 if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings)) 986 if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
986 goto out; 987 goto out;
987#endif 988#endif
988 printk(KERN_INFO "ODEBUG: selftest passed\n"); 989 pr_info("selftest passed\n");
989 990
990out: 991out:
991 debug_objects_fixups = oldfixups; 992 debug_objects_fixups = oldfixups;
@@ -1060,8 +1061,8 @@ static int __init debug_objects_replace_static_objects(void)
1060 } 1061 }
1061 local_irq_enable(); 1062 local_irq_enable();
1062 1063
1063 printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt, 1064 pr_debug("%d of %d active objects replaced\n",
1064 obj_pool_used); 1065 cnt, obj_pool_used);
1065 return 0; 1066 return 0;
1066free: 1067free:
1067 hlist_for_each_entry_safe(obj, tmp, &objects, node) { 1068 hlist_for_each_entry_safe(obj, tmp, &objects, node) {
@@ -1090,7 +1091,7 @@ void __init debug_objects_mem_init(void)
1090 debug_objects_enabled = 0; 1091 debug_objects_enabled = 0;
1091 if (obj_cache) 1092 if (obj_cache)
1092 kmem_cache_destroy(obj_cache); 1093 kmem_cache_destroy(obj_cache);
1093 printk(KERN_WARNING "ODEBUG: out of memory.\n"); 1094 pr_warn("out of memory.\n");
1094 } else 1095 } else
1095 debug_objects_selftest(); 1096 debug_objects_selftest();
1096} 1097}
diff --git a/lib/devres.c b/lib/devres.c
index 2f16c133fd36..f562bf6ff71d 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -157,12 +157,12 @@ EXPORT_SYMBOL(devm_ioremap_resource);
157 * if (!base) 157 * if (!base)
158 * return -EADDRNOTAVAIL; 158 * return -EADDRNOTAVAIL;
159 */ 159 */
160void __iomem *devm_request_and_ioremap(struct device *device, 160void __iomem *devm_request_and_ioremap(struct device *dev,
161 struct resource *res) 161 struct resource *res)
162{ 162{
163 void __iomem *dest_ptr; 163 void __iomem *dest_ptr;
164 164
165 dest_ptr = devm_ioremap_resource(device, res); 165 dest_ptr = devm_ioremap_resource(dev, res);
166 if (IS_ERR(dest_ptr)) 166 if (IS_ERR(dest_ptr))
167 return NULL; 167 return NULL;
168 168
@@ -194,7 +194,7 @@ static int devm_ioport_map_match(struct device *dev, void *res,
194 * Managed ioport_map(). Map is automatically unmapped on driver 194 * Managed ioport_map(). Map is automatically unmapped on driver
195 * detach. 195 * detach.
196 */ 196 */
197void __iomem * devm_ioport_map(struct device *dev, unsigned long port, 197void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
198 unsigned int nr) 198 unsigned int nr)
199{ 199{
200 void __iomem **ptr, *addr; 200 void __iomem **ptr, *addr;
@@ -265,7 +265,7 @@ static void pcim_iomap_release(struct device *gendev, void *res)
265 * be safely called without context and guaranteed to succed once 265 * be safely called without context and guaranteed to succed once
266 * allocated. 266 * allocated.
267 */ 267 */
268void __iomem * const * pcim_iomap_table(struct pci_dev *pdev) 268void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
269{ 269{
270 struct pcim_iomap_devres *dr, *new_dr; 270 struct pcim_iomap_devres *dr, *new_dr;
271 271
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(pcim_iomap_table);
290 * Managed pci_iomap(). Map is automatically unmapped on driver 290 * Managed pci_iomap(). Map is automatically unmapped on driver
291 * detach. 291 * detach.
292 */ 292 */
293void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen) 293void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
294{ 294{
295 void __iomem **tbl; 295 void __iomem **tbl;
296 296
diff --git a/lib/digsig.c b/lib/digsig.c
index 8793aeda30ca..ae05ea393fc8 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -175,10 +175,11 @@ err1:
175 * digsig_verify() - digital signature verification with public key 175 * digsig_verify() - digital signature verification with public key
176 * @keyring: keyring to search key in 176 * @keyring: keyring to search key in
177 * @sig: digital signature 177 * @sig: digital signature
178 * @sigen: length of the signature 178 * @siglen: length of the signature
179 * @data: data 179 * @data: data
180 * @datalen: length of the data 180 * @datalen: length of the data
181 * @return: 0 on success, -EINVAL otherwise 181 *
182 * Returns 0 on success, -EINVAL otherwise
182 * 183 *
183 * Verifies data integrity against digital signature. 184 * Verifies data integrity against digital signature.
184 * Currently only RSA is supported. 185 * Currently only RSA is supported.
diff --git a/lib/fdt_empty_tree.c b/lib/fdt_empty_tree.c
new file mode 100644
index 000000000000..5d30c58150ad
--- /dev/null
+++ b/lib/fdt_empty_tree.c
@@ -0,0 +1,2 @@
1#include <linux/libfdt_env.h>
2#include "../scripts/dtc/libfdt/fdt_empty_tree.c"
diff --git a/lib/idr.c b/lib/idr.c
index 2642fa8e424d..39158abebad1 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -18,12 +18,6 @@
18 * pointer or what ever, we treat it as a (void *). You can pass this 18 * pointer or what ever, we treat it as a (void *). You can pass this
19 * id to a user for him to pass back at a later time. You then pass 19 * id to a user for him to pass back at a later time. You then pass
20 * that id to this code and it returns your pointer. 20 * that id to this code and it returns your pointer.
21
22 * You can release ids at any time. When all ids are released, most of
23 * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
24 * don't need to go to the memory "store" during an id allocate, just
25 * so you don't need to be too concerned about locking and conflicts
26 * with the slab allocator.
27 */ 21 */
28 22
29#ifndef TEST // to test in user space... 23#ifndef TEST // to test in user space...
@@ -151,7 +145,7 @@ static void idr_layer_rcu_free(struct rcu_head *head)
151 145
152static inline void free_layer(struct idr *idr, struct idr_layer *p) 146static inline void free_layer(struct idr *idr, struct idr_layer *p)
153{ 147{
154 if (idr->hint && idr->hint == p) 148 if (idr->hint == p)
155 RCU_INIT_POINTER(idr->hint, NULL); 149 RCU_INIT_POINTER(idr->hint, NULL);
156 call_rcu(&p->rcu_head, idr_layer_rcu_free); 150 call_rcu(&p->rcu_head, idr_layer_rcu_free);
157} 151}
@@ -249,7 +243,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
249 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; 243 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
250 244
251 /* if already at the top layer, we need to grow */ 245 /* if already at the top layer, we need to grow */
252 if (id >= 1 << (idp->layers * IDR_BITS)) { 246 if (id > idr_max(idp->layers)) {
253 *starting_id = id; 247 *starting_id = id;
254 return -EAGAIN; 248 return -EAGAIN;
255 } 249 }
@@ -562,6 +556,11 @@ void idr_remove(struct idr *idp, int id)
562 if (id < 0) 556 if (id < 0)
563 return; 557 return;
564 558
559 if (id > idr_max(idp->layers)) {
560 idr_remove_warning(id);
561 return;
562 }
563
565 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); 564 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
566 if (idp->top && idp->top->count == 1 && (idp->layers > 1) && 565 if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
567 idp->top->ary[0]) { 566 idp->top->ary[0]) {
@@ -579,16 +578,6 @@ void idr_remove(struct idr *idp, int id)
579 bitmap_clear(to_free->bitmap, 0, IDR_SIZE); 578 bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
580 free_layer(idp, to_free); 579 free_layer(idp, to_free);
581 } 580 }
582 while (idp->id_free_cnt >= MAX_IDR_FREE) {
583 p = get_from_free_list(idp);
584 /*
585 * Note: we don't call the rcu callback here, since the only
586 * layers that fall into the freelist are those that have been
587 * preallocated.
588 */
589 kmem_cache_free(idr_layer_cache, p);
590 }
591 return;
592} 581}
593EXPORT_SYMBOL(idr_remove); 582EXPORT_SYMBOL(idr_remove);
594 583
@@ -809,14 +798,12 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
809 798
810 p = idp->top; 799 p = idp->top;
811 if (!p) 800 if (!p)
812 return ERR_PTR(-EINVAL); 801 return ERR_PTR(-ENOENT);
813
814 n = (p->layer+1) * IDR_BITS;
815 802
816 if (id >= (1 << n)) 803 if (id > idr_max(p->layer + 1))
817 return ERR_PTR(-EINVAL); 804 return ERR_PTR(-ENOENT);
818 805
819 n -= IDR_BITS; 806 n = p->layer * IDR_BITS;
820 while ((n > 0) && p) { 807 while ((n > 0) && p) {
821 p = p->ary[(id >> n) & IDR_MASK]; 808 p = p->ary[(id >> n) & IDR_MASK];
822 n -= IDR_BITS; 809 n -= IDR_BITS;
@@ -1027,6 +1014,9 @@ void ida_remove(struct ida *ida, int id)
1027 int n; 1014 int n;
1028 struct ida_bitmap *bitmap; 1015 struct ida_bitmap *bitmap;
1029 1016
1017 if (idr_id > idr_max(ida->idr.layers))
1018 goto err;
1019
1030 /* clear full bits while looking up the leaf idr_layer */ 1020 /* clear full bits while looking up the leaf idr_layer */
1031 while ((shift > 0) && p) { 1021 while ((shift > 0) && p) {
1032 n = (idr_id >> shift) & IDR_MASK; 1022 n = (idr_id >> shift) & IDR_MASK;
@@ -1042,7 +1032,7 @@ void ida_remove(struct ida *ida, int id)
1042 __clear_bit(n, p->bitmap); 1032 __clear_bit(n, p->bitmap);
1043 1033
1044 bitmap = (void *)p->ary[n]; 1034 bitmap = (void *)p->ary[n];
1045 if (!test_bit(offset, bitmap->bitmap)) 1035 if (!bitmap || !test_bit(offset, bitmap->bitmap))
1046 goto err; 1036 goto err;
1047 1037
1048 /* update bitmap and remove it if empty */ 1038 /* update bitmap and remove it if empty */
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
index e6eb406f2d65..f367f9ad544c 100644
--- a/lib/interval_tree.c
+++ b/lib/interval_tree.c
@@ -1,6 +1,7 @@
1#include <linux/init.h> 1#include <linux/init.h>
2#include <linux/interval_tree.h> 2#include <linux/interval_tree.h>
3#include <linux/interval_tree_generic.h> 3#include <linux/interval_tree_generic.h>
4#include <linux/module.h>
4 5
5#define START(node) ((node)->start) 6#define START(node) ((node)->start)
6#define LAST(node) ((node)->last) 7#define LAST(node) ((node)->last)
@@ -8,3 +9,8 @@
8INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, 9INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
9 unsigned long, __subtree_last, 10 unsigned long, __subtree_last,
10 START, LAST,, interval_tree) 11 START, LAST,, interval_tree)
12
13EXPORT_SYMBOL_GPL(interval_tree_insert);
14EXPORT_SYMBOL_GPL(interval_tree_remove);
15EXPORT_SYMBOL_GPL(interval_tree_iter_first);
16EXPORT_SYMBOL_GPL(interval_tree_iter_next);
diff --git a/lib/interval_tree_test_main.c b/lib/interval_tree_test.c
index 245900b98c8e..245900b98c8e 100644
--- a/lib/interval_tree_test_main.c
+++ b/lib/interval_tree_test.c
diff --git a/lib/iovec.c b/lib/iovec.c
index 454baa88bf27..7a7c2da4cddf 100644
--- a/lib/iovec.c
+++ b/lib/iovec.c
@@ -51,3 +51,58 @@ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
51 return 0; 51 return 0;
52} 52}
53EXPORT_SYMBOL(memcpy_toiovec); 53EXPORT_SYMBOL(memcpy_toiovec);
54
55/*
56 * Copy kernel to iovec. Returns -EFAULT on error.
57 */
58
59int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
60 int offset, int len)
61{
62 int copy;
63 for (; len > 0; ++iov) {
64 /* Skip over the finished iovecs */
65 if (unlikely(offset >= iov->iov_len)) {
66 offset -= iov->iov_len;
67 continue;
68 }
69 copy = min_t(unsigned int, iov->iov_len - offset, len);
70 if (copy_to_user(iov->iov_base + offset, kdata, copy))
71 return -EFAULT;
72 offset = 0;
73 kdata += copy;
74 len -= copy;
75 }
76
77 return 0;
78}
79EXPORT_SYMBOL(memcpy_toiovecend);
80
81/*
82 * Copy iovec to kernel. Returns -EFAULT on error.
83 */
84
85int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
86 int offset, int len)
87{
88 /* Skip over the finished iovecs */
89 while (offset >= iov->iov_len) {
90 offset -= iov->iov_len;
91 iov++;
92 }
93
94 while (len > 0) {
95 u8 __user *base = iov->iov_base + offset;
96 int copy = min_t(unsigned int, len, iov->iov_len - offset);
97
98 offset = 0;
99 if (copy_from_user(kdata, base, copy))
100 return -EFAULT;
101 len -= copy;
102 kdata += copy;
103 iov++;
104 }
105
106 return 0;
107}
108EXPORT_SYMBOL(memcpy_fromiovecend);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 4e3bd71bd949..9ebf9e20de53 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -29,7 +29,9 @@
29 29
30 30
31u64 uevent_seqnum; 31u64 uevent_seqnum;
32#ifdef CONFIG_UEVENT_HELPER
32char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; 33char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
34#endif
33#ifdef CONFIG_NET 35#ifdef CONFIG_NET
34struct uevent_sock { 36struct uevent_sock {
35 struct list_head list; 37 struct list_head list;
@@ -109,6 +111,7 @@ static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
109} 111}
110#endif 112#endif
111 113
114#ifdef CONFIG_UEVENT_HELPER
112static int kobj_usermode_filter(struct kobject *kobj) 115static int kobj_usermode_filter(struct kobject *kobj)
113{ 116{
114 const struct kobj_ns_type_operations *ops; 117 const struct kobj_ns_type_operations *ops;
@@ -147,6 +150,7 @@ static void cleanup_uevent_env(struct subprocess_info *info)
147{ 150{
148 kfree(info->data); 151 kfree(info->data);
149} 152}
153#endif
150 154
151/** 155/**
152 * kobject_uevent_env - send an uevent with environmental data 156 * kobject_uevent_env - send an uevent with environmental data
@@ -323,6 +327,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
323#endif 327#endif
324 mutex_unlock(&uevent_sock_mutex); 328 mutex_unlock(&uevent_sock_mutex);
325 329
330#ifdef CONFIG_UEVENT_HELPER
326 /* call uevent_helper, usually only enabled during early boot */ 331 /* call uevent_helper, usually only enabled during early boot */
327 if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { 332 if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
328 struct subprocess_info *info; 333 struct subprocess_info *info;
@@ -347,6 +352,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
347 env = NULL; /* freed by cleanup_uevent_env */ 352 env = NULL; /* freed by cleanup_uevent_env */
348 } 353 }
349 } 354 }
355#endif
350 356
351exit: 357exit:
352 kfree(devpath); 358 kfree(devpath);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 244f5480c898..b3131f5cf8a2 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -62,10 +62,7 @@ EXPORT_SYMBOL(crc32c);
62static int __init libcrc32c_mod_init(void) 62static int __init libcrc32c_mod_init(void)
63{ 63{
64 tfm = crypto_alloc_shash("crc32c", 0, 0); 64 tfm = crypto_alloc_shash("crc32c", 0, 0);
65 if (IS_ERR(tfm)) 65 return PTR_ERR_OR_ZERO(tfm);
66 return PTR_ERR(tfm);
67
68 return 0;
69} 66}
70 67
71static void __exit libcrc32c_mod_fini(void) 68static void __exit libcrc32c_mod_fini(void)
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index df6839e3ce08..7a85967060a5 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
72 len = *ip++; 72 len = *ip++;
73 for (; len == 255; length += 255) 73 for (; len == 255; length += 255)
74 len = *ip++; 74 len = *ip++;
75 if (unlikely(length > (size_t)(length + len)))
76 goto _output_error;
75 length += len; 77 length += len;
76 } 78 }
77 79
@@ -106,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
106 if (length == ML_MASK) { 108 if (length == ML_MASK) {
107 for (; *ip == 255; length += 255) 109 for (; *ip == 255; length += 255)
108 ip++; 110 ip++;
111 if (unlikely(length > (size_t)(length + *ip)))
112 goto _output_error;
109 length += *ip++; 113 length += *ip++;
110 } 114 }
111 115
@@ -155,7 +159,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
155 159
156 /* write overflow error detected */ 160 /* write overflow error detected */
157_output_error: 161_output_error:
158 return (int) (-(((char *)ip) - source)); 162 return -1;
159} 163}
160 164
161static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, 165static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
@@ -188,6 +192,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
188 int s = 255; 192 int s = 255;
189 while ((ip < iend) && (s == 255)) { 193 while ((ip < iend) && (s == 255)) {
190 s = *ip++; 194 s = *ip++;
195 if (unlikely(length > (size_t)(length + s)))
196 goto _output_error;
191 length += s; 197 length += s;
192 } 198 }
193 } 199 }
@@ -228,6 +234,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
228 if (length == ML_MASK) { 234 if (length == ML_MASK) {
229 while (ip < iend) { 235 while (ip < iend) {
230 int s = *ip++; 236 int s = *ip++;
237 if (unlikely(length > (size_t)(length + s)))
238 goto _output_error;
231 length += s; 239 length += s;
232 if (s == 255) 240 if (s == 255)
233 continue; 241 continue;
@@ -280,7 +288,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
280 288
281 /* write overflow error detected */ 289 /* write overflow error detected */
282_output_error: 290_output_error:
283 return (int) (-(((char *) ip) - source)); 291 return -1;
284} 292}
285 293
286int lz4_decompress(const unsigned char *src, size_t *src_len, 294int lz4_decompress(const unsigned char *src, size_t *src_len,
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 569985d522d5..8563081e8da3 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -19,11 +19,31 @@
19#include <linux/lzo.h> 19#include <linux/lzo.h>
20#include "lzodefs.h" 20#include "lzodefs.h"
21 21
22#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x)) 22#define HAVE_IP(t, x) \
23#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) 23 (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
24#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun 24 (((t + x) >= t) && ((t + x) >= x)))
25#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun 25
26#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun 26#define HAVE_OP(t, x) \
27 (((size_t)(op_end - op) >= (size_t)(t + x)) && \
28 (((t + x) >= t) && ((t + x) >= x)))
29
30#define NEED_IP(t, x) \
31 do { \
32 if (!HAVE_IP(t, x)) \
33 goto input_overrun; \
34 } while (0)
35
36#define NEED_OP(t, x) \
37 do { \
38 if (!HAVE_OP(t, x)) \
39 goto output_overrun; \
40 } while (0)
41
42#define TEST_LB(m_pos) \
43 do { \
44 if ((m_pos) < out) \
45 goto lookbehind_overrun; \
46 } while (0)
27 47
28int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, 48int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
29 unsigned char *out, size_t *out_len) 49 unsigned char *out, size_t *out_len)
@@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
58 while (unlikely(*ip == 0)) { 78 while (unlikely(*ip == 0)) {
59 t += 255; 79 t += 255;
60 ip++; 80 ip++;
61 NEED_IP(1); 81 NEED_IP(1, 0);
62 } 82 }
63 t += 15 + *ip++; 83 t += 15 + *ip++;
64 } 84 }
65 t += 3; 85 t += 3;
66copy_literal_run: 86copy_literal_run:
67#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 87#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
68 if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) { 88 if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
69 const unsigned char *ie = ip + t; 89 const unsigned char *ie = ip + t;
70 unsigned char *oe = op + t; 90 unsigned char *oe = op + t;
71 do { 91 do {
@@ -81,8 +101,8 @@ copy_literal_run:
81 } else 101 } else
82#endif 102#endif
83 { 103 {
84 NEED_OP(t); 104 NEED_OP(t, 0);
85 NEED_IP(t + 3); 105 NEED_IP(t, 3);
86 do { 106 do {
87 *op++ = *ip++; 107 *op++ = *ip++;
88 } while (--t > 0); 108 } while (--t > 0);
@@ -95,7 +115,7 @@ copy_literal_run:
95 m_pos -= t >> 2; 115 m_pos -= t >> 2;
96 m_pos -= *ip++ << 2; 116 m_pos -= *ip++ << 2;
97 TEST_LB(m_pos); 117 TEST_LB(m_pos);
98 NEED_OP(2); 118 NEED_OP(2, 0);
99 op[0] = m_pos[0]; 119 op[0] = m_pos[0];
100 op[1] = m_pos[1]; 120 op[1] = m_pos[1];
101 op += 2; 121 op += 2;
@@ -119,10 +139,10 @@ copy_literal_run:
119 while (unlikely(*ip == 0)) { 139 while (unlikely(*ip == 0)) {
120 t += 255; 140 t += 255;
121 ip++; 141 ip++;
122 NEED_IP(1); 142 NEED_IP(1, 0);
123 } 143 }
124 t += 31 + *ip++; 144 t += 31 + *ip++;
125 NEED_IP(2); 145 NEED_IP(2, 0);
126 } 146 }
127 m_pos = op - 1; 147 m_pos = op - 1;
128 next = get_unaligned_le16(ip); 148 next = get_unaligned_le16(ip);
@@ -137,10 +157,10 @@ copy_literal_run:
137 while (unlikely(*ip == 0)) { 157 while (unlikely(*ip == 0)) {
138 t += 255; 158 t += 255;
139 ip++; 159 ip++;
140 NEED_IP(1); 160 NEED_IP(1, 0);
141 } 161 }
142 t += 7 + *ip++; 162 t += 7 + *ip++;
143 NEED_IP(2); 163 NEED_IP(2, 0);
144 } 164 }
145 next = get_unaligned_le16(ip); 165 next = get_unaligned_le16(ip);
146 ip += 2; 166 ip += 2;
@@ -154,7 +174,7 @@ copy_literal_run:
154#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 174#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
155 if (op - m_pos >= 8) { 175 if (op - m_pos >= 8) {
156 unsigned char *oe = op + t; 176 unsigned char *oe = op + t;
157 if (likely(HAVE_OP(t + 15))) { 177 if (likely(HAVE_OP(t, 15))) {
158 do { 178 do {
159 COPY8(op, m_pos); 179 COPY8(op, m_pos);
160 op += 8; 180 op += 8;
@@ -164,7 +184,7 @@ copy_literal_run:
164 m_pos += 8; 184 m_pos += 8;
165 } while (op < oe); 185 } while (op < oe);
166 op = oe; 186 op = oe;
167 if (HAVE_IP(6)) { 187 if (HAVE_IP(6, 0)) {
168 state = next; 188 state = next;
169 COPY4(op, ip); 189 COPY4(op, ip);
170 op += next; 190 op += next;
@@ -172,7 +192,7 @@ copy_literal_run:
172 continue; 192 continue;
173 } 193 }
174 } else { 194 } else {
175 NEED_OP(t); 195 NEED_OP(t, 0);
176 do { 196 do {
177 *op++ = *m_pos++; 197 *op++ = *m_pos++;
178 } while (op < oe); 198 } while (op < oe);
@@ -181,7 +201,7 @@ copy_literal_run:
181#endif 201#endif
182 { 202 {
183 unsigned char *oe = op + t; 203 unsigned char *oe = op + t;
184 NEED_OP(t); 204 NEED_OP(t, 0);
185 op[0] = m_pos[0]; 205 op[0] = m_pos[0];
186 op[1] = m_pos[1]; 206 op[1] = m_pos[1];
187 op += 2; 207 op += 2;
@@ -194,15 +214,15 @@ match_next:
194 state = next; 214 state = next;
195 t = next; 215 t = next;
196#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 216#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
197 if (likely(HAVE_IP(6) && HAVE_OP(4))) { 217 if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
198 COPY4(op, ip); 218 COPY4(op, ip);
199 op += t; 219 op += t;
200 ip += t; 220 ip += t;
201 } else 221 } else
202#endif 222#endif
203 { 223 {
204 NEED_IP(t + 3); 224 NEED_IP(t, 3);
205 NEED_OP(t); 225 NEED_OP(t, 0);
206 while (t > 0) { 226 while (t > 0) {
207 *op++ = *ip++; 227 *op++ = *ip++;
208 t--; 228 t--;
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 10ad042d01be..9c3e85ff0a6c 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -136,6 +136,7 @@ int nla_validate(const struct nlattr *head, int len, int maxtype,
136errout: 136errout:
137 return err; 137 return err;
138} 138}
139EXPORT_SYMBOL(nla_validate);
139 140
140/** 141/**
141 * nla_policy_len - Determin the max. length of a policy 142 * nla_policy_len - Determin the max. length of a policy
@@ -162,6 +163,7 @@ nla_policy_len(const struct nla_policy *p, int n)
162 163
163 return len; 164 return len;
164} 165}
166EXPORT_SYMBOL(nla_policy_len);
165 167
166/** 168/**
167 * nla_parse - Parse a stream of attributes into a tb buffer 169 * nla_parse - Parse a stream of attributes into a tb buffer
@@ -208,6 +210,7 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
208errout: 210errout:
209 return err; 211 return err;
210} 212}
213EXPORT_SYMBOL(nla_parse);
211 214
212/** 215/**
213 * nla_find - Find a specific attribute in a stream of attributes 216 * nla_find - Find a specific attribute in a stream of attributes
@@ -228,6 +231,7 @@ struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
228 231
229 return NULL; 232 return NULL;
230} 233}
234EXPORT_SYMBOL(nla_find);
231 235
232/** 236/**
233 * nla_strlcpy - Copy string attribute payload into a sized buffer 237 * nla_strlcpy - Copy string attribute payload into a sized buffer
@@ -258,6 +262,7 @@ size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
258 262
259 return srclen; 263 return srclen;
260} 264}
265EXPORT_SYMBOL(nla_strlcpy);
261 266
262/** 267/**
263 * nla_memcpy - Copy a netlink attribute into another memory area 268 * nla_memcpy - Copy a netlink attribute into another memory area
@@ -278,6 +283,7 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
278 283
279 return minlen; 284 return minlen;
280} 285}
286EXPORT_SYMBOL(nla_memcpy);
281 287
282/** 288/**
283 * nla_memcmp - Compare an attribute with sized memory area 289 * nla_memcmp - Compare an attribute with sized memory area
@@ -295,6 +301,7 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
295 301
296 return d; 302 return d;
297} 303}
304EXPORT_SYMBOL(nla_memcmp);
298 305
299/** 306/**
300 * nla_strcmp - Compare a string attribute against a string 307 * nla_strcmp - Compare a string attribute against a string
@@ -317,6 +324,7 @@ int nla_strcmp(const struct nlattr *nla, const char *str)
317 324
318 return d; 325 return d;
319} 326}
327EXPORT_SYMBOL(nla_strcmp);
320 328
321#ifdef CONFIG_NET 329#ifdef CONFIG_NET
322/** 330/**
@@ -502,12 +510,3 @@ int nla_append(struct sk_buff *skb, int attrlen, const void *data)
502} 510}
503EXPORT_SYMBOL(nla_append); 511EXPORT_SYMBOL(nla_append);
504#endif 512#endif
505
506EXPORT_SYMBOL(nla_validate);
507EXPORT_SYMBOL(nla_policy_len);
508EXPORT_SYMBOL(nla_parse);
509EXPORT_SYMBOL(nla_find);
510EXPORT_SYMBOL(nla_strlcpy);
511EXPORT_SYMBOL(nla_memcpy);
512EXPORT_SYMBOL(nla_memcmp);
513EXPORT_SYMBOL(nla_strcmp);
diff --git a/lib/plist.c b/lib/plist.c
index 1ebc95f7a46f..d408e774b746 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -134,6 +134,46 @@ void plist_del(struct plist_node *node, struct plist_head *head)
134 plist_check_head(head); 134 plist_check_head(head);
135} 135}
136 136
137/**
138 * plist_requeue - Requeue @node at end of same-prio entries.
139 *
140 * This is essentially an optimized plist_del() followed by
141 * plist_add(). It moves an entry already in the plist to
142 * after any other same-priority entries.
143 *
144 * @node: &struct plist_node pointer - entry to be moved
145 * @head: &struct plist_head pointer - list head
146 */
147void plist_requeue(struct plist_node *node, struct plist_head *head)
148{
149 struct plist_node *iter;
150 struct list_head *node_next = &head->node_list;
151
152 plist_check_head(head);
153 BUG_ON(plist_head_empty(head));
154 BUG_ON(plist_node_empty(node));
155
156 if (node == plist_last(head))
157 return;
158
159 iter = plist_next(node);
160
161 if (node->prio != iter->prio)
162 return;
163
164 plist_del(node, head);
165
166 plist_for_each_continue(iter, head) {
167 if (node->prio != iter->prio) {
168 node_next = &iter->node_list;
169 break;
170 }
171 }
172 list_add_tail(&node->node_list, node_next);
173
174 plist_check_head(head);
175}
176
137#ifdef CONFIG_DEBUG_PI_LIST 177#ifdef CONFIG_DEBUG_PI_LIST
138#include <linux/sched.h> 178#include <linux/sched.h>
139#include <linux/module.h> 179#include <linux/module.h>
@@ -170,12 +210,20 @@ static void __init plist_test_check(int nr_expect)
170 BUG_ON(prio_pos->prio_list.next != &first->prio_list); 210 BUG_ON(prio_pos->prio_list.next != &first->prio_list);
171} 211}
172 212
213static void __init plist_test_requeue(struct plist_node *node)
214{
215 plist_requeue(node, &test_head);
216
217 if (node != plist_last(&test_head))
218 BUG_ON(node->prio == plist_next(node)->prio);
219}
220
173static int __init plist_test(void) 221static int __init plist_test(void)
174{ 222{
175 int nr_expect = 0, i, loop; 223 int nr_expect = 0, i, loop;
176 unsigned int r = local_clock(); 224 unsigned int r = local_clock();
177 225
178 pr_debug("start plist test\n"); 226 printk(KERN_DEBUG "start plist test\n");
179 plist_head_init(&test_head); 227 plist_head_init(&test_head);
180 for (i = 0; i < ARRAY_SIZE(test_node); i++) 228 for (i = 0; i < ARRAY_SIZE(test_node); i++)
181 plist_node_init(test_node + i, 0); 229 plist_node_init(test_node + i, 0);
@@ -193,6 +241,10 @@ static int __init plist_test(void)
193 nr_expect--; 241 nr_expect--;
194 } 242 }
195 plist_test_check(nr_expect); 243 plist_test_check(nr_expect);
244 if (!plist_node_empty(test_node + i)) {
245 plist_test_requeue(test_node + i);
246 plist_test_check(nr_expect);
247 }
196 } 248 }
197 249
198 for (i = 0; i < ARRAY_SIZE(test_node); i++) { 250 for (i = 0; i < ARRAY_SIZE(test_node); i++) {
@@ -203,7 +255,7 @@ static int __init plist_test(void)
203 plist_test_check(nr_expect); 255 plist_test_check(nr_expect);
204 } 256 }
205 257
206 pr_debug("end plist test\n"); 258 printk(KERN_DEBUG "end plist test\n");
207 return 0; 259 return 0;
208} 260}
209 261
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 9599aa72d7a0..3291a8e37490 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -27,6 +27,7 @@
27#include <linux/radix-tree.h> 27#include <linux/radix-tree.h>
28#include <linux/percpu.h> 28#include <linux/percpu.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/kmemleak.h>
30#include <linux/notifier.h> 31#include <linux/notifier.h>
31#include <linux/cpu.h> 32#include <linux/cpu.h>
32#include <linux/string.h> 33#include <linux/string.h>
@@ -194,12 +195,17 @@ radix_tree_node_alloc(struct radix_tree_root *root)
194 * succeed in getting a node here (and never reach 195 * succeed in getting a node here (and never reach
195 * kmem_cache_alloc) 196 * kmem_cache_alloc)
196 */ 197 */
197 rtp = &__get_cpu_var(radix_tree_preloads); 198 rtp = this_cpu_ptr(&radix_tree_preloads);
198 if (rtp->nr) { 199 if (rtp->nr) {
199 ret = rtp->nodes[rtp->nr - 1]; 200 ret = rtp->nodes[rtp->nr - 1];
200 rtp->nodes[rtp->nr - 1] = NULL; 201 rtp->nodes[rtp->nr - 1] = NULL;
201 rtp->nr--; 202 rtp->nr--;
202 } 203 }
204 /*
205 * Update the allocation stack trace as this is more useful
206 * for debugging.
207 */
208 kmemleak_update_trace(ret);
203 } 209 }
204 if (ret == NULL) 210 if (ret == NULL)
205 ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); 211 ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
@@ -250,14 +256,14 @@ static int __radix_tree_preload(gfp_t gfp_mask)
250 int ret = -ENOMEM; 256 int ret = -ENOMEM;
251 257
252 preempt_disable(); 258 preempt_disable();
253 rtp = &__get_cpu_var(radix_tree_preloads); 259 rtp = this_cpu_ptr(&radix_tree_preloads);
254 while (rtp->nr < ARRAY_SIZE(rtp->nodes)) { 260 while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
255 preempt_enable(); 261 preempt_enable();
256 node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); 262 node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
257 if (node == NULL) 263 if (node == NULL)
258 goto out; 264 goto out;
259 preempt_disable(); 265 preempt_disable();
260 rtp = &__get_cpu_var(radix_tree_preloads); 266 rtp = this_cpu_ptr(&radix_tree_preloads);
261 if (rtp->nr < ARRAY_SIZE(rtp->nodes)) 267 if (rtp->nr < ARRAY_SIZE(rtp->nodes))
262 rtp->nodes[rtp->nr++] = node; 268 rtp->nodes[rtp->nr++] = node;
263 else 269 else
@@ -1296,7 +1302,6 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
1296/** 1302/**
1297 * __radix_tree_delete_node - try to free node after clearing a slot 1303 * __radix_tree_delete_node - try to free node after clearing a slot
1298 * @root: radix tree root 1304 * @root: radix tree root
1299 * @index: index key
1300 * @node: node containing @index 1305 * @node: node containing @index
1301 * 1306 *
1302 * After clearing the slot at @index in @node from radix tree 1307 * After clearing the slot at @index in @node from radix tree
diff --git a/lib/string.c b/lib/string.c
index 9b1f9062a202..992bf30af759 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -107,7 +107,7 @@ EXPORT_SYMBOL(strcpy);
107 107
108#ifndef __HAVE_ARCH_STRNCPY 108#ifndef __HAVE_ARCH_STRNCPY
109/** 109/**
110 * strncpy - Copy a length-limited, %NUL-terminated string 110 * strncpy - Copy a length-limited, C-string
111 * @dest: Where to copy the string to 111 * @dest: Where to copy the string to
112 * @src: Where to copy the string from 112 * @src: Where to copy the string from
113 * @count: The maximum number of bytes to copy 113 * @count: The maximum number of bytes to copy
@@ -136,7 +136,7 @@ EXPORT_SYMBOL(strncpy);
136 136
137#ifndef __HAVE_ARCH_STRLCPY 137#ifndef __HAVE_ARCH_STRLCPY
138/** 138/**
139 * strlcpy - Copy a %NUL terminated string into a sized buffer 139 * strlcpy - Copy a C-string into a sized buffer
140 * @dest: Where to copy the string to 140 * @dest: Where to copy the string to
141 * @src: Where to copy the string from 141 * @src: Where to copy the string from
142 * @size: size of destination buffer 142 * @size: size of destination buffer
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(strcat);
182 182
183#ifndef __HAVE_ARCH_STRNCAT 183#ifndef __HAVE_ARCH_STRNCAT
184/** 184/**
185 * strncat - Append a length-limited, %NUL-terminated string to another 185 * strncat - Append a length-limited, C-string to another
186 * @dest: The string to be appended to 186 * @dest: The string to be appended to
187 * @src: The string to append to it 187 * @src: The string to append to it
188 * @count: The maximum numbers of bytes to copy 188 * @count: The maximum numbers of bytes to copy
@@ -211,7 +211,7 @@ EXPORT_SYMBOL(strncat);
211 211
212#ifndef __HAVE_ARCH_STRLCAT 212#ifndef __HAVE_ARCH_STRLCAT
213/** 213/**
214 * strlcat - Append a length-limited, %NUL-terminated string to another 214 * strlcat - Append a length-limited, C-string to another
215 * @dest: The string to be appended to 215 * @dest: The string to be appended to
216 * @src: The string to append to it 216 * @src: The string to append to it
217 * @count: The size of the destination buffer. 217 * @count: The size of the destination buffer.
@@ -301,6 +301,24 @@ char *strchr(const char *s, int c)
301EXPORT_SYMBOL(strchr); 301EXPORT_SYMBOL(strchr);
302#endif 302#endif
303 303
304#ifndef __HAVE_ARCH_STRCHRNUL
305/**
306 * strchrnul - Find and return a character in a string, or end of string
307 * @s: The string to be searched
308 * @c: The character to search for
309 *
310 * Returns pointer to first occurrence of 'c' in s. If c is not found, then
311 * return a pointer to the null byte at the end of s.
312 */
313char *strchrnul(const char *s, int c)
314{
315 while (*s && *s != (char)c)
316 s++;
317 return (char *)s;
318}
319EXPORT_SYMBOL(strchrnul);
320#endif
321
304#ifndef __HAVE_ARCH_STRRCHR 322#ifndef __HAVE_ARCH_STRRCHR
305/** 323/**
306 * strrchr - Find the last occurrence of a character in a string 324 * strrchr - Find the last occurrence of a character in a string
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index b604b831f4d1..4abda074ea45 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -86,6 +86,7 @@ static unsigned int io_tlb_index;
86 * We need to save away the original address corresponding to a mapped entry 86 * We need to save away the original address corresponding to a mapped entry
87 * for the sync operations. 87 * for the sync operations.
88 */ 88 */
89#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
89static phys_addr_t *io_tlb_orig_addr; 90static phys_addr_t *io_tlb_orig_addr;
90 91
91/* 92/*
@@ -188,12 +189,14 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
188 io_tlb_list = memblock_virt_alloc( 189 io_tlb_list = memblock_virt_alloc(
189 PAGE_ALIGN(io_tlb_nslabs * sizeof(int)), 190 PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
190 PAGE_SIZE); 191 PAGE_SIZE);
191 for (i = 0; i < io_tlb_nslabs; i++)
192 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
193 io_tlb_index = 0;
194 io_tlb_orig_addr = memblock_virt_alloc( 192 io_tlb_orig_addr = memblock_virt_alloc(
195 PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)), 193 PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
196 PAGE_SIZE); 194 PAGE_SIZE);
195 for (i = 0; i < io_tlb_nslabs; i++) {
196 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
197 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
198 }
199 io_tlb_index = 0;
197 200
198 if (verbose) 201 if (verbose)
199 swiotlb_print_info(); 202 swiotlb_print_info();
@@ -313,10 +316,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
313 if (!io_tlb_list) 316 if (!io_tlb_list)
314 goto cleanup3; 317 goto cleanup3;
315 318
316 for (i = 0; i < io_tlb_nslabs; i++)
317 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
318 io_tlb_index = 0;
319
320 io_tlb_orig_addr = (phys_addr_t *) 319 io_tlb_orig_addr = (phys_addr_t *)
321 __get_free_pages(GFP_KERNEL, 320 __get_free_pages(GFP_KERNEL,
322 get_order(io_tlb_nslabs * 321 get_order(io_tlb_nslabs *
@@ -324,7 +323,11 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
324 if (!io_tlb_orig_addr) 323 if (!io_tlb_orig_addr)
325 goto cleanup4; 324 goto cleanup4;
326 325
327 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t)); 326 for (i = 0; i < io_tlb_nslabs; i++) {
327 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
328 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
329 }
330 io_tlb_index = 0;
328 331
329 swiotlb_print_info(); 332 swiotlb_print_info();
330 333
@@ -374,7 +377,7 @@ void __init swiotlb_free(void)
374 io_tlb_nslabs = 0; 377 io_tlb_nslabs = 0;
375} 378}
376 379
377static int is_swiotlb_buffer(phys_addr_t paddr) 380int is_swiotlb_buffer(phys_addr_t paddr)
378{ 381{
379 return paddr >= io_tlb_start && paddr < io_tlb_end; 382 return paddr >= io_tlb_start && paddr < io_tlb_end;
380} 383}
@@ -556,7 +559,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
556 /* 559 /*
557 * First, sync the memory before unmapping the entry 560 * First, sync the memory before unmapping the entry
558 */ 561 */
559 if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) 562 if (orig_addr != INVALID_PHYS_ADDR &&
563 ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
560 swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE); 564 swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
561 565
562 /* 566 /*
@@ -573,8 +577,10 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
573 * Step 1: return the slots to the free list, merging the 577 * Step 1: return the slots to the free list, merging the
574 * slots with superceeding slots 578 * slots with superceeding slots
575 */ 579 */
576 for (i = index + nslots - 1; i >= index; i--) 580 for (i = index + nslots - 1; i >= index; i--) {
577 io_tlb_list[i] = ++count; 581 io_tlb_list[i] = ++count;
582 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
583 }
578 /* 584 /*
579 * Step 2: merge the returned slots with the preceding slots, 585 * Step 2: merge the returned slots with the preceding slots,
580 * if available (non zero) 586 * if available (non zero)
@@ -593,6 +599,8 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
593 int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT; 599 int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
594 phys_addr_t orig_addr = io_tlb_orig_addr[index]; 600 phys_addr_t orig_addr = io_tlb_orig_addr[index];
595 601
602 if (orig_addr == INVALID_PHYS_ADDR)
603 return;
596 orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1); 604 orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
597 605
598 switch (target) { 606 switch (target) {
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
new file mode 100644
index 000000000000..c579e0f58818
--- /dev/null
+++ b/lib/test_bpf.c
@@ -0,0 +1,1929 @@
1/*
2 * Testsuite for BPF interpreter and BPF JIT compiler
3 *
4 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/filter.h>
21#include <linux/skbuff.h>
22#include <linux/netdevice.h>
23#include <linux/if_vlan.h>
24
25/* General test specific settings */
26#define MAX_SUBTESTS 3
27#define MAX_TESTRUNS 10000
28#define MAX_DATA 128
29#define MAX_INSNS 512
30#define MAX_K 0xffffFFFF
31
32/* Few constants used to init test 'skb' */
33#define SKB_TYPE 3
34#define SKB_MARK 0x1234aaaa
35#define SKB_HASH 0x1234aaab
36#define SKB_QUEUE_MAP 123
37#define SKB_VLAN_TCI 0xffff
38#define SKB_DEV_IFINDEX 577
39#define SKB_DEV_TYPE 588
40
41/* Redefine REGs to make tests less verbose */
42#define R0 BPF_REG_0
43#define R1 BPF_REG_1
44#define R2 BPF_REG_2
45#define R3 BPF_REG_3
46#define R4 BPF_REG_4
47#define R5 BPF_REG_5
48#define R6 BPF_REG_6
49#define R7 BPF_REG_7
50#define R8 BPF_REG_8
51#define R9 BPF_REG_9
52#define R10 BPF_REG_10
53
54/* Flags that can be passed to test cases */
55#define FLAG_NO_DATA BIT(0)
56#define FLAG_EXPECTED_FAIL BIT(1)
57
58enum {
59 CLASSIC = BIT(6), /* Old BPF instructions only. */
60 INTERNAL = BIT(7), /* Extended instruction set. */
61};
62
63#define TEST_TYPE_MASK (CLASSIC | INTERNAL)
64
65struct bpf_test {
66 const char *descr;
67 union {
68 struct sock_filter insns[MAX_INSNS];
69 struct sock_filter_int insns_int[MAX_INSNS];
70 } u;
71 __u8 aux;
72 __u8 data[MAX_DATA];
73 struct {
74 int data_size;
75 __u32 result;
76 } test[MAX_SUBTESTS];
77};
78
79static struct bpf_test tests[] = {
80 {
81 "TAX",
82 .u.insns = {
83 BPF_STMT(BPF_LD | BPF_IMM, 1),
84 BPF_STMT(BPF_MISC | BPF_TAX, 0),
85 BPF_STMT(BPF_LD | BPF_IMM, 2),
86 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
87 BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
88 BPF_STMT(BPF_MISC | BPF_TAX, 0),
89 BPF_STMT(BPF_LD | BPF_LEN, 0),
90 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
91 BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
92 BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
93 BPF_STMT(BPF_RET | BPF_A, 0)
94 },
95 CLASSIC,
96 { 10, 20, 30, 40, 50 },
97 { { 2, 10 }, { 3, 20 }, { 4, 30 } },
98 },
99 {
100 "TXA",
101 .u.insns = {
102 BPF_STMT(BPF_LDX | BPF_LEN, 0),
103 BPF_STMT(BPF_MISC | BPF_TXA, 0),
104 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
105 BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
106 },
107 CLASSIC,
108 { 10, 20, 30, 40, 50 },
109 { { 1, 2 }, { 3, 6 }, { 4, 8 } },
110 },
111 {
112 "ADD_SUB_MUL_K",
113 .u.insns = {
114 BPF_STMT(BPF_LD | BPF_IMM, 1),
115 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
116 BPF_STMT(BPF_LDX | BPF_IMM, 3),
117 BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
118 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
119 BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
120 BPF_STMT(BPF_RET | BPF_A, 0)
121 },
122 CLASSIC | FLAG_NO_DATA,
123 { },
124 { { 0, 0xfffffffd } }
125 },
126 {
127 "DIV_KX",
128 .u.insns = {
129 BPF_STMT(BPF_LD | BPF_IMM, 8),
130 BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
131 BPF_STMT(BPF_MISC | BPF_TAX, 0),
132 BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
133 BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
134 BPF_STMT(BPF_MISC | BPF_TAX, 0),
135 BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
136 BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
137 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
138 BPF_STMT(BPF_RET | BPF_A, 0)
139 },
140 CLASSIC | FLAG_NO_DATA,
141 { },
142 { { 0, 0x40000001 } }
143 },
144 {
145 "AND_OR_LSH_K",
146 .u.insns = {
147 BPF_STMT(BPF_LD | BPF_IMM, 0xff),
148 BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
149 BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
150 BPF_STMT(BPF_MISC | BPF_TAX, 0),
151 BPF_STMT(BPF_LD | BPF_IMM, 0xf),
152 BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
153 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
154 BPF_STMT(BPF_RET | BPF_A, 0)
155 },
156 CLASSIC | FLAG_NO_DATA,
157 { },
158 { { 0, 0x800000ff }, { 1, 0x800000ff } },
159 },
160 {
161 "LD_IMM_0",
162 .u.insns = {
163 BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */
164 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0),
165 BPF_STMT(BPF_RET | BPF_K, 0),
166 BPF_STMT(BPF_RET | BPF_K, 1),
167 },
168 CLASSIC,
169 { },
170 { { 1, 1 } },
171 },
172 {
173 "LD_IND",
174 .u.insns = {
175 BPF_STMT(BPF_LDX | BPF_LEN, 0),
176 BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
177 BPF_STMT(BPF_RET | BPF_K, 1)
178 },
179 CLASSIC,
180 { },
181 { { 1, 0 }, { 10, 0 }, { 60, 0 } },
182 },
183 {
184 "LD_ABS",
185 .u.insns = {
186 BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
187 BPF_STMT(BPF_RET | BPF_K, 1)
188 },
189 CLASSIC,
190 { },
191 { { 1, 0 }, { 10, 0 }, { 60, 0 } },
192 },
193 {
194 "LD_ABS_LL",
195 .u.insns = {
196 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
197 BPF_STMT(BPF_MISC | BPF_TAX, 0),
198 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
199 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
200 BPF_STMT(BPF_RET | BPF_A, 0)
201 },
202 CLASSIC,
203 { 1, 2, 3 },
204 { { 1, 0 }, { 2, 3 } },
205 },
206 {
207 "LD_IND_LL",
208 .u.insns = {
209 BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
210 BPF_STMT(BPF_LDX | BPF_LEN, 0),
211 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
212 BPF_STMT(BPF_MISC | BPF_TAX, 0),
213 BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
214 BPF_STMT(BPF_RET | BPF_A, 0)
215 },
216 CLASSIC,
217 { 1, 2, 3, 0xff },
218 { { 1, 1 }, { 3, 3 }, { 4, 0xff } },
219 },
220 {
221 "LD_ABS_NET",
222 .u.insns = {
223 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
224 BPF_STMT(BPF_MISC | BPF_TAX, 0),
225 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
226 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
227 BPF_STMT(BPF_RET | BPF_A, 0)
228 },
229 CLASSIC,
230 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
231 { { 15, 0 }, { 16, 3 } },
232 },
233 {
234 "LD_IND_NET",
235 .u.insns = {
236 BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
237 BPF_STMT(BPF_LDX | BPF_LEN, 0),
238 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
239 BPF_STMT(BPF_MISC | BPF_TAX, 0),
240 BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
241 BPF_STMT(BPF_RET | BPF_A, 0)
242 },
243 CLASSIC,
244 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
245 { { 14, 0 }, { 15, 1 }, { 17, 3 } },
246 },
247 {
248 "LD_PKTTYPE",
249 .u.insns = {
250 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
251 SKF_AD_OFF + SKF_AD_PKTTYPE),
252 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
253 BPF_STMT(BPF_RET | BPF_K, 1),
254 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
255 SKF_AD_OFF + SKF_AD_PKTTYPE),
256 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
257 BPF_STMT(BPF_RET | BPF_K, 1),
258 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
259 SKF_AD_OFF + SKF_AD_PKTTYPE),
260 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
261 BPF_STMT(BPF_RET | BPF_K, 1),
262 BPF_STMT(BPF_RET | BPF_A, 0)
263 },
264 CLASSIC,
265 { },
266 { { 1, 3 }, { 10, 3 } },
267 },
268 {
269 "LD_MARK",
270 .u.insns = {
271 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
272 SKF_AD_OFF + SKF_AD_MARK),
273 BPF_STMT(BPF_RET | BPF_A, 0)
274 },
275 CLASSIC,
276 { },
277 { { 1, SKB_MARK}, { 10, SKB_MARK} },
278 },
279 {
280 "LD_RXHASH",
281 .u.insns = {
282 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
283 SKF_AD_OFF + SKF_AD_RXHASH),
284 BPF_STMT(BPF_RET | BPF_A, 0)
285 },
286 CLASSIC,
287 { },
288 { { 1, SKB_HASH}, { 10, SKB_HASH} },
289 },
290 {
291 "LD_QUEUE",
292 .u.insns = {
293 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
294 SKF_AD_OFF + SKF_AD_QUEUE),
295 BPF_STMT(BPF_RET | BPF_A, 0)
296 },
297 CLASSIC,
298 { },
299 { { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
300 },
301 {
302 "LD_PROTOCOL",
303 .u.insns = {
304 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
305 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
306 BPF_STMT(BPF_RET | BPF_K, 0),
307 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
308 SKF_AD_OFF + SKF_AD_PROTOCOL),
309 BPF_STMT(BPF_MISC | BPF_TAX, 0),
310 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
311 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
312 BPF_STMT(BPF_RET | BPF_K, 0),
313 BPF_STMT(BPF_MISC | BPF_TXA, 0),
314 BPF_STMT(BPF_RET | BPF_A, 0)
315 },
316 CLASSIC,
317 { 10, 20, 30 },
318 { { 10, ETH_P_IP }, { 100, ETH_P_IP } },
319 },
320 {
321 "LD_VLAN_TAG",
322 .u.insns = {
323 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
324 SKF_AD_OFF + SKF_AD_VLAN_TAG),
325 BPF_STMT(BPF_RET | BPF_A, 0)
326 },
327 CLASSIC,
328 { },
329 {
330 { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
331 { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
332 },
333 },
334 {
335 "LD_VLAN_TAG_PRESENT",
336 .u.insns = {
337 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
338 SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
339 BPF_STMT(BPF_RET | BPF_A, 0)
340 },
341 CLASSIC,
342 { },
343 {
344 { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
345 { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
346 },
347 },
348 {
349 "LD_IFINDEX",
350 .u.insns = {
351 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
352 SKF_AD_OFF + SKF_AD_IFINDEX),
353 BPF_STMT(BPF_RET | BPF_A, 0)
354 },
355 CLASSIC,
356 { },
357 { { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
358 },
359 {
360 "LD_HATYPE",
361 .u.insns = {
362 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
363 SKF_AD_OFF + SKF_AD_HATYPE),
364 BPF_STMT(BPF_RET | BPF_A, 0)
365 },
366 CLASSIC,
367 { },
368 { { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
369 },
370 {
371 "LD_CPU",
372 .u.insns = {
373 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
374 SKF_AD_OFF + SKF_AD_CPU),
375 BPF_STMT(BPF_MISC | BPF_TAX, 0),
376 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
377 SKF_AD_OFF + SKF_AD_CPU),
378 BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
379 BPF_STMT(BPF_RET | BPF_A, 0)
380 },
381 CLASSIC,
382 { },
383 { { 1, 0 }, { 10, 0 } },
384 },
385 {
386 "LD_NLATTR",
387 .u.insns = {
388 BPF_STMT(BPF_LDX | BPF_IMM, 2),
389 BPF_STMT(BPF_MISC | BPF_TXA, 0),
390 BPF_STMT(BPF_LDX | BPF_IMM, 3),
391 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
392 SKF_AD_OFF + SKF_AD_NLATTR),
393 BPF_STMT(BPF_RET | BPF_A, 0)
394 },
395 CLASSIC,
396#ifdef __BIG_ENDIAN
397 { 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 },
398#else
399 { 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
400#endif
401 { { 4, 0 }, { 20, 6 } },
402 },
403 {
404 "LD_NLATTR_NEST",
405 .u.insns = {
406 BPF_STMT(BPF_LD | BPF_IMM, 2),
407 BPF_STMT(BPF_LDX | BPF_IMM, 3),
408 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
409 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
410 BPF_STMT(BPF_LD | BPF_IMM, 2),
411 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
412 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
413 BPF_STMT(BPF_LD | BPF_IMM, 2),
414 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
415 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
416 BPF_STMT(BPF_LD | BPF_IMM, 2),
417 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
418 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
419 BPF_STMT(BPF_LD | BPF_IMM, 2),
420 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
421 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
422 BPF_STMT(BPF_LD | BPF_IMM, 2),
423 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
424 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
425 BPF_STMT(BPF_LD | BPF_IMM, 2),
426 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
427 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
428 BPF_STMT(BPF_LD | BPF_IMM, 2),
429 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
430 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
431 BPF_STMT(BPF_RET | BPF_A, 0)
432 },
433 CLASSIC,
434#ifdef __BIG_ENDIAN
435 { 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 },
436#else
437 { 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
438#endif
439 { { 4, 0 }, { 20, 10 } },
440 },
441 {
442 "LD_PAYLOAD_OFF",
443 .u.insns = {
444 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
445 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
446 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
447 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
448 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
449 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
450 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
451 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
452 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
453 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
454 BPF_STMT(BPF_RET | BPF_A, 0)
455 },
456 CLASSIC,
457 /* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethtype IPv4 (0x0800),
458 * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
459 * id 9737, seq 1, length 64
460 */
461 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
462 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
463 0x08, 0x00,
464 0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
465 0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
466 { { 30, 0 }, { 100, 42 } },
467 },
468 {
469 "LD_ANC_XOR",
470 .u.insns = {
471 BPF_STMT(BPF_LD | BPF_IMM, 10),
472 BPF_STMT(BPF_LDX | BPF_IMM, 300),
473 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
474 SKF_AD_OFF + SKF_AD_ALU_XOR_X),
475 BPF_STMT(BPF_RET | BPF_A, 0)
476 },
477 CLASSIC,
478 { },
479 { { 4, 10 ^ 300 }, { 20, 10 ^ 300 } },
480 },
481 {
482 "SPILL_FILL",
483 .u.insns = {
484 BPF_STMT(BPF_LDX | BPF_LEN, 0),
485 BPF_STMT(BPF_LD | BPF_IMM, 2),
486 BPF_STMT(BPF_ALU | BPF_RSH, 1),
487 BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
488 BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */
489 BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
490 BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */
491 BPF_STMT(BPF_STX, 15), /* M3 = len */
492 BPF_STMT(BPF_LDX | BPF_MEM, 1),
493 BPF_STMT(BPF_LD | BPF_MEM, 2),
494 BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
495 BPF_STMT(BPF_LDX | BPF_MEM, 15),
496 BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
497 BPF_STMT(BPF_RET | BPF_A, 0)
498 },
499 CLASSIC,
500 { },
501 { { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
502 },
503 {
504 "JEQ",
505 .u.insns = {
506 BPF_STMT(BPF_LDX | BPF_LEN, 0),
507 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
508 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
509 BPF_STMT(BPF_RET | BPF_K, 1),
510 BPF_STMT(BPF_RET | BPF_K, MAX_K)
511 },
512 CLASSIC,
513 { 3, 3, 3, 3, 3 },
514 { { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
515 },
516 {
517 "JGT",
518 .u.insns = {
519 BPF_STMT(BPF_LDX | BPF_LEN, 0),
520 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
521 BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
522 BPF_STMT(BPF_RET | BPF_K, 1),
523 BPF_STMT(BPF_RET | BPF_K, MAX_K)
524 },
525 CLASSIC,
526 { 4, 4, 4, 3, 3 },
527 { { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
528 },
529 {
530 "JGE",
531 .u.insns = {
532 BPF_STMT(BPF_LDX | BPF_LEN, 0),
533 BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
534 BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
535 BPF_STMT(BPF_RET | BPF_K, 10),
536 BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
537 BPF_STMT(BPF_RET | BPF_K, 20),
538 BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
539 BPF_STMT(BPF_RET | BPF_K, 30),
540 BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
541 BPF_STMT(BPF_RET | BPF_K, 40),
542 BPF_STMT(BPF_RET | BPF_K, MAX_K)
543 },
544 CLASSIC,
545 { 1, 2, 3, 4, 5 },
546 { { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
547 },
548 {
549 "JSET",
550 .u.insns = {
551 BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
552 BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
553 BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
554 BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
555 BPF_STMT(BPF_LDX | BPF_LEN, 0),
556 BPF_STMT(BPF_MISC | BPF_TXA, 0),
557 BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
558 BPF_STMT(BPF_MISC | BPF_TAX, 0),
559 BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
560 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
561 BPF_STMT(BPF_RET | BPF_K, 10),
562 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
563 BPF_STMT(BPF_RET | BPF_K, 20),
564 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
565 BPF_STMT(BPF_RET | BPF_K, 30),
566 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
567 BPF_STMT(BPF_RET | BPF_K, 30),
568 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
569 BPF_STMT(BPF_RET | BPF_K, 30),
570 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
571 BPF_STMT(BPF_RET | BPF_K, 30),
572 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
573 BPF_STMT(BPF_RET | BPF_K, 30),
574 BPF_STMT(BPF_RET | BPF_K, MAX_K)
575 },
576 CLASSIC,
577 { 0, 0xAA, 0x55, 1 },
578 { { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
579 },
580 {
581 "tcpdump port 22",
582 .u.insns = {
583 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
584 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */
585 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20),
586 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
587 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
588 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17),
589 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54),
590 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0),
591 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56),
592 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13),
593 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */
594 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
595 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
596 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
597 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8),
598 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
599 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0),
600 BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
601 BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
602 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
603 BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
604 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1),
605 BPF_STMT(BPF_RET | BPF_K, 0xffff),
606 BPF_STMT(BPF_RET | BPF_K, 0),
607 },
608 CLASSIC,
609 /* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800)
610 * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
611 * seq 1305692979:1305693027, ack 3650467037, win 65535,
612 * options [nop,nop,TS val 2502645400 ecr 3971138], length 48
613 */
614 { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
615 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
616 0x08, 0x00,
617 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
618 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
619 0x0a, 0x01, 0x01, 0x95, /* ip src */
620 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
621 0xc2, 0x24,
622 0x00, 0x16 /* dst port */ },
623 { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
624 },
625 {
626 "tcpdump complex",
627 .u.insns = {
628 /* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
629 * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
630 * (len > 115 or len < 30000000000)' -d
631 */
632 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
633 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0),
634 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29),
635 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
636 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27),
637 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
638 BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0),
639 BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
640 BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
641 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
642 BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
643 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20),
644 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16),
645 BPF_STMT(BPF_ST, 1),
646 BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14),
647 BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf),
648 BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2),
649 BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */
650 BPF_STMT(BPF_LD | BPF_MEM, 1),
651 BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
652 BPF_STMT(BPF_ST, 5),
653 BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
654 BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26),
655 BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
656 BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2),
657 BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */
658 BPF_STMT(BPF_LD | BPF_MEM, 5),
659 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0),
660 BPF_STMT(BPF_LD | BPF_LEN, 0),
661 BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0),
662 BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0),
663 BPF_STMT(BPF_RET | BPF_K, 0xffff),
664 BPF_STMT(BPF_RET | BPF_K, 0),
665 },
666 CLASSIC,
667 { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
668 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
669 0x08, 0x00,
670 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
671 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
672 0x0a, 0x01, 0x01, 0x95, /* ip src */
673 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
674 0xc2, 0x24,
675 0x00, 0x16 /* dst port */ },
676 { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
677 },
678 {
679 "RET_A",
680 .u.insns = {
681 /* check that unitialized X and A contain zeros */
682 BPF_STMT(BPF_MISC | BPF_TXA, 0),
683 BPF_STMT(BPF_RET | BPF_A, 0)
684 },
685 CLASSIC,
686 { },
687 { {1, 0}, {2, 0} },
688 },
689 {
690 "INT: ADD trivial",
691 .u.insns_int = {
692 BPF_ALU64_IMM(BPF_MOV, R1, 1),
693 BPF_ALU64_IMM(BPF_ADD, R1, 2),
694 BPF_ALU64_IMM(BPF_MOV, R2, 3),
695 BPF_ALU64_REG(BPF_SUB, R1, R2),
696 BPF_ALU64_IMM(BPF_ADD, R1, -1),
697 BPF_ALU64_IMM(BPF_MUL, R1, 3),
698 BPF_ALU64_REG(BPF_MOV, R0, R1),
699 BPF_EXIT_INSN(),
700 },
701 INTERNAL,
702 { },
703 { { 0, 0xfffffffd } }
704 },
705 {
706 "INT: MUL_X",
707 .u.insns_int = {
708 BPF_ALU64_IMM(BPF_MOV, R0, -1),
709 BPF_ALU64_IMM(BPF_MOV, R1, -1),
710 BPF_ALU64_IMM(BPF_MOV, R2, 3),
711 BPF_ALU64_REG(BPF_MUL, R1, R2),
712 BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
713 BPF_EXIT_INSN(),
714 BPF_ALU64_IMM(BPF_MOV, R0, 1),
715 BPF_EXIT_INSN(),
716 },
717 INTERNAL,
718 { },
719 { { 0, 1 } }
720 },
721 {
722 "INT: MUL_X2",
723 .u.insns_int = {
724 BPF_ALU32_IMM(BPF_MOV, R0, -1),
725 BPF_ALU32_IMM(BPF_MOV, R1, -1),
726 BPF_ALU32_IMM(BPF_MOV, R2, 3),
727 BPF_ALU64_REG(BPF_MUL, R1, R2),
728 BPF_ALU64_IMM(BPF_RSH, R1, 8),
729 BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
730 BPF_EXIT_INSN(),
731 BPF_ALU32_IMM(BPF_MOV, R0, 1),
732 BPF_EXIT_INSN(),
733 },
734 INTERNAL,
735 { },
736 { { 0, 1 } }
737 },
738 {
739 "INT: MUL32_X",
740 .u.insns_int = {
741 BPF_ALU32_IMM(BPF_MOV, R0, -1),
742 BPF_ALU64_IMM(BPF_MOV, R1, -1),
743 BPF_ALU32_IMM(BPF_MOV, R2, 3),
744 BPF_ALU32_REG(BPF_MUL, R1, R2),
745 BPF_ALU64_IMM(BPF_RSH, R1, 8),
746 BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
747 BPF_EXIT_INSN(),
748 BPF_ALU32_IMM(BPF_MOV, R0, 1),
749 BPF_EXIT_INSN(),
750 },
751 INTERNAL,
752 { },
753 { { 0, 1 } }
754 },
755 {
756 /* Have to test all register combinations, since
757 * JITing of different registers will produce
758 * different asm code.
759 */
760 "INT: ADD 64-bit",
761 .u.insns_int = {
762 BPF_ALU64_IMM(BPF_MOV, R0, 0),
763 BPF_ALU64_IMM(BPF_MOV, R1, 1),
764 BPF_ALU64_IMM(BPF_MOV, R2, 2),
765 BPF_ALU64_IMM(BPF_MOV, R3, 3),
766 BPF_ALU64_IMM(BPF_MOV, R4, 4),
767 BPF_ALU64_IMM(BPF_MOV, R5, 5),
768 BPF_ALU64_IMM(BPF_MOV, R6, 6),
769 BPF_ALU64_IMM(BPF_MOV, R7, 7),
770 BPF_ALU64_IMM(BPF_MOV, R8, 8),
771 BPF_ALU64_IMM(BPF_MOV, R9, 9),
772 BPF_ALU64_IMM(BPF_ADD, R0, 20),
773 BPF_ALU64_IMM(BPF_ADD, R1, 20),
774 BPF_ALU64_IMM(BPF_ADD, R2, 20),
775 BPF_ALU64_IMM(BPF_ADD, R3, 20),
776 BPF_ALU64_IMM(BPF_ADD, R4, 20),
777 BPF_ALU64_IMM(BPF_ADD, R5, 20),
778 BPF_ALU64_IMM(BPF_ADD, R6, 20),
779 BPF_ALU64_IMM(BPF_ADD, R7, 20),
780 BPF_ALU64_IMM(BPF_ADD, R8, 20),
781 BPF_ALU64_IMM(BPF_ADD, R9, 20),
782 BPF_ALU64_IMM(BPF_SUB, R0, 10),
783 BPF_ALU64_IMM(BPF_SUB, R1, 10),
784 BPF_ALU64_IMM(BPF_SUB, R2, 10),
785 BPF_ALU64_IMM(BPF_SUB, R3, 10),
786 BPF_ALU64_IMM(BPF_SUB, R4, 10),
787 BPF_ALU64_IMM(BPF_SUB, R5, 10),
788 BPF_ALU64_IMM(BPF_SUB, R6, 10),
789 BPF_ALU64_IMM(BPF_SUB, R7, 10),
790 BPF_ALU64_IMM(BPF_SUB, R8, 10),
791 BPF_ALU64_IMM(BPF_SUB, R9, 10),
792 BPF_ALU64_REG(BPF_ADD, R0, R0),
793 BPF_ALU64_REG(BPF_ADD, R0, R1),
794 BPF_ALU64_REG(BPF_ADD, R0, R2),
795 BPF_ALU64_REG(BPF_ADD, R0, R3),
796 BPF_ALU64_REG(BPF_ADD, R0, R4),
797 BPF_ALU64_REG(BPF_ADD, R0, R5),
798 BPF_ALU64_REG(BPF_ADD, R0, R6),
799 BPF_ALU64_REG(BPF_ADD, R0, R7),
800 BPF_ALU64_REG(BPF_ADD, R0, R8),
801 BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
802 BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
803 BPF_EXIT_INSN(),
804 BPF_ALU64_REG(BPF_ADD, R1, R0),
805 BPF_ALU64_REG(BPF_ADD, R1, R1),
806 BPF_ALU64_REG(BPF_ADD, R1, R2),
807 BPF_ALU64_REG(BPF_ADD, R1, R3),
808 BPF_ALU64_REG(BPF_ADD, R1, R4),
809 BPF_ALU64_REG(BPF_ADD, R1, R5),
810 BPF_ALU64_REG(BPF_ADD, R1, R6),
811 BPF_ALU64_REG(BPF_ADD, R1, R7),
812 BPF_ALU64_REG(BPF_ADD, R1, R8),
813 BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
814 BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
815 BPF_EXIT_INSN(),
816 BPF_ALU64_REG(BPF_ADD, R2, R0),
817 BPF_ALU64_REG(BPF_ADD, R2, R1),
818 BPF_ALU64_REG(BPF_ADD, R2, R2),
819 BPF_ALU64_REG(BPF_ADD, R2, R3),
820 BPF_ALU64_REG(BPF_ADD, R2, R4),
821 BPF_ALU64_REG(BPF_ADD, R2, R5),
822 BPF_ALU64_REG(BPF_ADD, R2, R6),
823 BPF_ALU64_REG(BPF_ADD, R2, R7),
824 BPF_ALU64_REG(BPF_ADD, R2, R8),
825 BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
826 BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
827 BPF_EXIT_INSN(),
828 BPF_ALU64_REG(BPF_ADD, R3, R0),
829 BPF_ALU64_REG(BPF_ADD, R3, R1),
830 BPF_ALU64_REG(BPF_ADD, R3, R2),
831 BPF_ALU64_REG(BPF_ADD, R3, R3),
832 BPF_ALU64_REG(BPF_ADD, R3, R4),
833 BPF_ALU64_REG(BPF_ADD, R3, R5),
834 BPF_ALU64_REG(BPF_ADD, R3, R6),
835 BPF_ALU64_REG(BPF_ADD, R3, R7),
836 BPF_ALU64_REG(BPF_ADD, R3, R8),
837 BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
838 BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
839 BPF_EXIT_INSN(),
840 BPF_ALU64_REG(BPF_ADD, R4, R0),
841 BPF_ALU64_REG(BPF_ADD, R4, R1),
842 BPF_ALU64_REG(BPF_ADD, R4, R2),
843 BPF_ALU64_REG(BPF_ADD, R4, R3),
844 BPF_ALU64_REG(BPF_ADD, R4, R4),
845 BPF_ALU64_REG(BPF_ADD, R4, R5),
846 BPF_ALU64_REG(BPF_ADD, R4, R6),
847 BPF_ALU64_REG(BPF_ADD, R4, R7),
848 BPF_ALU64_REG(BPF_ADD, R4, R8),
849 BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
850 BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
851 BPF_EXIT_INSN(),
852 BPF_ALU64_REG(BPF_ADD, R5, R0),
853 BPF_ALU64_REG(BPF_ADD, R5, R1),
854 BPF_ALU64_REG(BPF_ADD, R5, R2),
855 BPF_ALU64_REG(BPF_ADD, R5, R3),
856 BPF_ALU64_REG(BPF_ADD, R5, R4),
857 BPF_ALU64_REG(BPF_ADD, R5, R5),
858 BPF_ALU64_REG(BPF_ADD, R5, R6),
859 BPF_ALU64_REG(BPF_ADD, R5, R7),
860 BPF_ALU64_REG(BPF_ADD, R5, R8),
861 BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
862 BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
863 BPF_EXIT_INSN(),
864 BPF_ALU64_REG(BPF_ADD, R6, R0),
865 BPF_ALU64_REG(BPF_ADD, R6, R1),
866 BPF_ALU64_REG(BPF_ADD, R6, R2),
867 BPF_ALU64_REG(BPF_ADD, R6, R3),
868 BPF_ALU64_REG(BPF_ADD, R6, R4),
869 BPF_ALU64_REG(BPF_ADD, R6, R5),
870 BPF_ALU64_REG(BPF_ADD, R6, R6),
871 BPF_ALU64_REG(BPF_ADD, R6, R7),
872 BPF_ALU64_REG(BPF_ADD, R6, R8),
873 BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
874 BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
875 BPF_EXIT_INSN(),
876 BPF_ALU64_REG(BPF_ADD, R7, R0),
877 BPF_ALU64_REG(BPF_ADD, R7, R1),
878 BPF_ALU64_REG(BPF_ADD, R7, R2),
879 BPF_ALU64_REG(BPF_ADD, R7, R3),
880 BPF_ALU64_REG(BPF_ADD, R7, R4),
881 BPF_ALU64_REG(BPF_ADD, R7, R5),
882 BPF_ALU64_REG(BPF_ADD, R7, R6),
883 BPF_ALU64_REG(BPF_ADD, R7, R7),
884 BPF_ALU64_REG(BPF_ADD, R7, R8),
885 BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
886 BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
887 BPF_EXIT_INSN(),
888 BPF_ALU64_REG(BPF_ADD, R8, R0),
889 BPF_ALU64_REG(BPF_ADD, R8, R1),
890 BPF_ALU64_REG(BPF_ADD, R8, R2),
891 BPF_ALU64_REG(BPF_ADD, R8, R3),
892 BPF_ALU64_REG(BPF_ADD, R8, R4),
893 BPF_ALU64_REG(BPF_ADD, R8, R5),
894 BPF_ALU64_REG(BPF_ADD, R8, R6),
895 BPF_ALU64_REG(BPF_ADD, R8, R7),
896 BPF_ALU64_REG(BPF_ADD, R8, R8),
897 BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
898 BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
899 BPF_EXIT_INSN(),
900 BPF_ALU64_REG(BPF_ADD, R9, R0),
901 BPF_ALU64_REG(BPF_ADD, R9, R1),
902 BPF_ALU64_REG(BPF_ADD, R9, R2),
903 BPF_ALU64_REG(BPF_ADD, R9, R3),
904 BPF_ALU64_REG(BPF_ADD, R9, R4),
905 BPF_ALU64_REG(BPF_ADD, R9, R5),
906 BPF_ALU64_REG(BPF_ADD, R9, R6),
907 BPF_ALU64_REG(BPF_ADD, R9, R7),
908 BPF_ALU64_REG(BPF_ADD, R9, R8),
909 BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
910 BPF_ALU64_REG(BPF_MOV, R0, R9),
911 BPF_EXIT_INSN(),
912 },
913 INTERNAL,
914 { },
915 { { 0, 2957380 } }
916 },
917 {
918 "INT: ADD 32-bit",
919 .u.insns_int = {
920 BPF_ALU32_IMM(BPF_MOV, R0, 20),
921 BPF_ALU32_IMM(BPF_MOV, R1, 1),
922 BPF_ALU32_IMM(BPF_MOV, R2, 2),
923 BPF_ALU32_IMM(BPF_MOV, R3, 3),
924 BPF_ALU32_IMM(BPF_MOV, R4, 4),
925 BPF_ALU32_IMM(BPF_MOV, R5, 5),
926 BPF_ALU32_IMM(BPF_MOV, R6, 6),
927 BPF_ALU32_IMM(BPF_MOV, R7, 7),
928 BPF_ALU32_IMM(BPF_MOV, R8, 8),
929 BPF_ALU32_IMM(BPF_MOV, R9, 9),
930 BPF_ALU64_IMM(BPF_ADD, R1, 10),
931 BPF_ALU64_IMM(BPF_ADD, R2, 10),
932 BPF_ALU64_IMM(BPF_ADD, R3, 10),
933 BPF_ALU64_IMM(BPF_ADD, R4, 10),
934 BPF_ALU64_IMM(BPF_ADD, R5, 10),
935 BPF_ALU64_IMM(BPF_ADD, R6, 10),
936 BPF_ALU64_IMM(BPF_ADD, R7, 10),
937 BPF_ALU64_IMM(BPF_ADD, R8, 10),
938 BPF_ALU64_IMM(BPF_ADD, R9, 10),
939 BPF_ALU32_REG(BPF_ADD, R0, R1),
940 BPF_ALU32_REG(BPF_ADD, R0, R2),
941 BPF_ALU32_REG(BPF_ADD, R0, R3),
942 BPF_ALU32_REG(BPF_ADD, R0, R4),
943 BPF_ALU32_REG(BPF_ADD, R0, R5),
944 BPF_ALU32_REG(BPF_ADD, R0, R6),
945 BPF_ALU32_REG(BPF_ADD, R0, R7),
946 BPF_ALU32_REG(BPF_ADD, R0, R8),
947 BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
948 BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
949 BPF_EXIT_INSN(),
950 BPF_ALU32_REG(BPF_ADD, R1, R0),
951 BPF_ALU32_REG(BPF_ADD, R1, R1),
952 BPF_ALU32_REG(BPF_ADD, R1, R2),
953 BPF_ALU32_REG(BPF_ADD, R1, R3),
954 BPF_ALU32_REG(BPF_ADD, R1, R4),
955 BPF_ALU32_REG(BPF_ADD, R1, R5),
956 BPF_ALU32_REG(BPF_ADD, R1, R6),
957 BPF_ALU32_REG(BPF_ADD, R1, R7),
958 BPF_ALU32_REG(BPF_ADD, R1, R8),
959 BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
960 BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
961 BPF_EXIT_INSN(),
962 BPF_ALU32_REG(BPF_ADD, R2, R0),
963 BPF_ALU32_REG(BPF_ADD, R2, R1),
964 BPF_ALU32_REG(BPF_ADD, R2, R2),
965 BPF_ALU32_REG(BPF_ADD, R2, R3),
966 BPF_ALU32_REG(BPF_ADD, R2, R4),
967 BPF_ALU32_REG(BPF_ADD, R2, R5),
968 BPF_ALU32_REG(BPF_ADD, R2, R6),
969 BPF_ALU32_REG(BPF_ADD, R2, R7),
970 BPF_ALU32_REG(BPF_ADD, R2, R8),
971 BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
972 BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
973 BPF_EXIT_INSN(),
974 BPF_ALU32_REG(BPF_ADD, R3, R0),
975 BPF_ALU32_REG(BPF_ADD, R3, R1),
976 BPF_ALU32_REG(BPF_ADD, R3, R2),
977 BPF_ALU32_REG(BPF_ADD, R3, R3),
978 BPF_ALU32_REG(BPF_ADD, R3, R4),
979 BPF_ALU32_REG(BPF_ADD, R3, R5),
980 BPF_ALU32_REG(BPF_ADD, R3, R6),
981 BPF_ALU32_REG(BPF_ADD, R3, R7),
982 BPF_ALU32_REG(BPF_ADD, R3, R8),
983 BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
984 BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
985 BPF_EXIT_INSN(),
986 BPF_ALU32_REG(BPF_ADD, R4, R0),
987 BPF_ALU32_REG(BPF_ADD, R4, R1),
988 BPF_ALU32_REG(BPF_ADD, R4, R2),
989 BPF_ALU32_REG(BPF_ADD, R4, R3),
990 BPF_ALU32_REG(BPF_ADD, R4, R4),
991 BPF_ALU32_REG(BPF_ADD, R4, R5),
992 BPF_ALU32_REG(BPF_ADD, R4, R6),
993 BPF_ALU32_REG(BPF_ADD, R4, R7),
994 BPF_ALU32_REG(BPF_ADD, R4, R8),
995 BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
996 BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
997 BPF_EXIT_INSN(),
998 BPF_ALU32_REG(BPF_ADD, R5, R0),
999 BPF_ALU32_REG(BPF_ADD, R5, R1),
1000 BPF_ALU32_REG(BPF_ADD, R5, R2),
1001 BPF_ALU32_REG(BPF_ADD, R5, R3),
1002 BPF_ALU32_REG(BPF_ADD, R5, R4),
1003 BPF_ALU32_REG(BPF_ADD, R5, R5),
1004 BPF_ALU32_REG(BPF_ADD, R5, R6),
1005 BPF_ALU32_REG(BPF_ADD, R5, R7),
1006 BPF_ALU32_REG(BPF_ADD, R5, R8),
1007 BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
1008 BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
1009 BPF_EXIT_INSN(),
1010 BPF_ALU32_REG(BPF_ADD, R6, R0),
1011 BPF_ALU32_REG(BPF_ADD, R6, R1),
1012 BPF_ALU32_REG(BPF_ADD, R6, R2),
1013 BPF_ALU32_REG(BPF_ADD, R6, R3),
1014 BPF_ALU32_REG(BPF_ADD, R6, R4),
1015 BPF_ALU32_REG(BPF_ADD, R6, R5),
1016 BPF_ALU32_REG(BPF_ADD, R6, R6),
1017 BPF_ALU32_REG(BPF_ADD, R6, R7),
1018 BPF_ALU32_REG(BPF_ADD, R6, R8),
1019 BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
1020 BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
1021 BPF_EXIT_INSN(),
1022 BPF_ALU32_REG(BPF_ADD, R7, R0),
1023 BPF_ALU32_REG(BPF_ADD, R7, R1),
1024 BPF_ALU32_REG(BPF_ADD, R7, R2),
1025 BPF_ALU32_REG(BPF_ADD, R7, R3),
1026 BPF_ALU32_REG(BPF_ADD, R7, R4),
1027 BPF_ALU32_REG(BPF_ADD, R7, R5),
1028 BPF_ALU32_REG(BPF_ADD, R7, R6),
1029 BPF_ALU32_REG(BPF_ADD, R7, R7),
1030 BPF_ALU32_REG(BPF_ADD, R7, R8),
1031 BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
1032 BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
1033 BPF_EXIT_INSN(),
1034 BPF_ALU32_REG(BPF_ADD, R8, R0),
1035 BPF_ALU32_REG(BPF_ADD, R8, R1),
1036 BPF_ALU32_REG(BPF_ADD, R8, R2),
1037 BPF_ALU32_REG(BPF_ADD, R8, R3),
1038 BPF_ALU32_REG(BPF_ADD, R8, R4),
1039 BPF_ALU32_REG(BPF_ADD, R8, R5),
1040 BPF_ALU32_REG(BPF_ADD, R8, R6),
1041 BPF_ALU32_REG(BPF_ADD, R8, R7),
1042 BPF_ALU32_REG(BPF_ADD, R8, R8),
1043 BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
1044 BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
1045 BPF_EXIT_INSN(),
1046 BPF_ALU32_REG(BPF_ADD, R9, R0),
1047 BPF_ALU32_REG(BPF_ADD, R9, R1),
1048 BPF_ALU32_REG(BPF_ADD, R9, R2),
1049 BPF_ALU32_REG(BPF_ADD, R9, R3),
1050 BPF_ALU32_REG(BPF_ADD, R9, R4),
1051 BPF_ALU32_REG(BPF_ADD, R9, R5),
1052 BPF_ALU32_REG(BPF_ADD, R9, R6),
1053 BPF_ALU32_REG(BPF_ADD, R9, R7),
1054 BPF_ALU32_REG(BPF_ADD, R9, R8),
1055 BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
1056 BPF_ALU32_REG(BPF_MOV, R0, R9),
1057 BPF_EXIT_INSN(),
1058 },
1059 INTERNAL,
1060 { },
1061 { { 0, 2957380 } }
1062 },
1063 { /* Mainly checking JIT here. */
1064 "INT: SUB",
1065 .u.insns_int = {
1066 BPF_ALU64_IMM(BPF_MOV, R0, 0),
1067 BPF_ALU64_IMM(BPF_MOV, R1, 1),
1068 BPF_ALU64_IMM(BPF_MOV, R2, 2),
1069 BPF_ALU64_IMM(BPF_MOV, R3, 3),
1070 BPF_ALU64_IMM(BPF_MOV, R4, 4),
1071 BPF_ALU64_IMM(BPF_MOV, R5, 5),
1072 BPF_ALU64_IMM(BPF_MOV, R6, 6),
1073 BPF_ALU64_IMM(BPF_MOV, R7, 7),
1074 BPF_ALU64_IMM(BPF_MOV, R8, 8),
1075 BPF_ALU64_IMM(BPF_MOV, R9, 9),
1076 BPF_ALU64_REG(BPF_SUB, R0, R0),
1077 BPF_ALU64_REG(BPF_SUB, R0, R1),
1078 BPF_ALU64_REG(BPF_SUB, R0, R2),
1079 BPF_ALU64_REG(BPF_SUB, R0, R3),
1080 BPF_ALU64_REG(BPF_SUB, R0, R4),
1081 BPF_ALU64_REG(BPF_SUB, R0, R5),
1082 BPF_ALU64_REG(BPF_SUB, R0, R6),
1083 BPF_ALU64_REG(BPF_SUB, R0, R7),
1084 BPF_ALU64_REG(BPF_SUB, R0, R8),
1085 BPF_ALU64_REG(BPF_SUB, R0, R9),
1086 BPF_ALU64_IMM(BPF_SUB, R0, 10),
1087 BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
1088 BPF_EXIT_INSN(),
1089 BPF_ALU64_REG(BPF_SUB, R1, R0),
1090 BPF_ALU64_REG(BPF_SUB, R1, R2),
1091 BPF_ALU64_REG(BPF_SUB, R1, R3),
1092 BPF_ALU64_REG(BPF_SUB, R1, R4),
1093 BPF_ALU64_REG(BPF_SUB, R1, R5),
1094 BPF_ALU64_REG(BPF_SUB, R1, R6),
1095 BPF_ALU64_REG(BPF_SUB, R1, R7),
1096 BPF_ALU64_REG(BPF_SUB, R1, R8),
1097 BPF_ALU64_REG(BPF_SUB, R1, R9),
1098 BPF_ALU64_IMM(BPF_SUB, R1, 10),
1099 BPF_ALU64_REG(BPF_SUB, R2, R0),
1100 BPF_ALU64_REG(BPF_SUB, R2, R1),
1101 BPF_ALU64_REG(BPF_SUB, R2, R3),
1102 BPF_ALU64_REG(BPF_SUB, R2, R4),
1103 BPF_ALU64_REG(BPF_SUB, R2, R5),
1104 BPF_ALU64_REG(BPF_SUB, R2, R6),
1105 BPF_ALU64_REG(BPF_SUB, R2, R7),
1106 BPF_ALU64_REG(BPF_SUB, R2, R8),
1107 BPF_ALU64_REG(BPF_SUB, R2, R9),
1108 BPF_ALU64_IMM(BPF_SUB, R2, 10),
1109 BPF_ALU64_REG(BPF_SUB, R3, R0),
1110 BPF_ALU64_REG(BPF_SUB, R3, R1),
1111 BPF_ALU64_REG(BPF_SUB, R3, R2),
1112 BPF_ALU64_REG(BPF_SUB, R3, R4),
1113 BPF_ALU64_REG(BPF_SUB, R3, R5),
1114 BPF_ALU64_REG(BPF_SUB, R3, R6),
1115 BPF_ALU64_REG(BPF_SUB, R3, R7),
1116 BPF_ALU64_REG(BPF_SUB, R3, R8),
1117 BPF_ALU64_REG(BPF_SUB, R3, R9),
1118 BPF_ALU64_IMM(BPF_SUB, R3, 10),
1119 BPF_ALU64_REG(BPF_SUB, R4, R0),
1120 BPF_ALU64_REG(BPF_SUB, R4, R1),
1121 BPF_ALU64_REG(BPF_SUB, R4, R2),
1122 BPF_ALU64_REG(BPF_SUB, R4, R3),
1123 BPF_ALU64_REG(BPF_SUB, R4, R5),
1124 BPF_ALU64_REG(BPF_SUB, R4, R6),
1125 BPF_ALU64_REG(BPF_SUB, R4, R7),
1126 BPF_ALU64_REG(BPF_SUB, R4, R8),
1127 BPF_ALU64_REG(BPF_SUB, R4, R9),
1128 BPF_ALU64_IMM(BPF_SUB, R4, 10),
1129 BPF_ALU64_REG(BPF_SUB, R5, R0),
1130 BPF_ALU64_REG(BPF_SUB, R5, R1),
1131 BPF_ALU64_REG(BPF_SUB, R5, R2),
1132 BPF_ALU64_REG(BPF_SUB, R5, R3),
1133 BPF_ALU64_REG(BPF_SUB, R5, R4),
1134 BPF_ALU64_REG(BPF_SUB, R5, R6),
1135 BPF_ALU64_REG(BPF_SUB, R5, R7),
1136 BPF_ALU64_REG(BPF_SUB, R5, R8),
1137 BPF_ALU64_REG(BPF_SUB, R5, R9),
1138 BPF_ALU64_IMM(BPF_SUB, R5, 10),
1139 BPF_ALU64_REG(BPF_SUB, R6, R0),
1140 BPF_ALU64_REG(BPF_SUB, R6, R1),
1141 BPF_ALU64_REG(BPF_SUB, R6, R2),
1142 BPF_ALU64_REG(BPF_SUB, R6, R3),
1143 BPF_ALU64_REG(BPF_SUB, R6, R4),
1144 BPF_ALU64_REG(BPF_SUB, R6, R5),
1145 BPF_ALU64_REG(BPF_SUB, R6, R7),
1146 BPF_ALU64_REG(BPF_SUB, R6, R8),
1147 BPF_ALU64_REG(BPF_SUB, R6, R9),
1148 BPF_ALU64_IMM(BPF_SUB, R6, 10),
1149 BPF_ALU64_REG(BPF_SUB, R7, R0),
1150 BPF_ALU64_REG(BPF_SUB, R7, R1),
1151 BPF_ALU64_REG(BPF_SUB, R7, R2),
1152 BPF_ALU64_REG(BPF_SUB, R7, R3),
1153 BPF_ALU64_REG(BPF_SUB, R7, R4),
1154 BPF_ALU64_REG(BPF_SUB, R7, R5),
1155 BPF_ALU64_REG(BPF_SUB, R7, R6),
1156 BPF_ALU64_REG(BPF_SUB, R7, R8),
1157 BPF_ALU64_REG(BPF_SUB, R7, R9),
1158 BPF_ALU64_IMM(BPF_SUB, R7, 10),
1159 BPF_ALU64_REG(BPF_SUB, R8, R0),
1160 BPF_ALU64_REG(BPF_SUB, R8, R1),
1161 BPF_ALU64_REG(BPF_SUB, R8, R2),
1162 BPF_ALU64_REG(BPF_SUB, R8, R3),
1163 BPF_ALU64_REG(BPF_SUB, R8, R4),
1164 BPF_ALU64_REG(BPF_SUB, R8, R5),
1165 BPF_ALU64_REG(BPF_SUB, R8, R6),
1166 BPF_ALU64_REG(BPF_SUB, R8, R7),
1167 BPF_ALU64_REG(BPF_SUB, R8, R9),
1168 BPF_ALU64_IMM(BPF_SUB, R8, 10),
1169 BPF_ALU64_REG(BPF_SUB, R9, R0),
1170 BPF_ALU64_REG(BPF_SUB, R9, R1),
1171 BPF_ALU64_REG(BPF_SUB, R9, R2),
1172 BPF_ALU64_REG(BPF_SUB, R9, R3),
1173 BPF_ALU64_REG(BPF_SUB, R9, R4),
1174 BPF_ALU64_REG(BPF_SUB, R9, R5),
1175 BPF_ALU64_REG(BPF_SUB, R9, R6),
1176 BPF_ALU64_REG(BPF_SUB, R9, R7),
1177 BPF_ALU64_REG(BPF_SUB, R9, R8),
1178 BPF_ALU64_IMM(BPF_SUB, R9, 10),
1179 BPF_ALU64_IMM(BPF_SUB, R0, 10),
1180 BPF_ALU64_IMM(BPF_NEG, R0, 0),
1181 BPF_ALU64_REG(BPF_SUB, R0, R1),
1182 BPF_ALU64_REG(BPF_SUB, R0, R2),
1183 BPF_ALU64_REG(BPF_SUB, R0, R3),
1184 BPF_ALU64_REG(BPF_SUB, R0, R4),
1185 BPF_ALU64_REG(BPF_SUB, R0, R5),
1186 BPF_ALU64_REG(BPF_SUB, R0, R6),
1187 BPF_ALU64_REG(BPF_SUB, R0, R7),
1188 BPF_ALU64_REG(BPF_SUB, R0, R8),
1189 BPF_ALU64_REG(BPF_SUB, R0, R9),
1190 BPF_EXIT_INSN(),
1191 },
1192 INTERNAL,
1193 { },
1194 { { 0, 11 } }
1195 },
1196 { /* Mainly checking JIT here. */
1197 "INT: XOR",
1198 .u.insns_int = {
1199 BPF_ALU64_REG(BPF_SUB, R0, R0),
1200 BPF_ALU64_REG(BPF_XOR, R1, R1),
1201 BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
1202 BPF_EXIT_INSN(),
1203 BPF_ALU64_IMM(BPF_MOV, R0, 10),
1204 BPF_ALU64_IMM(BPF_MOV, R1, -1),
1205 BPF_ALU64_REG(BPF_SUB, R1, R1),
1206 BPF_ALU64_REG(BPF_XOR, R2, R2),
1207 BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
1208 BPF_EXIT_INSN(),
1209 BPF_ALU64_REG(BPF_SUB, R2, R2),
1210 BPF_ALU64_REG(BPF_XOR, R3, R3),
1211 BPF_ALU64_IMM(BPF_MOV, R0, 10),
1212 BPF_ALU64_IMM(BPF_MOV, R1, -1),
1213 BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
1214 BPF_EXIT_INSN(),
1215 BPF_ALU64_REG(BPF_SUB, R3, R3),
1216 BPF_ALU64_REG(BPF_XOR, R4, R4),
1217 BPF_ALU64_IMM(BPF_MOV, R2, 1),
1218 BPF_ALU64_IMM(BPF_MOV, R5, -1),
1219 BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
1220 BPF_EXIT_INSN(),
1221 BPF_ALU64_REG(BPF_SUB, R4, R4),
1222 BPF_ALU64_REG(BPF_XOR, R5, R5),
1223 BPF_ALU64_IMM(BPF_MOV, R3, 1),
1224 BPF_ALU64_IMM(BPF_MOV, R7, -1),
1225 BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
1226 BPF_EXIT_INSN(),
1227 BPF_ALU64_IMM(BPF_MOV, R5, 1),
1228 BPF_ALU64_REG(BPF_SUB, R5, R5),
1229 BPF_ALU64_REG(BPF_XOR, R6, R6),
1230 BPF_ALU64_IMM(BPF_MOV, R1, 1),
1231 BPF_ALU64_IMM(BPF_MOV, R8, -1),
1232 BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
1233 BPF_EXIT_INSN(),
1234 BPF_ALU64_REG(BPF_SUB, R6, R6),
1235 BPF_ALU64_REG(BPF_XOR, R7, R7),
1236 BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
1237 BPF_EXIT_INSN(),
1238 BPF_ALU64_REG(BPF_SUB, R7, R7),
1239 BPF_ALU64_REG(BPF_XOR, R8, R8),
1240 BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
1241 BPF_EXIT_INSN(),
1242 BPF_ALU64_REG(BPF_SUB, R8, R8),
1243 BPF_ALU64_REG(BPF_XOR, R9, R9),
1244 BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
1245 BPF_EXIT_INSN(),
1246 BPF_ALU64_REG(BPF_SUB, R9, R9),
1247 BPF_ALU64_REG(BPF_XOR, R0, R0),
1248 BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
1249 BPF_EXIT_INSN(),
1250 BPF_ALU64_REG(BPF_SUB, R1, R1),
1251 BPF_ALU64_REG(BPF_XOR, R0, R0),
1252 BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
1253 BPF_ALU64_IMM(BPF_MOV, R0, 0),
1254 BPF_EXIT_INSN(),
1255 BPF_ALU64_IMM(BPF_MOV, R0, 1),
1256 BPF_EXIT_INSN(),
1257 },
1258 INTERNAL,
1259 { },
1260 { { 0, 1 } }
1261 },
1262 { /* Mainly checking JIT here. */
1263 "INT: MUL",
1264 .u.insns_int = {
1265 BPF_ALU64_IMM(BPF_MOV, R0, 11),
1266 BPF_ALU64_IMM(BPF_MOV, R1, 1),
1267 BPF_ALU64_IMM(BPF_MOV, R2, 2),
1268 BPF_ALU64_IMM(BPF_MOV, R3, 3),
1269 BPF_ALU64_IMM(BPF_MOV, R4, 4),
1270 BPF_ALU64_IMM(BPF_MOV, R5, 5),
1271 BPF_ALU64_IMM(BPF_MOV, R6, 6),
1272 BPF_ALU64_IMM(BPF_MOV, R7, 7),
1273 BPF_ALU64_IMM(BPF_MOV, R8, 8),
1274 BPF_ALU64_IMM(BPF_MOV, R9, 9),
1275 BPF_ALU64_REG(BPF_MUL, R0, R0),
1276 BPF_ALU64_REG(BPF_MUL, R0, R1),
1277 BPF_ALU64_REG(BPF_MUL, R0, R2),
1278 BPF_ALU64_REG(BPF_MUL, R0, R3),
1279 BPF_ALU64_REG(BPF_MUL, R0, R4),
1280 BPF_ALU64_REG(BPF_MUL, R0, R5),
1281 BPF_ALU64_REG(BPF_MUL, R0, R6),
1282 BPF_ALU64_REG(BPF_MUL, R0, R7),
1283 BPF_ALU64_REG(BPF_MUL, R0, R8),
1284 BPF_ALU64_REG(BPF_MUL, R0, R9),
1285 BPF_ALU64_IMM(BPF_MUL, R0, 10),
1286 BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
1287 BPF_EXIT_INSN(),
1288 BPF_ALU64_REG(BPF_MUL, R1, R0),
1289 BPF_ALU64_REG(BPF_MUL, R1, R2),
1290 BPF_ALU64_REG(BPF_MUL, R1, R3),
1291 BPF_ALU64_REG(BPF_MUL, R1, R4),
1292 BPF_ALU64_REG(BPF_MUL, R1, R5),
1293 BPF_ALU64_REG(BPF_MUL, R1, R6),
1294 BPF_ALU64_REG(BPF_MUL, R1, R7),
1295 BPF_ALU64_REG(BPF_MUL, R1, R8),
1296 BPF_ALU64_REG(BPF_MUL, R1, R9),
1297 BPF_ALU64_IMM(BPF_MUL, R1, 10),
1298 BPF_ALU64_REG(BPF_MOV, R2, R1),
1299 BPF_ALU64_IMM(BPF_RSH, R2, 32),
1300 BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
1301 BPF_EXIT_INSN(),
1302 BPF_ALU64_IMM(BPF_LSH, R1, 32),
1303 BPF_ALU64_IMM(BPF_ARSH, R1, 32),
1304 BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
1305 BPF_EXIT_INSN(),
1306 BPF_ALU64_REG(BPF_MUL, R2, R0),
1307 BPF_ALU64_REG(BPF_MUL, R2, R1),
1308 BPF_ALU64_REG(BPF_MUL, R2, R3),
1309 BPF_ALU64_REG(BPF_MUL, R2, R4),
1310 BPF_ALU64_REG(BPF_MUL, R2, R5),
1311 BPF_ALU64_REG(BPF_MUL, R2, R6),
1312 BPF_ALU64_REG(BPF_MUL, R2, R7),
1313 BPF_ALU64_REG(BPF_MUL, R2, R8),
1314 BPF_ALU64_REG(BPF_MUL, R2, R9),
1315 BPF_ALU64_IMM(BPF_MUL, R2, 10),
1316 BPF_ALU64_IMM(BPF_RSH, R2, 32),
1317 BPF_ALU64_REG(BPF_MOV, R0, R2),
1318 BPF_EXIT_INSN(),
1319 },
1320 INTERNAL,
1321 { },
1322 { { 0, 0x35d97ef2 } }
1323 },
1324 {
1325 "INT: ALU MIX",
1326 .u.insns_int = {
1327 BPF_ALU64_IMM(BPF_MOV, R0, 11),
1328 BPF_ALU64_IMM(BPF_ADD, R0, -1),
1329 BPF_ALU64_IMM(BPF_MOV, R2, 2),
1330 BPF_ALU64_IMM(BPF_XOR, R2, 3),
1331 BPF_ALU64_REG(BPF_DIV, R0, R2),
1332 BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
1333 BPF_EXIT_INSN(),
1334 BPF_ALU64_IMM(BPF_MOD, R0, 3),
1335 BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
1336 BPF_EXIT_INSN(),
1337 BPF_ALU64_IMM(BPF_MOV, R0, -1),
1338 BPF_EXIT_INSN(),
1339 },
1340 INTERNAL,
1341 { },
1342 { { 0, -1 } }
1343 },
1344 {
1345 "INT: DIV + ABS",
1346 .u.insns_int = {
1347 BPF_ALU64_REG(BPF_MOV, R6, R1),
1348 BPF_LD_ABS(BPF_B, 3),
1349 BPF_ALU64_IMM(BPF_MOV, R2, 2),
1350 BPF_ALU32_REG(BPF_DIV, R0, R2),
1351 BPF_ALU64_REG(BPF_MOV, R8, R0),
1352 BPF_LD_ABS(BPF_B, 4),
1353 BPF_ALU64_REG(BPF_ADD, R8, R0),
1354 BPF_LD_IND(BPF_B, R8, -70),
1355 BPF_EXIT_INSN(),
1356 },
1357 INTERNAL,
1358 { 10, 20, 30, 40, 50 },
1359 { { 4, 0 }, { 5, 10 } }
1360 },
1361 {
1362 "INT: DIV by zero",
1363 .u.insns_int = {
1364 BPF_ALU64_REG(BPF_MOV, R6, R1),
1365 BPF_ALU64_IMM(BPF_MOV, R7, 0),
1366 BPF_LD_ABS(BPF_B, 3),
1367 BPF_ALU32_REG(BPF_DIV, R0, R7),
1368 BPF_EXIT_INSN(),
1369 },
1370 INTERNAL,
1371 { 10, 20, 30, 40, 50 },
1372 { { 3, 0 }, { 4, 0 } }
1373 },
1374 {
1375 "check: missing ret",
1376 .u.insns = {
1377 BPF_STMT(BPF_LD | BPF_IMM, 1),
1378 },
1379 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1380 { },
1381 { }
1382 },
1383 {
1384 "check: div_k_0",
1385 .u.insns = {
1386 BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
1387 BPF_STMT(BPF_RET | BPF_K, 0)
1388 },
1389 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1390 { },
1391 { }
1392 },
1393 {
1394 "check: unknown insn",
1395 .u.insns = {
1396 /* seccomp insn, rejected in socket filter */
1397 BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
1398 BPF_STMT(BPF_RET | BPF_K, 0)
1399 },
1400 CLASSIC | FLAG_EXPECTED_FAIL,
1401 { },
1402 { }
1403 },
1404 {
1405 "check: out of range spill/fill",
1406 .u.insns = {
1407 BPF_STMT(BPF_STX, 16),
1408 BPF_STMT(BPF_RET | BPF_K, 0)
1409 },
1410 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1411 { },
1412 { }
1413 },
1414 {
1415 "JUMPS + HOLES",
1416 .u.insns = {
1417 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1418 BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
1419 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1420 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1421 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1422 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1423 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1424 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1425 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1426 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1427 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1428 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1429 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1430 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1431 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1432 BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
1433 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1434 BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
1435 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1436 BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
1437 BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
1438 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1439 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1440 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1441 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1442 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1443 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1444 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1445 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1446 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1447 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1448 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1449 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1450 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1451 BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
1452 BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
1453 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1454 BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
1455 BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
1456 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1457 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1458 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1459 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1460 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1461 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1462 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1463 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1464 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1465 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1466 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1467 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1468 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1469 BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
1470 BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
1471 BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
1472 BPF_STMT(BPF_RET | BPF_A, 0),
1473 BPF_STMT(BPF_RET | BPF_A, 0),
1474 },
1475 CLASSIC,
1476 { 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8,
1477 0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4,
1478 0x08, 0x00,
1479 0x45, 0x00, 0x00, 0x28, 0x00, 0x00,
1480 0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */
1481 0xc0, 0xa8, 0x33, 0x01,
1482 0xc0, 0xa8, 0x33, 0x02,
1483 0xbb, 0xb6,
1484 0xa9, 0xfa,
1485 0x00, 0x14, 0x00, 0x00,
1486 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
1487 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
1488 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
1489 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
1490 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
1491 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
1492 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
1493 0xcc, 0xcc, 0xcc, 0xcc },
1494 { { 88, 0x001b } }
1495 },
1496 {
1497 "check: RET X",
1498 .u.insns = {
1499 BPF_STMT(BPF_RET | BPF_X, 0),
1500 },
1501 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1502 { },
1503 { },
1504 },
1505 {
1506 "check: LDX + RET X",
1507 .u.insns = {
1508 BPF_STMT(BPF_LDX | BPF_IMM, 42),
1509 BPF_STMT(BPF_RET | BPF_X, 0),
1510 },
1511 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1512 { },
1513 { },
1514 },
1515 { /* Mainly checking JIT here. */
1516 "M[]: alt STX + LDX",
1517 .u.insns = {
1518 BPF_STMT(BPF_LDX | BPF_IMM, 100),
1519 BPF_STMT(BPF_STX, 0),
1520 BPF_STMT(BPF_LDX | BPF_MEM, 0),
1521 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1522 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1523 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1524 BPF_STMT(BPF_STX, 1),
1525 BPF_STMT(BPF_LDX | BPF_MEM, 1),
1526 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1527 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1528 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1529 BPF_STMT(BPF_STX, 2),
1530 BPF_STMT(BPF_LDX | BPF_MEM, 2),
1531 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1532 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1533 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1534 BPF_STMT(BPF_STX, 3),
1535 BPF_STMT(BPF_LDX | BPF_MEM, 3),
1536 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1537 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1538 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1539 BPF_STMT(BPF_STX, 4),
1540 BPF_STMT(BPF_LDX | BPF_MEM, 4),
1541 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1542 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1543 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1544 BPF_STMT(BPF_STX, 5),
1545 BPF_STMT(BPF_LDX | BPF_MEM, 5),
1546 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1547 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1548 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1549 BPF_STMT(BPF_STX, 6),
1550 BPF_STMT(BPF_LDX | BPF_MEM, 6),
1551 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1552 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1553 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1554 BPF_STMT(BPF_STX, 7),
1555 BPF_STMT(BPF_LDX | BPF_MEM, 7),
1556 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1557 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1558 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1559 BPF_STMT(BPF_STX, 8),
1560 BPF_STMT(BPF_LDX | BPF_MEM, 8),
1561 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1562 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1563 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1564 BPF_STMT(BPF_STX, 9),
1565 BPF_STMT(BPF_LDX | BPF_MEM, 9),
1566 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1567 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1568 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1569 BPF_STMT(BPF_STX, 10),
1570 BPF_STMT(BPF_LDX | BPF_MEM, 10),
1571 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1572 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1573 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1574 BPF_STMT(BPF_STX, 11),
1575 BPF_STMT(BPF_LDX | BPF_MEM, 11),
1576 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1577 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1578 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1579 BPF_STMT(BPF_STX, 12),
1580 BPF_STMT(BPF_LDX | BPF_MEM, 12),
1581 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1582 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1583 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1584 BPF_STMT(BPF_STX, 13),
1585 BPF_STMT(BPF_LDX | BPF_MEM, 13),
1586 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1587 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1588 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1589 BPF_STMT(BPF_STX, 14),
1590 BPF_STMT(BPF_LDX | BPF_MEM, 14),
1591 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1592 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1593 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1594 BPF_STMT(BPF_STX, 15),
1595 BPF_STMT(BPF_LDX | BPF_MEM, 15),
1596 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1597 BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
1598 BPF_STMT(BPF_MISC | BPF_TAX, 0),
1599 BPF_STMT(BPF_RET | BPF_A, 0),
1600 },
1601 CLASSIC | FLAG_NO_DATA,
1602 { },
1603 { { 0, 116 } },
1604 },
1605 { /* Mainly checking JIT here. */
1606 "M[]: full STX + full LDX",
1607 .u.insns = {
1608 BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
1609 BPF_STMT(BPF_STX, 0),
1610 BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
1611 BPF_STMT(BPF_STX, 1),
1612 BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
1613 BPF_STMT(BPF_STX, 2),
1614 BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
1615 BPF_STMT(BPF_STX, 3),
1616 BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
1617 BPF_STMT(BPF_STX, 4),
1618 BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
1619 BPF_STMT(BPF_STX, 5),
1620 BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
1621 BPF_STMT(BPF_STX, 6),
1622 BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
1623 BPF_STMT(BPF_STX, 7),
1624 BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
1625 BPF_STMT(BPF_STX, 8),
1626 BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
1627 BPF_STMT(BPF_STX, 9),
1628 BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
1629 BPF_STMT(BPF_STX, 10),
1630 BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
1631 BPF_STMT(BPF_STX, 11),
1632 BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
1633 BPF_STMT(BPF_STX, 12),
1634 BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
1635 BPF_STMT(BPF_STX, 13),
1636 BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
1637 BPF_STMT(BPF_STX, 14),
1638 BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
1639 BPF_STMT(BPF_STX, 15),
1640 BPF_STMT(BPF_LDX | BPF_MEM, 0),
1641 BPF_STMT(BPF_MISC | BPF_TXA, 0),
1642 BPF_STMT(BPF_LDX | BPF_MEM, 1),
1643 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1644 BPF_STMT(BPF_LDX | BPF_MEM, 2),
1645 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1646 BPF_STMT(BPF_LDX | BPF_MEM, 3),
1647 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1648 BPF_STMT(BPF_LDX | BPF_MEM, 4),
1649 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1650 BPF_STMT(BPF_LDX | BPF_MEM, 5),
1651 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1652 BPF_STMT(BPF_LDX | BPF_MEM, 6),
1653 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1654 BPF_STMT(BPF_LDX | BPF_MEM, 7),
1655 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1656 BPF_STMT(BPF_LDX | BPF_MEM, 8),
1657 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1658 BPF_STMT(BPF_LDX | BPF_MEM, 9),
1659 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1660 BPF_STMT(BPF_LDX | BPF_MEM, 10),
1661 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1662 BPF_STMT(BPF_LDX | BPF_MEM, 11),
1663 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1664 BPF_STMT(BPF_LDX | BPF_MEM, 12),
1665 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1666 BPF_STMT(BPF_LDX | BPF_MEM, 13),
1667 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1668 BPF_STMT(BPF_LDX | BPF_MEM, 14),
1669 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1670 BPF_STMT(BPF_LDX | BPF_MEM, 15),
1671 BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
1672 BPF_STMT(BPF_RET | BPF_A, 0),
1673 },
1674 CLASSIC | FLAG_NO_DATA,
1675 { },
1676 { { 0, 0x2a5a5e5 } },
1677 },
1678 {
1679 "check: SKF_AD_MAX",
1680 .u.insns = {
1681 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
1682 SKF_AD_OFF + SKF_AD_MAX),
1683 BPF_STMT(BPF_RET | BPF_A, 0),
1684 },
1685 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
1686 { },
1687 { },
1688 },
1689 { /* Passes checker but fails during runtime. */
1690 "LD [SKF_AD_OFF-1]",
1691 .u.insns = {
1692 BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
1693 SKF_AD_OFF - 1),
1694 BPF_STMT(BPF_RET | BPF_K, 1),
1695 },
1696 CLASSIC,
1697 { },
1698 { { 1, 0 } },
1699 },
1700};
1701
/* Single dummy net_device shared by every fake skb built in populate_skb(). */
1702 static struct net_device dev;
1703
/*
 * populate_skb - build a fake sk_buff carrying a known test pattern.
 *
 * Copies @size bytes from @buf into a freshly allocated skb and fills a
 * fixed set of metadata fields (protocol, pkt_type, mark, hash, queue
 * mapping, VLAN tag, device ifindex/type) so that BPF ancillary loads
 * observe deterministic values during the tests.
 *
 * Returns NULL if @size does not fit below MAX_DATA or if allocation
 * fails; otherwise returns the skb, which the caller releases with
 * kfree_skb() (see release_test_data()).
 */
1704 static struct sk_buff *populate_skb(char *buf, int size)
1705 {
1706 struct sk_buff *skb;
1707 
/* Reject payloads that would not fit in the fixed-size allocation below. */
1708 if (size >= MAX_DATA)
1709 return NULL;
1710 
1711 skb = alloc_skb(MAX_DATA, GFP_KERNEL);
1712 if (!skb)
1713 return NULL;
1714 
1715 memcpy(__skb_put(skb, size), buf, size);
1716 
1717 /* Initialize a fake skb with test pattern. */
1718 skb_reset_mac_header(skb);
1719 skb->protocol = htons(ETH_P_IP);
1720 skb->pkt_type = SKB_TYPE;
1721 skb->mark = SKB_MARK;
1722 skb->hash = SKB_HASH;
1723 skb->queue_mapping = SKB_QUEUE_MAP;
1724 skb->vlan_tci = SKB_VLAN_TCI;
/* All test skbs point at the shared dummy device declared above. */
1725 skb->dev = &dev;
1726 skb->dev->ifindex = SKB_DEV_IFINDEX;
1727 skb->dev->type = SKB_DEV_TYPE;
1728 skb_set_network_header(skb, min(size, ETH_HLEN));
1729 
1730 return skb;
1731}
1732
/*
 * generate_test_data - produce the input buffer for subtest @sub of @test.
 *
 * Tests flagged FLAG_NO_DATA run with a NULL context; everything else gets
 * a fake skb built from the test's shared data, truncated to the subtest's
 * data_size. Pair with release_test_data().
 */
1733 static void *generate_test_data(struct bpf_test *test, int sub)
1734 {
1735 if (test->aux & FLAG_NO_DATA)
1736 return NULL;
1737 
1738 /* Test case expects an skb, so populate one. Various
1739 * subtests generate skbs of different sizes based on
1740 * the same data.
1741 */
1742 return populate_skb(test->data, test->test[sub].data_size);
1743}
1744
/*
 * release_test_data - free what generate_test_data() allocated.
 *
 * FLAG_NO_DATA tests allocated nothing, so there is nothing to free;
 * otherwise @data is the skb from populate_skb().
 */
1745 static void release_test_data(const struct bpf_test *test, void *data)
1746 {
1747 if (test->aux & FLAG_NO_DATA)
1748 return;
1749 
1750 kfree_skb(data);
1751}
1752
/*
 * probe_filter_length - find the effective length of a test program.
 *
 * The insns arrays are fixed-size (MAX_INSNS) and zero-padded at the tail;
 * scan backwards for the last instruction whose code or k is non-zero and
 * return its index + 1. Assumes @fp has MAX_INSNS entries.
 */
1753 static int probe_filter_length(struct sock_filter *fp)
1754 {
1755 int len = 0;
1756 
1757 for (len = MAX_INSNS - 1; len > 0; --len)
1758 if (fp[len].code != 0 || fp[len].k != 0)
1759 break;
1760 
1761 return len + 1;
1762}
1763
/*
 * generate_filter - instantiate test @which as a runnable filter.
 *
 * CLASSIC tests go through sk_unattached_filter_create(), i.e. the normal
 * checker/converter path; FLAG_EXPECTED_FAIL tests PASS when the checker
 * returns -EINVAL. INTERNAL tests bypass the checker: the eBPF insns are
 * copied verbatim into a kzalloc'd sk_filter and a runtime is selected.
 *
 * On success returns the filter and sets *err to 0. Returns NULL with
 * *err set (0 for an expected rejection, negative otherwise) when no
 * filter is produced. Caller releases via release_filter().
 */
1764 static struct sk_filter *generate_filter(int which, int *err)
1765 {
1766 struct sk_filter *fp;
1767 struct sock_fprog_kern fprog;
1768 unsigned int flen = probe_filter_length(tests[which].u.insns);
1769 __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
1770 
1771 switch (test_type) {
1772 case CLASSIC:
1773 fprog.filter = tests[which].u.insns;
1774 fprog.len = flen;
1775 
1776 *err = sk_unattached_filter_create(&fp, &fprog);
1777 if (tests[which].aux & FLAG_EXPECTED_FAIL) {
1778 if (*err == -EINVAL) {
1779 pr_cont("PASS\n");
1780 /* Verifier rejected filter as expected. */
1781 *err = 0;
1782 return NULL;
1783 } else {
1784 pr_cont("UNEXPECTED_PASS\n");
1785 /* Verifier didn't reject the test that's
1786 * bad enough, just return!
1787 */
1788 *err = -EINVAL;
1789 return NULL;
1790 }
1791 }
1792 /* We don't expect to fail. */
1793 if (*err) {
1794 pr_cont("FAIL to attach err=%d len=%d\n",
1795 *err, fprog.len);
1796 return NULL;
1797 }
1798 break;
1799 
1800 case INTERNAL:
1801 fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
1802 if (fp == NULL) {
1803 pr_cont("UNEXPECTED_FAIL no memory left\n");
1804 *err = -ENOMEM;
1805 return NULL;
1806 }
1807 
1808 fp->len = flen;
/* Raw copy: internal insns are installed without checker validation. */
1809 memcpy(fp->insnsi, tests[which].u.insns_int,
1810 fp->len * sizeof(struct sock_filter_int));
1811 
1812 sk_filter_select_runtime(fp);
1813 break;
1814 }
1815 
1816 *err = 0;
1817 return fp;
1818}
1819
/*
 * release_filter - tear down a filter created by generate_filter().
 *
 * The destruction path must match the creation path, so the test type is
 * re-derived from tests[which].aux rather than stored in the filter.
 */
1820 static void release_filter(struct sk_filter *fp, int which)
1821 {
1822 __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
1823 
1824 switch (test_type) {
1825 case CLASSIC:
1826 sk_unattached_filter_destroy(fp);
1827 break;
1828 case INTERNAL:
1829 sk_filter_free(fp);
1830 break;
1831 }
1832}
1833
/*
 * __run_one - execute @fp against @data @runs times.
 *
 * Timestamps are taken in microseconds around the whole loop; the delta is
 * scaled by 1000 and divided by @runs, so *duration ends up as average
 * nanoseconds per run. Returns the filter's return value from the last
 * iteration (assumes runs > 0; callers pass MAX_TESTRUNS).
 */
1834 static int __run_one(const struct sk_filter *fp, const void *data,
1835 int runs, u64 *duration)
1836 {
1837 u64 start, finish;
1838 int ret, i;
1839 
1840 start = ktime_to_us(ktime_get());
1841 
1842 for (i = 0; i < runs; i++)
1843 ret = SK_RUN_FILTER(fp, data);
1844 
1845 finish = ktime_to_us(ktime_get());
1846 
1847 *duration = (finish - start) * 1000ULL;
1848 do_div(*duration, runs);
1849 
1850 return ret;
1851}
1852
/*
 * run_one - run every subtest of @test through filter @fp.
 *
 * Subtests are terminated by an all-zero { data_size, result } entry.
 * For each subtest the input is generated, the filter is timed via
 * __run_one(), and the return value is compared against the expected
 * result. Returns the number of mismatching subtests (0 == all passed).
 */
1853 static int run_one(const struct sk_filter *fp, struct bpf_test *test)
1854 {
1855 int err_cnt = 0, i, runs = MAX_TESTRUNS;
1856 
1857 for (i = 0; i < MAX_SUBTESTS; i++) {
1858 void *data;
1859 u64 duration;
1860 u32 ret;
1861 
/* A zeroed entry marks the end of this test's subtest list. */
1862 if (test->test[i].data_size == 0 &&
1863 test->test[i].result == 0)
1864 break;
1865 
1866 data = generate_test_data(test, i);
1867 ret = __run_one(fp, data, runs, &duration);
1868 release_test_data(test, data);
1869 
1870 if (ret == test->test[i].result) {
1871 pr_cont("%lld ", duration);
1872 } else {
1873 pr_cont("ret %d != %d ", ret,
1874 test->test[i].result);
1875 err_cnt++;
1876 }
1877 }
1878 
1879 return err_cnt;
1880}
1881
/*
 * test_bpf - top-level driver: build, run and tally every entry in tests[].
 *
 * A NULL filter with err == 0 counts as a pass (an expected checker
 * rejection); a NULL filter with err != 0 aborts the whole run with that
 * error. Otherwise each test's subtest mismatch count decides PASS/FAIL.
 * Returns 0 if everything passed, -EINVAL if any test failed.
 */
1882 static __init int test_bpf(void)
1883 {
1884 int i, err_cnt = 0, pass_cnt = 0;
1885 
1886 for (i = 0; i < ARRAY_SIZE(tests); i++) {
1887 struct sk_filter *fp;
1888 int err;
1889 
1890 pr_info("#%d %s ", i, tests[i].descr);
1891 
1892 fp = generate_filter(i, &err);
1893 if (fp == NULL) {
/* err == 0 here means the checker rejected the filter as expected. */
1894 if (err == 0) {
1895 pass_cnt++;
1896 continue;
1897 }
1898 
1899 return err;
1900 }
1901 err = run_one(fp, &tests[i]);
1902 release_filter(fp, i);
1903 
1904 if (err) {
1905 pr_cont("FAIL (%d times)\n", err);
1906 err_cnt++;
1907 } else {
1908 pr_cont("PASS\n");
1909 pass_cnt++;
1910 }
1911 }
1912 
1913 pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
1914 return err_cnt ? -EINVAL : 0;
1915}
1916
/* Module entry point: run the whole suite at load time; a non-zero return
 * makes module insertion fail, flagging the test failure to the loader.
 */
1917 static int __init test_bpf_init(void)
1918 {
1919 return test_bpf();
1920}
1921
/* Module exit point: nothing to clean up — all state is released per-test. */
1922 static void __exit test_bpf_exit(void)
1923 {
1924}
1925
/* Standard module boilerplate: register init/exit hooks and license. */
1926 module_init(test_bpf_init);
1927 module_exit(test_bpf_exit);
1928 
1929 MODULE_LICENSE("GPL");
diff --git a/lib/textsearch.c b/lib/textsearch.c
index e0cc0146ae62..0c7e9ab2d88f 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -159,6 +159,7 @@ errout:
159 spin_unlock(&ts_mod_lock); 159 spin_unlock(&ts_mod_lock);
160 return err; 160 return err;
161} 161}
162EXPORT_SYMBOL(textsearch_register);
162 163
163/** 164/**
164 * textsearch_unregister - unregister a textsearch module 165 * textsearch_unregister - unregister a textsearch module
@@ -190,6 +191,7 @@ out:
190 spin_unlock(&ts_mod_lock); 191 spin_unlock(&ts_mod_lock);
191 return err; 192 return err;
192} 193}
194EXPORT_SYMBOL(textsearch_unregister);
193 195
194struct ts_linear_state 196struct ts_linear_state
195{ 197{
@@ -236,6 +238,7 @@ unsigned int textsearch_find_continuous(struct ts_config *conf,
236 238
237 return textsearch_find(conf, state); 239 return textsearch_find(conf, state);
238} 240}
241EXPORT_SYMBOL(textsearch_find_continuous);
239 242
240/** 243/**
241 * textsearch_prepare - Prepare a search 244 * textsearch_prepare - Prepare a search
@@ -298,6 +301,7 @@ errout:
298 301
299 return ERR_PTR(err); 302 return ERR_PTR(err);
300} 303}
304EXPORT_SYMBOL(textsearch_prepare);
301 305
302/** 306/**
303 * textsearch_destroy - destroy a search configuration 307 * textsearch_destroy - destroy a search configuration
@@ -316,9 +320,4 @@ void textsearch_destroy(struct ts_config *conf)
316 320
317 kfree(conf); 321 kfree(conf);
318} 322}
319
320EXPORT_SYMBOL(textsearch_register);
321EXPORT_SYMBOL(textsearch_unregister);
322EXPORT_SYMBOL(textsearch_prepare);
323EXPORT_SYMBOL(textsearch_find_continuous);
324EXPORT_SYMBOL(textsearch_destroy); 323EXPORT_SYMBOL(textsearch_destroy);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0648291cdafe..6fe2c84eb055 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2347,7 +2347,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
2347 break; 2347 break;
2348 2348
2349 base = 10; 2349 base = 10;
2350 is_sign = 0; 2350 is_sign = false;
2351 2351
2352 switch (*fmt++) { 2352 switch (*fmt++) {
2353 case 'c': 2353 case 'c':
@@ -2386,7 +2386,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
2386 case 'i': 2386 case 'i':
2387 base = 0; 2387 base = 0;
2388 case 'd': 2388 case 'd':
2389 is_sign = 1; 2389 is_sign = true;
2390 case 'u': 2390 case 'u':
2391 break; 2391 break;
2392 case '%': 2392 case '%':
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 08837db52d94..12d2d777f36b 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -9,33 +9,33 @@ config XZ_DEC
9if XZ_DEC 9if XZ_DEC
10 10
11config XZ_DEC_X86 11config XZ_DEC_X86
12 bool "x86 BCJ filter decoder" 12 bool "x86 BCJ filter decoder" if EXPERT
13 default y if X86 13 default y
14 select XZ_DEC_BCJ 14 select XZ_DEC_BCJ
15 15
16config XZ_DEC_POWERPC 16config XZ_DEC_POWERPC
17 bool "PowerPC BCJ filter decoder" 17 bool "PowerPC BCJ filter decoder" if EXPERT
18 default y if PPC 18 default y
19 select XZ_DEC_BCJ 19 select XZ_DEC_BCJ
20 20
21config XZ_DEC_IA64 21config XZ_DEC_IA64
22 bool "IA-64 BCJ filter decoder" 22 bool "IA-64 BCJ filter decoder" if EXPERT
23 default y if IA64 23 default y
24 select XZ_DEC_BCJ 24 select XZ_DEC_BCJ
25 25
26config XZ_DEC_ARM 26config XZ_DEC_ARM
27 bool "ARM BCJ filter decoder" 27 bool "ARM BCJ filter decoder" if EXPERT
28 default y if ARM 28 default y
29 select XZ_DEC_BCJ 29 select XZ_DEC_BCJ
30 30
31config XZ_DEC_ARMTHUMB 31config XZ_DEC_ARMTHUMB
32 bool "ARM-Thumb BCJ filter decoder" 32 bool "ARM-Thumb BCJ filter decoder" if EXPERT
33 default y if (ARM && ARM_THUMB) 33 default y
34 select XZ_DEC_BCJ 34 select XZ_DEC_BCJ
35 35
36config XZ_DEC_SPARC 36config XZ_DEC_SPARC
37 bool "SPARC BCJ filter decoder" 37 bool "SPARC BCJ filter decoder" if EXPERT
38 default y if SPARC 38 default y
39 select XZ_DEC_BCJ 39 select XZ_DEC_BCJ
40 40
41endif 41endif
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index a6cdc969ea42..08c3c8049998 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -1043,6 +1043,8 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
1043 1043
1044 s->lzma2.sequence = SEQ_LZMA_PREPARE; 1044 s->lzma2.sequence = SEQ_LZMA_PREPARE;
1045 1045
1046 /* Fall through */
1047
1046 case SEQ_LZMA_PREPARE: 1048 case SEQ_LZMA_PREPARE:
1047 if (s->lzma2.compressed < RC_INIT_BYTES) 1049 if (s->lzma2.compressed < RC_INIT_BYTES)
1048 return XZ_DATA_ERROR; 1050 return XZ_DATA_ERROR;
@@ -1053,6 +1055,8 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
1053 s->lzma2.compressed -= RC_INIT_BYTES; 1055 s->lzma2.compressed -= RC_INIT_BYTES;
1054 s->lzma2.sequence = SEQ_LZMA_RUN; 1056 s->lzma2.sequence = SEQ_LZMA_RUN;
1055 1057
1058 /* Fall through */
1059
1056 case SEQ_LZMA_RUN: 1060 case SEQ_LZMA_RUN:
1057 /* 1061 /*
1058 * Set dictionary limit to indicate how much we want 1062 * Set dictionary limit to indicate how much we want