Diffstat (limited to 'lib')
 lib/.gitignore                |    2
 lib/Kconfig                   |    5
 lib/Kconfig.debug             |   71
 lib/Makefile                  |   29
 lib/asn1_decoder.c            |  487
 lib/bcd.c                     |    8
 lib/bitmap.c                  |    2
 lib/build_OID_registry        |  209
 lib/cpumask.c                 |    2
 lib/crc32.c                   |    9
 lib/decompress.c              |    9
 lib/dma-debug.c               |    9
 lib/dynamic_debug.c           |   56
 lib/flex_proportions.c        |    2
 lib/gcd.c                     |    3
 lib/gen_crc32table.c          |    6
 lib/genalloc.c                |   90
 lib/idr.c                     |   32
 lib/interval_tree.c           |   10
 lib/interval_tree_test_main.c |  105
 lib/kasprintf.c               |    2
 lib/kobject_uevent.c          |    5
 lib/mpi/Makefile              |    1
 lib/mpi/longlong.h            |  157
 lib/mpi/mpi-bit.c             |    2
 lib/mpi/mpi-cmp.c             |   70
 lib/mpi/mpi-pow.c             |    4
 lib/mpi/mpicoder.c            |   55
 lib/nlattr.c                  |    4
 lib/oid_registry.c            |  170
 lib/parser.c                  |   10
 lib/plist.c                   |    4
 lib/prio_tree.c               |  466
 lib/rbtree.c                  |  656
 lib/rbtree_test.c             |  234
 lib/scatterlist.c             |   35
 lib/spinlock_debug.c          |   32
 lib/swiotlb.c                 |   33
 lib/vsprintf.c                |  139
 39 files changed, 2124 insertions(+), 1101 deletions(-)
diff --git a/lib/.gitignore b/lib/.gitignore
index 3bef1ea94c99..09aae85418ab 100644
--- a/lib/.gitignore
+++ b/lib/.gitignore
@@ -3,4 +3,4 @@
 #
 gen_crc32table
 crc32table.h
-
+oid_registry_data.c
diff --git a/lib/Kconfig b/lib/Kconfig
index bb94c1ba616a..4b31a46fb307 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -396,4 +396,9 @@ config SIGNATURE
 config LIBFDT
 	bool
 
+config OID_REGISTRY
+	tristate
+	help
+	  Enable fast lookup object identifier registry.
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2403a63b5da5..e458782f3c52 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -196,12 +196,13 @@ config LOCKUP_DETECTOR
 	  thresholds can be controlled through the sysctl watchdog_thresh.
 
 config HARDLOCKUP_DETECTOR
-	def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \
-		 !HAVE_NMI_WATCHDOG
+	def_bool y
+	depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
+	depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 
 config BOOTPARAM_HARDLOCKUP_PANIC
 	bool "Panic (Reboot) On Hard Lockups"
-	depends on LOCKUP_DETECTOR
+	depends on HARDLOCKUP_DETECTOR
 	help
 	  Say Y here to enable the kernel to panic on "hard lockups",
 	  which are bugs that cause the kernel to loop in kernel
@@ -212,7 +213,7 @@ config BOOTPARAM_HARDLOCKUP_PANIC
 
 config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
 	int
-	depends on LOCKUP_DETECTOR
+	depends on HARDLOCKUP_DETECTOR
 	range 0 1
 	default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
 	default 1 if BOOTPARAM_HARDLOCKUP_PANIC
@@ -449,11 +450,12 @@ config SLUB_STATS
 	  out which slabs are relevant to a particular load.
 	  Try running: slabinfo -DA
 
+config HAVE_DEBUG_KMEMLEAK
+	bool
+
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
-	depends on DEBUG_KERNEL && EXPERIMENTAL && \
-		(X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE)
-
+	depends on DEBUG_KERNEL && EXPERIMENTAL && HAVE_DEBUG_KMEMLEAK
 	select DEBUG_FS
 	select STACKTRACE if STACKTRACE_SUPPORT
 	select KALLSYMS
@@ -629,6 +631,20 @@ config PROVE_RCU_REPEATEDLY
 
 	  Say N if you are unsure.
 
+config PROVE_RCU_DELAY
+	bool "RCU debugging: preemptible RCU race provocation"
+	depends on DEBUG_KERNEL && PREEMPT_RCU
+	default n
+	help
+	  There is a class of races that involve an unlikely preemption
+	  of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
+	  been set to INT_MIN.  This feature inserts a delay at that
+	  point to increase the probability of these races.
+
+	  Say Y to increase probability of preemption of __rcu_read_unlock().
+
+	  Say N if you are unsure.
+
 config SPARSE_RCU_POINTER
 	bool "RCU debugging: sparse-based checks for pointer usage"
 	default n
@@ -735,11 +751,12 @@ config DEBUG_HIGHMEM
 	  This options enables addition error checking for high memory systems.
 	  Disable for production systems.
 
+config HAVE_DEBUG_BUGVERBOSE
+	bool
+
 config DEBUG_BUGVERBOSE
 	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
-	depends on BUG
-	depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
-		   FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || TILE
+	depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
 	default y
 	help
 	  Say Y here to make BUG() panics output the file name and line number
@@ -781,6 +798,15 @@ config DEBUG_VM
 
 	  If unsure, say N.
 
+config DEBUG_VM_RB
+	bool "Debug VM red-black trees"
+	depends on DEBUG_VM
+	help
+	  Enable this to turn on more extended checks in the virtual-memory
+	  system that may impact performance.
+
+	  If unsure, say N.
+
 config DEBUG_VIRTUAL
 	bool "Debug VM translations"
 	depends on DEBUG_KERNEL && X86
@@ -946,7 +972,7 @@ config RCU_CPU_STALL_TIMEOUT
 	int "RCU CPU stall timeout in seconds"
 	depends on TREE_RCU || TREE_PREEMPT_RCU
 	range 3 300
-	default 60
+	default 21
 	help
 	  If a given RCU grace period extends more than the specified
 	  number of seconds, a CPU stall warning is printed.  If the
@@ -1089,7 +1115,7 @@ config NOTIFIER_ERROR_INJECTION
 	depends on DEBUG_KERNEL
 	select DEBUG_FS
 	help
-	  This option provides the ability to inject artifical errors to
+	  This option provides the ability to inject artificial errors to
 	  specified notifier chain callbacks. It is useful to test the error
 	  handling of notifier call chain failures.
 
@@ -1100,7 +1126,7 @@ config CPU_NOTIFIER_ERROR_INJECT
 	depends on HOTPLUG_CPU && NOTIFIER_ERROR_INJECTION
 	help
 	  This option provides a kernel module that can be used to test
-	  the error handling of the cpu notifiers by injecting artifical
+	  the error handling of the cpu notifiers by injecting artificial
 	  errors to CPU notifier chain callbacks. It is controlled through
 	  debugfs interface under /sys/kernel/debug/notifier-error-inject/cpu
 
@@ -1124,7 +1150,7 @@ config PM_NOTIFIER_ERROR_INJECT
 	depends on PM && NOTIFIER_ERROR_INJECTION
 	default m if PM_DEBUG
 	help
-	  This option provides the ability to inject artifical errors to
+	  This option provides the ability to inject artificial errors to
 	  PM notifier chain callbacks.  It is controlled through debugfs
 	  interface /sys/kernel/debug/notifier-error-inject/pm
 
@@ -1147,7 +1173,7 @@ config MEMORY_NOTIFIER_ERROR_INJECT
 	tristate "Memory hotplug notifier error injection module"
 	depends on MEMORY_HOTPLUG_SPARSE && NOTIFIER_ERROR_INJECTION
 	help
-	  This option provides the ability to inject artifical errors to
+	  This option provides the ability to inject artificial errors to
 	  memory hotplug notifier chain callbacks.  It is controlled through
 	  debugfs interface under /sys/kernel/debug/notifier-error-inject/memory
 
@@ -1170,7 +1196,7 @@ config PSERIES_RECONFIG_NOTIFIER_ERROR_INJECT
 	tristate "pSeries reconfig notifier error injection module"
 	depends on PPC_PSERIES && NOTIFIER_ERROR_INJECTION
 	help
-	  This option provides the ability to inject artifical errors to
+	  This option provides the ability to inject artificial errors to
 	  pSeries reconfig notifier chain callbacks.  It is controlled
 	  through debugfs interface under
 	  /sys/kernel/debug/notifier-error-inject/pSeries-reconfig/
@@ -1265,6 +1291,19 @@ config LATENCYTOP
 source mm/Kconfig.debug
 source kernel/trace/Kconfig
 
+config RBTREE_TEST
+	tristate "Red-Black tree test"
+	depends on m && DEBUG_KERNEL
+	help
+	  A benchmark measuring the performance of the rbtree library.
+	  Also includes rbtree invariant checks.
+
+config INTERVAL_TREE_TEST
+	tristate "Interval tree test"
+	depends on m && DEBUG_KERNEL
+	help
+	  A benchmark measuring the performance of the interval tree library.
+
 config PROVIDE_OHCI1394_DMA_INIT
 	bool "Remote debugging over FireWire early on boot"
 	depends on PCI && X86
diff --git a/lib/Makefile b/lib/Makefile
index 0924041b6959..e2152fa7ff4d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -9,10 +9,11 @@ endif
 
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
-	 idr.o int_sqrt.o extable.o prio_tree.o \
+	 idr.o int_sqrt.o extable.o \
 	 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o earlycpio.o
+	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
+	 earlycpio.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -31,7 +32,6 @@ CFLAGS_kobject.o += -DDEBUG
 CFLAGS_kobject_uevent.o += -DDEBUG
 endif
 
-lib-$(CONFIG_HOTPLUG) += kobject_uevent.o
 obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
 obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
 obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
@@ -140,6 +140,13 @@ $(foreach file, $(libfdt_files), \
 	$(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
 lib-$(CONFIG_LIBFDT) += $(libfdt_files)
 
+obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
+obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
+
+interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
+
+obj-$(CONFIG_ASN1) += asn1_decoder.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
@@ -150,3 +157,19 @@ quiet_cmd_crc32 = GEN     $@
 
 $(obj)/crc32table.h: $(obj)/gen_crc32table
 	$(call cmd,crc32)
+
+#
+# Build a fast OID lookup registry from include/linux/oid_registry.h
+#
+obj-$(CONFIG_OID_REGISTRY) += oid_registry.o
+
+$(obj)/oid_registry.o: $(obj)/oid_registry_data.c
+
+$(obj)/oid_registry_data.c: $(srctree)/include/linux/oid_registry.h \
+			    $(src)/build_OID_registry
+	$(call cmd,build_OID_registry)
+
+quiet_cmd_build_OID_registry = GEN     $@
+      cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@
+
+clean-files += oid_registry_data.c
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
new file mode 100644
index 000000000000..5293d2433029
--- /dev/null
+++ b/lib/asn1_decoder.c
@@ -0,0 +1,487 @@
+/* Decoder for ASN.1 BER/DER/CER encoded bytestream
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/asn1_decoder.h>
+#include <linux/asn1_ber_bytecode.h>
+
+static const unsigned char asn1_op_lengths[ASN1_OP__NR] = {
+	/*					OPC TAG JMP ACT */
+	[ASN1_OP_MATCH]				= 1 + 1,
+	[ASN1_OP_MATCH_OR_SKIP]			= 1 + 1,
+	[ASN1_OP_MATCH_ACT]			= 1 + 1 + 1,
+	[ASN1_OP_MATCH_ACT_OR_SKIP]		= 1 + 1 + 1,
+	[ASN1_OP_MATCH_JUMP]			= 1 + 1 + 1,
+	[ASN1_OP_MATCH_JUMP_OR_SKIP]		= 1 + 1 + 1,
+	[ASN1_OP_MATCH_ANY]			= 1,
+	[ASN1_OP_MATCH_ANY_ACT]			= 1 + 1,
+	[ASN1_OP_COND_MATCH_OR_SKIP]		= 1 + 1,
+	[ASN1_OP_COND_MATCH_ACT_OR_SKIP]	= 1 + 1 + 1,
+	[ASN1_OP_COND_MATCH_JUMP_OR_SKIP]	= 1 + 1 + 1,
+	[ASN1_OP_COND_MATCH_ANY]		= 1,
+	[ASN1_OP_COND_MATCH_ANY_ACT]		= 1 + 1,
+	[ASN1_OP_COND_FAIL]			= 1,
+	[ASN1_OP_COMPLETE]			= 1,
+	[ASN1_OP_ACT]				= 1 + 1,
+	[ASN1_OP_RETURN]			= 1,
+	[ASN1_OP_END_SEQ]			= 1,
+	[ASN1_OP_END_SEQ_OF]			= 1 + 1,
+	[ASN1_OP_END_SET]			= 1,
+	[ASN1_OP_END_SET_OF]			= 1 + 1,
+	[ASN1_OP_END_SEQ_ACT]			= 1 + 1,
+	[ASN1_OP_END_SEQ_OF_ACT]		= 1 + 1 + 1,
+	[ASN1_OP_END_SET_ACT]			= 1 + 1,
+	[ASN1_OP_END_SET_OF_ACT]		= 1 + 1 + 1,
+};
+
+/*
+ * Find the length of an indefinite length object
+ * @data: The data buffer
+ * @datalen: The end of the innermost containing element in the buffer
+ * @_dp: The data parse cursor (updated before returning)
+ * @_len: Where to return the size of the element.
+ * @_errmsg: Where to return a pointer to an error message on error
+ */
+static int asn1_find_indefinite_length(const unsigned char *data, size_t datalen,
+				       size_t *_dp, size_t *_len,
+				       const char **_errmsg)
+{
+	unsigned char tag, tmp;
+	size_t dp = *_dp, len, n;
+	int indef_level = 1;
+
+next_tag:
+	if (unlikely(datalen - dp < 2)) {
+		if (datalen == dp)
+			goto missing_eoc;
+		goto data_overrun_error;
+	}
+
+	/* Extract a tag from the data */
+	tag = data[dp++];
+	if (tag == 0) {
+		/* It appears to be an EOC. */
+		if (data[dp++] != 0)
+			goto invalid_eoc;
+		if (--indef_level <= 0) {
+			*_len = dp - *_dp;
+			*_dp = dp;
+			return 0;
+		}
+		goto next_tag;
+	}
+
+	if (unlikely((tag & 0x1f) == 0x1f)) {
+		do {
+			if (unlikely(datalen - dp < 2))
+				goto data_overrun_error;
+			tmp = data[dp++];
+		} while (tmp & 0x80);
+	}
+
+	/* Extract the length */
+	len = data[dp++];
+	if (len <= 0x7f) {
+		dp += len;
+		goto next_tag;
+	}
+
+	if (unlikely(len == 0x80)) {
+		/* Indefinite length */
+		if (unlikely((tag & ASN1_CONS_BIT) == ASN1_PRIM << 5))
+			goto indefinite_len_primitive;
+		indef_level++;
+		goto next_tag;
+	}
+
+	n = len - 0x80;
+	if (unlikely(n > sizeof(size_t) - 1))
+		goto length_too_long;
+	if (unlikely(n > datalen - dp))
+		goto data_overrun_error;
+	for (len = 0; n > 0; n--) {
+		len <<= 8;
+		len |= data[dp++];
+	}
+	dp += len;
+	goto next_tag;
+
+length_too_long:
+	*_errmsg = "Unsupported length";
+	goto error;
+indefinite_len_primitive:
+	*_errmsg = "Indefinite len primitive not permitted";
+	goto error;
+invalid_eoc:
+	*_errmsg = "Invalid length EOC";
+	goto error;
+data_overrun_error:
+	*_errmsg = "Data overrun error";
+	goto error;
+missing_eoc:
+	*_errmsg = "Missing EOC in indefinite len cons";
+error:
+	*_dp = dp;
+	return -1;
+}
+
+/**
+ * asn1_ber_decoder - Decode BER/DER/CER ASN.1 according to pattern
+ * @decoder: The decoder definition (produced by asn1_compiler)
+ * @context: The caller's context (to be passed to the action functions)
+ * @data: The encoded data
+ * @datalen: The size of the encoded data
+ *
+ * Decode BER/DER/CER encoded ASN.1 data according to a bytecode pattern
+ * produced by asn1_compiler.  Action functions are called on marked tags to
+ * allow the caller to retrieve significant data.
+ *
+ * LIMITATIONS:
+ *
+ * To keep down the amount of stack used by this function, the following limits
+ * have been imposed:
+ *
+ * (1) This won't handle datalen > 65535 without increasing the size of the
+ *     cons stack elements and length_too_long checking.
+ *
+ * (2) The stack of constructed types is 10 deep.  If the depth of non-leaf
+ *     constructed types exceeds this, the decode will fail.
+ *
+ * (3) The SET type (not the SET OF type) isn't really supported as tracking
+ *     what members of the set have been seen is a pain.
+ */
+int asn1_ber_decoder(const struct asn1_decoder *decoder,
+		     void *context,
+		     const unsigned char *data,
+		     size_t datalen)
+{
+	const unsigned char *machine = decoder->machine;
+	const asn1_action_t *actions = decoder->actions;
+	size_t machlen = decoder->machlen;
+	enum asn1_opcode op;
+	unsigned char tag = 0, csp = 0, jsp = 0, optag = 0, hdr = 0;
+	const char *errmsg;
+	size_t pc = 0, dp = 0, tdp = 0, len = 0;
+	int ret;
+
+	unsigned char flags = 0;
+#define FLAG_INDEFINITE_LENGTH	0x01
+#define FLAG_MATCHED		0x02
+#define FLAG_CONS		0x20 /* Corresponds to CONS bit in the opcode tag
+				      * - ie. whether or not we are going to parse
+				      *   a compound type.
+				      */
+
+#define NR_CONS_STACK 10
+	unsigned short cons_dp_stack[NR_CONS_STACK];
+	unsigned short cons_datalen_stack[NR_CONS_STACK];
+	unsigned char cons_hdrlen_stack[NR_CONS_STACK];
+#define NR_JUMP_STACK 10
+	unsigned char jump_stack[NR_JUMP_STACK];
+
+	if (datalen > 65535)
+		return -EMSGSIZE;
+
+next_op:
+	pr_debug("next_op: pc=\e[32m%zu\e[m/%zu dp=\e[33m%zu\e[m/%zu C=%d J=%d\n",
+		 pc, machlen, dp, datalen, csp, jsp);
+	if (unlikely(pc >= machlen))
+		goto machine_overrun_error;
+	op = machine[pc];
+	if (unlikely(pc + asn1_op_lengths[op] > machlen))
+		goto machine_overrun_error;
+
+	/* If this command is meant to match a tag, then do that before
+	 * evaluating the command.
+	 */
+	if (op <= ASN1_OP__MATCHES_TAG) {
+		unsigned char tmp;
+
+		/* Skip conditional matches if possible */
+		if ((op & ASN1_OP_MATCH__COND &&
+		     flags & FLAG_MATCHED) ||
+		    dp == datalen) {
+			pc += asn1_op_lengths[op];
+			goto next_op;
+		}
+
+		flags = 0;
+		hdr = 2;
+
+		/* Extract a tag from the data */
+		if (unlikely(dp >= datalen - 1))
+			goto data_overrun_error;
+		tag = data[dp++];
+		if (unlikely((tag & 0x1f) == 0x1f))
+			goto long_tag_not_supported;
+
+		if (op & ASN1_OP_MATCH__ANY) {
+			pr_debug("- any %02x\n", tag);
+		} else {
+			/* Extract the tag from the machine
+			 * - Either CONS or PRIM are permitted in the data if
+			 *   CONS is not set in the op stream, otherwise CONS
+			 *   is mandatory.
+			 */
+			optag = machine[pc + 1];
+			flags |= optag & FLAG_CONS;
+
+			/* Determine whether the tag matched */
+			tmp = optag ^ tag;
+			tmp &= ~(optag & ASN1_CONS_BIT);
+			pr_debug("- match? %02x %02x %02x\n", tag, optag, tmp);
+			if (tmp != 0) {
+				/* All odd-numbered tags are MATCH_OR_SKIP. */
+				if (op & ASN1_OP_MATCH__SKIP) {
+					pc += asn1_op_lengths[op];
+					dp--;
+					goto next_op;
+				}
+				goto tag_mismatch;
+			}
+		}
+		flags |= FLAG_MATCHED;
+
+		len = data[dp++];
+		if (len > 0x7f) {
+			if (unlikely(len == 0x80)) {
+				/* Indefinite length */
+				if (unlikely(!(tag & ASN1_CONS_BIT)))
+					goto indefinite_len_primitive;
+				flags |= FLAG_INDEFINITE_LENGTH;
+				if (unlikely(2 > datalen - dp))
+					goto data_overrun_error;
+			} else {
+				int n = len - 0x80;
+				if (unlikely(n > 2))
+					goto length_too_long;
+				if (unlikely(dp >= datalen - n))
+					goto data_overrun_error;
+				hdr += n;
+				for (len = 0; n > 0; n--) {
+					len <<= 8;
+					len |= data[dp++];
+				}
+				if (unlikely(len > datalen - dp))
+					goto data_overrun_error;
+			}
+		}
+
+		if (flags & FLAG_CONS) {
+			/* For expected compound forms, we stack the positions
+			 * of the start and end of the data.
+			 */
+			if (unlikely(csp >= NR_CONS_STACK))
+				goto cons_stack_overflow;
+			cons_dp_stack[csp] = dp;
+			cons_hdrlen_stack[csp] = hdr;
+			if (!(flags & FLAG_INDEFINITE_LENGTH)) {
+				cons_datalen_stack[csp] = datalen;
+				datalen = dp + len;
+			} else {
+				cons_datalen_stack[csp] = 0;
+			}
+			csp++;
+		}
+
+		pr_debug("- TAG: %02x %zu%s\n",
+			 tag, len, flags & FLAG_CONS ? " CONS" : "");
+		tdp = dp;
+	}
+
+	/* Decide how to handle the operation */
+	switch (op) {
+	case ASN1_OP_MATCH_ANY_ACT:
+	case ASN1_OP_COND_MATCH_ANY_ACT:
+		ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
+		if (ret < 0)
+			return ret;
+		goto skip_data;
+
+	case ASN1_OP_MATCH_ACT:
+	case ASN1_OP_MATCH_ACT_OR_SKIP:
+	case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
+		ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len);
+		if (ret < 0)
+			return ret;
+		goto skip_data;
+
+	case ASN1_OP_MATCH:
+	case ASN1_OP_MATCH_OR_SKIP:
+	case ASN1_OP_MATCH_ANY:
+	case ASN1_OP_COND_MATCH_OR_SKIP:
+	case ASN1_OP_COND_MATCH_ANY:
+	skip_data:
+		if (!(flags & FLAG_CONS)) {
+			if (flags & FLAG_INDEFINITE_LENGTH) {
+				ret = asn1_find_indefinite_length(
+					data, datalen, &dp, &len, &errmsg);
+				if (ret < 0)
+					goto error;
+			} else {
+				dp += len;
+			}
+			pr_debug("- LEAF: %zu\n", len);
+		}
+		pc += asn1_op_lengths[op];
+		goto next_op;
+
+	case ASN1_OP_MATCH_JUMP:
+	case ASN1_OP_MATCH_JUMP_OR_SKIP:
+	case ASN1_OP_COND_MATCH_JUMP_OR_SKIP:
+		pr_debug("- MATCH_JUMP\n");
+		if (unlikely(jsp == NR_JUMP_STACK))
+			goto jump_stack_overflow;
+		jump_stack[jsp++] = pc + asn1_op_lengths[op];
+		pc = machine[pc + 2];
+		goto next_op;
+
+	case ASN1_OP_COND_FAIL:
+		if (unlikely(!(flags & FLAG_MATCHED)))
+			goto tag_mismatch;
+		pc += asn1_op_lengths[op];
+		goto next_op;
+
+	case ASN1_OP_COMPLETE:
+		if (unlikely(jsp != 0 || csp != 0)) {
+			pr_err("ASN.1 decoder error: Stacks not empty at completion (%u, %u)\n",
+			       jsp, csp);
+			return -EBADMSG;
+		}
+		return 0;
+
+	case ASN1_OP_END_SET:
+	case ASN1_OP_END_SET_ACT:
+		if (unlikely(!(flags & FLAG_MATCHED)))
+			goto tag_mismatch;
+	case ASN1_OP_END_SEQ:
+	case ASN1_OP_END_SET_OF:
+	case ASN1_OP_END_SEQ_OF:
+	case ASN1_OP_END_SEQ_ACT:
+	case ASN1_OP_END_SET_OF_ACT:
+	case ASN1_OP_END_SEQ_OF_ACT:
+		if (unlikely(csp <= 0))
+			goto cons_stack_underflow;
+		csp--;
+		tdp = cons_dp_stack[csp];
+		hdr = cons_hdrlen_stack[csp];
+		len = datalen;
+		datalen = cons_datalen_stack[csp];
+		pr_debug("- end cons t=%zu dp=%zu l=%zu/%zu\n",
+			 tdp, dp, len, datalen);
+		if (datalen == 0) {
+			/* Indefinite length - check for the EOC. */
+			datalen = len;
+			if (unlikely(datalen - dp < 2))
+				goto data_overrun_error;
+			if (data[dp++] != 0) {
+				if (op & ASN1_OP_END__OF) {
+					dp--;
+					csp++;
+					pc = machine[pc + 1];
+					pr_debug("- continue\n");
+					goto next_op;
+				}
+				goto missing_eoc;
+			}
+			if (data[dp++] != 0)
+				goto invalid_eoc;
+			len = dp - tdp - 2;
+		} else {
+			if (dp < len && (op & ASN1_OP_END__OF)) {
+				datalen = len;
+				csp++;
+				pc = machine[pc + 1];
+				pr_debug("- continue\n");
+				goto next_op;
+			}
+			if (dp != len)
+				goto cons_length_error;
+			len -= tdp;
+			pr_debug("- cons len l=%zu d=%zu\n", len, dp - tdp);
+		}
+
+		if (op & ASN1_OP_END__ACT) {
+			unsigned char act;
+			if (op & ASN1_OP_END__OF)
+				act = machine[pc + 2];
+			else
+				act = machine[pc + 1];
+			ret = actions[act](context, hdr, 0, data + tdp, len);
+		}
+		pc += asn1_op_lengths[op];
+		goto next_op;
+
+	case ASN1_OP_ACT:
+		ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len);
+		pc += asn1_op_lengths[op];
+		goto next_op;
+
+	case ASN1_OP_RETURN:
+		if (unlikely(jsp <= 0))
+			goto jump_stack_underflow;
+		pc = jump_stack[--jsp];
+		goto next_op;
+
+	default:
+		break;
+	}
+
+	/* Shouldn't reach here */
+	pr_err("ASN.1 decoder error: Found reserved opcode (%u)\n", op);
+	return -EBADMSG;
+
+data_overrun_error:
+	errmsg = "Data overrun error";
+	goto error;
+machine_overrun_error:
+	errmsg = "Machine overrun error";
+	goto error;
+jump_stack_underflow:
+	errmsg = "Jump stack underflow";
+	goto error;
+jump_stack_overflow:
+	errmsg = "Jump stack overflow";
+	goto error;
+cons_stack_underflow:
+	errmsg = "Cons stack underflow";
+	goto error;
+cons_stack_overflow:
+	errmsg = "Cons stack overflow";
+	goto error;
+cons_length_error:
+	errmsg = "Cons length error";
+	goto error;
+missing_eoc:
+	errmsg = "Missing EOC in indefinite len cons";
+	goto error;
+invalid_eoc:
+	errmsg = "Invalid length EOC";
+	goto error;
+length_too_long:
+	errmsg = "Unsupported length";
+	goto error;
+indefinite_len_primitive:
+	errmsg = "Indefinite len primitive not permitted";
+	goto error;
+tag_mismatch:
+	errmsg = "Unexpected tag";
+	goto error;
+long_tag_not_supported:
+	errmsg = "Long tag not supported";
+error:
+	pr_debug("\nASN1: %s [m=%zu d=%zu ot=%02x t=%02x l=%zu]\n",
+		 errmsg, pc, dp, optag, tag, len);
+	return -EBADMSG;
+}
+EXPORT_SYMBOL_GPL(asn1_ber_decoder);
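
As a rough illustration of the BER framing rules the decoder above enforces, here is a hedged user-space sketch of length-octet parsing only; the helper name and main() harness are illustrative, not kernel API:

	#include <stddef.h>
	#include <stdio.h>

	/* Sketch of BER length parsing as used above: lengths <= 0x7f are
	 * one octet (short form), 0x80 marks indefinite length (constructed
	 * types only, terminated by a 00 00 EOC), and 0x81..0x84 prefix
	 * 1..4 big-endian length octets (long form).  Hypothetical helper.
	 */
	static int ber_read_length(const unsigned char *p, size_t avail,
				   size_t *dp, size_t *len, int *indefinite)
	{
		unsigned char first;
		size_t n;

		if (*dp >= avail)
			return -1;
		first = p[(*dp)++];
		*indefinite = 0;
		if (first <= 0x7f) {		/* short form */
			*len = first;
			return 0;
		}
		if (first == 0x80) {		/* indefinite length */
			*indefinite = 1;
			return 0;
		}
		n = first - 0x80;		/* long form: n length octets */
		if (n > sizeof(size_t) - 1 || n > avail - *dp)
			return -1;
		for (*len = 0; n > 0; n--)
			*len = (*len << 8) | p[(*dp)++];
		return 0;
	}

	int main(void)
	{
		const unsigned char buf[] = { 0x82, 0x01, 0x00 };  /* 256 */
		size_t dp = 0, len;
		int indef;

		if (ber_read_length(buf, sizeof(buf), &dp, &len, &indef) == 0)
			printf("len=%zu indefinite=%d\n", len, indef);
		return 0;
	}
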
diff --git a/lib/bcd.c b/lib/bcd.c
index 55efaf742346..40d304efe272 100644
--- a/lib/bcd.c
+++ b/lib/bcd.c
@@ -1,14 +1,14 @@
 #include <linux/bcd.h>
 #include <linux/export.h>
 
-unsigned bcd2bin(unsigned char val)
+unsigned _bcd2bin(unsigned char val)
 {
 	return (val & 0x0f) + (val >> 4) * 10;
 }
-EXPORT_SYMBOL(bcd2bin);
+EXPORT_SYMBOL(_bcd2bin);
 
-unsigned char bin2bcd(unsigned val)
+unsigned char _bin2bcd(unsigned val)
 {
 	return ((val / 10) << 4) + val % 10;
 }
-EXPORT_SYMBOL(bin2bcd);
+EXPORT_SYMBOL(_bin2bcd);
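
The arithmetic being renamed above is simple packed-BCD conversion; a minimal user-space sketch, for illustration only:

	#include <stdio.h>

	/* Each BCD byte packs two decimal digits into two nibbles, so 0x42
	 * carries digits 4 and 2 and decodes to decimal 42.
	 */
	static unsigned bcd2bin(unsigned char val)
	{
		return (val & 0x0f) + (val >> 4) * 10;
	}

	static unsigned char bin2bcd(unsigned val)
	{
		return ((val / 10) << 4) + val % 10;
	}

	int main(void)
	{
		printf("%u 0x%02x\n", bcd2bin(0x42), bin2bcd(42)); /* 42 0x42 */
		return 0;
	}
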
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 06fdfa1aeba7..06f7e4fe8d2d 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -353,7 +353,7 @@ again:
 EXPORT_SYMBOL(bitmap_find_next_zero_area);
 
 /*
- * Bitmap printing & parsing functions: first version by Bill Irwin,
+ * Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
  * second version by Paul Jackson, third by Joe Korty.
  */
 
diff --git a/lib/build_OID_registry b/lib/build_OID_registry
new file mode 100755
index 000000000000..dfbdaab81bc8
--- /dev/null
+++ b/lib/build_OID_registry
@@ -0,0 +1,209 @@
+#!/usr/bin/perl -w
+#
+# Build a static ASN.1 Object Identifier (OID) registry
+#
+# Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+# Written by David Howells (dhowells@redhat.com)
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public Licence
+# as published by the Free Software Foundation; either version
+# 2 of the Licence, or (at your option) any later version.
+#
+
+use strict;
+
+my @names = ();
+my @oids = ();
+
+if ($#ARGV != 1) {
+    print STDERR "Format: ", $0, " <in-h-file> <out-c-file>\n";
+    exit(2);
+}
+
+#
+# Open the file to read from
+#
+open IN_FILE, "<$ARGV[0]" or die;
+while (<IN_FILE>) {
+    chomp;
+    if (m!\s+OID_([a-zA-Z][a-zA-Z0-9_]+),\s+/[*]\s+([012][.0-9]*)\s+[*]/!) {
+	push @names, $1;
+	push @oids, $2;
+    }
+}
+close IN_FILE or die;
+
+#
+# Open the files to write into
+#
+open C_FILE, ">$ARGV[1]" or die;
+print C_FILE "/*\n";
+print C_FILE " * Automatically generated by ", $0, ".  Do not edit\n";
+print C_FILE " */\n";
+
+#
+# Split the data up into separate lists and also determine the lengths of the
+# encoded data arrays.
+#
+my @indices = ();
+my @lengths = ();
+my $total_length = 0;
+
+print "Compiling ", $#names + 1, " OIDs\n";
+
+for (my $i = 0; $i <= $#names; $i++) {
+    my $name = $names[$i];
+    my $oid = $oids[$i];
+
+    my @components = split(/[.]/, $oid);
+
+    # Determine the encoded length of this OID
+    my $size = $#components;
+    for (my $loop = 2; $loop <= $#components; $loop++) {
+	my $c = $components[$loop];
+
+	# We will base128 encode the number
+	my $tmp = ($c == 0) ? 0 : int(log($c)/log(2));
+	$tmp = int($tmp / 7);
+	$size += $tmp;
+    }
+    push @lengths, $size;
+    push @indices, $total_length;
+    $total_length += $size;
+}
+
+#
+# Emit the look-up-by-OID index table
+#
+print C_FILE "\n";
+if ($total_length <= 255) {
+    print C_FILE "static const unsigned char oid_index[OID__NR + 1] = {\n";
+} else {
+    print C_FILE "static const unsigned short oid_index[OID__NR + 1] = {\n";
+}
+for (my $i = 0; $i <= $#names; $i++) {
+    print C_FILE "\t[OID_", $names[$i], "] = ", $indices[$i], ",\n"
+}
+print C_FILE "\t[OID__NR] = ", $total_length, "\n";
+print C_FILE "};\n";
+
+#
+# Encode the OIDs
+#
+my @encoded_oids = ();
+
+for (my $i = 0; $i <= $#names; $i++) {
+    my @octets = ();
+
+    my @components = split(/[.]/, $oids[$i]);
+
+    push @octets, $components[0] * 40 + $components[1];
+
+    for (my $loop = 2; $loop <= $#components; $loop++) {
+	my $c = $components[$loop];
+
+	# Base128 encode the number
+	my $tmp = ($c == 0) ? 0 : int(log($c)/log(2));
+	$tmp = int($tmp / 7);
+
+	for (; $tmp > 0; $tmp--) {
+	    push @octets, (($c >> $tmp * 7) & 0x7f) | 0x80;
+	}
+	push @octets, $c & 0x7f;
+    }
+
+    push @encoded_oids, \@octets;
+}
+
+#
+# Create a hash value for each OID
+#
+my @hash_values = ();
+for (my $i = 0; $i <= $#names; $i++) {
+    my @octets = @{$encoded_oids[$i]};
+
+    my $hash = $#octets;
+    foreach (@octets) {
+	$hash += $_ * 33;
+    }
+
+    $hash = ($hash >> 24) ^ ($hash >> 16) ^ ($hash >> 8) ^ ($hash);
+
+    push @hash_values, $hash & 0xff;
+}
+
+#
+# Emit the OID data
+#
+print C_FILE "\n";
+print C_FILE "static const unsigned char oid_data[", $total_length, "] = {\n";
+for (my $i = 0; $i <= $#names; $i++) {
+    my @octets = @{$encoded_oids[$i]};
+    print C_FILE "\t";
+    print C_FILE $_, ", " foreach (@octets);
+    print C_FILE "\t// ", $names[$i];
+    print C_FILE "\n";
+}
+print C_FILE "};\n";
+
+#
+# Build the search index table (ordered by length then hash then content)
+#
+my @index_table = ( 0 .. $#names );
+
+@index_table = sort {
+    my @octets_a = @{$encoded_oids[$a]};
+    my @octets_b = @{$encoded_oids[$b]};
+
+    return $hash_values[$a] <=> $hash_values[$b]
+	if ($hash_values[$a] != $hash_values[$b]);
+    return $#octets_a <=> $#octets_b
+	if ($#octets_a != $#octets_b);
+    for (my $i = $#octets_a; $i >= 0; $i--) {
+	return $octets_a[$i] <=> $octets_b[$i]
+	    if ($octets_a[$i] != $octets_b[$i]);
+    }
+    return 0;
+
+} @index_table;
+
+#
+# Emit the search index and hash value table
+#
+print C_FILE "\n";
+print C_FILE "static const struct {\n";
+print C_FILE "\tunsigned char hash;\n";
+if ($#names <= 255) {
+    print C_FILE "\tenum OID oid : 8;\n";
+} else {
+    print C_FILE "\tenum OID oid : 16;\n";
+}
+print C_FILE "} oid_search_table[OID__NR] = {\n";
+for (my $i = 0; $i <= $#names; $i++) {
+    my @octets = @{$encoded_oids[$index_table[$i]]};
+    printf(C_FILE "\t[%3u] = { %3u, OID_%-35s }, // ",
+	   $i,
+	   $hash_values[$index_table[$i]],
+	   $names[$index_table[$i]]);
+    printf C_FILE "%02x", $_ foreach (@octets);
+    print C_FILE "\n";
+}
+print C_FILE "};\n";
+
+#
+# Emit the OID debugging name table
+#
+#print C_FILE "\n";
+#print C_FILE "const char *const oid_name_table[OID__NR + 1] = {\n";
+#
+#for (my $i = 0; $i <= $#names; $i++) {
+#    print C_FILE "\t\"", $names[$i], "\",\n"
+#}
+#print C_FILE "\t\"Unknown-OID\"\n";
+#print C_FILE "};\n";
+
+#
+# Polish off
+#
+close C_FILE or die;
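
The script's encoding comments describe standard DER OID packing: the first two components fold into one octet (40*c0 + c1), and each remaining component is base-128 encoded with the continuation bit set on all but the last octet. A minimal user-space sketch of the same encoding, illustrative names only:

	#include <stdio.h>

	/* Base-128 encode one OID component, continuation bit (0x80) on all
	 * but the final octet; returns the number of octets written.
	 */
	static int oid_encode_component(unsigned long c, unsigned char *out)
	{
		int shift, n = 0;

		for (shift = 28; shift > 0; shift -= 7)
			if (c >> shift)
				out[n++] = ((c >> shift) & 0x7f) | 0x80;
		out[n++] = c & 0x7f;
		return n;
	}

	int main(void)
	{
		/* 1.2.840.113549 (rsadsi) */
		unsigned long oid[] = { 1, 2, 840, 113549 };
		unsigned char buf[32];
		size_t i;
		int n = 0;

		buf[n++] = oid[0] * 40 + oid[1];
		for (i = 2; i < sizeof(oid) / sizeof(oid[0]); i++)
			n += oid_encode_component(oid[i], buf + n);
		for (i = 0; (int)i < n; i++)
			printf("%02x ", buf[i]);	/* 2a 86 48 86 f7 0d */
		printf("\n");
		return 0;
	}
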
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 402a54ac35cb..d327b87c99b7 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -161,6 +161,6 @@ EXPORT_SYMBOL(free_cpumask_var);
  */
 void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 {
-	free_bootmem((unsigned long)mask, cpumask_size());
+	free_bootmem(__pa(mask), cpumask_size());
 }
 #endif
diff --git a/lib/crc32.c b/lib/crc32.c
index 61774b8db4de..072fbd8234d5 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -188,11 +188,13 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
 #else
 u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
 {
-	return crc32_le_generic(crc, p, len, crc32table_le, CRCPOLY_LE);
+	return crc32_le_generic(crc, p, len,
+			(const u32 (*)[256])crc32table_le, CRCPOLY_LE);
 }
 u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
 {
-	return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
+	return crc32_le_generic(crc, p, len,
+			(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
 }
 #endif
 EXPORT_SYMBOL(crc32_le);
@@ -253,7 +255,8 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
 #else
 u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
 {
-	return crc32_be_generic(crc, p, len, crc32table_be, CRCPOLY_BE);
+	return crc32_be_generic(crc, p, len,
+			(const u32 (*)[256])crc32table_be, CRCPOLY_BE);
 }
 #endif
 EXPORT_SYMBOL(crc32_be);
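
The casts above appear to exist because (with the companion gen_crc32table.c change dropping const from the generated tables) a plain u32 (*)[256] is not implicitly convertible to const u32 (*)[256] in C; the qualifier sits one level deep, so an explicit cast is needed. A hedged standalone sketch of the same situation, illustrative names only:

	#include <stdio.h>

	typedef unsigned int u32;

	/* Consumer wants read-only rows of 256 entries. */
	static u32 first_entry(const u32 (*tab)[256])
	{
		return tab[0][0];
	}

	static u32 table[4][256] = { { 0xdeadbeef } };

	int main(void)
	{
		/* table decays to u32 (*)[256]; the cast adds the const
		 * qualifier that C will not add implicitly at this depth.
		 */
		printf("0x%08x\n", first_entry((const u32 (*)[256])table));
		return 0;
	}
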
diff --git a/lib/decompress.c b/lib/decompress.c
index 3d766b7f60ab..31a804277282 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -14,6 +14,7 @@
 
 #include <linux/types.h>
 #include <linux/string.h>
+#include <linux/init.h>
 
 #ifndef CONFIG_DECOMPRESS_GZIP
 # define gunzip NULL
@@ -31,11 +32,13 @@
 # define unlzo NULL
 #endif
 
-static const struct compress_format {
+struct compress_format {
 	unsigned char magic[2];
 	const char *name;
 	decompress_fn decompressor;
-} compressed_formats[] = {
+};
+
+static const struct compress_format compressed_formats[] __initdata = {
 	{ {037, 0213}, "gzip", gunzip },
 	{ {037, 0236}, "gzip", gunzip },
 	{ {0x42, 0x5a}, "bzip2", bunzip2 },
@@ -45,7 +48,7 @@ static const struct compress_format {
 	{ {0, 0}, NULL, NULL }
 };
 
-decompress_fn decompress_method(const unsigned char *inbuf, int len,
+decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
 				const char **name)
 {
 	const struct compress_format *cf;
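
The table above drives format detection by magic bytes; a minimal user-space sketch of the same sniffing loop (table abbreviated, names illustrative):

	#include <stdio.h>
	#include <string.h>

	struct compress_format {
		unsigned char magic[2];
		const char *name;
	};

	static const struct compress_format formats[] = {
		{ {037, 0213}, "gzip"  },	/* 0x1f 0x8b */
		{ {0x42, 0x5a}, "bzip2" },
		{ {0, 0}, NULL }
	};

	/* Compare the first two input bytes against each known magic. */
	static const char *sniff(const unsigned char *inbuf, int len)
	{
		const struct compress_format *cf;

		if (len < 2)
			return NULL;
		for (cf = formats; cf->name; cf++)
			if (!memcmp(inbuf, cf->magic, 2))
				return cf->name;
		return NULL;
	}

	int main(void)
	{
		const unsigned char gz[] = { 0x1f, 0x8b, 0x08 };

		printf("%s\n", sniff(gz, sizeof(gz)));	/* gzip */
		return 0;
	}
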
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 66ce41489133..d84beb994f36 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -120,11 +120,6 @@ static const char *type2name[4] = { "single", "page",
 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
 				   "DMA_FROM_DEVICE", "DMA_NONE" };
 
-/* little merge helper - remove it after the merge window */
-#ifndef BUS_NOTIFY_UNBOUND_DRIVER
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
-#endif
-
 /*
  * The access to some variables in this macro is racy. We can't use atomic_t
  * here because all these variables are exported to debugfs. Some of them even
@@ -269,7 +264,7 @@ static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
 						  match_fn match)
 {
 	struct dma_debug_entry *entry, *ret = NULL;
-	int matches = 0, match_lvl, last_lvl = 0;
+	int matches = 0, match_lvl, last_lvl = -1;
 
 	list_for_each_entry(entry, &bucket->list, list) {
 		if (!match(ref, entry))
@@ -298,7 +293,7 @@ static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
 		} else if (match_lvl > last_lvl) {
 			/*
 			 * We found an entry that fits better then the
-			 * previous one
+			 * previous one or it is the 1st match.
 			 */
 			last_lvl = match_lvl;
 			ret = entry;
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 7ca29a0a3019..e7f7d993357a 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -521,25 +521,25 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
 	int pos_after_tid;
 	int pos = 0;
 
-	pos += snprintf(buf + pos, remaining(pos), "%s", KERN_DEBUG);
+	*buf = '\0';
+
 	if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
 		if (in_interrupt())
-			pos += snprintf(buf + pos, remaining(pos), "%s ",
-					"<intr>");
+			pos += snprintf(buf + pos, remaining(pos), "<intr> ");
 		else
 			pos += snprintf(buf + pos, remaining(pos), "[%d] ",
 					task_pid_vnr(current));
 	}
 	pos_after_tid = pos;
 	if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME)
 		pos += snprintf(buf + pos, remaining(pos), "%s:",
 				desc->modname);
 	if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
 		pos += snprintf(buf + pos, remaining(pos), "%s:",
 				desc->function);
 	if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO)
 		pos += snprintf(buf + pos, remaining(pos), "%d:",
 				desc->lineno);
 	if (pos - pos_after_tid)
 		pos += snprintf(buf + pos, remaining(pos), " ");
 	if (pos >= PREFIX_SIZE)
@@ -559,9 +559,13 @@ int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
 	BUG_ON(!fmt);
 
 	va_start(args, fmt);
+
 	vaf.fmt = fmt;
 	vaf.va = &args;
-	res = printk("%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
+
+	res = printk(KERN_DEBUG "%s%pV",
+		     dynamic_emit_prefix(descriptor, buf), &vaf);
+
 	va_end(args);
 
 	return res;
@@ -574,15 +578,26 @@ int __dynamic_dev_dbg(struct _ddebug *descriptor,
 	struct va_format vaf;
 	va_list args;
 	int res;
-	char buf[PREFIX_SIZE];
 
 	BUG_ON(!descriptor);
 	BUG_ON(!fmt);
 
 	va_start(args, fmt);
+
 	vaf.fmt = fmt;
 	vaf.va = &args;
-	res = __dev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+
+	if (!dev) {
+		res = printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
+	} else {
+		char buf[PREFIX_SIZE];
+
+		res = dev_printk_emit(7, dev, "%s%s %s: %pV",
+				      dynamic_emit_prefix(descriptor, buf),
+				      dev_driver_string(dev), dev_name(dev),
+				      &vaf);
+	}
+
 	va_end(args);
 
 	return res;
@@ -592,20 +607,35 @@ EXPORT_SYMBOL(__dynamic_dev_dbg);
 #ifdef CONFIG_NET
 
 int __dynamic_netdev_dbg(struct _ddebug *descriptor,
 			 const struct net_device *dev, const char *fmt, ...)
 {
 	struct va_format vaf;
 	va_list args;
 	int res;
-	char buf[PREFIX_SIZE];
 
 	BUG_ON(!descriptor);
 	BUG_ON(!fmt);
 
 	va_start(args, fmt);
+
 	vaf.fmt = fmt;
 	vaf.va = &args;
-	res = __netdev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+
+	if (dev && dev->dev.parent) {
+		char buf[PREFIX_SIZE];
+
+		res = dev_printk_emit(7, dev->dev.parent,
+				      "%s%s %s %s: %pV",
+				      dynamic_emit_prefix(descriptor, buf),
+				      dev_driver_string(dev->dev.parent),
+				      dev_name(dev->dev.parent),
+				      netdev_name(dev), &vaf);
+	} else if (dev) {
+		res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf);
+	} else {
+		res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
+	}
+
 	va_end(args);
 
 	return res;
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index c785554f9523..ebf3bac460b0 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -62,7 +62,7 @@ void fprop_global_destroy(struct fprop_global *p)
  */
 bool fprop_new_period(struct fprop_global *p, int periods)
 {
-	u64 events;
+	s64 events;
 	unsigned long flags;
 
 	local_irq_save(flags);
diff --git a/lib/gcd.c b/lib/gcd.c
index cce4f3cd14b3..3657f129d7b8 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)
 
 	if (a < b)
 		swap(a, b);
+
+	if (!b)
+		return a;
 	while ((r = a % b) != 0) {
 		a = b;
 		b = r;
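
The new guard matters because a zero second operand would otherwise reach a % b and divide by zero; with it, gcd(x, 0) returns x. A user-space copy of the guarded loop, for illustration:

	#include <stdio.h>

	static unsigned long gcd(unsigned long a, unsigned long b)
	{
		unsigned long r;

		if (a < b) {		/* open-coded swap() */
			r = a;
			a = b;
			b = r;
		}
		if (!b)			/* avoid a % 0 below */
			return a;
		while ((r = a % b) != 0) {
			a = b;
			b = r;
		}
		return b;
	}

	int main(void)
	{
		printf("%lu %lu\n", gcd(12, 18), gcd(7, 0));	/* 6 7 */
		return 0;
	}
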
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 8f8d5439e2d9..71fcfcd96410 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -109,7 +109,7 @@ int main(int argc, char** argv)
 
 	if (CRC_LE_BITS > 1) {
 		crc32init_le();
-		printf("static const u32 __cacheline_aligned "
+		printf("static u32 __cacheline_aligned "
 		       "crc32table_le[%d][%d] = {",
 		       LE_TABLE_ROWS, LE_TABLE_SIZE);
 		output_table(crc32table_le, LE_TABLE_ROWS,
@@ -119,7 +119,7 @@ int main(int argc, char** argv)
 
 	if (CRC_BE_BITS > 1) {
 		crc32init_be();
-		printf("static const u32 __cacheline_aligned "
+		printf("static u32 __cacheline_aligned "
 		       "crc32table_be[%d][%d] = {",
 		       BE_TABLE_ROWS, BE_TABLE_SIZE);
 		output_table(crc32table_be, LE_TABLE_ROWS,
@@ -128,7 +128,7 @@ int main(int argc, char** argv)
 	}
 	if (CRC_LE_BITS > 1) {
 		crc32cinit_le();
-		printf("static const u32 __cacheline_aligned "
+		printf("static u32 __cacheline_aligned "
 		       "crc32ctable_le[%d][%d] = {",
 		       LE_TABLE_ROWS, LE_TABLE_SIZE);
 		output_table(crc32ctable_le, LE_TABLE_ROWS,
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 6bc04aab6ec7..54920433705a 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -152,6 +152,8 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
 		spin_lock_init(&pool->lock);
 		INIT_LIST_HEAD(&pool->chunks);
 		pool->min_alloc_order = min_alloc_order;
+		pool->algo = gen_pool_first_fit;
+		pool->data = NULL;
 	}
 	return pool;
 }
@@ -176,7 +178,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
 	struct gen_pool_chunk *chunk;
 	int nbits = size >> pool->min_alloc_order;
 	int nbytes = sizeof(struct gen_pool_chunk) +
-				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
+				BITS_TO_LONGS(nbits) * sizeof(long);
 
 	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
 	if (unlikely(chunk == NULL))
@@ -255,8 +257,9 @@ EXPORT_SYMBOL(gen_pool_destroy);
  * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
- * Uses a first-fit algorithm. Can not be used in NMI handler on
- * architectures without NMI-safe cmpxchg implementation.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
  */
 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 {
@@ -280,8 +283,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 
 		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
 retry:
-		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
-						       start_bit, nbits, 0);
+		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
+				       pool->data);
 		if (start_bit >= end_bit)
 			continue;
 		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -400,3 +403,80 @@ size_t gen_pool_size(struct gen_pool *pool)
 	return size;
 }
 EXPORT_SYMBOL_GPL(gen_pool_size);
+
+/**
+ * gen_pool_set_algo - set the allocation algorithm
+ * @pool: pool to change allocation algorithm
+ * @algo: custom algorithm function
+ * @data: additional data used by @algo
+ *
+ * Call @algo for each memory allocation in the pool.
+ * If @algo is NULL use gen_pool_first_fit as default
+ * memory allocation function.
+ */
+void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
+{
+	rcu_read_lock();
+
+	pool->algo = algo;
+	if (!pool->algo)
+		pool->algo = gen_pool_first_fit;
+
+	pool->data = data;
+
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(gen_pool_set_algo);
+
+/**
+ * gen_pool_first_fit - find the first available region
+ * of memory matching the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ */
+unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
+		unsigned long start, unsigned int nr, void *data)
+{
+	return bitmap_find_next_zero_area(map, size, start, nr, 0);
+}
+EXPORT_SYMBOL(gen_pool_first_fit);
+
+/**
+ * gen_pool_best_fit - find the best fitting region of memory
+ * matching the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ *
+ * Iterate over the bitmap to find the smallest free region
+ * which we can allocate the memory.
+ */
+unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
+		unsigned long start, unsigned int nr, void *data)
+{
+	unsigned long start_bit = size;
+	unsigned long len = size + 1;
+	unsigned long index;
+
+	index = bitmap_find_next_zero_area(map, size, start, nr, 0);
+
+	while (index < size) {
+		int next_bit = find_next_bit(map, size, index + nr);
+		if ((next_bit - index) < len) {
+			len = next_bit - index;
+			start_bit = index;
+			if (len == nr)
+				return start_bit;
+		}
+		index = bitmap_find_next_zero_area(map, size,
+						   next_bit + 1, nr, 0);
+	}
+
+	return start_bit;
+}
+EXPORT_SYMBOL(gen_pool_best_fit);
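
With the allocation function now pluggable per pool, a caller can switch to best-fit after pool creation. A hedged usage sketch under kernel headers (error handling trimmed, helper name example_init is illustrative):

	#include <linux/genalloc.h>
	#include <linux/errno.h>

	static struct gen_pool *pool;

	static int example_init(unsigned long base, size_t size)
	{
		/* 2^4 = 16-byte granules, no NUMA node preference */
		pool = gen_pool_create(4, -1);
		if (!pool)
			return -ENOMEM;

		/* Switch to the best-fit search added above; it ignores
		 * its @data argument, so pass NULL.
		 */
		gen_pool_set_algo(pool, gen_pool_best_fit, NULL);

		return gen_pool_add(pool, base, size, -1);
	}
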
diff --git a/lib/idr.c b/lib/idr.c
index 4046e29c0a99..648239079dd2 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -20,7 +20,7 @@
20 * that id to this code and it returns your pointer. 20 * that id to this code and it returns your pointer.
21 21
22 * You can release ids at any time. When all ids are released, most of 22 * You can release ids at any time. When all ids are released, most of
23 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we 23 * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
24 * don't need to go to the memory "store" during an id allocate, just 24 * don't need to go to the memory "store" during an id allocate, just
25 * so you don't need to be too concerned about locking and conflicts 25 * so you don't need to be too concerned about locking and conflicts
26 * with the slab allocator. 26 * with the slab allocator.
@@ -122,7 +122,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
122 */ 122 */
123int idr_pre_get(struct idr *idp, gfp_t gfp_mask) 123int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
124{ 124{
125 while (idp->id_free_cnt < IDR_FREE_MAX) { 125 while (idp->id_free_cnt < MAX_IDR_FREE) {
126 struct idr_layer *new; 126 struct idr_layer *new;
127 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); 127 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
128 if (new == NULL) 128 if (new == NULL)
@@ -179,7 +179,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
179 sh = IDR_BITS*l; 179 sh = IDR_BITS*l;
180 id = ((id >> sh) ^ n ^ m) << sh; 180 id = ((id >> sh) ^ n ^ m) << sh;
181 } 181 }
182 if ((id >= MAX_ID_BIT) || (id < 0)) 182 if ((id >= MAX_IDR_BIT) || (id < 0))
183 return IDR_NOMORE_SPACE; 183 return IDR_NOMORE_SPACE;
184 if (l == 0) 184 if (l == 0)
185 break; 185 break;
@@ -223,7 +223,7 @@ build_up:
223 * Add a new layer to the top of the tree if the requested 223 * Add a new layer to the top of the tree if the requested
224 * id is larger than the currently allocated space. 224 * id is larger than the currently allocated space.
225 */ 225 */
226 while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { 226 while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
227 layers++; 227 layers++;
228 if (!p->count) { 228 if (!p->count) {
229 /* special case: if the tree is currently empty, 229 /* special case: if the tree is currently empty,
@@ -265,7 +265,7 @@ build_up:
265 265
266static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) 266static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
267{ 267{
268 struct idr_layer *pa[MAX_LEVEL]; 268 struct idr_layer *pa[MAX_IDR_LEVEL];
269 int id; 269 int id;
270 270
271 id = idr_get_empty_slot(idp, starting_id, pa); 271 id = idr_get_empty_slot(idp, starting_id, pa);
@@ -357,7 +357,7 @@ static void idr_remove_warning(int id)
357static void sub_remove(struct idr *idp, int shift, int id) 357static void sub_remove(struct idr *idp, int shift, int id)
358{ 358{
359 struct idr_layer *p = idp->top; 359 struct idr_layer *p = idp->top;
360 struct idr_layer **pa[MAX_LEVEL]; 360 struct idr_layer **pa[MAX_IDR_LEVEL];
361 struct idr_layer ***paa = &pa[0]; 361 struct idr_layer ***paa = &pa[0];
362 struct idr_layer *to_free; 362 struct idr_layer *to_free;
363 int n; 363 int n;
@@ -402,7 +402,7 @@ void idr_remove(struct idr *idp, int id)
402 struct idr_layer *to_free; 402 struct idr_layer *to_free;
403 403
404 /* Mask off upper bits we don't use for the search. */ 404 /* Mask off upper bits we don't use for the search. */
405 id &= MAX_ID_MASK; 405 id &= MAX_IDR_MASK;
406 406
407 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); 407 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
408 if (idp->top && idp->top->count == 1 && (idp->layers > 1) && 408 if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
@@ -420,7 +420,7 @@ void idr_remove(struct idr *idp, int id)
420 to_free->bitmap = to_free->count = 0; 420 to_free->bitmap = to_free->count = 0;
421 free_layer(to_free); 421 free_layer(to_free);
422 } 422 }
423 while (idp->id_free_cnt >= IDR_FREE_MAX) { 423 while (idp->id_free_cnt >= MAX_IDR_FREE) {
424 p = get_from_free_list(idp); 424 p = get_from_free_list(idp);
425 /* 425 /*
426 * Note: we don't call the rcu callback here, since the only 426 * Note: we don't call the rcu callback here, since the only
@@ -451,7 +451,7 @@ void idr_remove_all(struct idr *idp)
451 int n, id, max; 451 int n, id, max;
452 int bt_mask; 452 int bt_mask;
453 struct idr_layer *p; 453 struct idr_layer *p;
454 struct idr_layer *pa[MAX_LEVEL]; 454 struct idr_layer *pa[MAX_IDR_LEVEL];
455 struct idr_layer **paa = &pa[0]; 455 struct idr_layer **paa = &pa[0];
456 456
457 n = idp->layers * IDR_BITS; 457 n = idp->layers * IDR_BITS;
@@ -517,7 +517,7 @@ void *idr_find(struct idr *idp, int id)
517 n = (p->layer+1) * IDR_BITS; 517 n = (p->layer+1) * IDR_BITS;
518 518
519 /* Mask off upper bits we don't use for the search. */ 519 /* Mask off upper bits we don't use for the search. */
520 id &= MAX_ID_MASK; 520 id &= MAX_IDR_MASK;
521 521
522 if (id >= (1 << n)) 522 if (id >= (1 << n))
523 return NULL; 523 return NULL;
@@ -555,7 +555,7 @@ int idr_for_each(struct idr *idp,
555{ 555{
556 int n, id, max, error = 0; 556 int n, id, max, error = 0;
557 struct idr_layer *p; 557 struct idr_layer *p;
558 struct idr_layer *pa[MAX_LEVEL]; 558 struct idr_layer *pa[MAX_IDR_LEVEL];
559 struct idr_layer **paa = &pa[0]; 559 struct idr_layer **paa = &pa[0];
560 560
561 n = idp->layers * IDR_BITS; 561 n = idp->layers * IDR_BITS;
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(idr_for_each);
601 */ 601 */
602void *idr_get_next(struct idr *idp, int *nextidp) 602void *idr_get_next(struct idr *idp, int *nextidp)
603{ 603{
604 struct idr_layer *p, *pa[MAX_LEVEL]; 604 struct idr_layer *p, *pa[MAX_IDR_LEVEL];
605 struct idr_layer **paa = &pa[0]; 605 struct idr_layer **paa = &pa[0];
606 int id = *nextidp; 606 int id = *nextidp;
607 int n, max; 607 int n, max;
@@ -659,7 +659,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
659 659
660 n = (p->layer+1) * IDR_BITS; 660 n = (p->layer+1) * IDR_BITS;
661 661
662 id &= MAX_ID_MASK; 662 id &= MAX_IDR_MASK;
663 663
664 if (id >= (1 << n)) 664 if (id >= (1 << n))
665 return ERR_PTR(-EINVAL); 665 return ERR_PTR(-EINVAL);
@@ -780,7 +780,7 @@ EXPORT_SYMBOL(ida_pre_get);
780 */ 780 */
781int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) 781int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
782{ 782{
783 struct idr_layer *pa[MAX_LEVEL]; 783 struct idr_layer *pa[MAX_IDR_LEVEL];
784 struct ida_bitmap *bitmap; 784 struct ida_bitmap *bitmap;
785 unsigned long flags; 785 unsigned long flags;
786 int idr_id = starting_id / IDA_BITMAP_BITS; 786 int idr_id = starting_id / IDA_BITMAP_BITS;
@@ -793,7 +793,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
793 if (t < 0) 793 if (t < 0)
794 return _idr_rc_to_errno(t); 794 return _idr_rc_to_errno(t);
795 795
796 if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) 796 if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
797 return -ENOSPC; 797 return -ENOSPC;
798 798
799 if (t != idr_id) 799 if (t != idr_id)
@@ -827,7 +827,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
827 } 827 }
828 828
829 id = idr_id * IDA_BITMAP_BITS + t; 829 id = idr_id * IDA_BITMAP_BITS + t;
830 if (id >= MAX_ID_BIT) 830 if (id >= MAX_IDR_BIT)
831 return -ENOSPC; 831 return -ENOSPC;
832 832
833 __set_bit(t, bitmap->bitmap); 833 __set_bit(t, bitmap->bitmap);
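
[Note: the idr.c hunks above are a mechanical rename of the limit constants (MAX_ID_BIT/MAX_ID_MASK/MAX_LEVEL/IDR_FREE_MAX become MAX_IDR_BIT/MAX_IDR_MASK/MAX_IDR_LEVEL/MAX_IDR_FREE) with no behavioural change. For reference, a minimal sketch of the allocation pattern these limits back, using the idr API of this era; my_idr, my_ptr and the starting id are illustrative, not from the patch:

	static DEFINE_IDR(my_idr);

	int alloc_id(void *my_ptr)
	{
		int id, ret;

		do {
			/* preload free idr_layers; may sleep with GFP_KERNEL */
			if (!idr_pre_get(&my_idr, GFP_KERNEL))
				return -ENOMEM;
			/* hand out the lowest free id >= 1 */
			ret = idr_get_new_above(&my_idr, my_ptr, 1, &id);
		} while (ret == -EAGAIN);

		return ret ? ret : id;	/* -ENOSPC once MAX_IDR_BIT is reached */
	}
]
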
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
new file mode 100644
index 000000000000..e6eb406f2d65
--- /dev/null
+++ b/lib/interval_tree.c
@@ -0,0 +1,10 @@
1#include <linux/init.h>
2#include <linux/interval_tree.h>
3#include <linux/interval_tree_generic.h>
4
5#define START(node) ((node)->start)
6#define LAST(node) ((node)->last)
7
8INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
9 unsigned long, __subtree_last,
10 START, LAST,, interval_tree)
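
[Note: INTERVAL_TREE_DEFINE instantiates an augmented rbtree over closed intervals [start, last], caching each subtree's maximum endpoint in __subtree_last so queries can skip subtrees that cannot overlap. A small sketch of the generated entry points; the values are illustrative:

	struct rb_root root = RB_ROOT;
	struct interval_tree_node n = { .start = 10, .last = 20 };
	struct interval_tree_node *hit;

	interval_tree_insert(&n, &root);
	hit = interval_tree_iter_first(&root, 15, 15);	/* -> &n (ends are closed) */
	hit = interval_tree_iter_first(&root, 21, 30);	/* -> NULL, no overlap */
	interval_tree_remove(&n, &root);
]
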
diff --git a/lib/interval_tree_test_main.c b/lib/interval_tree_test_main.c
new file mode 100644
index 000000000000..b25903987f7a
--- /dev/null
+++ b/lib/interval_tree_test_main.c
@@ -0,0 +1,105 @@
1#include <linux/module.h>
2#include <linux/interval_tree.h>
3#include <linux/random.h>
4#include <asm/timex.h>
5
6#define NODES 100
7#define PERF_LOOPS 100000
8#define SEARCHES 100
9#define SEARCH_LOOPS 10000
10
11static struct rb_root root = RB_ROOT;
12static struct interval_tree_node nodes[NODES];
13static u32 queries[SEARCHES];
14
15static struct rnd_state rnd;
16
17static inline unsigned long
18search(unsigned long query, struct rb_root *root)
19{
20 struct interval_tree_node *node;
21 unsigned long results = 0;
22
23 for (node = interval_tree_iter_first(root, query, query); node;
24 node = interval_tree_iter_next(node, query, query))
25 results++;
26 return results;
27}
28
29static void init(void)
30{
31 int i;
32 for (i = 0; i < NODES; i++) {
33 u32 a = prandom32(&rnd), b = prandom32(&rnd);
34 if (a <= b) {
35 nodes[i].start = a;
36 nodes[i].last = b;
37 } else {
38 nodes[i].start = b;
39 nodes[i].last = a;
40 }
41 }
42 for (i = 0; i < SEARCHES; i++)
43 queries[i] = prandom32(&rnd);
44}
45
46static int interval_tree_test_init(void)
47{
48 int i, j;
49 unsigned long results;
50 cycles_t time1, time2, time;
51
52 printk(KERN_ALERT "interval tree insert/remove");
53
54 prandom32_seed(&rnd, 3141592653589793238ULL);
55 init();
56
57 time1 = get_cycles();
58
59 for (i = 0; i < PERF_LOOPS; i++) {
60 for (j = 0; j < NODES; j++)
61 interval_tree_insert(nodes + j, &root);
62 for (j = 0; j < NODES; j++)
63 interval_tree_remove(nodes + j, &root);
64 }
65
66 time2 = get_cycles();
67 time = time2 - time1;
68
69 time = div_u64(time, PERF_LOOPS);
70 printk(" -> %llu cycles\n", (unsigned long long)time);
71
72 printk(KERN_ALERT "interval tree search");
73
74 for (j = 0; j < NODES; j++)
75 interval_tree_insert(nodes + j, &root);
76
77 time1 = get_cycles();
78
79 results = 0;
80 for (i = 0; i < SEARCH_LOOPS; i++)
81 for (j = 0; j < SEARCHES; j++)
82 results += search(queries[j], &root);
83
84 time2 = get_cycles();
85 time = time2 - time1;
86
87 time = div_u64(time, SEARCH_LOOPS);
88 results = div_u64(results, SEARCH_LOOPS);
89 printk(" -> %llu cycles (%lu results)\n",
90 (unsigned long long)time, results);
91
 92 return -EAGAIN; /* Failing the init directly unloads the module */
93}
94
95static void interval_tree_test_exit(void)
96{
97 printk(KERN_ALERT "test exit\n");
98}
99
100module_init(interval_tree_test_init)
101module_exit(interval_tree_test_exit)
102
103MODULE_LICENSE("GPL");
104MODULE_AUTHOR("Michel Lespinasse");
105MODULE_DESCRIPTION("Interval Tree test");
diff --git a/lib/kasprintf.c b/lib/kasprintf.c
index ae0de80c1c88..32f12150fc4f 100644
--- a/lib/kasprintf.c
+++ b/lib/kasprintf.c
@@ -21,7 +21,7 @@ char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
21 len = vsnprintf(NULL, 0, fmt, aq); 21 len = vsnprintf(NULL, 0, fmt, aq);
22 va_end(aq); 22 va_end(aq);
23 23
24 p = kmalloc(len+1, gfp); 24 p = kmalloc_track_caller(len+1, gfp);
25 if (!p) 25 if (!p)
26 return NULL; 26 return NULL;
27 27
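
[Note: kmalloc_track_caller() makes the allocation show up as owned by the kvasprintf()/kasprintf() caller in slab tracking (e.g. CONFIG_SLUB debugging), instead of every format string in the kernel being lumped under kvasprintf(). Illustration of a call site that now gets the attribution; the names are made up:

	char *name = kasprintf(GFP_KERNEL, "queue-%d", index);
	if (!name)
		return -ENOMEM;
	/* ... use name; slab tracking now points at this function ... */
	kfree(name);
]
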
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 0401d2916d9f..52e5abbc41db 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -375,14 +375,14 @@ static int uevent_net_init(struct net *net)
375 struct uevent_sock *ue_sk; 375 struct uevent_sock *ue_sk;
376 struct netlink_kernel_cfg cfg = { 376 struct netlink_kernel_cfg cfg = {
377 .groups = 1, 377 .groups = 1,
378 .flags = NL_CFG_F_NONROOT_RECV,
378 }; 379 };
379 380
380 ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL); 381 ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
381 if (!ue_sk) 382 if (!ue_sk)
382 return -ENOMEM; 383 return -ENOMEM;
383 384
384 ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, 385 ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
385 THIS_MODULE, &cfg);
386 if (!ue_sk->sk) { 386 if (!ue_sk->sk) {
387 printk(KERN_ERR 387 printk(KERN_ERR
388 "kobject_uevent: unable to create netlink socket!\n"); 388 "kobject_uevent: unable to create netlink socket!\n");
@@ -422,7 +422,6 @@ static struct pernet_operations uevent_net_ops = {
422 422
423static int __init kobject_uevent_init(void) 423static int __init kobject_uevent_init(void)
424{ 424{
425 netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
426 return register_pernet_subsys(&uevent_net_ops); 425 return register_pernet_subsys(&uevent_net_ops);
427} 426}
428 427
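
[Note: this follows the netlink API change of the same series: the per-socket NL_CFG_F_NONROOT_RECV flag in struct netlink_kernel_cfg replaces the global netlink_set_nonroot() call dropped from kobject_uevent_init() above, and netlink_kernel_create() now passes the owning module implicitly rather than taking THIS_MODULE. The creation pattern, sketched; NETLINK_MYPROTO and my_input are placeholders:

	struct netlink_kernel_cfg cfg = {
		.groups	= 1,
		.input	= my_input,			/* receive callback, optional */
		.flags	= NL_CFG_F_NONROOT_RECV,	/* allow unprivileged bind */
	};
	struct sock *sk = netlink_kernel_create(net, NETLINK_MYPROTO, &cfg);
	if (!sk)
		return -ENOMEM;
]
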
diff --git a/lib/mpi/Makefile b/lib/mpi/Makefile
index 45ca90a8639c..019a68c90144 100644
--- a/lib/mpi/Makefile
+++ b/lib/mpi/Makefile
@@ -14,6 +14,7 @@ mpi-y = \
14 generic_mpih-add1.o \ 14 generic_mpih-add1.o \
15 mpicoder.o \ 15 mpicoder.o \
16 mpi-bit.o \ 16 mpi-bit.o \
17 mpi-cmp.o \
17 mpih-cmp.o \ 18 mpih-cmp.o \
18 mpih-div.o \ 19 mpih-div.o \
19 mpih-mul.o \ 20 mpih-mul.o \
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 29f98624ef93..095ab157a521 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -19,6 +19,8 @@
19 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, 19 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
20 * MA 02111-1307, USA. */ 20 * MA 02111-1307, USA. */
21 21
22#include <asm-generic/bitops/count_zeros.h>
23
22/* You have to define the following before including this file: 24/* You have to define the following before including this file:
23 * 25 *
24 * UWtype -- An unsigned type, default type for operations (typically a "word") 26 * UWtype -- An unsigned type, default type for operations (typically a "word")
@@ -146,12 +148,6 @@ do { \
146 : "1" ((USItype)(n1)), \ 148 : "1" ((USItype)(n1)), \
147 "r" ((USItype)(n0)), \ 149 "r" ((USItype)(n0)), \
148 "r" ((USItype)(d))) 150 "r" ((USItype)(d)))
149
150#define count_leading_zeros(count, x) \
151 __asm__ ("clz %0,%1" \
152 : "=r" ((USItype)(count)) \
153 : "r" ((USItype)(x)))
154#define COUNT_LEADING_ZEROS_0 32
155#endif /* __a29k__ */ 151#endif /* __a29k__ */
156 152
157#if defined(__alpha) && W_TYPE_SIZE == 64 153#if defined(__alpha) && W_TYPE_SIZE == 64
@@ -298,11 +294,6 @@ extern UDItype __udiv_qrnnd();
298 : "1" ((USItype)(nh)), \ 294 : "1" ((USItype)(nh)), \
299 "0" ((USItype)(nl)), \ 295 "0" ((USItype)(nl)), \
300 "g" ((USItype)(d))) 296 "g" ((USItype)(d)))
301#define count_leading_zeros(count, x) \
302 __asm__ ("bsch/1 %1,%0" \
303 : "=g" (count) \
304 : "g" ((USItype)(x)), \
305 "0" ((USItype)0))
306#endif 297#endif
307 298
308/*************************************** 299/***************************************
@@ -354,27 +345,6 @@ do { USItype __r; \
354} while (0) 345} while (0)
355extern USItype __udiv_qrnnd(); 346extern USItype __udiv_qrnnd();
356#endif /* LONGLONG_STANDALONE */ 347#endif /* LONGLONG_STANDALONE */
357#define count_leading_zeros(count, x) \
358do { \
359 USItype __tmp; \
360 __asm__ ( \
361 "ldi 1,%0\n" \
362 "extru,= %1,15,16,%%r0 ; Bits 31..16 zero?\n" \
363 "extru,tr %1,15,16,%1 ; No. Shift down, skip add.\n" \
364 "ldo 16(%0),%0 ; Yes. Perform add.\n" \
365 "extru,= %1,23,8,%%r0 ; Bits 15..8 zero?\n" \
366 "extru,tr %1,23,8,%1 ; No. Shift down, skip add.\n" \
367 "ldo 8(%0),%0 ; Yes. Perform add.\n" \
368 "extru,= %1,27,4,%%r0 ; Bits 7..4 zero?\n" \
369 "extru,tr %1,27,4,%1 ; No. Shift down, skip add.\n" \
370 "ldo 4(%0),%0 ; Yes. Perform add.\n" \
371 "extru,= %1,29,2,%%r0 ; Bits 3..2 zero?\n" \
372 "extru,tr %1,29,2,%1 ; No. Shift down, skip add.\n" \
373 "ldo 2(%0),%0 ; Yes. Perform add.\n" \
374 "extru %1,30,1,%1 ; Extract bit 1.\n" \
375 "sub %0,%1,%0 ; Subtract it. " \
376 : "=r" (count), "=r" (__tmp) : "1" (x)); \
377} while (0)
378#endif /* hppa */ 348#endif /* hppa */
379 349
380/*************************************** 350/***************************************
@@ -457,15 +427,6 @@ do { \
457 : "0" ((USItype)(n0)), \ 427 : "0" ((USItype)(n0)), \
458 "1" ((USItype)(n1)), \ 428 "1" ((USItype)(n1)), \
459 "rm" ((USItype)(d))) 429 "rm" ((USItype)(d)))
460#define count_leading_zeros(count, x) \
461do { \
462 USItype __cbtmp; \
463 __asm__ ("bsrl %1,%0" \
464 : "=r" (__cbtmp) : "rm" ((USItype)(x))); \
465 (count) = __cbtmp ^ 31; \
466} while (0)
467#define count_trailing_zeros(count, x) \
468 __asm__ ("bsfl %1,%0" : "=r" (count) : "rm" ((USItype)(x)))
469#ifndef UMUL_TIME 430#ifndef UMUL_TIME
470#define UMUL_TIME 40 431#define UMUL_TIME 40
471#endif 432#endif
@@ -536,15 +497,6 @@ do { \
536 "dI" ((USItype)(d))); \ 497 "dI" ((USItype)(d))); \
537 (r) = __rq.__i.__l; (q) = __rq.__i.__h; \ 498 (r) = __rq.__i.__l; (q) = __rq.__i.__h; \
538} while (0) 499} while (0)
539#define count_leading_zeros(count, x) \
540do { \
541 USItype __cbtmp; \
542 __asm__ ("scanbit %1,%0" \
543 : "=r" (__cbtmp) \
544 : "r" ((USItype)(x))); \
545 (count) = __cbtmp ^ 31; \
546} while (0)
547#define COUNT_LEADING_ZEROS_0 (-32) /* sic */
548#if defined(__i960mx) /* what is the proper symbol to test??? */ 500#if defined(__i960mx) /* what is the proper symbol to test??? */
549#define rshift_rhlc(r, h, l, c) \ 501#define rshift_rhlc(r, h, l, c) \
550do { \ 502do { \
@@ -603,11 +555,6 @@ do { \
603 : "0" ((USItype)(n0)), \ 555 : "0" ((USItype)(n0)), \
604 "1" ((USItype)(n1)), \ 556 "1" ((USItype)(n1)), \
605 "dmi" ((USItype)(d))) 557 "dmi" ((USItype)(d)))
606#define count_leading_zeros(count, x) \
607 __asm__ ("bfffo %1{%b2:%b2},%0" \
608 : "=d" ((USItype)(count)) \
609 : "od" ((USItype)(x)), "n" (0))
610#define COUNT_LEADING_ZEROS_0 32
611#else /* not mc68020 */ 558#else /* not mc68020 */
612#define umul_ppmm(xh, xl, a, b) \ 559#define umul_ppmm(xh, xl, a, b) \
613do { USItype __umul_tmp1, __umul_tmp2; \ 560do { USItype __umul_tmp1, __umul_tmp2; \
@@ -664,15 +611,6 @@ do { USItype __umul_tmp1, __umul_tmp2; \
664 "rJ" ((USItype)(bh)), \ 611 "rJ" ((USItype)(bh)), \
665 "rJ" ((USItype)(al)), \ 612 "rJ" ((USItype)(al)), \
666 "rJ" ((USItype)(bl))) 613 "rJ" ((USItype)(bl)))
667#define count_leading_zeros(count, x) \
668do { \
669 USItype __cbtmp; \
670 __asm__ ("ff1 %0,%1" \
671 : "=r" (__cbtmp) \
672 : "r" ((USItype)(x))); \
673 (count) = __cbtmp ^ 31; \
674} while (0)
675#define COUNT_LEADING_ZEROS_0 63 /* sic */
676#if defined(__m88110__) 614#if defined(__m88110__)
677#define umul_ppmm(wh, wl, u, v) \ 615#define umul_ppmm(wh, wl, u, v) \
678do { \ 616do { \
@@ -703,7 +641,14 @@ do { \
703 ************** MIPS ***************** 641 ************** MIPS *****************
704 ***************************************/ 642 ***************************************/
705#if defined(__mips__) && W_TYPE_SIZE == 32 643#if defined(__mips__) && W_TYPE_SIZE == 32
706#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7 644#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
645#define umul_ppmm(w1, w0, u, v) \
646do { \
647 UDItype __ll = (UDItype)(u) * (v); \
648 w1 = __ll >> 32; \
649 w0 = __ll; \
650} while (0)
651#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
707#define umul_ppmm(w1, w0, u, v) \ 652#define umul_ppmm(w1, w0, u, v) \
708 __asm__ ("multu %2,%3" \ 653 __asm__ ("multu %2,%3" \
709 : "=l" ((USItype)(w0)), \ 654 : "=l" ((USItype)(w0)), \
@@ -728,7 +673,15 @@ do { \
728 ************** MIPS/64 ************** 673 ************** MIPS/64 **************
729 ***************************************/ 674 ***************************************/
730#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 675#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
731#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7 676#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
677#define umul_ppmm(w1, w0, u, v) \
678do { \
679 typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
680 __ll_UTItype __ll = (__ll_UTItype)(u) * (v); \
681 w1 = __ll >> 64; \
682 w0 = __ll; \
683} while (0)
684#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
732#define umul_ppmm(w1, w0, u, v) \ 685#define umul_ppmm(w1, w0, u, v) \
733 __asm__ ("dmultu %2,%3" \ 686 __asm__ ("dmultu %2,%3" \
734 : "=l" ((UDItype)(w0)), \ 687 : "=l" ((UDItype)(w0)), \
@@ -779,12 +732,6 @@ do { \
779 : "0" (__xx.__ll), \ 732 : "0" (__xx.__ll), \
780 "g" ((USItype)(d))); \ 733 "g" ((USItype)(d))); \
781 (r) = __xx.__i.__l; (q) = __xx.__i.__h; }) 734 (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
782#define count_trailing_zeros(count, x) \
783do { \
784 __asm__("ffsd %2,%0" \
785 : "=r"((USItype) (count)) \
786 : "0"((USItype) 0), "r"((USItype) (x))); \
787 } while (0)
788#endif /* __ns32000__ */ 735#endif /* __ns32000__ */
789 736
790/*************************************** 737/***************************************
@@ -855,11 +802,6 @@ do { \
855 "rI" ((USItype)(al)), \ 802 "rI" ((USItype)(al)), \
856 "r" ((USItype)(bl))); \ 803 "r" ((USItype)(bl))); \
857} while (0) 804} while (0)
858#define count_leading_zeros(count, x) \
859 __asm__ ("{cntlz|cntlzw} %0,%1" \
860 : "=r" ((USItype)(count)) \
861 : "r" ((USItype)(x)))
862#define COUNT_LEADING_ZEROS_0 32
863#if defined(_ARCH_PPC) 805#if defined(_ARCH_PPC)
864#define umul_ppmm(ph, pl, m0, m1) \ 806#define umul_ppmm(ph, pl, m0, m1) \
865do { \ 807do { \
@@ -1001,19 +943,6 @@ do { \
1001} while (0) 943} while (0)
1002#define UMUL_TIME 20 944#define UMUL_TIME 20
1003#define UDIV_TIME 200 945#define UDIV_TIME 200
1004#define count_leading_zeros(count, x) \
1005do { \
1006 if ((x) >= 0x10000) \
1007 __asm__ ("clz %0,%1" \
1008 : "=r" ((USItype)(count)) \
1009 : "r" ((USItype)(x) >> 16)); \
1010 else { \
1011 __asm__ ("clz %0,%1" \
1012 : "=r" ((USItype)(count)) \
1013 : "r" ((USItype)(x))); \
1014 (count) += 16; \
1015 } \
1016} while (0)
1017#endif /* RT/ROMP */ 946#endif /* RT/ROMP */
1018 947
1019/*************************************** 948/***************************************
@@ -1142,13 +1071,6 @@ do { \
1142 "rI" ((USItype)(d)) \ 1071 "rI" ((USItype)(d)) \
1143 : "%g1" __AND_CLOBBER_CC) 1072 : "%g1" __AND_CLOBBER_CC)
1144#define UDIV_TIME 37 1073#define UDIV_TIME 37
1145#define count_leading_zeros(count, x) \
1146 __asm__ ("scan %1,0,%0" \
1147 : "=r" ((USItype)(x)) \
1148 : "r" ((USItype)(count)))
1149/* Early sparclites return 63 for an argument of 0, but they warn that future
1150 implementations might change this. Therefore, leave COUNT_LEADING_ZEROS_0
1151 undefined. */
1152#endif /* __sparclite__ */ 1074#endif /* __sparclite__ */
1153#endif /* __sparc_v8__ */ 1075#endif /* __sparc_v8__ */
1154 /* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd. */ 1076 /* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd. */
@@ -1454,47 +1376,6 @@ do { \
1454#define udiv_qrnnd __udiv_qrnnd_c 1376#define udiv_qrnnd __udiv_qrnnd_c
1455#endif 1377#endif
1456 1378
1457#undef count_leading_zeros
1458#if !defined(count_leading_zeros)
1459 extern
1460#ifdef __STDC__
1461 const
1462#endif
1463 unsigned char __clz_tab[];
1464#define count_leading_zeros(count, x) \
1465do { \
1466 UWtype __xr = (x); \
1467 UWtype __a; \
1468 \
1469 if (W_TYPE_SIZE <= 32) { \
1470 __a = __xr < ((UWtype) 1 << 2*__BITS4) \
1471 ? (__xr < ((UWtype) 1 << __BITS4) ? 0 : __BITS4) \
1472 : (__xr < ((UWtype) 1 << 3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
1473 } \
1474 else { \
1475 for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8) \
1476 if (((__xr >> __a) & 0xff) != 0) \
1477 break; \
1478 } \
1479 \
1480 (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
1481} while (0)
1482 /* This version gives a well-defined value for zero. */
1483#define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
1484#endif
1485
1486#if !defined(count_trailing_zeros)
1487/* Define count_trailing_zeros using count_leading_zeros. The latter might be
1488 defined in asm, but if it is not, the C version above is good enough. */
1489#define count_trailing_zeros(count, x) \
1490do { \
1491 UWtype __ctz_x = (x); \
1492 UWtype __ctz_c; \
1493 count_leading_zeros(__ctz_c, __ctz_x & -__ctz_x); \
1494 (count) = W_TYPE_SIZE - 1 - __ctz_c; \
1495} while (0)
1496#endif
1497
1498#ifndef UDIV_NEEDS_NORMALIZATION 1379#ifndef UDIV_NEEDS_NORMALIZATION
1499#define UDIV_NEEDS_NORMALIZATION 0 1380#define UDIV_NEEDS_NORMALIZATION 0
1500#endif 1381#endif
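
[Note: all the per-architecture count_leading_zeros()/count_trailing_zeros() statement macros are removed here; the include of <asm-generic/bitops/count_zeros.h> at the top supplies generic replacements built on the kernel's fls()/fls64() primitives. The calling convention also changes from a two-argument macro that writes into its first argument to a value-returning expression, which is what the mpi-bit.c and mpi-pow.c hunks below adjust for. Roughly, as an illustration:

	int n;
	count_leading_zeros(n, x);	/* old: per-arch macro stores into n */
	n = count_leading_zeros(x);	/* new: generic, returns the count;
					 * BITS_PER_LONG for x == 0 */
]
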
diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c
index 568724804f29..503537e08436 100644
--- a/lib/mpi/mpi-bit.c
+++ b/lib/mpi/mpi-bit.c
@@ -45,7 +45,7 @@ unsigned mpi_get_nbits(MPI a)
45 if (a->nlimbs) { 45 if (a->nlimbs) {
46 mpi_limb_t alimb = a->d[a->nlimbs - 1]; 46 mpi_limb_t alimb = a->d[a->nlimbs - 1];
47 if (alimb) 47 if (alimb)
48 count_leading_zeros(n, alimb); 48 n = count_leading_zeros(alimb);
49 else 49 else
50 n = BITS_PER_MPI_LIMB; 50 n = BITS_PER_MPI_LIMB;
51 n = BITS_PER_MPI_LIMB - n + (a->nlimbs - 1) * BITS_PER_MPI_LIMB; 51 n = BITS_PER_MPI_LIMB - n + (a->nlimbs - 1) * BITS_PER_MPI_LIMB;
diff --git a/lib/mpi/mpi-cmp.c b/lib/mpi/mpi-cmp.c
new file mode 100644
index 000000000000..1871e7b61ca0
--- /dev/null
+++ b/lib/mpi/mpi-cmp.c
@@ -0,0 +1,70 @@
1/* mpi-cmp.c - MPI functions
2 * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
3 *
4 * This file is part of GnuPG.
5 *
6 * GnuPG is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * GnuPG is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
19 */
20
21#include "mpi-internal.h"
22
23int mpi_cmp_ui(MPI u, unsigned long v)
24{
25 mpi_limb_t limb = v;
26
27 mpi_normalize(u);
28 if (!u->nlimbs && !limb)
29 return 0;
30 if (u->sign)
31 return -1;
32 if (u->nlimbs > 1)
33 return 1;
34
35 if (u->d[0] == limb)
36 return 0;
37 else if (u->d[0] > limb)
38 return 1;
39 else
40 return -1;
41}
42EXPORT_SYMBOL_GPL(mpi_cmp_ui);
43
44int mpi_cmp(MPI u, MPI v)
45{
46 mpi_size_t usize, vsize;
47 int cmp;
48
49 mpi_normalize(u);
50 mpi_normalize(v);
51 usize = u->nlimbs;
52 vsize = v->nlimbs;
53 if (!u->sign && v->sign)
54 return 1;
55 if (u->sign && !v->sign)
56 return -1;
57 if (usize != vsize && !u->sign && !v->sign)
58 return usize - vsize;
59 if (usize != vsize && u->sign && v->sign)
 60 return vsize - usize;
61 if (!usize)
62 return 0;
63 cmp = mpihelp_cmp(u->d, v->d, usize);
64 if (!cmp)
65 return 0;
66 if ((cmp < 0 ? 1 : 0) == (u->sign ? 1 : 0))
67 return 1;
68 return -1;
69}
70EXPORT_SYMBOL_GPL(mpi_cmp);
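
[Note: mpi_cmp() and mpi_cmp_ui() provide strcmp()-style three-way comparison for MPIs after normalizing away leading zero limbs. A typical use, as an illustrative sketch rather than anything from this patch, is range-checking crypto inputs, e.g. requiring 0 <= c < n before a modular exponentiation:

	/* c and n are MPIs obtained elsewhere; the names are placeholders */
	if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, n) >= 0)
		return -EINVAL;	/* input out of range for the modulus */
]
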
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 67f3e79af914..5464c8744ea9 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -77,7 +77,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
77 mp = mp_marker = mpi_alloc_limb_space(msize); 77 mp = mp_marker = mpi_alloc_limb_space(msize);
78 if (!mp) 78 if (!mp)
79 goto enomem; 79 goto enomem;
80 count_leading_zeros(mod_shift_cnt, mod->d[msize - 1]); 80 mod_shift_cnt = count_leading_zeros(mod->d[msize - 1]);
81 if (mod_shift_cnt) 81 if (mod_shift_cnt)
82 mpihelp_lshift(mp, mod->d, msize, mod_shift_cnt); 82 mpihelp_lshift(mp, mod->d, msize, mod_shift_cnt);
83 else 83 else
@@ -169,7 +169,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
169 169
170 i = esize - 1; 170 i = esize - 1;
171 e = ep[i]; 171 e = ep[i];
172 count_leading_zeros(c, e); 172 c = count_leading_zeros(e);
173 e = (e << c) << 1; /* shift the exp bits to the left, lose msb */ 173 e = (e << c) << 1; /* shift the exp bits to the left, lose msb */
174 c = BITS_PER_MPI_LIMB - 1 - c; 174 c = BITS_PER_MPI_LIMB - 1 - c;
175 175
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index f0fa65995800..3962b7f7fe3f 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -18,10 +18,65 @@
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA 18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
19 */ 19 */
20 20
21#include <linux/bitops.h>
22#include <asm-generic/bitops/count_zeros.h>
21#include "mpi-internal.h" 23#include "mpi-internal.h"
22 24
23#define MAX_EXTERN_MPI_BITS 16384 25#define MAX_EXTERN_MPI_BITS 16384
24 26
27/**
28 * mpi_read_raw_data - Read a raw byte stream as a positive integer
29 * @xbuffer: The data to read
30 * @nbytes: The amount of data to read
31 */
32MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes)
33{
34 const uint8_t *buffer = xbuffer;
35 int i, j;
36 unsigned nbits, nlimbs;
37 mpi_limb_t a;
38 MPI val = NULL;
39
 40 while (nbytes > 0 && buffer[0] == 0) {
41 buffer++;
42 nbytes--;
43 }
44
45 nbits = nbytes * 8;
46 if (nbits > MAX_EXTERN_MPI_BITS) {
47 pr_info("MPI: mpi too large (%u bits)\n", nbits);
48 return NULL;
49 }
50 if (nbytes > 0)
 51 nbits -= count_leading_zeros(buffer[0]) - (BITS_PER_LONG - 8);
52 else
53 nbits = 0;
54
55 nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
56 val = mpi_alloc(nlimbs);
57 if (!val)
58 return NULL;
59 val->nbits = nbits;
60 val->sign = 0;
61 val->nlimbs = nlimbs;
62
63 if (nbytes > 0) {
64 i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
65 i %= BYTES_PER_MPI_LIMB;
66 for (j = nlimbs; j > 0; j--) {
67 a = 0;
68 for (; i < BYTES_PER_MPI_LIMB; i++) {
69 a <<= 8;
70 a |= *buffer++;
71 }
72 i = 0;
73 val->d[j - 1] = a;
74 }
75 }
76 return val;
77}
78EXPORT_SYMBOL_GPL(mpi_read_raw_data);
79
25MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread) 80MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
26{ 81{
27 const uint8_t *buffer = xbuffer; 82 const uint8_t *buffer = xbuffer;
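
[Note: mpi_read_raw_data() complements mpi_read_from_buffer() for callers whose input has no 2-byte bit-count header: it strips leading zero octets, counts the significant bits, and packs the remaining big-endian bytes into limbs. A hedged usage sketch; sig_blob/sig_len are placeholders for, say, a signature blob pulled out of an ASN.1 structure:

	MPI sig = mpi_read_raw_data(sig_blob, sig_len);	/* big-endian, no header */
	if (!sig)
		return -ENOMEM;
	/* ... use sig, e.g. as mpi_powm() input ... */
	mpi_free(sig);
]
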
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 4226dfeb5178..18eca7809b08 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -22,6 +22,10 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
22 [NLA_U64] = sizeof(u64), 22 [NLA_U64] = sizeof(u64),
23 [NLA_MSECS] = sizeof(u64), 23 [NLA_MSECS] = sizeof(u64),
24 [NLA_NESTED] = NLA_HDRLEN, 24 [NLA_NESTED] = NLA_HDRLEN,
25 [NLA_S8] = sizeof(s8),
26 [NLA_S16] = sizeof(s16),
27 [NLA_S32] = sizeof(s32),
28 [NLA_S64] = sizeof(s64),
25}; 29};
26 30
27static int validate_nla(const struct nlattr *nla, int maxtype, 31static int validate_nla(const struct nlattr *nla, int maxtype,
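
[Note: the minimum-length table gains entries for the newly added signed attribute types so that validate_nla() rejects truncated payloads for them, just as it already does for NLA_U*. An illustrative policy using the new types; all names here are made up:

	static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
		[MY_ATTR_TEMPERATURE]	= { .type = NLA_S32 },	/* may be negative */
		[MY_ATTR_OFFSET]	= { .type = NLA_S64 },
	};
]
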
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
new file mode 100644
index 000000000000..d8de11f45908
--- /dev/null
+++ b/lib/oid_registry.c
@@ -0,0 +1,170 @@
1/* ASN.1 Object identifier (OID) registry
2 *
3 * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/export.h>
13#include <linux/oid_registry.h>
14#include <linux/kernel.h>
15#include <linux/errno.h>
16#include <linux/bug.h>
17#include "oid_registry_data.c"
18
19/**
20 * look_up_OID - Find an OID registration for the specified data
21 * @data: Binary representation of the OID
22 * @datasize: Size of the binary representation
23 */
24enum OID look_up_OID(const void *data, size_t datasize)
25{
26 const unsigned char *octets = data;
27 enum OID oid;
28 unsigned char xhash;
29 unsigned i, j, k, hash;
30 size_t len;
31
32 /* Hash the OID data */
33 hash = datasize - 1;
34
35 for (i = 0; i < datasize; i++)
36 hash += octets[i] * 33;
37 hash = (hash >> 24) ^ (hash >> 16) ^ (hash >> 8) ^ hash;
38 hash &= 0xff;
39
40 /* Binary search the OID registry. OIDs are stored in ascending order
 41 * of hash value, then ascending order of size, and then in ascending
42 * order of reverse value.
43 */
44 i = 0;
45 k = OID__NR;
46 while (i < k) {
47 j = (i + k) / 2;
48
49 xhash = oid_search_table[j].hash;
50 if (xhash > hash) {
51 k = j;
52 continue;
53 }
54 if (xhash < hash) {
55 i = j + 1;
56 continue;
57 }
58
59 oid = oid_search_table[j].oid;
60 len = oid_index[oid + 1] - oid_index[oid];
61 if (len > datasize) {
62 k = j;
63 continue;
64 }
65 if (len < datasize) {
66 i = j + 1;
67 continue;
68 }
69
70 /* Variation is most likely to be at the tail end of the
71 * OID, so do the comparison in reverse.
72 */
73 while (len > 0) {
74 unsigned char a = oid_data[oid_index[oid] + --len];
75 unsigned char b = octets[len];
76 if (a > b) {
77 k = j;
78 goto next;
79 }
80 if (a < b) {
81 i = j + 1;
82 goto next;
83 }
84 }
85 return oid;
86 next:
87 ;
88 }
89
90 return OID__NR;
91}
92EXPORT_SYMBOL_GPL(look_up_OID);
93
 94/**
 95 * sprint_oid - Print an Object Identifier into a buffer
96 * @data: The encoded OID to print
97 * @datasize: The size of the encoded OID
98 * @buffer: The buffer to render into
99 * @bufsize: The size of the buffer
100 *
101 * The OID is rendered into the buffer in "a.b.c.d" format and the number of
102 * bytes is returned. -EBADMSG is returned if the data could not be interpreted
103 * and -ENOBUFS if the buffer was too small.
104 */
105int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
106{
107 const unsigned char *v = data, *end = v + datasize;
108 unsigned long num;
109 unsigned char n;
110 size_t ret;
111 int count;
112
113 if (v >= end)
114 return -EBADMSG;
115
116 n = *v++;
117 ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40);
118 buffer += count;
119 bufsize -= count;
120 if (bufsize == 0)
121 return -ENOBUFS;
122
123 while (v < end) {
124 num = 0;
125 n = *v++;
126 if (!(n & 0x80)) {
127 num = n;
128 } else {
129 num = n & 0x7f;
130 do {
131 if (v >= end)
132 return -EBADMSG;
133 n = *v++;
134 num <<= 7;
135 num |= n & 0x7f;
136 } while (n & 0x80);
137 }
138 ret += count = snprintf(buffer, bufsize, ".%lu", num);
139 buffer += count;
140 bufsize -= count;
141 if (bufsize == 0)
142 return -ENOBUFS;
143 }
144
145 return ret;
146}
147EXPORT_SYMBOL_GPL(sprint_oid);
148
149/**
150 * sprint_OID - Print an Object Identifier into a buffer
151 * @oid: The OID to print
152 * @buffer: The buffer to render into
153 * @bufsize: The size of the buffer
154 *
155 * The OID is rendered into the buffer in "a.b.c.d" format and the number of
156 * bytes is returned.
157 */
158int sprint_OID(enum OID oid, char *buffer, size_t bufsize)
159{
160 int ret;
161
162 BUG_ON(oid >= OID__NR);
163
164 ret = sprint_oid(oid_data + oid_index[oid],
165 oid_index[oid + 1] - oid_index[oid],
166 buffer, bufsize);
167 BUG_ON(ret == -EBADMSG);
168 return ret;
169}
170EXPORT_SYMBOL_GPL(sprint_OID);
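
[Note: tying the registry together: look_up_OID() hashes the DER-encoded payload, binary-searches the tables generated into oid_registry_data.c by build_OID_registry, and returns an enum OID the caller can switch on; the sprint helpers render the dotted-decimal form for diagnostics. A minimal sketch, where der/der_len stand for an OID's encoded payload (the bytes after the ASN.1 tag and length):

	char buf[64];
	enum OID oid = look_up_OID(der, der_len);

	if (oid == OID__NR) {			/* not in the registry */
		sprint_oid(der, der_len, buf, sizeof(buf));
		pr_warn("unsupported OID: %s\n", buf);
		return -ENOPKG;
	}
]
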
diff --git a/lib/parser.c b/lib/parser.c
index c43410084838..52cfa69f73df 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -122,13 +122,14 @@ int match_token(char *s, const match_table_t table, substring_t args[])
122 * 122 *
123 * Description: Given a &substring_t and a base, attempts to parse the substring 123 * Description: Given a &substring_t and a base, attempts to parse the substring
124 * as a number in that base. On success, sets @result to the integer represented 124 * as a number in that base. On success, sets @result to the integer represented
125 * by the string and returns 0. Returns either -ENOMEM or -EINVAL on failure. 125 * by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
126 */ 126 */
127static int match_number(substring_t *s, int *result, int base) 127static int match_number(substring_t *s, int *result, int base)
128{ 128{
129 char *endp; 129 char *endp;
130 char *buf; 130 char *buf;
131 int ret; 131 int ret;
132 long val;
132 size_t len = s->to - s->from; 133 size_t len = s->to - s->from;
133 134
134 buf = kmalloc(len + 1, GFP_KERNEL); 135 buf = kmalloc(len + 1, GFP_KERNEL);
@@ -136,10 +137,15 @@ static int match_number(substring_t *s, int *result, int base)
136 return -ENOMEM; 137 return -ENOMEM;
137 memcpy(buf, s->from, len); 138 memcpy(buf, s->from, len);
138 buf[len] = '\0'; 139 buf[len] = '\0';
139 *result = simple_strtol(buf, &endp, base); 140
140 ret = 0; 141 ret = 0;
142 val = simple_strtol(buf, &endp, base);
141 if (endp == buf) 143 if (endp == buf)
142 ret = -EINVAL; 144 ret = -EINVAL;
145 else if (val < (long)INT_MIN || val > (long)INT_MAX)
146 ret = -ERANGE;
147 else
148 *result = (int) val;
143 kfree(buf); 149 kfree(buf);
144 return ret; 150 return ret;
145} 151}
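
[Note: the added range check matters because simple_strtol() returns a long: on 64-bit hosts a value such as 4294967296 parsed fine and was silently truncated when stored through the int *result. Behaviour through the public wrappers, illustratively:

	substring_t args[MAX_OPT_ARGS];
	int val, ret;

	/* suppose args[0] spans the digits "4294967296" (> INT_MAX) */
	ret = match_int(&args[0], &val);	/* was 0 with a wrapped val; now -ERANGE */
]
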
diff --git a/lib/plist.c b/lib/plist.c
index 6ab0e521c48b..1ebc95f7a46f 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -175,7 +175,7 @@ static int __init plist_test(void)
175 int nr_expect = 0, i, loop; 175 int nr_expect = 0, i, loop;
176 unsigned int r = local_clock(); 176 unsigned int r = local_clock();
177 177
178 printk(KERN_INFO "start plist test\n"); 178 pr_debug("start plist test\n");
179 plist_head_init(&test_head); 179 plist_head_init(&test_head);
180 for (i = 0; i < ARRAY_SIZE(test_node); i++) 180 for (i = 0; i < ARRAY_SIZE(test_node); i++)
181 plist_node_init(test_node + i, 0); 181 plist_node_init(test_node + i, 0);
@@ -203,7 +203,7 @@ static int __init plist_test(void)
203 plist_test_check(nr_expect); 203 plist_test_check(nr_expect);
204 } 204 }
205 205
206 printk(KERN_INFO "end plist test\n"); 206 pr_debug("end plist test\n");
207 return 0; 207 return 0;
208} 208}
209 209
diff --git a/lib/prio_tree.c b/lib/prio_tree.c
deleted file mode 100644
index 8d443af03b4c..000000000000
--- a/lib/prio_tree.c
+++ /dev/null
@@ -1,466 +0,0 @@
1/*
2 * lib/prio_tree.c - priority search tree
3 *
4 * Copyright (C) 2004, Rajesh Venkatasubramanian <vrajesh@umich.edu>
5 *
6 * This file is released under the GPL v2.
7 *
8 * Based on the radix priority search tree proposed by Edward M. McCreight
9 * SIAM Journal of Computing, vol. 14, no.2, pages 257-276, May 1985
10 *
11 * 02Feb2004 Initial version
12 */
13
14#include <linux/init.h>
15#include <linux/mm.h>
16#include <linux/prio_tree.h>
17
18/*
19 * A clever mix of heap and radix trees forms a radix priority search tree (PST)
 20 * which is useful for storing intervals, e.g., we can consider a vma as a closed
21 * interval of file pages [offset_begin, offset_end], and store all vmas that
22 * map a file in a PST. Then, using the PST, we can answer a stabbing query,
23 * i.e., selecting a set of stored intervals (vmas) that overlap with (map) a
24 * given input interval X (a set of consecutive file pages), in "O(log n + m)"
25 * time where 'log n' is the height of the PST, and 'm' is the number of stored
26 * intervals (vmas) that overlap (map) with the input interval X (the set of
27 * consecutive file pages).
28 *
29 * In our implementation, we store closed intervals of the form [radix_index,
30 * heap_index]. We assume that always radix_index <= heap_index. McCreight's PST
31 * is designed for storing intervals with unique radix indices, i.e., each
 32 * interval has a different radix_index. However, this limitation can be easily
33 * overcome by using the size, i.e., heap_index - radix_index, as part of the
34 * index, so we index the tree using [(radix_index,size), heap_index].
35 *
36 * When the above-mentioned indexing scheme is used, theoretically, in a 32 bit
37 * machine, the maximum height of a PST can be 64. We can use a balanced version
38 * of the priority search tree to optimize the tree height, but the balanced
39 * tree proposed by McCreight is too complex and memory-hungry for our purpose.
40 */
41
42/*
43 * The following macros are used for implementing prio_tree for i_mmap
44 */
45
46#define RADIX_INDEX(vma) ((vma)->vm_pgoff)
47#define VMA_SIZE(vma) (((vma)->vm_end - (vma)->vm_start) >> PAGE_SHIFT)
48/* avoid overflow */
49#define HEAP_INDEX(vma) ((vma)->vm_pgoff + (VMA_SIZE(vma) - 1))
50
51
52static void get_index(const struct prio_tree_root *root,
53 const struct prio_tree_node *node,
54 unsigned long *radix, unsigned long *heap)
55{
56 if (root->raw) {
57 struct vm_area_struct *vma = prio_tree_entry(
58 node, struct vm_area_struct, shared.prio_tree_node);
59
60 *radix = RADIX_INDEX(vma);
61 *heap = HEAP_INDEX(vma);
62 }
63 else {
64 *radix = node->start;
65 *heap = node->last;
66 }
67}
68
69static unsigned long index_bits_to_maxindex[BITS_PER_LONG];
70
71void __init prio_tree_init(void)
72{
73 unsigned int i;
74
75 for (i = 0; i < ARRAY_SIZE(index_bits_to_maxindex) - 1; i++)
76 index_bits_to_maxindex[i] = (1UL << (i + 1)) - 1;
77 index_bits_to_maxindex[ARRAY_SIZE(index_bits_to_maxindex) - 1] = ~0UL;
78}
79
80/*
81 * Maximum heap_index that can be stored in a PST with index_bits bits
82 */
83static inline unsigned long prio_tree_maxindex(unsigned int bits)
84{
85 return index_bits_to_maxindex[bits - 1];
86}
87
88static void prio_set_parent(struct prio_tree_node *parent,
89 struct prio_tree_node *child, bool left)
90{
91 if (left)
92 parent->left = child;
93 else
94 parent->right = child;
95
96 child->parent = parent;
97}
98
99/*
100 * Extend a priority search tree so that it can store a node with heap_index
101 * max_heap_index. In the worst case, this algorithm takes O((log n)^2).
102 * However, this function is used rarely and the common case performance is
103 * not bad.
104 */
105static struct prio_tree_node *prio_tree_expand(struct prio_tree_root *root,
106 struct prio_tree_node *node, unsigned long max_heap_index)
107{
108 struct prio_tree_node *prev;
109
110 if (max_heap_index > prio_tree_maxindex(root->index_bits))
111 root->index_bits++;
112
113 prev = node;
114 INIT_PRIO_TREE_NODE(node);
115
116 while (max_heap_index > prio_tree_maxindex(root->index_bits)) {
117 struct prio_tree_node *tmp = root->prio_tree_node;
118
119 root->index_bits++;
120
121 if (prio_tree_empty(root))
122 continue;
123
124 prio_tree_remove(root, root->prio_tree_node);
125 INIT_PRIO_TREE_NODE(tmp);
126
127 prio_set_parent(prev, tmp, true);
128 prev = tmp;
129 }
130
131 if (!prio_tree_empty(root))
132 prio_set_parent(prev, root->prio_tree_node, true);
133
134 root->prio_tree_node = node;
135 return node;
136}
137
138/*
139 * Replace a prio_tree_node with a new node and return the old node
140 */
141struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root,
142 struct prio_tree_node *old, struct prio_tree_node *node)
143{
144 INIT_PRIO_TREE_NODE(node);
145
146 if (prio_tree_root(old)) {
147 BUG_ON(root->prio_tree_node != old);
148 /*
149 * We can reduce root->index_bits here. However, it is complex
150 * and does not help much to improve performance (IMO).
151 */
152 root->prio_tree_node = node;
153 } else
154 prio_set_parent(old->parent, node, old->parent->left == old);
155
156 if (!prio_tree_left_empty(old))
157 prio_set_parent(node, old->left, true);
158
159 if (!prio_tree_right_empty(old))
160 prio_set_parent(node, old->right, false);
161
162 return old;
163}
164
165/*
166 * Insert a prio_tree_node @node into a radix priority search tree @root. The
167 * algorithm typically takes O(log n) time where 'log n' is the number of bits
168 * required to represent the maximum heap_index. In the worst case, the algo
169 * can take O((log n)^2) - check prio_tree_expand.
170 *
171 * If a prior node with same radix_index and heap_index is already found in
172 * the tree, then returns the address of the prior node. Otherwise, inserts
173 * @node into the tree and returns @node.
174 */
175struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
176 struct prio_tree_node *node)
177{
178 struct prio_tree_node *cur, *res = node;
179 unsigned long radix_index, heap_index;
180 unsigned long r_index, h_index, index, mask;
181 int size_flag = 0;
182
183 get_index(root, node, &radix_index, &heap_index);
184
185 if (prio_tree_empty(root) ||
186 heap_index > prio_tree_maxindex(root->index_bits))
187 return prio_tree_expand(root, node, heap_index);
188
189 cur = root->prio_tree_node;
190 mask = 1UL << (root->index_bits - 1);
191
192 while (mask) {
193 get_index(root, cur, &r_index, &h_index);
194
195 if (r_index == radix_index && h_index == heap_index)
196 return cur;
197
198 if (h_index < heap_index ||
199 (h_index == heap_index && r_index > radix_index)) {
200 struct prio_tree_node *tmp = node;
201 node = prio_tree_replace(root, cur, node);
202 cur = tmp;
203 /* swap indices */
204 index = r_index;
205 r_index = radix_index;
206 radix_index = index;
207 index = h_index;
208 h_index = heap_index;
209 heap_index = index;
210 }
211
212 if (size_flag)
213 index = heap_index - radix_index;
214 else
215 index = radix_index;
216
217 if (index & mask) {
218 if (prio_tree_right_empty(cur)) {
219 INIT_PRIO_TREE_NODE(node);
220 prio_set_parent(cur, node, false);
221 return res;
222 } else
223 cur = cur->right;
224 } else {
225 if (prio_tree_left_empty(cur)) {
226 INIT_PRIO_TREE_NODE(node);
227 prio_set_parent(cur, node, true);
228 return res;
229 } else
230 cur = cur->left;
231 }
232
233 mask >>= 1;
234
235 if (!mask) {
236 mask = 1UL << (BITS_PER_LONG - 1);
237 size_flag = 1;
238 }
239 }
240 /* Should not reach here */
241 BUG();
242 return NULL;
243}
244
245/*
246 * Remove a prio_tree_node @node from a radix priority search tree @root. The
247 * algorithm takes O(log n) time where 'log n' is the number of bits required
248 * to represent the maximum heap_index.
249 */
250void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node)
251{
252 struct prio_tree_node *cur;
253 unsigned long r_index, h_index_right, h_index_left;
254
255 cur = node;
256
257 while (!prio_tree_left_empty(cur) || !prio_tree_right_empty(cur)) {
258 if (!prio_tree_left_empty(cur))
259 get_index(root, cur->left, &r_index, &h_index_left);
260 else {
261 cur = cur->right;
262 continue;
263 }
264
265 if (!prio_tree_right_empty(cur))
266 get_index(root, cur->right, &r_index, &h_index_right);
267 else {
268 cur = cur->left;
269 continue;
270 }
271
272 /* both h_index_left and h_index_right cannot be 0 */
273 if (h_index_left >= h_index_right)
274 cur = cur->left;
275 else
276 cur = cur->right;
277 }
278
279 if (prio_tree_root(cur)) {
280 BUG_ON(root->prio_tree_node != cur);
281 __INIT_PRIO_TREE_ROOT(root, root->raw);
282 return;
283 }
284
285 if (cur->parent->right == cur)
286 cur->parent->right = cur->parent;
287 else
288 cur->parent->left = cur->parent;
289
290 while (cur != node)
291 cur = prio_tree_replace(root, cur->parent, cur);
292}
293
294static void iter_walk_down(struct prio_tree_iter *iter)
295{
296 iter->mask >>= 1;
297 if (iter->mask) {
298 if (iter->size_level)
299 iter->size_level++;
300 return;
301 }
302
303 if (iter->size_level) {
304 BUG_ON(!prio_tree_left_empty(iter->cur));
305 BUG_ON(!prio_tree_right_empty(iter->cur));
306 iter->size_level++;
307 iter->mask = ULONG_MAX;
308 } else {
309 iter->size_level = 1;
310 iter->mask = 1UL << (BITS_PER_LONG - 1);
311 }
312}
313
314static void iter_walk_up(struct prio_tree_iter *iter)
315{
316 if (iter->mask == ULONG_MAX)
317 iter->mask = 1UL;
318 else if (iter->size_level == 1)
319 iter->mask = 1UL;
320 else
321 iter->mask <<= 1;
322 if (iter->size_level)
323 iter->size_level--;
324 if (!iter->size_level && (iter->value & iter->mask))
325 iter->value ^= iter->mask;
326}
327
328/*
329 * Following functions help to enumerate all prio_tree_nodes in the tree that
330 * overlap with the input interval X [radix_index, heap_index]. The enumeration
331 * takes O(log n + m) time where 'log n' is the height of the tree (which is
332 * proportional to # of bits required to represent the maximum heap_index) and
333 * 'm' is the number of prio_tree_nodes that overlap the interval X.
334 */
335
336static struct prio_tree_node *prio_tree_left(struct prio_tree_iter *iter,
337 unsigned long *r_index, unsigned long *h_index)
338{
339 if (prio_tree_left_empty(iter->cur))
340 return NULL;
341
342 get_index(iter->root, iter->cur->left, r_index, h_index);
343
344 if (iter->r_index <= *h_index) {
345 iter->cur = iter->cur->left;
346 iter_walk_down(iter);
347 return iter->cur;
348 }
349
350 return NULL;
351}
352
353static struct prio_tree_node *prio_tree_right(struct prio_tree_iter *iter,
354 unsigned long *r_index, unsigned long *h_index)
355{
356 unsigned long value;
357
358 if (prio_tree_right_empty(iter->cur))
359 return NULL;
360
361 if (iter->size_level)
362 value = iter->value;
363 else
364 value = iter->value | iter->mask;
365
366 if (iter->h_index < value)
367 return NULL;
368
369 get_index(iter->root, iter->cur->right, r_index, h_index);
370
371 if (iter->r_index <= *h_index) {
372 iter->cur = iter->cur->right;
373 iter_walk_down(iter);
374 return iter->cur;
375 }
376
377 return NULL;
378}
379
380static struct prio_tree_node *prio_tree_parent(struct prio_tree_iter *iter)
381{
382 iter->cur = iter->cur->parent;
383 iter_walk_up(iter);
384 return iter->cur;
385}
386
387static inline int overlap(struct prio_tree_iter *iter,
388 unsigned long r_index, unsigned long h_index)
389{
390 return iter->h_index >= r_index && iter->r_index <= h_index;
391}
392
393/*
394 * prio_tree_first:
395 *
396 * Get the first prio_tree_node that overlaps with the interval [radix_index,
397 * heap_index]. Note that always radix_index <= heap_index. We do a pre-order
398 * traversal of the tree.
399 */
400static struct prio_tree_node *prio_tree_first(struct prio_tree_iter *iter)
401{
402 struct prio_tree_root *root;
403 unsigned long r_index, h_index;
404
405 INIT_PRIO_TREE_ITER(iter);
406
407 root = iter->root;
408 if (prio_tree_empty(root))
409 return NULL;
410
411 get_index(root, root->prio_tree_node, &r_index, &h_index);
412
413 if (iter->r_index > h_index)
414 return NULL;
415
416 iter->mask = 1UL << (root->index_bits - 1);
417 iter->cur = root->prio_tree_node;
418
419 while (1) {
420 if (overlap(iter, r_index, h_index))
421 return iter->cur;
422
423 if (prio_tree_left(iter, &r_index, &h_index))
424 continue;
425
426 if (prio_tree_right(iter, &r_index, &h_index))
427 continue;
428
429 break;
430 }
431 return NULL;
432}
433
434/*
435 * prio_tree_next:
436 *
437 * Get the next prio_tree_node that overlaps with the input interval in iter
438 */
439struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter)
440{
441 unsigned long r_index, h_index;
442
443 if (iter->cur == NULL)
444 return prio_tree_first(iter);
445
446repeat:
447 while (prio_tree_left(iter, &r_index, &h_index))
448 if (overlap(iter, r_index, h_index))
449 return iter->cur;
450
451 while (!prio_tree_right(iter, &r_index, &h_index)) {
452 while (!prio_tree_root(iter->cur) &&
453 iter->cur->parent->right == iter->cur)
454 prio_tree_parent(iter);
455
456 if (prio_tree_root(iter->cur))
457 return NULL;
458
459 prio_tree_parent(iter);
460 }
461
462 if (overlap(iter, r_index, h_index))
463 return iter->cur;
464
465 goto repeat;
466}
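
[Note: lib/prio_tree.c can go because the interval tree added earlier in this diff serves the same stabbing-query role on top of the augmented rbtree, with simpler code and a properly balanced structure. The old prio_tree_iter loop maps onto the new API roughly as follows; do_something() is a placeholder:

	struct interval_tree_node *node;

	for (node = interval_tree_iter_first(root, start, last); node;
	     node = interval_tree_iter_next(node, start, last))
		do_something(node);	/* every stored interval overlapping [start, last] */
]
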
diff --git a/lib/rbtree.c b/lib/rbtree.c
index d4175565dc2c..4f56a11d67fa 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -2,7 +2,8 @@
2 Red Black Trees 2 Red Black Trees
3 (C) 1999 Andrea Arcangeli <andrea@suse.de> 3 (C) 1999 Andrea Arcangeli <andrea@suse.de>
4 (C) 2002 David Woodhouse <dwmw2@infradead.org> 4 (C) 2002 David Woodhouse <dwmw2@infradead.org>
5 5 (C) 2012 Michel Lespinasse <walken@google.com>
6
6 This program is free software; you can redistribute it and/or modify 7 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 8 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or 9 the Free Software Foundation; either version 2 of the License, or
@@ -20,339 +21,382 @@
20 linux/lib/rbtree.c 21 linux/lib/rbtree.c
21*/ 22*/
22 23
23#include <linux/rbtree.h> 24#include <linux/rbtree_augmented.h>
24#include <linux/export.h> 25#include <linux/export.h>
25 26
26static void __rb_rotate_left(struct rb_node *node, struct rb_root *root) 27/*
27{ 28 * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
28 struct rb_node *right = node->rb_right; 29 *
29 struct rb_node *parent = rb_parent(node); 30 * 1) A node is either red or black
30 31 * 2) The root is black
31 if ((node->rb_right = right->rb_left)) 32 * 3) All leaves (NULL) are black
32 rb_set_parent(right->rb_left, node); 33 * 4) Both children of every red node are black
33 right->rb_left = node; 34 * 5) Every simple path from root to leaves contains the same number
34 35 * of black nodes.
35 rb_set_parent(right, parent); 36 *
37 * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
38 * consecutive red nodes in a path and every red node is therefore followed by
39 * a black. So if B is the number of black nodes on every simple path (as per
40 * 5), then the longest possible path due to 4 is 2B.
41 *
42 * We shall indicate color with case, where black nodes are uppercase and red
43 * nodes will be lowercase. Unknown color nodes shall be drawn as red within
44 * parentheses and have some accompanying text comment.
45 */
36 46
37 if (parent) 47static inline void rb_set_black(struct rb_node *rb)
38 { 48{
39 if (node == parent->rb_left) 49 rb->__rb_parent_color |= RB_BLACK;
40 parent->rb_left = right;
41 else
42 parent->rb_right = right;
43 }
44 else
45 root->rb_node = right;
46 rb_set_parent(node, right);
47} 50}
48 51
49static void __rb_rotate_right(struct rb_node *node, struct rb_root *root) 52static inline struct rb_node *rb_red_parent(struct rb_node *red)
50{ 53{
51 struct rb_node *left = node->rb_left; 54 return (struct rb_node *)red->__rb_parent_color;
52 struct rb_node *parent = rb_parent(node); 55}
53
54 if ((node->rb_left = left->rb_right))
55 rb_set_parent(left->rb_right, node);
56 left->rb_right = node;
57
58 rb_set_parent(left, parent);
59 56
60 if (parent) 57/*
61 { 58 * Helper function for rotations:
62 if (node == parent->rb_right) 59 * - old's parent and color get assigned to new
63 parent->rb_right = left; 60 * - old gets assigned new as a parent and 'color' as a color.
64 else 61 */
65 parent->rb_left = left; 62static inline void
66 } 63__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
67 else 64 struct rb_root *root, int color)
68 root->rb_node = left; 65{
69 rb_set_parent(node, left); 66 struct rb_node *parent = rb_parent(old);
67 new->__rb_parent_color = old->__rb_parent_color;
68 rb_set_parent_color(old, new, color);
69 __rb_change_child(old, new, parent, root);
70} 70}
71 71
72void rb_insert_color(struct rb_node *node, struct rb_root *root) 72static __always_inline void
73__rb_insert(struct rb_node *node, struct rb_root *root,
74 void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
73{ 75{
74 struct rb_node *parent, *gparent; 76 struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
75 77
76 while ((parent = rb_parent(node)) && rb_is_red(parent)) 78 while (true) {
77 { 79 /*
78 gparent = rb_parent(parent); 80 * Loop invariant: node is red
79 81 *
80 if (parent == gparent->rb_left) 82 * If there is a black parent, we are done.
81 { 83 * Otherwise, take some corrective action as we don't
82 { 84 * want a red root or two consecutive red nodes.
83 register struct rb_node *uncle = gparent->rb_right; 85 */
84 if (uncle && rb_is_red(uncle)) 86 if (!parent) {
85 { 87 rb_set_parent_color(node, NULL, RB_BLACK);
86 rb_set_black(uncle); 88 break;
87 rb_set_black(parent); 89 } else if (rb_is_black(parent))
88 rb_set_red(gparent); 90 break;
89 node = gparent; 91
90 continue; 92 gparent = rb_red_parent(parent);
91 } 93
94 tmp = gparent->rb_right;
95 if (parent != tmp) { /* parent == gparent->rb_left */
96 if (tmp && rb_is_red(tmp)) {
97 /*
98 * Case 1 - color flips
99 *
100 * G g
101 * / \ / \
102 * p u --> P U
103 * / /
104 * n N
105 *
106 * However, since g's parent might be red, and
107 * 4) does not allow this, we need to recurse
108 * at g.
109 */
110 rb_set_parent_color(tmp, gparent, RB_BLACK);
111 rb_set_parent_color(parent, gparent, RB_BLACK);
112 node = gparent;
113 parent = rb_parent(node);
114 rb_set_parent_color(node, parent, RB_RED);
115 continue;
92 } 116 }
93 117
94 if (parent->rb_right == node) 118 tmp = parent->rb_right;
95 { 119 if (node == tmp) {
96 register struct rb_node *tmp; 120 /*
97 __rb_rotate_left(parent, root); 121 * Case 2 - left rotate at parent
98 tmp = parent; 122 *
123 * G G
124 * / \ / \
125 * p U --> n U
126 * \ /
127 * n p
128 *
129 * This still leaves us in violation of 4), the
130 * continuation into Case 3 will fix that.
131 */
132 parent->rb_right = tmp = node->rb_left;
133 node->rb_left = parent;
134 if (tmp)
135 rb_set_parent_color(tmp, parent,
136 RB_BLACK);
137 rb_set_parent_color(parent, node, RB_RED);
138 augment_rotate(parent, node);
99 parent = node; 139 parent = node;
100 node = tmp; 140 tmp = node->rb_right;
101 } 141 }
102 142
103 rb_set_black(parent); 143 /*
104 rb_set_red(gparent); 144 * Case 3 - right rotate at gparent
105 __rb_rotate_right(gparent, root); 145 *
146 * G P
147 * / \ / \
148 * p U --> n g
149 * / \
150 * n U
151 */
152 gparent->rb_left = tmp; /* == parent->rb_right */
153 parent->rb_right = gparent;
154 if (tmp)
155 rb_set_parent_color(tmp, gparent, RB_BLACK);
156 __rb_rotate_set_parents(gparent, parent, root, RB_RED);
157 augment_rotate(gparent, parent);
158 break;
106 } else { 159 } else {
107 { 160 tmp = gparent->rb_left;
108 register struct rb_node *uncle = gparent->rb_left; 161 if (tmp && rb_is_red(tmp)) {
109 if (uncle && rb_is_red(uncle)) 162 /* Case 1 - color flips */
110 { 163 rb_set_parent_color(tmp, gparent, RB_BLACK);
111 rb_set_black(uncle); 164 rb_set_parent_color(parent, gparent, RB_BLACK);
112 rb_set_black(parent); 165 node = gparent;
113 rb_set_red(gparent); 166 parent = rb_parent(node);
114 node = gparent; 167 rb_set_parent_color(node, parent, RB_RED);
115 continue; 168 continue;
116 }
117 } 169 }
118 170
119 if (parent->rb_left == node) 171 tmp = parent->rb_left;
120 { 172 if (node == tmp) {
121 register struct rb_node *tmp; 173 /* Case 2 - right rotate at parent */
122 __rb_rotate_right(parent, root); 174 parent->rb_left = tmp = node->rb_right;
123 tmp = parent; 175 node->rb_right = parent;
176 if (tmp)
177 rb_set_parent_color(tmp, parent,
178 RB_BLACK);
179 rb_set_parent_color(parent, node, RB_RED);
180 augment_rotate(parent, node);
124 parent = node; 181 parent = node;
125 node = tmp; 182 tmp = node->rb_left;
126 } 183 }
127 184
128 rb_set_black(parent); 185 /* Case 3 - left rotate at gparent */
129 rb_set_red(gparent); 186 gparent->rb_right = tmp; /* == parent->rb_left */
130 __rb_rotate_left(gparent, root); 187 parent->rb_left = gparent;
188 if (tmp)
189 rb_set_parent_color(tmp, gparent, RB_BLACK);
190 __rb_rotate_set_parents(gparent, parent, root, RB_RED);
191 augment_rotate(gparent, parent);
192 break;
131 } 193 }
132 } 194 }
133
134 rb_set_black(root->rb_node);
135} 195}
136EXPORT_SYMBOL(rb_insert_color);
137 196
138static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, 197__always_inline void
139 struct rb_root *root) 198__rb_erase_color(struct rb_node *parent, struct rb_root *root,
199 void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
140{ 200{
141 struct rb_node *other; 201 struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
142 202
143 while ((!node || rb_is_black(node)) && node != root->rb_node) 203 while (true) {
144 { 204 /*
145 if (parent->rb_left == node) 205 * Loop invariants:
146 { 206 * - node is black (or NULL on first iteration)
147 other = parent->rb_right; 207 * - node is not the root (parent is not NULL)
148 if (rb_is_red(other)) 208 * - All leaf paths going through parent and node have a
149 { 209 * black node count that is 1 lower than other leaf paths.
150 rb_set_black(other); 210 */
151 rb_set_red(parent); 211 sibling = parent->rb_right;
152 __rb_rotate_left(parent, root); 212 if (node != sibling) { /* node == parent->rb_left */
153 other = parent->rb_right; 213 if (rb_is_red(sibling)) {
214 /*
215 * Case 1 - left rotate at parent
216 *
217 * P S
218 * / \ / \
219 * N s --> p Sr
220 * / \ / \
221 * Sl Sr N Sl
222 */
223 parent->rb_right = tmp1 = sibling->rb_left;
224 sibling->rb_left = parent;
225 rb_set_parent_color(tmp1, parent, RB_BLACK);
226 __rb_rotate_set_parents(parent, sibling, root,
227 RB_RED);
228 augment_rotate(parent, sibling);
229 sibling = tmp1;
154 } 230 }
155 if ((!other->rb_left || rb_is_black(other->rb_left)) && 231 tmp1 = sibling->rb_right;
156 (!other->rb_right || rb_is_black(other->rb_right))) 232 if (!tmp1 || rb_is_black(tmp1)) {
157 { 233 tmp2 = sibling->rb_left;
158 rb_set_red(other); 234 if (!tmp2 || rb_is_black(tmp2)) {
159 node = parent; 235 /*
160 parent = rb_parent(node); 236 * Case 2 - sibling color flip
161 } 237 * (p could be either color here)
162 else 238 *
163 { 239 * (p) (p)
164 if (!other->rb_right || rb_is_black(other->rb_right)) 240 * / \ / \
165 { 241 * N S --> N s
166 rb_set_black(other->rb_left); 242 * / \ / \
167 rb_set_red(other); 243 * Sl Sr Sl Sr
168 __rb_rotate_right(other, root); 244 *
169 other = parent->rb_right; 245 * This leaves us violating 5) which
246 * can be fixed by flipping p to black
247 * if it was red, or by recursing at p.
248 * p is red when coming from Case 1.
249 */
250 rb_set_parent_color(sibling, parent,
251 RB_RED);
252 if (rb_is_red(parent))
253 rb_set_black(parent);
254 else {
255 node = parent;
256 parent = rb_parent(node);
257 if (parent)
258 continue;
259 }
260 break;
170 } 261 }
171 rb_set_color(other, rb_color(parent)); 262 /*
172 rb_set_black(parent); 263 * Case 3 - right rotate at sibling
173 rb_set_black(other->rb_right); 264 * (p could be either color here)
174 __rb_rotate_left(parent, root); 265 *
175 node = root->rb_node; 266 * (p) (p)
176 break; 267 * / \ / \
177 } 268 * N S --> N Sl
178 } 269 * / \ \
179 else 270 * sl Sr s
180 { 271 * \
181 other = parent->rb_left; 272 * Sr
182 if (rb_is_red(other)) 273 */
183 { 274 sibling->rb_left = tmp1 = tmp2->rb_right;
184 rb_set_black(other); 275 tmp2->rb_right = sibling;
185 rb_set_red(parent); 276 parent->rb_right = tmp2;
186 __rb_rotate_right(parent, root); 277 if (tmp1)
187 other = parent->rb_left; 278 rb_set_parent_color(tmp1, sibling,
279 RB_BLACK);
280 augment_rotate(sibling, tmp2);
281 tmp1 = sibling;
282 sibling = tmp2;
188 } 283 }
189 if ((!other->rb_left || rb_is_black(other->rb_left)) && 284 /*
190 (!other->rb_right || rb_is_black(other->rb_right))) 285 * Case 4 - left rotate at parent + color flips
191 { 286 * (p and sl could be either color here.
192 rb_set_red(other); 287 * After rotation, p becomes black, s acquires
193 node = parent; 288 * p's color, and sl keeps its color)
194 parent = rb_parent(node); 289 *
290 * (p) (s)
291 * / \ / \
292 * N S --> P Sr
293 * / \ / \
294 * (sl) sr N (sl)
295 */
296 parent->rb_right = tmp2 = sibling->rb_left;
297 sibling->rb_left = parent;
298 rb_set_parent_color(tmp1, sibling, RB_BLACK);
299 if (tmp2)
300 rb_set_parent(tmp2, parent);
301 __rb_rotate_set_parents(parent, sibling, root,
302 RB_BLACK);
303 augment_rotate(parent, sibling);
304 break;
305 } else {
306 sibling = parent->rb_left;
307 if (rb_is_red(sibling)) {
308 /* Case 1 - right rotate at parent */
309 parent->rb_left = tmp1 = sibling->rb_right;
310 sibling->rb_right = parent;
311 rb_set_parent_color(tmp1, parent, RB_BLACK);
312 __rb_rotate_set_parents(parent, sibling, root,
313 RB_RED);
314 augment_rotate(parent, sibling);
315 sibling = tmp1;
195 } 316 }
196 else 317 tmp1 = sibling->rb_left;
197 { 318 if (!tmp1 || rb_is_black(tmp1)) {
198 if (!other->rb_left || rb_is_black(other->rb_left)) 319 tmp2 = sibling->rb_right;
199 { 320 if (!tmp2 || rb_is_black(tmp2)) {
200 rb_set_black(other->rb_right); 321 /* Case 2 - sibling color flip */
201 rb_set_red(other); 322 rb_set_parent_color(sibling, parent,
202 __rb_rotate_left(other, root); 323 RB_RED);
203 other = parent->rb_left; 324 if (rb_is_red(parent))
325 rb_set_black(parent);
326 else {
327 node = parent;
328 parent = rb_parent(node);
329 if (parent)
330 continue;
331 }
332 break;
204 } 333 }
205 rb_set_color(other, rb_color(parent)); 334 /* Case 3 - right rotate at sibling */
206 rb_set_black(parent); 335 sibling->rb_right = tmp1 = tmp2->rb_left;
207 rb_set_black(other->rb_left); 336 tmp2->rb_left = sibling;
208 __rb_rotate_right(parent, root); 337 parent->rb_left = tmp2;
209 node = root->rb_node; 338 if (tmp1)
210 break; 339 rb_set_parent_color(tmp1, sibling,
340 RB_BLACK);
341 augment_rotate(sibling, tmp2);
342 tmp1 = sibling;
343 sibling = tmp2;
211 } 344 }
345 /* Case 4 - left rotate at parent + color flips */
346 parent->rb_left = tmp2 = sibling->rb_right;
347 sibling->rb_right = parent;
348 rb_set_parent_color(tmp1, sibling, RB_BLACK);
349 if (tmp2)
350 rb_set_parent(tmp2, parent);
351 __rb_rotate_set_parents(parent, sibling, root,
352 RB_BLACK);
353 augment_rotate(parent, sibling);
354 break;
212 } 355 }
213 } 356 }
214 if (node)
215 rb_set_black(node);
216} 357}
358EXPORT_SYMBOL(__rb_erase_color);
217 359
218void rb_erase(struct rb_node *node, struct rb_root *root) 360/*
219{ 361 * Non-augmented rbtree manipulation functions.
220 struct rb_node *child, *parent; 362 *
221 int color; 363 * We use dummy augmented callbacks here, and have the compiler optimize them
222 364 * out of the rb_insert_color() and rb_erase() function definitions.
223 if (!node->rb_left) 365 */
224 child = node->rb_right;
225 else if (!node->rb_right)
226 child = node->rb_left;
227 else
228 {
229 struct rb_node *old = node, *left;
230
231 node = node->rb_right;
232 while ((left = node->rb_left) != NULL)
233 node = left;
234
235 if (rb_parent(old)) {
236 if (rb_parent(old)->rb_left == old)
237 rb_parent(old)->rb_left = node;
238 else
239 rb_parent(old)->rb_right = node;
240 } else
241 root->rb_node = node;
242
243 child = node->rb_right;
244 parent = rb_parent(node);
245 color = rb_color(node);
246
247 if (parent == old) {
248 parent = node;
249 } else {
250 if (child)
251 rb_set_parent(child, parent);
252 parent->rb_left = child;
253
254 node->rb_right = old->rb_right;
255 rb_set_parent(old->rb_right, node);
256 }
257
258 node->rb_parent_color = old->rb_parent_color;
259 node->rb_left = old->rb_left;
260 rb_set_parent(old->rb_left, node);
261 366
262 goto color; 367static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
263 } 368static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
369static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
264 370
265 parent = rb_parent(node); 371static const struct rb_augment_callbacks dummy_callbacks = {
266 color = rb_color(node); 372 dummy_propagate, dummy_copy, dummy_rotate
267 373};
268 if (child)
269 rb_set_parent(child, parent);
270 if (parent)
271 {
272 if (parent->rb_left == node)
273 parent->rb_left = child;
274 else
275 parent->rb_right = child;
276 }
277 else
278 root->rb_node = child;
279 374
280 color: 375void rb_insert_color(struct rb_node *node, struct rb_root *root)
281 if (color == RB_BLACK)
282 __rb_erase_color(child, parent, root);
283}
284EXPORT_SYMBOL(rb_erase);
285
286static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data)
287{ 376{
288 struct rb_node *parent; 377 __rb_insert(node, root, dummy_rotate);
289
290up:
291 func(node, data);
292 parent = rb_parent(node);
293 if (!parent)
294 return;
295
296 if (node == parent->rb_left && parent->rb_right)
297 func(parent->rb_right, data);
298 else if (parent->rb_left)
299 func(parent->rb_left, data);
300
301 node = parent;
302 goto up;
303} 378}
379EXPORT_SYMBOL(rb_insert_color);
304 380
305/* 381void rb_erase(struct rb_node *node, struct rb_root *root)
306 * after inserting @node into the tree, update the tree to account for
307 * both the new entry and any damage done by rebalance
308 */
309void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
310{ 382{
311 if (node->rb_left) 383 rb_erase_augmented(node, root, &dummy_callbacks);
312 node = node->rb_left;
313 else if (node->rb_right)
314 node = node->rb_right;
315
316 rb_augment_path(node, func, data);
317} 384}
318EXPORT_SYMBOL(rb_augment_insert); 385EXPORT_SYMBOL(rb_erase);
319 386
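For reference, a minimal sketch of the caller-side pattern the non-augmented API expects; mytree-style names, struct mynode and its key field are hypothetical (the new rbtree_test.c below exercises the same pattern). rb_insert_color() now funnels through __rb_insert() with dummy_rotate, which the compiler eliminates:

        struct mynode {
                struct rb_node rb;
                unsigned long key;
        };

        static void mynode_insert(struct mynode *new, struct rb_root *root)
        {
                struct rb_node **link = &root->rb_node, *parent = NULL;

                /* descend to the insertion point, remembering the parent */
                while (*link) {
                        parent = *link;
                        if (new->key < rb_entry(parent, struct mynode, rb)->key)
                                link = &parent->rb_left;
                        else
                                link = &parent->rb_right;
                }
                rb_link_node(&new->rb, parent, link);
                rb_insert_color(&new->rb, root);        /* rebalance */
        }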
320/* 387/*
321 * before removing the node, find the deepest node on the rebalance path 388 * Augmented rbtree manipulation functions.
322 * that will still be there after @node gets removed 389 *
390 * This instantiates the same __always_inline functions as in the non-augmented
391 * case, but this time with user-defined callbacks.
323 */ 392 */
324struct rb_node *rb_augment_erase_begin(struct rb_node *node)
325{
326 struct rb_node *deepest;
327
328 if (!node->rb_right && !node->rb_left)
329 deepest = rb_parent(node);
330 else if (!node->rb_right)
331 deepest = node->rb_left;
332 else if (!node->rb_left)
333 deepest = node->rb_right;
334 else {
335 deepest = rb_next(node);
336 if (deepest->rb_right)
337 deepest = deepest->rb_right;
338 else if (rb_parent(deepest) != node)
339 deepest = rb_parent(deepest);
340 }
341
342 return deepest;
343}
344EXPORT_SYMBOL(rb_augment_erase_begin);
345 393
346/* 394void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
347 * after removal, update the tree to account for the removed entry 395 void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
348 * and any rebalance damage.
349 */
350void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
351{ 396{
352 if (node) 397 __rb_insert(node, root, augment_rotate);
353 rb_augment_path(node, func, data);
354} 398}
355EXPORT_SYMBOL(rb_augment_erase_end); 399EXPORT_SYMBOL(__rb_insert_augmented);
356 400
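Where the RB_DECLARE_CALLBACKS helper (used by the new rbtree_test.c below) does not fit, the callbacks can be spelled out by hand. A hedged sketch for a hypothetical subtree-size augmentation; struct sznode and the sz_* names are illustrative, and the semantics mirror what the macro would generate:

        struct sznode {
                struct rb_node rb;
                int size;               /* 1 + sizes of both subtrees */
        };

        static int sz_compute(struct sznode *node)
        {
                int size = 1;

                if (node->rb.rb_left)
                        size += rb_entry(node->rb.rb_left, struct sznode, rb)->size;
                if (node->rb.rb_right)
                        size += rb_entry(node->rb.rb_right, struct sznode, rb)->size;
                return size;
        }

        static void sz_propagate(struct rb_node *rb, struct rb_node *stop)
        {
                while (rb != stop) {
                        struct sznode *node = rb_entry(rb, struct sznode, rb);
                        int size = sz_compute(node);

                        if (node->size == size)
                                break;  /* nothing above can change either */
                        node->size = size;
                        rb = rb_parent(rb);
                }
        }

        static void sz_copy(struct rb_node *old, struct rb_node *new)
        {
                rb_entry(new, struct sznode, rb)->size =
                        rb_entry(old, struct sznode, rb)->size;
        }

        static void sz_rotate(struct rb_node *old, struct rb_node *new)
        {
                sz_copy(old, new);
                /* old is now new's child; recompute it from its subtrees */
                rb_entry(old, struct sznode, rb)->size =
                        sz_compute(rb_entry(old, struct sznode, rb));
        }

        static const struct rb_augment_callbacks sz_callbacks = {
                sz_propagate, sz_copy, sz_rotate
        };

Insertions then go through rb_insert_augmented(&node->rb, root, &sz_callbacks) and removals through rb_erase_augmented(), exactly as rb_insert_color()/rb_erase() do with the dummy callbacks above.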
357/* 401/*
358 * This function returns the first node (in sort order) of the tree. 402 * This function returns the first node (in sort order) of the tree.
@@ -387,11 +431,13 @@ struct rb_node *rb_next(const struct rb_node *node)
387{ 431{
388 struct rb_node *parent; 432 struct rb_node *parent;
389 433
390 if (rb_parent(node) == node) 434 if (RB_EMPTY_NODE(node))
391 return NULL; 435 return NULL;
392 436
393 /* If we have a right-hand child, go down and then left as far 437 /*
394 as we can. */ 438 * If we have a right-hand child, go down and then left as far
439 * as we can.
440 */
395 if (node->rb_right) { 441 if (node->rb_right) {
396 node = node->rb_right; 442 node = node->rb_right;
397 while (node->rb_left) 443 while (node->rb_left)
@@ -399,12 +445,13 @@ struct rb_node *rb_next(const struct rb_node *node)
399 return (struct rb_node *)node; 445 return (struct rb_node *)node;
400 } 446 }
401 447
402 /* No right-hand children. Everything down and left is 448 /*
403 smaller than us, so any 'next' node must be in the general 449 * No right-hand children. Everything down and left is smaller than us,
404 direction of our parent. Go up the tree; any time the 450 * so any 'next' node must be in the general direction of our parent.
405 ancestor is a right-hand child of its parent, keep going 451 * Go up the tree; any time the ancestor is a right-hand child of its
406 up. First time it's a left-hand child of its parent, said 452 * parent, keep going up. First time it's a left-hand child of its
407 parent is our 'next' node. */ 453 * parent, said parent is our 'next' node.
454 */
408 while ((parent = rb_parent(node)) && node == parent->rb_right) 455 while ((parent = rb_parent(node)) && node == parent->rb_right)
409 node = parent; 456 node = parent;
410 457
@@ -416,11 +463,13 @@ struct rb_node *rb_prev(const struct rb_node *node)
416{ 463{
417 struct rb_node *parent; 464 struct rb_node *parent;
418 465
419 if (rb_parent(node) == node) 466 if (RB_EMPTY_NODE(node))
420 return NULL; 467 return NULL;
421 468
422 /* If we have a left-hand child, go down and then right as far 469 /*
423 as we can. */ 470 * If we have a left-hand child, go down and then right as far
471 * as we can.
472 */
424 if (node->rb_left) { 473 if (node->rb_left) {
425 node = node->rb_left; 474 node = node->rb_left;
426 while (node->rb_right) 475 while (node->rb_right)
@@ -428,8 +477,10 @@ struct rb_node *rb_prev(const struct rb_node *node)
428 return (struct rb_node *)node; 477 return (struct rb_node *)node;
429 } 478 }
430 479
431 /* No left-hand children. Go up till we find an ancestor which 480 /*
432 is a right-hand child of its parent */ 481 * No left-hand children. Go up till we find an ancestor which
482 * is a right-hand child of its parent.
483 */
433 while ((parent = rb_parent(node)) && node == parent->rb_left) 484 while ((parent = rb_parent(node)) && node == parent->rb_left)
434 node = parent; 485 node = parent;
435 486
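Taken together, rb_first() and rb_next() give the usual in-order walk; with the RB_EMPTY_NODE() check, starting from a node that was never linked into a tree now cleanly yields NULL. A minimal sketch reusing the hypothetical mynode type from earlier:

        static void mytree_dump(struct rb_root *root)
        {
                struct rb_node *rb;

                for (rb = rb_first(root); rb; rb = rb_next(rb)) {
                        struct mynode *node = rb_entry(rb, struct mynode, rb);

                        pr_info("key=%lu\n", node->key);
                }
        }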
@@ -443,14 +494,7 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
443 struct rb_node *parent = rb_parent(victim); 494 struct rb_node *parent = rb_parent(victim);
444 495
445 /* Set the surrounding nodes to point to the replacement */ 496 /* Set the surrounding nodes to point to the replacement */
446 if (parent) { 497 __rb_change_child(victim, new, parent, root);
447 if (victim == parent->rb_left)
448 parent->rb_left = new;
449 else
450 parent->rb_right = new;
451 } else {
452 root->rb_node = new;
453 }
454 if (victim->rb_left) 498 if (victim->rb_left)
455 rb_set_parent(victim->rb_left, new); 499 rb_set_parent(victim->rb_left, new);
456 if (victim->rb_right) 500 if (victim->rb_right)
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
new file mode 100644
index 000000000000..268b23951fec
--- /dev/null
+++ b/lib/rbtree_test.c
@@ -0,0 +1,234 @@
1#include <linux/module.h>
2#include <linux/rbtree_augmented.h>
3#include <linux/random.h>
4#include <asm/timex.h>
5
6#define NODES 100
7#define PERF_LOOPS 100000
8#define CHECK_LOOPS 100
9
10struct test_node {
11 struct rb_node rb;
12 u32 key;
13
14 /* following fields used for testing augmented rbtree functionality */
15 u32 val;
16 u32 augmented;
17};
18
19static struct rb_root root = RB_ROOT;
20static struct test_node nodes[NODES];
21
22static struct rnd_state rnd;
23
24static void insert(struct test_node *node, struct rb_root *root)
25{
26 struct rb_node **new = &root->rb_node, *parent = NULL;
27 u32 key = node->key;
28
29 while (*new) {
30 parent = *new;
31 if (key < rb_entry(parent, struct test_node, rb)->key)
32 new = &parent->rb_left;
33 else
34 new = &parent->rb_right;
35 }
36
37 rb_link_node(&node->rb, parent, new);
38 rb_insert_color(&node->rb, root);
39}
40
41static inline void erase(struct test_node *node, struct rb_root *root)
42{
43 rb_erase(&node->rb, root);
44}
45
46static inline u32 augment_recompute(struct test_node *node)
47{
48 u32 max = node->val, child_augmented;
49 if (node->rb.rb_left) {
50 child_augmented = rb_entry(node->rb.rb_left, struct test_node,
51 rb)->augmented;
52 if (max < child_augmented)
53 max = child_augmented;
54 }
55 if (node->rb.rb_right) {
56 child_augmented = rb_entry(node->rb.rb_right, struct test_node,
57 rb)->augmented;
58 if (max < child_augmented)
59 max = child_augmented;
60 }
61 return max;
62}
63
64RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb,
65 u32, augmented, augment_recompute)
66
67static void insert_augmented(struct test_node *node, struct rb_root *root)
68{
69 struct rb_node **new = &root->rb_node, *rb_parent = NULL;
70 u32 key = node->key;
71 u32 val = node->val;
72 struct test_node *parent;
73
74 while (*new) {
75 rb_parent = *new;
76 parent = rb_entry(rb_parent, struct test_node, rb);
77 if (parent->augmented < val)
78 parent->augmented = val;
79 if (key < parent->key)
80 new = &parent->rb.rb_left;
81 else
82 new = &parent->rb.rb_right;
83 }
84
85 node->augmented = val;
86 rb_link_node(&node->rb, rb_parent, new);
87 rb_insert_augmented(&node->rb, root, &augment_callbacks);
88}
89
90static void erase_augmented(struct test_node *node, struct rb_root *root)
91{
92 rb_erase_augmented(&node->rb, root, &augment_callbacks);
93}
94
95static void init(void)
96{
97 int i;
98 for (i = 0; i < NODES; i++) {
99 nodes[i].key = prandom32(&rnd);
100 nodes[i].val = prandom32(&rnd);
101 }
102}
103
104static bool is_red(struct rb_node *rb)
105{
106 return !(rb->__rb_parent_color & 1);
107}
108
109static int black_path_count(struct rb_node *rb)
110{
111 int count;
112 for (count = 0; rb; rb = rb_parent(rb))
113 count += !is_red(rb);
114 return count;
115}
116
117static void check(int nr_nodes)
118{
119 struct rb_node *rb;
120 int count = 0;
121 int blacks;
122 u32 prev_key = 0;
123
124 for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
125 struct test_node *node = rb_entry(rb, struct test_node, rb);
126 WARN_ON_ONCE(node->key < prev_key);
127 WARN_ON_ONCE(is_red(rb) &&
128 (!rb_parent(rb) || is_red(rb_parent(rb))));
129 if (!count)
130 blacks = black_path_count(rb);
131 else
132 WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) &&
133 blacks != black_path_count(rb));
134 prev_key = node->key;
135 count++;
136 }
137 WARN_ON_ONCE(count != nr_nodes);
138}
139
140static void check_augmented(int nr_nodes)
141{
142 struct rb_node *rb;
143
144 check(nr_nodes);
145 for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
146 struct test_node *node = rb_entry(rb, struct test_node, rb);
147 WARN_ON_ONCE(node->augmented != augment_recompute(node));
148 }
149}
150
151static int rbtree_test_init(void)
152{
153 int i, j;
154 cycles_t time1, time2, time;
155
156 printk(KERN_ALERT "rbtree testing");
157
158 prandom32_seed(&rnd, 3141592653589793238ULL);
159 init();
160
161 time1 = get_cycles();
162
163 for (i = 0; i < PERF_LOOPS; i++) {
164 for (j = 0; j < NODES; j++)
165 insert(nodes + j, &root);
166 for (j = 0; j < NODES; j++)
167 erase(nodes + j, &root);
168 }
169
170 time2 = get_cycles();
171 time = time2 - time1;
172
173 time = div_u64(time, PERF_LOOPS);
174 printk(" -> %llu cycles\n", (unsigned long long)time);
175
176 for (i = 0; i < CHECK_LOOPS; i++) {
177 init();
178 for (j = 0; j < NODES; j++) {
179 check(j);
180 insert(nodes + j, &root);
181 }
182 for (j = 0; j < NODES; j++) {
183 check(NODES - j);
184 erase(nodes + j, &root);
185 }
186 check(0);
187 }
188
189 printk(KERN_ALERT "augmented rbtree testing");
190
191 init();
192
193 time1 = get_cycles();
194
195 for (i = 0; i < PERF_LOOPS; i++) {
196 for (j = 0; j < NODES; j++)
197 insert_augmented(nodes + j, &root);
198 for (j = 0; j < NODES; j++)
199 erase_augmented(nodes + j, &root);
200 }
201
202 time2 = get_cycles();
203 time = time2 - time1;
204
205 time = div_u64(time, PERF_LOOPS);
206 printk(" -> %llu cycles\n", (unsigned long long)time);
207
208 for (i = 0; i < CHECK_LOOPS; i++) {
209 init();
210 for (j = 0; j < NODES; j++) {
211 check_augmented(j);
212 insert_augmented(nodes + j, &root);
213 }
214 for (j = 0; j < NODES; j++) {
215 check_augmented(NODES - j);
216 erase_augmented(nodes + j, &root);
217 }
218 check_augmented(0);
219 }
220
 221        return -EAGAIN; /* failing init unloads the module immediately */
222}
223
224static void rbtree_test_exit(void)
225{
226 printk(KERN_ALERT "test exit\n");
227}
228
229module_init(rbtree_test_init)
230module_exit(rbtree_test_exit)
231
232MODULE_LICENSE("GPL");
233MODULE_AUTHOR("Michel Lespinasse");
234MODULE_DESCRIPTION("Red Black Tree test");
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index fadae774a20c..3675452b23ca 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -39,6 +39,25 @@ struct scatterlist *sg_next(struct scatterlist *sg)
39EXPORT_SYMBOL(sg_next); 39EXPORT_SYMBOL(sg_next);
40 40
41/** 41/**
42 * sg_nents - return total count of entries in scatterlist
43 * @sg: The scatterlist
44 *
45 * Description:
  46 * Returns the total number of entries in @sg, taking chaining
  47 * into account.
48 *
49 **/
50int sg_nents(struct scatterlist *sg)
51{
52 int nents;
53 for (nents = 0; sg; sg = sg_next(sg))
54 nents++;
55 return nents;
56}
57EXPORT_SYMBOL(sg_nents);
58
59
60/**
42 * sg_last - return the last scatterlist entry in a list 61 * sg_last - return the last scatterlist entry in a list
43 * @sgl: First entry in the scatterlist 62 * @sgl: First entry in the scatterlist
44 * @nents: Number of entries in the scatterlist 63 * @nents: Number of entries in the scatterlist
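As a quick illustration of the new helper, a hypothetical mapping path might count a chained list before handing it to dma_map_sg() (a sketch; dev and sgl are assumed):

        static int map_chained_sgl(struct device *dev, struct scatterlist *sgl)
        {
                int nents = sg_nents(sgl);      /* walks chained tables too */
                int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

                return mapped ? mapped : -ENOMEM;
        }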
@@ -404,14 +423,13 @@ EXPORT_SYMBOL(sg_miter_start);
404 * @miter: sg mapping iter to proceed 423 * @miter: sg mapping iter to proceed
405 * 424 *
406 * Description: 425 * Description:
407 * Proceeds @miter@ to the next mapping. @miter@ should have been 426 * Proceeds @miter to the next mapping. @miter should have been started
408 * started using sg_miter_start(). On successful return, 427 * using sg_miter_start(). On successful return, @miter->page,
409 * @miter@->page, @miter@->addr and @miter@->length point to the 428 * @miter->addr and @miter->length point to the current mapping.
410 * current mapping.
411 * 429 *
412 * Context: 430 * Context:
413 * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till 431 * Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
414 * @miter@ is stopped. May sleep if !SG_MITER_ATOMIC. 432 * till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
415 * 433 *
416 * Returns: 434 * Returns:
417 * true if @miter contains the next mapping. false if end of sg 435 * true if @miter contains the next mapping. false if end of sg
@@ -465,7 +483,8 @@ EXPORT_SYMBOL(sg_miter_next);
465 * resources (kmap) need to be released during iteration. 483 * resources (kmap) need to be released during iteration.
466 * 484 *
467 * Context: 485 * Context:
468 * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise. 486 * Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
487 * otherwise.
469 */ 488 */
470void sg_miter_stop(struct sg_mapping_iter *miter) 489void sg_miter_stop(struct sg_mapping_iter *miter)
471{ 490{
@@ -479,7 +498,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
479 flush_kernel_dcache_page(miter->page); 498 flush_kernel_dcache_page(miter->page);
480 499
481 if (miter->__flags & SG_MITER_ATOMIC) { 500 if (miter->__flags & SG_MITER_ATOMIC) {
482 WARN_ON(!irqs_disabled()); 501 WARN_ON_ONCE(preemptible());
483 kunmap_atomic(miter->addr); 502 kunmap_atomic(miter->addr);
484 } else 503 } else
485 kunmap(miter->page); 504 kunmap(miter->page);
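The relaxed wording matches kmap_atomic(), which only requires preemption to be disabled, not IRQs. The canonical iteration pattern looks like this (a sketch; sgl, nents and buf are hypothetical):

        static void copy_from_sgl(struct scatterlist *sgl, unsigned int nents,
                                  u8 *buf)
        {
                struct sg_mapping_iter miter;

                sg_miter_start(&miter, sgl, nents,
                               SG_MITER_ATOMIC | SG_MITER_FROM_SG);
                while (sg_miter_next(&miter)) {
                        /* miter.addr/miter.length describe the current mapping;
                         * preemption stays disabled until sg_miter_stop() */
                        memcpy(buf, miter.addr, miter.length);
                        buf += miter.length;
                }
                sg_miter_stop(&miter);
        }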
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index eb10578ae055..0374a596cffa 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -107,23 +107,27 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
107{ 107{
108 u64 i; 108 u64 i;
109 u64 loops = loops_per_jiffy * HZ; 109 u64 loops = loops_per_jiffy * HZ;
110 int print_once = 1;
111 110
112 for (;;) { 111 for (i = 0; i < loops; i++) {
113 for (i = 0; i < loops; i++) { 112 if (arch_spin_trylock(&lock->raw_lock))
114 if (arch_spin_trylock(&lock->raw_lock)) 113 return;
115 return; 114 __delay(1);
116 __delay(1); 115 }
117 } 116 /* lockup suspected: */
118 /* lockup suspected: */ 117 spin_dump(lock, "lockup suspected");
119 if (print_once) {
120 print_once = 0;
121 spin_dump(lock, "lockup suspected");
122#ifdef CONFIG_SMP 118#ifdef CONFIG_SMP
123 trigger_all_cpu_backtrace(); 119 trigger_all_cpu_backtrace();
124#endif 120#endif
125 } 121
126 } 122 /*
123 * The trylock above was causing a livelock. Give the lower level arch
124 * specific lock code a chance to acquire the lock. We have already
125 * printed a warning/backtrace at this point. The non-debug arch
126 * specific code might actually succeed in acquiring the lock. If it is
127 * not successful, the end-result is the same - there is no forward
128 * progress.
129 */
130 arch_spin_lock(&lock->raw_lock);
127} 131}
128 132
129void do_raw_spin_lock(raw_spinlock_t *lock) 133void do_raw_spin_lock(raw_spinlock_t *lock)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 45bc1f83a5ad..f114bf6a8e13 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -170,7 +170,7 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
170 * Statically reserve bounce buffer space and initialize bounce buffer data 170 * Statically reserve bounce buffer space and initialize bounce buffer data
171 * structures for the software IO TLB used to implement the DMA API. 171 * structures for the software IO TLB used to implement the DMA API.
172 */ 172 */
173void __init 173static void __init
174swiotlb_init_with_default_size(size_t default_size, int verbose) 174swiotlb_init_with_default_size(size_t default_size, int verbose)
175{ 175{
176 unsigned long bytes; 176 unsigned long bytes;
@@ -206,8 +206,9 @@ swiotlb_init(int verbose)
206int 206int
207swiotlb_late_init_with_default_size(size_t default_size) 207swiotlb_late_init_with_default_size(size_t default_size)
208{ 208{
209 unsigned long i, bytes, req_nslabs = io_tlb_nslabs; 209 unsigned long bytes, req_nslabs = io_tlb_nslabs;
210 unsigned int order; 210 unsigned int order;
211 int rc = 0;
211 212
212 if (!io_tlb_nslabs) { 213 if (!io_tlb_nslabs) {
213 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); 214 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
@@ -229,16 +230,32 @@ swiotlb_late_init_with_default_size(size_t default_size)
229 order--; 230 order--;
230 } 231 }
231 232
232 if (!io_tlb_start) 233 if (!io_tlb_start) {
233 goto cleanup1; 234 io_tlb_nslabs = req_nslabs;
234 235 return -ENOMEM;
236 }
235 if (order != get_order(bytes)) { 237 if (order != get_order(bytes)) {
236 printk(KERN_WARNING "Warning: only able to allocate %ld MB " 238 printk(KERN_WARNING "Warning: only able to allocate %ld MB "
237 "for software IO TLB\n", (PAGE_SIZE << order) >> 20); 239 "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
238 io_tlb_nslabs = SLABS_PER_PAGE << order; 240 io_tlb_nslabs = SLABS_PER_PAGE << order;
239 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
240 } 241 }
242 rc = swiotlb_late_init_with_tbl(io_tlb_start, io_tlb_nslabs);
243 if (rc)
244 free_pages((unsigned long)io_tlb_start, order);
245 return rc;
246}
247
248int
249swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
250{
251 unsigned long i, bytes;
252
253 bytes = nslabs << IO_TLB_SHIFT;
254
255 io_tlb_nslabs = nslabs;
256 io_tlb_start = tlb;
241 io_tlb_end = io_tlb_start + bytes; 257 io_tlb_end = io_tlb_start + bytes;
258
242 memset(io_tlb_start, 0, bytes); 259 memset(io_tlb_start, 0, bytes);
243 260
244 /* 261 /*
@@ -288,10 +305,8 @@ cleanup3:
288 io_tlb_list = NULL; 305 io_tlb_list = NULL;
289cleanup2: 306cleanup2:
290 io_tlb_end = NULL; 307 io_tlb_end = NULL;
291 free_pages((unsigned long)io_tlb_start, order);
292 io_tlb_start = NULL; 308 io_tlb_start = NULL;
293cleanup1: 309 io_tlb_nslabs = 0;
294 io_tlb_nslabs = req_nslabs;
295 return -ENOMEM; 310 return -ENOMEM;
296} 311}
297 312
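Splitting swiotlb_late_init_with_tbl() out lets a caller supply its own bounce buffer; on failure the buffer is left for the caller to free. A hypothetical sketch (the 64MB size and GFP flags are illustrative, not taken from the patch):

        static int my_swiotlb_setup(void)
        {
                unsigned long nslabs = (64UL << 20) >> IO_TLB_SHIFT;
                unsigned int order = get_order(nslabs << IO_TLB_SHIFT);
                char *tlb = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
                                                     order);

                if (!tlb)
                        return -ENOMEM;
                if (swiotlb_late_init_with_tbl(tlb, nslabs)) {
                        /* the helper does not free tlb; the caller owns it */
                        free_pages((unsigned long)tlb, order);
                        return -ENOMEM;
                }
                return 0;
        }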
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0e337541f005..39c99fea7c03 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -174,35 +174,25 @@ char *put_dec_trunc8(char *buf, unsigned r)
174 unsigned q; 174 unsigned q;
175 175
176 /* Copy of previous function's body with added early returns */ 176 /* Copy of previous function's body with added early returns */
177 q = (r * (uint64_t)0x1999999a) >> 32; 177 while (r >= 10000) {
178 *buf++ = (r - 10 * q) + '0'; /* 2 */ 178 q = r + '0';
179 if (q == 0) 179 r = (r * (uint64_t)0x1999999a) >> 32;
180 return buf; 180 *buf++ = q - 10*r;
181 r = (q * (uint64_t)0x1999999a) >> 32; 181 }
182 *buf++ = (q - 10 * r) + '0'; /* 3 */ 182
183 if (r == 0) 183 q = (r * 0x199a) >> 16; /* r <= 9999 */
184 return buf; 184 *buf++ = (r - 10 * q) + '0';
185 q = (r * (uint64_t)0x1999999a) >> 32;
186 *buf++ = (r - 10 * q) + '0'; /* 4 */
187 if (q == 0)
188 return buf;
189 r = (q * (uint64_t)0x1999999a) >> 32;
190 *buf++ = (q - 10 * r) + '0'; /* 5 */
191 if (r == 0)
192 return buf;
193 q = (r * 0x199a) >> 16;
194 *buf++ = (r - 10 * q) + '0'; /* 6 */
195 if (q == 0) 185 if (q == 0)
196 return buf; 186 return buf;
197 r = (q * 0xcd) >> 11; 187 r = (q * 0xcd) >> 11; /* q <= 999 */
198 *buf++ = (q - 10 * r) + '0'; /* 7 */ 188 *buf++ = (q - 10 * r) + '0';
199 if (r == 0) 189 if (r == 0)
200 return buf; 190 return buf;
201 q = (r * 0xcd) >> 11; 191 q = (r * 0xcd) >> 11; /* r <= 99 */
202 *buf++ = (r - 10 * q) + '0'; /* 8 */ 192 *buf++ = (r - 10 * q) + '0';
203 if (q == 0) 193 if (q == 0)
204 return buf; 194 return buf;
205 *buf++ = q + '0'; /* 9 */ 195 *buf++ = q + '0'; /* q <= 9 */
206 return buf; 196 return buf;
207} 197}
208 198
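The loop relies on the same reciprocal trick as the old unrolled body: 0x1999999a is ceil(2^32 / 10), so the high 32 bits of the product equal r / 10 for every value this helper can receive. A throwaway userspace check of that claim (a sketch, not part of the patch):

        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t r;

                /* exact for all r below 10^9, which covers the inputs here */
                for (r = 0; r < 1000000000u; r++)
                        assert((uint32_t)(((uint64_t)r * 0x1999999a) >> 32)
                               == r / 10);
                return 0;
        }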
@@ -243,18 +233,34 @@ char *put_dec(char *buf, unsigned long long n)
243 233
244/* Second algorithm: valid only for 64-bit long longs */ 234/* Second algorithm: valid only for 64-bit long longs */
245 235
236/* See comment in put_dec_full9 for choice of constants */
246static noinline_for_stack 237static noinline_for_stack
247char *put_dec_full4(char *buf, unsigned q) 238void put_dec_full4(char *buf, unsigned q)
248{ 239{
249 unsigned r; 240 unsigned r;
250 r = (q * 0xcccd) >> 19; 241 r = (q * 0xccd) >> 15;
251 *buf++ = (q - 10 * r) + '0'; 242 buf[0] = (q - 10 * r) + '0';
252 q = (r * 0x199a) >> 16; 243 q = (r * 0xcd) >> 11;
253 *buf++ = (r - 10 * q) + '0'; 244 buf[1] = (r - 10 * q) + '0';
254 r = (q * 0xcd) >> 11; 245 r = (q * 0xcd) >> 11;
255 *buf++ = (q - 10 * r) + '0'; 246 buf[2] = (q - 10 * r) + '0';
256 *buf++ = r + '0'; 247 buf[3] = r + '0';
257 return buf; 248}
249
250/*
251 * Call put_dec_full4 on x % 10000, return x / 10000.
252 * The approximation x/10000 == (x * 0x346DC5D7) >> 43
253 * holds for all x < 1,128,869,999. The largest value this
254 * helper will ever be asked to convert is 1,125,520,955.
255 * (d1 in the put_dec code, assuming n is all-ones).
256 */
257static
258unsigned put_dec_helper4(char *buf, unsigned x)
259{
260 uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43;
261
262 put_dec_full4(buf, x - q * 10000);
263 return q;
258} 264}
259 265
260/* Based on code by Douglas W. Jones found at 266/* Based on code by Douglas W. Jones found at
@@ -276,28 +282,19 @@ char *put_dec(char *buf, unsigned long long n)
276 d3 = (h >> 16); /* implicit "& 0xffff" */ 282 d3 = (h >> 16); /* implicit "& 0xffff" */
277 283
278 q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff); 284 q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
285 q = put_dec_helper4(buf, q);
286
287 q += 7671 * d3 + 9496 * d2 + 6 * d1;
288 q = put_dec_helper4(buf+4, q);
289
290 q += 4749 * d3 + 42 * d2;
291 q = put_dec_helper4(buf+8, q);
279 292
280 buf = put_dec_full4(buf, q % 10000); 293 q += 281 * d3;
281 q = q / 10000; 294 buf += 12;
282 295 if (q)
283 d1 = q + 7671 * d3 + 9496 * d2 + 6 * d1; 296 buf = put_dec_trunc8(buf, q);
284 buf = put_dec_full4(buf, d1 % 10000); 297 else while (buf[-1] == '0')
285 q = d1 / 10000;
286
287 d2 = q + 4749 * d3 + 42 * d2;
288 buf = put_dec_full4(buf, d2 % 10000);
289 q = d2 / 10000;
290
291 d3 = q + 281 * d3;
292 if (!d3)
293 goto done;
294 buf = put_dec_full4(buf, d3 % 10000);
295 q = d3 / 10000;
296 if (!q)
297 goto done;
298 buf = put_dec_full4(buf, q);
299 done:
300 while (buf[-1] == '0')
301 --buf; 298 --buf;
302 299
303 return buf; 300 return buf;
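The coefficient streams fall out of rewriting each 16-bit digit's weight in base 10^4: 2^16 = 6*10^4 + 5536, 2^32 = 42*10^8 + 9496*10^4 + 7296, and 2^48 = 281*10^12 + 4749*10^8 + 7671*10^4 + 656, so every put_dec_helper4() call emits one base-10^4 group and carries the quotient into the next. A throwaway check of those identities (a sketch, not part of the patch):

        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                /* each 16-bit digit's weight, rewritten in base 10^4 */
                assert((1ULL << 16) == 6 * 10000ULL + 5536);
                assert((1ULL << 32) == 42 * 100000000ULL
                                     + 9496 * 10000ULL + 7296);
                assert((1ULL << 48) == 281 * 1000000000000ULL
                                     + 4749 * 100000000ULL
                                     + 7671 * 10000ULL + 656);
                return 0;
        }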
@@ -990,7 +987,7 @@ int kptr_restrict __read_mostly;
990 * - 'm' For a 6-byte MAC address, it prints the hex address without colons 987 * - 'm' For a 6-byte MAC address, it prints the hex address without colons
991 * - 'MF' For a 6-byte MAC FDDI address, it prints the address 988 * - 'MF' For a 6-byte MAC FDDI address, it prints the address
992 * with a dash-separated hex notation 989 * with a dash-separated hex notation
993 * - '[mM]R For a 6-byte MAC address, Reverse order (Bluetooth) 990 * - '[mM]R' For a 6-byte MAC address, Reverse order (Bluetooth)
994 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way 991 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
995 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) 992 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
996 * IPv6 uses colon separated network-order 16 bit hex with leading 0's 993 * IPv6 uses colon separated network-order 16 bit hex with leading 0's
@@ -1341,7 +1338,10 @@ qualifier:
1341 * %pR output the address range in a struct resource with decoded flags 1338 * %pR output the address range in a struct resource with decoded flags
1342 * %pr output the address range in a struct resource with raw flags 1339 * %pr output the address range in a struct resource with raw flags
1343 * %pM output a 6-byte MAC address with colons 1340 * %pM output a 6-byte MAC address with colons
1341 * %pMR output a 6-byte MAC address with colons in reversed order
1342 * %pMF output a 6-byte MAC address with dashes
1344 * %pm output a 6-byte MAC address without colons 1343 * %pm output a 6-byte MAC address without colons
1344 * %pmR output a 6-byte MAC address without colons in reversed order
1345 * %pI4 print an IPv4 address without leading zeros 1345 * %pI4 print an IPv4 address without leading zeros
1346 * %pi4 print an IPv4 address with leading zeros 1346 * %pi4 print an IPv4 address with leading zeros
1347 * %pI6 print an IPv6 address with colons 1347 * %pI6 print an IPv6 address with colons
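For instance, a hypothetical Bluetooth driver could print the same station address both ways (a sketch):

        u8 addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        pr_info("%pM\n", addr);         /* 00:11:22:33:44:55 */
        pr_info("%pMR\n", addr);        /* 55:44:33:22:11:00 */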
@@ -2017,7 +2017,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
2017 s16 field_width; 2017 s16 field_width;
2018 bool is_sign; 2018 bool is_sign;
2019 2019
2020 while (*fmt && *str) { 2020 while (*fmt) {
2021 /* skip any white space in format */ 2021 /* skip any white space in format */
2022                /* white space in format matches any amount of          2022
2023 * white space, including none, in the input. 2023 * white space, including none, in the input.
@@ -2042,6 +2042,8 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
2042 * advance both strings to next white space 2042 * advance both strings to next white space
2043 */ 2043 */
2044 if (*fmt == '*') { 2044 if (*fmt == '*') {
2045 if (!*str)
2046 break;
2045 while (!isspace(*fmt) && *fmt != '%' && *fmt) 2047 while (!isspace(*fmt) && *fmt != '%' && *fmt)
2046 fmt++; 2048 fmt++;
2047 while (!isspace(*str) && *str) 2049 while (!isspace(*str) && *str)
@@ -2070,7 +2072,17 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
2070 } 2072 }
2071 } 2073 }
2072 2074
2073 if (!*fmt || !*str) 2075 if (!*fmt)
2076 break;
2077
2078 if (*fmt == 'n') {
2079 /* return number of characters read so far */
2080 *va_arg(args, int *) = str - buf;
2081 ++fmt;
2082 continue;
2083 }
2084
2085 if (!*str)
2074 break; 2086 break;
2075 2087
2076 base = 10; 2088 base = 10;
@@ -2103,13 +2115,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
2103 num++; 2115 num++;
2104 } 2116 }
2105 continue; 2117 continue;
2106 case 'n':
2107 /* return number of characters read so far */
2108 {
2109 int *i = (int *)va_arg(args, int*);
2110 *i = str - buf;
2111 }
2112 continue;
2113 case 'o': 2118 case 'o':
2114 base = 8; 2119 base = 8;
2115 break; 2120 break;
@@ -2210,16 +2215,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
2210 str = next; 2215 str = next;
2211 } 2216 }
2212 2217
2213 /*
2214 * Now we've come all the way through so either the input string or the
2215 * format ended. In the former case, there can be a %n at the current
2216 * position in the format that needs to be filled.
2217 */
2218 if (*fmt == '%' && *(fmt + 1) == 'n') {
2219 int *p = (int *)va_arg(args, int *);
2220 *p = str - buf;
2221 }
2222
2223 return num; 2218 return num;
2224} 2219}
2225EXPORT_SYMBOL(vsscanf); 2220EXPORT_SYMBOL(vsscanf);
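Hoisting %n ahead of the input-exhaustion check generalizes the removed tail-end special case, which only covered a %n sitting immediately at the current format position. A case that only works with the new placement (a sketch):

        int val, chars = -1;

        /*
         * "%d %n": the old loop stopped as soon as the input ran out, and
         * the tail-end check then saw ' ' rather than '%', so chars stayed
         * -1. Now the format whitespace is skipped and %n stores 3.
         */
        if (sscanf("123", "%d %n", &val, &chars) == 1)
                pr_info("parsed %d, consumed %d chars\n", val, chars);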