Diffstat (limited to 'lib')
 -rw-r--r--  lib/.gitignore        |    2
 -rw-r--r--  lib/Kconfig           |   17
 -rw-r--r--  lib/Kconfig.debug     |   16
 -rw-r--r--  lib/Makefile          |   11
 -rw-r--r--  lib/atomic64.c        |    2
 -rw-r--r--  lib/atomic64_test.c   |    2
 -rw-r--r--  lib/bitmap.c          |    4
 -rw-r--r--  lib/checksum.c        |   13
 -rw-r--r--  lib/cordic.c          |  101
 -rw-r--r--  lib/cpumask.c         |    4
 -rw-r--r--  lib/crc32.c           |    2
 -rw-r--r--  lib/crc8.c            |   86
 -rw-r--r--  lib/dec_and_lock.c    |    2
 -rw-r--r--  lib/devres.c          |    2
 -rw-r--r--  lib/fault-inject.c    |  156
 -rw-r--r--  lib/genalloc.c        |  323
 -rw-r--r--  lib/idr.c             |   67
 -rw-r--r--  lib/iomap.c           |    4
 -rw-r--r--  lib/kobject_uevent.c  |    2
 -rw-r--r--  lib/kstrtox.c         |    5
 -rw-r--r--  lib/lcm.c             |    1
 -rw-r--r--  lib/llist.c           |  129
 -rw-r--r--  lib/md5.c             |   95
 -rw-r--r--  lib/nlattr.c          |    1
 -rw-r--r--  lib/plist.c           |    7
 -rw-r--r--  lib/radix-tree.c      |  121
 -rw-r--r--  lib/sha1.c            |  211
 -rw-r--r--  lib/vsprintf.c        |   26
 -rw-r--r--  lib/xz/xz_dec_bcj.c   |   27
 -rw-r--r--  lib/xz/xz_private.h   |    2
 30 files changed, 1143 insertions(+), 298 deletions(-)
diff --git a/lib/.gitignore b/lib/.gitignore
index 3bef1ea94c9..09aae85418a 100644
--- a/lib/.gitignore
+++ b/lib/.gitignore
@@ -3,4 +3,4 @@
 #
 gen_crc32table
 crc32table.h
-
+oid_registry_data.c
diff --git a/lib/Kconfig b/lib/Kconfig
index 830181cc7a8..6c695ff9cab 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -79,6 +79,13 @@ config LIBCRC32C
	  require M here.  See Castagnoli93.
	  Module will be libcrc32c.
 
+config CRC8
+	tristate "CRC8 function"
+	help
+	  This option provides CRC8 function. Drivers may select this
+	  when they need to do cyclic redundancy check according CRC8
+	  algorithm. Module will be called crc8.
+
 config AUDIT_GENERIC
	bool
	depends on AUDIT && !AUDIT_ARCH
@@ -262,4 +269,14 @@ config AVERAGE
 
	  If unsure, say N.
 
+config CORDIC
+	tristate "Cordic function"
+	help
+	  The option provides arithmetic function using cordic algorithm
+	  so its calculations are in fixed point. Modules can select this
+	  when they require this function. Module will be called cordic.
+
+config LLIST
+	bool
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dd373c8ee94..36b60dbac3a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -227,7 +227,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
 config DETECT_HUNG_TASK
	bool "Detect Hung Tasks"
	depends on DEBUG_KERNEL
-	default DETECT_SOFTLOCKUP
+	default LOCKUP_DETECTOR
	help
	  Say Y here to enable the kernel to detect "hung tasks",
	  which are bugs that cause the task to be stuck in
@@ -648,12 +648,15 @@ config TRACE_IRQFLAGS
	  Enables hooks to interrupt enabling and disabling for
	  either tracing or lock debugging.
 
-config DEBUG_SPINLOCK_SLEEP
-	bool "Spinlock debugging: sleep-inside-spinlock checking"
+config DEBUG_ATOMIC_SLEEP
+	bool "Sleep inside atomic section checking"
+	select PREEMPT_COUNT
	depends on DEBUG_KERNEL
	help
	  If you say Y here, various routines which may sleep will become very
-	  noisy if they are called with a spinlock held.
+	  noisy if they are called inside atomic sections: when a spinlock is
+	  held, inside an rcu read side critical section, inside preempt disabled
+	  sections, inside an interrupt, etc...
 
 config DEBUG_LOCKING_API_SELFTESTS
	bool "Locking API boot-time self-tests"
@@ -667,8 +670,9 @@ config DEBUG_LOCKING_API_SELFTESTS
	  mutexes and rwsems.
 
 config STACKTRACE
-	bool
+	bool "Stacktrace"
	depends on STACKTRACE_SUPPORT
+	default y
 
 config DEBUG_STACK_USAGE
	bool "Stack utilization instrumentation"
@@ -866,7 +870,7 @@ config BOOT_PRINTK_DELAY
	  system, and then set "lpj=M" before setting "boot_delay=N".
	  NOTE: Using this option may adversely affect SMP systems.
	  I.e., processors other than the first one may not boot up.
-	  BOOT_PRINTK_DELAY also may cause DETECT_SOFTLOCKUP to detect
+	  BOOT_PRINTK_DELAY also may cause LOCKUP_DETECTOR to detect
	  what it believes to be lockup conditions.
 
 config RCU_TORTURE_TEST
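
A note on the DEBUG_ATOMIC_SLEEP rename above: the check now fires in any atomic section, not just under a spinlock. A minimal sketch of the bug class it flags; the demo_ names are hypothetical and not part of this commit:

    #include <linux/mutex.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);
    static DEFINE_MUTEX(demo_mutex);

    static void demo_bad_sleep(void)
    {
            spin_lock(&demo_lock);          /* enters an atomic section */
            /*
             * mutex_lock() may sleep; with DEBUG_ATOMIC_SLEEP (which now
             * selects PREEMPT_COUNT) the might_sleep() check inside it
             * warns "BUG: sleeping function called from invalid context".
             */
            mutex_lock(&demo_mutex);
            mutex_unlock(&demo_mutex);
            spin_unlock(&demo_lock);
    }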
diff --git a/lib/Makefile b/lib/Makefile
index 6b597fdb189..3f5bc6d903e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -10,9 +10,9 @@ endif
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
	 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
	 idr.o int_sqrt.o extable.o prio_tree.o \
-	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
+	 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
	 proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o find_next_bit.o
+	 is_single_threaded.o plist.o decompress.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -22,7 +22,7 @@ lib-y += kobject.o kref.o klist.o
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
-	 bsearch.o find_last_bit.o
+	 bsearch.o find_last_bit.o find_next_bit.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 
@@ -61,6 +61,7 @@ obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
 obj-$(CONFIG_CRC32)	+= crc32.o
 obj-$(CONFIG_CRC7)	+= crc7.o
 obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
+obj-$(CONFIG_CRC8)	+= crc8.o
 obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
 
 obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
@@ -112,6 +113,10 @@ obj-$(CONFIG_AVERAGE) += average.o
 
 obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
 
+obj-$(CONFIG_CORDIC) += cordic.o
+
+obj-$(CONFIG_LLIST) += llist.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/atomic64.c b/lib/atomic64.c
index a21c12bc727..e12ae0dd08a 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -14,7 +14,7 @@
 #include <linux/spinlock.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 /*
  * We use a hashed array of spinlocks to provide exclusive access
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 44524cc8c32..0c33cde2a1e 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -10,7 +10,7 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 #define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
 static __init int test_atomic64(void)
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 3f3b68199d7..2f4412e4d07 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -271,8 +271,6 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
 }
 EXPORT_SYMBOL(__bitmap_weight);
 
-#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
-
 void bitmap_set(unsigned long *map, int start, int nr)
 {
	unsigned long *p = map + BIT_WORD(start);
@@ -756,7 +754,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
  *
  * The bit positions 0 through @bits are valid positions in @buf.
  */
-static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
+int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
 {
	int pos = 0;
 
diff --git a/lib/checksum.c b/lib/checksum.c
index 097508732f3..8df2f91e6d9 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -49,7 +49,7 @@ static inline unsigned short from32to16(unsigned int x)
 
 static unsigned int do_csum(const unsigned char *buff, int len)
 {
-	int odd, count;
+	int odd;
	unsigned int result = 0;
 
	if (len <= 0)
@@ -64,25 +64,22 @@ static unsigned int do_csum(const unsigned char *buff, int len)
		len--;
		buff++;
	}
-	count = len >> 1;	/* nr of 16-bit words.. */
-	if (count) {
+	if (len >= 2) {
		if (2 & (unsigned long) buff) {
			result += *(unsigned short *) buff;
-			count--;
			len -= 2;
			buff += 2;
		}
-		count >>= 1;	/* nr of 32-bit words.. */
-		if (count) {
+		if (len >= 4) {
+			const unsigned char *end = buff + ((unsigned)len & ~3);
			unsigned int carry = 0;
			do {
				unsigned int w = *(unsigned int *) buff;
-				count--;
				buff += 4;
				result += carry;
				result += w;
				carry = (w > result);
-			} while (count);
+			} while (buff < end);
			result += carry;
			result = (result & 0xffff) + (result >> 16);
		}
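
The do_csum() rework above replaces the 16-bit/32-bit word counters with direct length tests and an end pointer, folding the carry as it goes. The same loop as a freestanding sketch (portable userspace C, for illustration only):

    #include <stdint.h>
    #include <string.h>

    /* Sum whole 32-bit words of buf, folding the carry into 16 bits. */
    static uint32_t sum32_fold(const unsigned char *buf, size_t len)
    {
            const unsigned char *end = buf + (len & ~(size_t)3);
            uint32_t result = 0, carry = 0;

            while (buf < end) {
                    uint32_t w;

                    memcpy(&w, buf, 4);     /* portable unaligned load */
                    buf += 4;
                    result += carry;
                    result += w;
                    carry = (w > result);   /* the add wrapped iff result < w */
            }
            result += carry;
            return (result & 0xffff) + (result >> 16);
    }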
diff --git a/lib/cordic.c b/lib/cordic.c
new file mode 100644
index 00000000000..aa27a88d7e0
--- /dev/null
+++ b/lib/cordic.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/cordic.h>
+
+#define CORDIC_ANGLE_GEN	39797
+#define CORDIC_PRECISION_SHIFT	16
+#define CORDIC_NUM_ITER		(CORDIC_PRECISION_SHIFT + 2)
+
+#define FIXED(X)	((s32)((X) << CORDIC_PRECISION_SHIFT))
+#define FLOAT(X)	(((X) >= 0) \
+		? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
+		: -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
+
+static const s32 arctan_table[] = {
+	2949120,
+	1740967,
+	919879,
+	466945,
+	234379,
+	117304,
+	58666,
+	29335,
+	14668,
+	7334,
+	3667,
+	1833,
+	917,
+	458,
+	229,
+	115,
+	57,
+	29
+};
+
+/*
+ * cordic_calc_iq() - calculates the i/q coordinate for given angle
+ *
+ * theta: angle in degrees for which i/q coordinate is to be calculated
+ * coord: function output parameter holding the i/q coordinate
+ */
+struct cordic_iq cordic_calc_iq(s32 theta)
+{
+	struct cordic_iq coord;
+	s32 angle, valtmp;
+	unsigned iter;
+	int signx = 1;
+	int signtheta;
+
+	coord.i = CORDIC_ANGLE_GEN;
+	coord.q = 0;
+	angle = 0;
+
+	theta = FIXED(theta);
+	signtheta = (theta < 0) ? -1 : 1;
+	theta = ((theta + FIXED(180) * signtheta) % FIXED(360)) -
+		FIXED(180) * signtheta;
+
+	if (FLOAT(theta) > 90) {
+		theta -= FIXED(180);
+		signx = -1;
+	} else if (FLOAT(theta) < -90) {
+		theta += FIXED(180);
+		signx = -1;
+	}
+
+	for (iter = 0; iter < CORDIC_NUM_ITER; iter++) {
+		if (theta > angle) {
+			valtmp = coord.i - (coord.q >> iter);
+			coord.q += (coord.i >> iter);
+			angle += arctan_table[iter];
+		} else {
+			valtmp = coord.i + (coord.q >> iter);
+			coord.q -= (coord.i >> iter);
+			angle -= arctan_table[iter];
+		}
+		coord.i = valtmp;
+	}
+
+	coord.i *= signx;
+	coord.q *= signx;
+	return coord;
+}
+EXPORT_SYMBOL(cordic_calc_iq);
+
+MODULE_DESCRIPTION("Cordic functions");
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
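
A usage sketch for the new cordic_calc_iq() helper; the demo module below is hypothetical, standing in for a driver that selects CONFIG_CORDIC:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/cordic.h>

    static int __init cordic_demo_init(void)
    {
            /*
             * theta is in degrees; the results are fixed point, scaled
             * by CORDIC_ANGLE_GEN, so i/q approximate cos/sin of theta.
             */
            struct cordic_iq iq = cordic_calc_iq(30);

            pr_info("cordic(30): i=%d q=%d\n", iq.i, iq.q);
            return 0;
    }
    module_init(cordic_demo_init);

    MODULE_LICENSE("Dual BSD/GPL");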
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 05d6aca7fc1..af3e5817de9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -30,7 +30,7 @@ int __any_online_cpu(const cpumask_t *mask)
 {
	int cpu;
 
-	for_each_cpu_mask(cpu, *mask) {
+	for_each_cpu(cpu, mask) {
		if (cpu_online(cpu))
			break;
	}
@@ -131,7 +131,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var_node);
  */
 bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
 {
-	return alloc_cpumask_var_node(mask, flags, numa_node_id());
+	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(alloc_cpumask_var);
 
diff --git a/lib/crc32.c b/lib/crc32.c
index 4855995fcde..a6e633a48ce 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -26,7 +26,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/init.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include "crc32defs.h"
 #if CRC_LE_BITS == 8
 # define tole(x) __constant_cpu_to_le32(x)
diff --git a/lib/crc8.c b/lib/crc8.c
new file mode 100644
index 00000000000..87b59cafdb8
--- /dev/null
+++ b/lib/crc8.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/crc8.h>
+#include <linux/printk.h>
+
+/*
+ * crc8_populate_msb - fill crc table for given polynomial in reverse bit order.
+ *
+ * table:	table to be filled.
+ * polynomial:	polynomial for which table is to be filled.
+ */
+void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
+{
+	int i, j;
+	const u8 msbit = 0x80;
+	u8 t = msbit;
+
+	table[0] = 0;
+
+	for (i = 1; i < CRC8_TABLE_SIZE; i *= 2) {
+		t = (t << 1) ^ (t & msbit ? polynomial : 0);
+		for (j = 0; j < i; j++)
+			table[i+j] = table[j] ^ t;
+	}
+}
+EXPORT_SYMBOL(crc8_populate_msb);
+
+/*
+ * crc8_populate_lsb - fill crc table for given polynomial in regular bit order.
+ *
+ * table:	table to be filled.
+ * polynomial:	polynomial for which table is to be filled.
+ */
+void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
+{
+	int i, j;
+	u8 t = 1;
+
+	table[0] = 0;
+
+	for (i = (CRC8_TABLE_SIZE >> 1); i; i >>= 1) {
+		t = (t >> 1) ^ (t & 1 ? polynomial : 0);
+		for (j = 0; j < CRC8_TABLE_SIZE; j += 2*i)
+			table[i+j] = table[j] ^ t;
+	}
+}
+EXPORT_SYMBOL(crc8_populate_lsb);
+
+/*
+ * crc8 - calculate a crc8 over the given input data.
+ *
+ * table: crc table used for calculation.
+ * pdata: pointer to data buffer.
+ * nbytes: number of bytes in data buffer.
+ * crc: previous returned crc8 value.
+ */
+u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc)
+{
+	/* loop over the buffer data */
+	while (nbytes-- > 0)
+		crc = table[(crc ^ *pdata++) & 0xff];
+
+	return crc;
+}
+EXPORT_SYMBOL(crc8);
+
+MODULE_DESCRIPTION("CRC8 (by Williams, Ross N.) function");
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
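
Typical use of the new crc8 interface is a driver-local table filled once at init time. A minimal sketch; the demo_ names and polynomial are illustrative, while CRC8_TABLE_SIZE and CRC8_INIT_VALUE are assumed to come from <linux/crc8.h>:

    #include <linux/crc8.h>

    /* x^8 + x^2 + x + 1, msbit-first; polynomial choice is illustrative */
    #define DEMO_CRC8_POLY  0x07

    static u8 demo_crc8_table[CRC8_TABLE_SIZE];

    static void demo_crc8_init(void)
    {
            crc8_populate_msb(demo_crc8_table, DEMO_CRC8_POLY);
    }

    static u8 demo_crc8(u8 *buf, size_t len)
    {
            /* crc8() can be chained by passing a previous result as seed */
            return crc8(demo_crc8_table, buf, len, CRC8_INIT_VALUE);
    }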
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index e73822aa6e9..b5257725daa 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -1,6 +1,6 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 /*
  * This is an implementation of the notion of "decrement a
diff --git a/lib/devres.c b/lib/devres.c
index 6efddf53b90..7c0e953a748 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -79,9 +79,9 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
  */
 void devm_iounmap(struct device *dev, void __iomem *addr)
 {
-	iounmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (void *)addr));
+	iounmap(addr);
 }
 EXPORT_SYMBOL(devm_iounmap);
 
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 7e65af70635..f193b779644 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -8,7 +8,6 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/stacktrace.h>
-#include <linux/kallsyms.h>
 #include <linux/fault-inject.h>
 
 /*
@@ -140,16 +139,6 @@ static int debugfs_ul_set(void *data, u64 val)
	return 0;
 }
 
-#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
-static int debugfs_ul_set_MAX_STACK_TRACE_DEPTH(void *data, u64 val)
-{
-	*(unsigned long *)data =
-		val < MAX_STACK_TRACE_DEPTH ?
-		val : MAX_STACK_TRACE_DEPTH;
-	return 0;
-}
-#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
-
 static int debugfs_ul_get(void *data, u64 *val)
 {
	*val = *(unsigned long *)data;
@@ -165,16 +154,26 @@ static struct dentry *debugfs_create_ul(const char *name, mode_t mode,
 }
 
 #ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
-DEFINE_SIMPLE_ATTRIBUTE(fops_ul_MAX_STACK_TRACE_DEPTH, debugfs_ul_get,
-			debugfs_ul_set_MAX_STACK_TRACE_DEPTH, "%llu\n");
 
-static struct dentry *debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
+static int debugfs_stacktrace_depth_set(void *data, u64 val)
+{
+	*(unsigned long *)data =
+		min_t(unsigned long, val, MAX_STACK_TRACE_DEPTH);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
+			debugfs_stacktrace_depth_set, "%llu\n");
+
+static struct dentry *debugfs_create_stacktrace_depth(
	const char *name, mode_t mode,
	struct dentry *parent, unsigned long *value)
 {
	return debugfs_create_file(name, mode, parent, value,
-			&fops_ul_MAX_STACK_TRACE_DEPTH);
+			&fops_stacktrace_depth);
 }
+
 #endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
 
 static int debugfs_atomic_t_set(void *data, u64 val)
@@ -198,118 +197,51 @@ static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
	return debugfs_create_file(name, mode, parent, value, &fops_atomic_t);
 }
 
-void cleanup_fault_attr_dentries(struct fault_attr *attr)
-{
-	debugfs_remove(attr->dentries.probability_file);
-	attr->dentries.probability_file = NULL;
-
-	debugfs_remove(attr->dentries.interval_file);
-	attr->dentries.interval_file = NULL;
-
-	debugfs_remove(attr->dentries.times_file);
-	attr->dentries.times_file = NULL;
-
-	debugfs_remove(attr->dentries.space_file);
-	attr->dentries.space_file = NULL;
-
-	debugfs_remove(attr->dentries.verbose_file);
-	attr->dentries.verbose_file = NULL;
-
-	debugfs_remove(attr->dentries.task_filter_file);
-	attr->dentries.task_filter_file = NULL;
-
-#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
-
-	debugfs_remove(attr->dentries.stacktrace_depth_file);
-	attr->dentries.stacktrace_depth_file = NULL;
-
-	debugfs_remove(attr->dentries.require_start_file);
-	attr->dentries.require_start_file = NULL;
-
-	debugfs_remove(attr->dentries.require_end_file);
-	attr->dentries.require_end_file = NULL;
-
-	debugfs_remove(attr->dentries.reject_start_file);
-	attr->dentries.reject_start_file = NULL;
-
-	debugfs_remove(attr->dentries.reject_end_file);
-	attr->dentries.reject_end_file = NULL;
-
-#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
-
-	if (attr->dentries.dir)
-		WARN_ON(!simple_empty(attr->dentries.dir));
-
-	debugfs_remove(attr->dentries.dir);
-	attr->dentries.dir = NULL;
-}
-
-int init_fault_attr_dentries(struct fault_attr *attr, const char *name)
+struct dentry *fault_create_debugfs_attr(const char *name,
+			struct dentry *parent, struct fault_attr *attr)
 {
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;
 
-	memset(&attr->dentries, 0, sizeof(attr->dentries));
-
-	dir = debugfs_create_dir(name, NULL);
+	dir = debugfs_create_dir(name, parent);
	if (!dir)
-		goto fail;
-	attr->dentries.dir = dir;
-
-	attr->dentries.probability_file =
-		debugfs_create_ul("probability", mode, dir, &attr->probability);
+		return ERR_PTR(-ENOMEM);
 
-	attr->dentries.interval_file =
-		debugfs_create_ul("interval", mode, dir, &attr->interval);
-
-	attr->dentries.times_file =
-		debugfs_create_atomic_t("times", mode, dir, &attr->times);
-
-	attr->dentries.space_file =
-		debugfs_create_atomic_t("space", mode, dir, &attr->space);
-
-	attr->dentries.verbose_file =
-		debugfs_create_ul("verbose", mode, dir, &attr->verbose);
-
-	attr->dentries.task_filter_file = debugfs_create_bool("task-filter",
-						mode, dir, &attr->task_filter);
-
-	if (!attr->dentries.probability_file || !attr->dentries.interval_file ||
-	    !attr->dentries.times_file || !attr->dentries.space_file ||
-	    !attr->dentries.verbose_file || !attr->dentries.task_filter_file)
+	if (!debugfs_create_ul("probability", mode, dir, &attr->probability))
+		goto fail;
+	if (!debugfs_create_ul("interval", mode, dir, &attr->interval))
+		goto fail;
+	if (!debugfs_create_atomic_t("times", mode, dir, &attr->times))
+		goto fail;
+	if (!debugfs_create_atomic_t("space", mode, dir, &attr->space))
+		goto fail;
+	if (!debugfs_create_ul("verbose", mode, dir, &attr->verbose))
+		goto fail;
+	if (!debugfs_create_bool("task-filter", mode, dir, &attr->task_filter))
		goto fail;
 
 #ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
 
-	attr->dentries.stacktrace_depth_file =
-		debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
-			"stacktrace-depth", mode, dir, &attr->stacktrace_depth);
-
-	attr->dentries.require_start_file =
-		debugfs_create_ul("require-start", mode, dir, &attr->require_start);
-
-	attr->dentries.require_end_file =
-		debugfs_create_ul("require-end", mode, dir, &attr->require_end);
-
-	attr->dentries.reject_start_file =
-		debugfs_create_ul("reject-start", mode, dir, &attr->reject_start);
-
-	attr->dentries.reject_end_file =
-		debugfs_create_ul("reject-end", mode, dir, &attr->reject_end);
-
-	if (!attr->dentries.stacktrace_depth_file ||
-	    !attr->dentries.require_start_file ||
-	    !attr->dentries.require_end_file ||
-	    !attr->dentries.reject_start_file ||
-	    !attr->dentries.reject_end_file)
+	if (!debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
+				&attr->stacktrace_depth))
+		goto fail;
+	if (!debugfs_create_ul("require-start", mode, dir,
+				&attr->require_start))
+		goto fail;
+	if (!debugfs_create_ul("require-end", mode, dir, &attr->require_end))
+		goto fail;
+	if (!debugfs_create_ul("reject-start", mode, dir, &attr->reject_start))
+		goto fail;
+	if (!debugfs_create_ul("reject-end", mode, dir, &attr->reject_end))
		goto fail;
 
 #endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
 
-	return 0;
+	return dir;
 fail:
-	cleanup_fault_attr_dentries(attr);
-	return -ENOMEM;
+	debugfs_remove_recursive(dir);
+
+	return ERR_PTR(-ENOMEM);
 }
 
 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
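
Callers move from the old init_fault_attr_dentries()/cleanup_fault_attr_dentries() pair to the single constructor, checking for an ERR_PTR instead of an int. A hedged sketch of the new calling convention; the fail_demo attribute is hypothetical:

    #include <linux/init.h>
    #include <linux/err.h>
    #include <linux/fault-inject.h>

    static DECLARE_FAULT_ATTR(fail_demo);

    static int __init fail_demo_debugfs_init(void)
    {
            /* one call creates the directory and all its control files */
            struct dentry *dir =
                    fault_create_debugfs_attr("fail_demo", NULL, &fail_demo);

            return IS_ERR(dir) ? PTR_ERR(dir) : 0;
    }
    late_initcall(fail_demo_debugfs_init);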
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 577ddf80597..667bd5ffad3 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -1,8 +1,26 @@
 /*
- * Basic general purpose allocator for managing special purpose memory
- * not managed by the regular kmalloc/kfree interface.
- * Uses for this includes on-device special memory, uncached memory
- * etc.
+ * Basic general purpose allocator for managing special purpose
+ * memory, for example, memory that is not managed by the regular
+ * kmalloc/kfree interface.  Uses for this includes on-device special
+ * memory, uncached memory etc.
+ *
+ * It is safe to use the allocator in NMI handlers and other special
+ * unblockable contexts that could otherwise deadlock on locks.  This
+ * is implemented by using atomic operations and retries on any
+ * conflicts.  The disadvantage is that there may be livelocks in
+ * extreme cases.  For better scalability, one allocator can be used
+ * for each CPU.
+ *
+ * The lockless operation only works if there is enough memory
+ * available.  If new memory is added to the pool a lock has to be
+ * still taken.  So any user relying on locklessness has to ensure
+ * that sufficient memory is preallocated.
+ *
+ * The basic atomic operation of this allocator is cmpxchg on long.
+ * On architectures that don't have NMI-safe cmpxchg implementation,
+ * the allocator can NOT be used in NMI handler.  So code uses the
+ * allocator in NMI handler should depend on
+ * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
  *
  * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
  *
@@ -13,8 +31,109 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/bitmap.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
 #include <linux/genalloc.h>
 
+static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
+{
+	unsigned long val, nval;
+
+	nval = *addr;
+	do {
+		val = nval;
+		if (val & mask_to_set)
+			return -EBUSY;
+		cpu_relax();
+	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);
+
+	return 0;
+}
+
+static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
+{
+	unsigned long val, nval;
+
+	nval = *addr;
+	do {
+		val = nval;
+		if ((val & mask_to_clear) != mask_to_clear)
+			return -EBUSY;
+		cpu_relax();
+	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);
+
+	return 0;
+}
+
+/*
+ * bitmap_set_ll - set the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to set
+ *
+ * Set @nr bits start from @start in @map lock-lessly. Several users
+ * can set/clear the same bitmap simultaneously without lock. If two
+ * users set the same bit, one user will return remain bits, otherwise
+ * return 0.
+ */
+static int bitmap_set_ll(unsigned long *map, int start, int nr)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const int size = start + nr;
+	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+	while (nr - bits_to_set >= 0) {
+		if (set_bits_ll(p, mask_to_set))
+			return nr;
+		nr -= bits_to_set;
+		bits_to_set = BITS_PER_LONG;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (nr) {
+		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+		if (set_bits_ll(p, mask_to_set))
+			return nr;
+	}
+
+	return 0;
+}
+
+/*
+ * bitmap_clear_ll - clear the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to set
+ *
+ * Clear @nr bits start from @start in @map lock-lessly. Several users
+ * can set/clear the same bitmap simultaneously without lock. If two
+ * users clear the same bit, one user will return remain bits,
+ * otherwise return 0.
+ */
+static int bitmap_clear_ll(unsigned long *map, int start, int nr)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const int size = start + nr;
+	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+	while (nr - bits_to_clear >= 0) {
+		if (clear_bits_ll(p, mask_to_clear))
+			return nr;
+		nr -= bits_to_clear;
+		bits_to_clear = BITS_PER_LONG;
+		mask_to_clear = ~0UL;
+		p++;
+	}
+	if (nr) {
+		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+		if (clear_bits_ll(p, mask_to_clear))
+			return nr;
+	}
+
+	return 0;
+}
 
 /**
  * gen_pool_create - create a new special memory pool
@@ -30,7 +149,7 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
 
	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
-		rwlock_init(&pool->lock);
+		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
	}
@@ -63,14 +182,14 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
	if (unlikely(chunk == NULL))
		return -ENOMEM;
 
-	spin_lock_init(&chunk->lock);
	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size;
+	atomic_set(&chunk->avail, size);
 
-	write_lock(&pool->lock);
-	list_add(&chunk->next_chunk, &pool->chunks);
-	write_unlock(&pool->lock);
+	spin_lock(&pool->lock);
+	list_add_rcu(&chunk->next_chunk, &pool->chunks);
+	spin_unlock(&pool->lock);
 
	return 0;
 }
@@ -85,19 +204,19 @@ EXPORT_SYMBOL(gen_pool_add_virt);
  */
 phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
 {
-	struct list_head *_chunk;
	struct gen_pool_chunk *chunk;
+	phys_addr_t paddr = -1;
 
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
-
-		if (addr >= chunk->start_addr && addr < chunk->end_addr)
-			return chunk->phys_addr + addr - chunk->start_addr;
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+			paddr = chunk->phys_addr + (addr - chunk->start_addr);
+			break;
+		}
	}
-	read_unlock(&pool->lock);
+	rcu_read_unlock();
 
-	return -1;
+	return paddr;
 }
 EXPORT_SYMBOL(gen_pool_virt_to_phys);
 
@@ -115,7 +234,6 @@ void gen_pool_destroy(struct gen_pool *pool)
	int order = pool->min_alloc_order;
	int bit, end_bit;
 
-
	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);
@@ -132,51 +250,74 @@ void gen_pool_destroy(struct gen_pool *pool)
 EXPORT_SYMBOL(gen_pool_destroy);
 
 /**
- * gen_pool_alloc - allocate special memory from the pool
+ * gen_pool_alloc_addr - allocate special memory from the pool
  * @pool: pool to allocate from
  * @size: number of bytes to allocate from the pool
+ * @alloc_addr: if non-zero, allocate starting at alloc_addr.
  *
  * Allocate the requested number of bytes from the specified pool.
- * Uses a first-fit algorithm.
+ * Uses a first-fit algorithm. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
  */
-unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+unsigned long gen_pool_alloc_addr(struct gen_pool *pool, size_t size,
+				  unsigned long alloc_addr)
 {
-	struct list_head *_chunk;
	struct gen_pool_chunk *chunk;
-	unsigned long addr, flags;
+	unsigned long addr = 0;
	int order = pool->min_alloc_order;
-	int nbits, start_bit, end_bit;
+	int nbits, start_bit = 0, end_bit, remain;
+	int alloc_bit_needed = 0;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
 
	if (size == 0)
		return 0;
 
-	nbits = (size + (1UL << order) - 1) >> order;
+	if (alloc_addr & (1 << order) - 1)
+		return 0;
 
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+	nbits = (size + (1UL << order) - 1) >> order;
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+		if (size > atomic_read(&chunk->avail))
+			continue;
 
		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
-
-		spin_lock_irqsave(&chunk->lock, flags);
-		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
-						nbits, 0);
-		if (start_bit >= end_bit) {
-			spin_unlock_irqrestore(&chunk->lock, flags);
+		if (alloc_addr) {
+			if (alloc_addr < chunk->start_addr ||
+			    alloc_addr >= chunk->end_addr)
+				continue;
+			if (alloc_addr + size > chunk->end_addr)
+				return 0;
+			alloc_bit_needed = start_bit =
+				(alloc_addr - chunk->start_addr) >> order;
+		}
+retry:
+		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
+						       start_bit, nbits, 0);
+		if (alloc_addr && alloc_bit_needed != start_bit)
+			return 0;
+		if (start_bit >= end_bit)
			continue;
+		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
+		if (remain) {
+			remain = bitmap_clear_ll(chunk->bits, start_bit,
+						 nbits - remain);
+			BUG_ON(remain);
+			goto retry;
		}
 
		addr = chunk->start_addr + ((unsigned long)start_bit << order);
-
-		bitmap_set(chunk->bits, start_bit, nbits);
-		spin_unlock_irqrestore(&chunk->lock, flags);
-		read_unlock(&pool->lock);
-		return addr;
+		size = nbits << order;
+		atomic_sub(size, &chunk->avail);
+		break;
	}
-	read_unlock(&pool->lock);
-	return 0;
+	rcu_read_unlock();
+	return addr;
 }
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_alloc_addr);
 
 /**
  * gen_pool_free - free allocated special memory back to the pool
@@ -184,33 +325,95 @@ EXPORT_SYMBOL(gen_pool_alloc);
  * @addr: starting address of memory to free back to pool
  * @size: size in bytes of memory to free
  *
- * Free previously allocated special memory back to the specified pool.
+ * Free previously allocated special memory back to the specified
+ * pool. Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
  */
 void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
 {
-	struct list_head *_chunk;
	struct gen_pool_chunk *chunk;
-	unsigned long flags;
	int order = pool->min_alloc_order;
-	int bit, nbits;
+	int start_bit, nbits, remain;
 
-	nbits = (size + (1UL << order) - 1) >> order;
-
-	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	BUG_ON(in_nmi());
+#endif
 
+	nbits = (size + (1UL << order) - 1) >> order;
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			BUG_ON(addr + size > chunk->end_addr);
-			spin_lock_irqsave(&chunk->lock, flags);
-			bit = (addr - chunk->start_addr) >> order;
-			while (nbits--)
-				__clear_bit(bit++, chunk->bits);
-			spin_unlock_irqrestore(&chunk->lock, flags);
-			break;
+			start_bit = (addr - chunk->start_addr) >> order;
+			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
+			BUG_ON(remain);
+			size = nbits << order;
+			atomic_add(size, &chunk->avail);
+			rcu_read_unlock();
+			return;
		}
	}
-	BUG_ON(nbits > 0);
-	read_unlock(&pool->lock);
+	rcu_read_unlock();
+	BUG();
 }
 EXPORT_SYMBOL(gen_pool_free);
+
+/**
+ * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
+ * @pool:	the generic memory pool
+ * @func:	func to call
+ * @data:	additional data used by @func
+ *
+ * Call @func for every chunk of generic memory pool.  The @func is
+ * called with rcu_read_lock held.
+ */
+void gen_pool_for_each_chunk(struct gen_pool *pool,
+	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
+	void *data)
+{
+	struct gen_pool_chunk *chunk;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
+		func(pool, chunk, data);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(gen_pool_for_each_chunk);
+
+/**
+ * gen_pool_avail - get available free space of the pool
+ * @pool: pool to get available free space
+ *
+ * Return available free space of the specified pool.
+ */
+size_t gen_pool_avail(struct gen_pool *pool)
+{
+	struct gen_pool_chunk *chunk;
+	size_t avail = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
+		avail += atomic_read(&chunk->avail);
+	rcu_read_unlock();
+	return avail;
+}
+EXPORT_SYMBOL_GPL(gen_pool_avail);
+
+/**
+ * gen_pool_size - get size in bytes of memory managed by the pool
+ * @pool: pool to get size
+ *
+ * Return size in bytes of memory managed by the pool.
+ */
+size_t gen_pool_size(struct gen_pool *pool)
+{
+	struct gen_pool_chunk *chunk;
+	size_t size = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
+		size += chunk->end_addr - chunk->start_addr;
+	rcu_read_unlock();
+	return size;
+}
+EXPORT_SYMBOL_GPL(gen_pool_size);
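
A usage sketch of the reworked pool API. Note that only alloc/free are lockless; adding chunks still takes the pool spinlock, so memory must be added up front. gen_pool_alloc_addr() is this tree's variant; passing a zero alloc_addr keeps the old first-fit behaviour. The demo_ names are hypothetical:

    #include <linux/genalloc.h>

    static unsigned long demo_carve_block(void __iomem *sram, phys_addr_t phys,
                                          size_t region_size)
    {
            struct gen_pool *pool;

            /* min_alloc_order 5: allocate in 32-byte granules, any node */
            pool = gen_pool_create(5, -1);
            if (!pool)
                    return 0;

            /* still takes pool->lock; preallocate before lockless use */
            if (gen_pool_add_virt(pool, (unsigned long)sram, phys,
                                  region_size, -1)) {
                    gen_pool_destroy(pool);
                    return 0;
            }

            /* lockless; NMI-safe only with an NMI-safe cmpxchg */
            return gen_pool_alloc_addr(pool, 256, 0);
    }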
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -34,8 +34,10 @@
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/idr.h>
+#include <linux/spinlock.h>
 
 static struct kmem_cache *idr_layer_cache;
+static DEFINE_SPINLOCK(simple_ida_lock);
 
 static struct idr_layer *get_from_free_list(struct idr *idp)
 {
@@ -926,6 +928,71 @@ void ida_destroy(struct ida *ida)
 EXPORT_SYMBOL(ida_destroy);
 
 /**
+ * ida_simple_get - get a new id.
+ * @ida: the (initialized) ida.
+ * @start: the minimum id (inclusive, < 0x8000000)
+ * @end: the maximum id (exclusive, < 0x8000000 or 0)
+ * @gfp_mask: memory allocation flags
+ *
+ * Allocates an id in the range start <= id < end, or returns -ENOSPC.
+ * On memory allocation failure, returns -ENOMEM.
+ *
+ * Use ida_simple_remove() to get rid of an id.
+ */
+int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
+		   gfp_t gfp_mask)
+{
+	int ret, id;
+	unsigned int max;
+
+	BUG_ON((int)start < 0);
+	BUG_ON((int)end < 0);
+
+	if (end == 0)
+		max = 0x80000000;
+	else {
+		BUG_ON(end < start);
+		max = end - 1;
+	}
+
+again:
+	if (!ida_pre_get(ida, gfp_mask))
+		return -ENOMEM;
+
+	spin_lock(&simple_ida_lock);
+	ret = ida_get_new_above(ida, start, &id);
+	if (!ret) {
+		if (id > max) {
+			ida_remove(ida, id);
+			ret = -ENOSPC;
+		} else {
+			ret = id;
+		}
+	}
+	spin_unlock(&simple_ida_lock);
+
+	if (unlikely(ret == -EAGAIN))
+		goto again;
+
+	return ret;
+}
+EXPORT_SYMBOL(ida_simple_get);
+
+/**
+ * ida_simple_remove - remove an allocated id.
+ * @ida: the (initialized) ida.
+ * @id: the id returned by ida_simple_get.
+ */
+void ida_simple_remove(struct ida *ida, unsigned int id)
+{
+	BUG_ON((int)id < 0);
+	spin_lock(&simple_ida_lock);
+	ida_remove(ida, id);
+	spin_unlock(&simple_ida_lock);
+}
+EXPORT_SYMBOL(ida_simple_remove);
+
+/**
  * ida_init - initialize ida handle
  * @ida:	ida handle
  *
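
The new ida_simple_* pair wraps the ida_pre_get()/ida_get_new_above() dance and the EAGAIN retry loop behind one global spinlock. A minimal usage sketch (demo_ names hypothetical):

    #include <linux/idr.h>
    #include <linux/gfp.h>

    static DEFINE_IDA(demo_minor_ida);

    static int demo_get_minor(void)
    {
            /* returns an id in [0, 256), or -ENOSPC / -ENOMEM */
            return ida_simple_get(&demo_minor_ida, 0, 256, GFP_KERNEL);
    }

    static void demo_put_minor(int minor)
    {
            ida_simple_remove(&demo_minor_ida, minor);
    }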
diff --git a/lib/iomap.c b/lib/iomap.c
index d3222938515..5dbcb4b2d86 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -224,6 +224,7 @@ EXPORT_SYMBOL(iowrite8_rep);
 EXPORT_SYMBOL(iowrite16_rep);
 EXPORT_SYMBOL(iowrite32_rep);
 
+#ifdef CONFIG_HAS_IOPORT
 /* Create a virtual mapping cookie for an IO port range */
 void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
@@ -238,7 +239,9 @@ void ioport_unmap(void __iomem *addr)
 }
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
+#endif /* CONFIG_HAS_IOPORT */
 
+#ifdef CONFIG_PCI
 /**
  * pci_iomap - create a virtual mapping cookie for a PCI BAR
  * @dev: PCI device that owns the BAR
@@ -280,3 +283,4 @@ void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 }
 EXPORT_SYMBOL(pci_iomap);
 EXPORT_SYMBOL(pci_iounmap);
+#endif /* CONFIG_PCI */
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 70af0a7f97c..ad72a03ce5e 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -282,7 +282,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
					    kobj_bcast_filter,
					    kobj);
		/* ENOBUFS should be handled in userspace */
-		if (retval == -ENOBUFS)
+		if (retval == -ENOBUFS || retval == -ESRCH)
			retval = 0;
	} else
		retval = -ENOMEM;
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 2dbae88090a..5e066759f55 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -19,11 +19,6 @@
 #include <linux/types.h>
 #include <asm/uaccess.h>
 
-static inline char _tolower(const char c)
-{
-	return c | 0x20;
-}
-
 static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
 {
	unsigned long long acc;
diff --git a/lib/lcm.c b/lib/lcm.c
--- a/lib/lcm.c
+++ b/lib/lcm.c
@@ -1,6 +1,7 @@
 #include <linux/kernel.h>
 #include <linux/gcd.h>
 #include <linux/module.h>
+#include <linux/lcm.h>
 
 /* Lowest common multiple */
 unsigned long lcm(unsigned long a, unsigned long b)
diff --git a/lib/llist.c b/lib/llist.c new file mode 100644 index 00000000000..da445724fa1 --- /dev/null +++ b/lib/llist.c | |||
@@ -0,0 +1,129 @@ | |||
1 | /* | ||
2 | * Lock-less NULL terminated single linked list | ||
3 | * | ||
4 | * The basic atomic operation of this list is cmpxchg on long. On | ||
5 | * architectures that don't have NMI-safe cmpxchg implementation, the | ||
6 | * list can NOT be used in NMI handler. So code uses the list in NMI | ||
7 | * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. | ||
8 | * | ||
9 | * Copyright 2010,2011 Intel Corp. | ||
10 | * Author: Huang Ying <ying.huang@intel.com> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License version | ||
14 | * 2 as published by the Free Software Foundation; | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
24 | */ | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/llist.h> | ||
29 | |||
30 | #include <asm/system.h> | ||
31 | |||
32 | /** | ||
33 | * llist_add - add a new entry | ||
34 | * @new: new entry to be added | ||
35 | * @head: the head for your lock-less list | ||
36 | */ | ||
37 | void llist_add(struct llist_node *new, struct llist_head *head) | ||
38 | { | ||
39 | struct llist_node *entry, *old_entry; | ||
40 | |||
41 | #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG | ||
42 | BUG_ON(in_nmi()); | ||
43 | #endif | ||
44 | |||
45 | entry = head->first; | ||
46 | do { | ||
47 | old_entry = entry; | ||
48 | new->next = entry; | ||
49 | cpu_relax(); | ||
50 | } while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry); | ||
51 | } | ||
52 | EXPORT_SYMBOL_GPL(llist_add); | ||
53 | |||
54 | /** | ||
55 | * llist_add_batch - add several linked entries in batch | ||
56 | * @new_first: first entry in batch to be added | ||
57 | * @new_last: last entry in batch to be added | ||
58 | * @head: the head for your lock-less list | ||
59 | */ | ||
60 | void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, | ||
61 | struct llist_head *head) | ||
62 | { | ||
63 | struct llist_node *entry, *old_entry; | ||
64 | |||
65 | #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG | ||
66 | BUG_ON(in_nmi()); | ||
67 | #endif | ||
68 | |||
69 | entry = head->first; | ||
70 | do { | ||
71 | old_entry = entry; | ||
72 | new_last->next = entry; | ||
73 | cpu_relax(); | ||
74 | } while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry); | ||
75 | } | ||
76 | EXPORT_SYMBOL_GPL(llist_add_batch); | ||
77 | |||
78 | /** | ||
79 | * llist_del_first - delete the first entry of lock-less list | ||
80 | * @head: the head for your lock-less list | ||
81 | * | ||
82 | * If the list is empty, return NULL; otherwise return the first | ||
83 | * entry deleted, which is the most recently added one. | ||
84 | * | ||
85 | * Only one llist_del_first user may run at a time alongside multiple | ||
86 | * llist_add users without a lock; otherwise a concurrent | ||
87 | * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add, | ||
88 | * llist_add) sequence in another user may change @head->first->next | ||
89 | * while leaving @head->first unchanged. If multiple consumers are | ||
90 | * needed, use llist_del_all or a lock between consumers. | ||
91 | */ | ||
92 | struct llist_node *llist_del_first(struct llist_head *head) | ||
93 | { | ||
94 | struct llist_node *entry, *old_entry, *next; | ||
95 | |||
96 | #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG | ||
97 | BUG_ON(in_nmi()); | ||
98 | #endif | ||
99 | |||
100 | entry = head->first; | ||
101 | do { | ||
102 | if (entry == NULL) | ||
103 | return NULL; | ||
104 | old_entry = entry; | ||
105 | next = entry->next; | ||
106 | cpu_relax(); | ||
107 | } while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry); | ||
108 | |||
109 | return entry; | ||
110 | } | ||
111 | EXPORT_SYMBOL_GPL(llist_del_first); | ||
112 | |||
113 | /** | ||
114 | * llist_del_all - delete all entries from lock-less list | ||
115 | * @head: the head of lock-less list to delete all entries | ||
116 | * | ||
117 | * If the list is empty, return NULL; otherwise delete all entries | ||
118 | * and return a pointer to the first one. Entries are returned in | ||
119 | * order from the most recently added to the oldest. | ||
120 | */ | ||
121 | struct llist_node *llist_del_all(struct llist_head *head) | ||
122 | { | ||
123 | #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG | ||
124 | BUG_ON(in_nmi()); | ||
125 | #endif | ||
126 | |||
127 | return xchg(&head->first, NULL); | ||
128 | } | ||
129 | EXPORT_SYMBOL_GPL(llist_del_all); | ||
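[Editor's note] Taken together, the operations above support many concurrent producers with a single lock-free consumer (or several consumers that only ever use llist_del_all). A usage sketch, assuming the LLIST_HEAD initializer from <linux/llist.h>; the event type and handler are hypothetical:

    struct example_event {
            int code;
            struct llist_node node;
    };

    static LLIST_HEAD(example_events);

    /* Producer: usable from any context, subject to the NMI-safe
     * cmpxchg caveat in the file header above. */
    static void example_post(struct example_event *ev)
    {
            llist_add(&ev->node, &example_events);
    }

    /* Single consumer: detach the whole list atomically, then walk it.
     * Entries come back newest-first, per llist_del_all() above. */
    static void example_drain(void)
    {
            struct llist_node *n = llist_del_all(&example_events);

            while (n) {
                    struct example_event *ev =
                            container_of(n, struct example_event, node);
                    n = n->next;
                    handle_event(ev);       /* hypothetical handler */
            }
    }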
diff --git a/lib/md5.c b/lib/md5.c new file mode 100644 index 00000000000..c777180e1f2 --- /dev/null +++ b/lib/md5.c | |||
@@ -0,0 +1,95 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/cryptohash.h> | ||
4 | |||
5 | #define F1(x, y, z) (z ^ (x & (y ^ z))) | ||
6 | #define F2(x, y, z) F1(z, x, y) | ||
7 | #define F3(x, y, z) (x ^ y ^ z) | ||
8 | #define F4(x, y, z) (y ^ (x | ~z)) | ||
9 | |||
10 | #define MD5STEP(f, w, x, y, z, in, s) \ | ||
11 | (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x) | ||
12 | |||
13 | void md5_transform(__u32 *hash, __u32 const *in) | ||
14 | { | ||
15 | u32 a, b, c, d; | ||
16 | |||
17 | a = hash[0]; | ||
18 | b = hash[1]; | ||
19 | c = hash[2]; | ||
20 | d = hash[3]; | ||
21 | |||
22 | MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); | ||
23 | MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); | ||
24 | MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); | ||
25 | MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); | ||
26 | MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); | ||
27 | MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); | ||
28 | MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); | ||
29 | MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); | ||
30 | MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7); | ||
31 | MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12); | ||
32 | MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); | ||
33 | MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); | ||
34 | MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); | ||
35 | MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); | ||
36 | MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17); | ||
37 | MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); | ||
38 | |||
39 | MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5); | ||
40 | MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9); | ||
41 | MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); | ||
42 | MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20); | ||
43 | MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5); | ||
44 | MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); | ||
45 | MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); | ||
46 | MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20); | ||
47 | MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5); | ||
48 | MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); | ||
49 | MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14); | ||
50 | MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20); | ||
51 | MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); | ||
52 | MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9); | ||
53 | MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14); | ||
54 | MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); | ||
55 | |||
56 | MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4); | ||
57 | MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11); | ||
58 | MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16); | ||
59 | MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); | ||
60 | MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4); | ||
61 | MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11); | ||
62 | MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16); | ||
63 | MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); | ||
64 | MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); | ||
65 | MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11); | ||
66 | MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16); | ||
67 | MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23); | ||
68 | MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4); | ||
69 | MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); | ||
70 | MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); | ||
71 | MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23); | ||
72 | |||
73 | MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6); | ||
74 | MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10); | ||
75 | MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); | ||
76 | MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21); | ||
77 | MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6); | ||
78 | MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10); | ||
79 | MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); | ||
80 | MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21); | ||
81 | MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6); | ||
82 | MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); | ||
83 | MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15); | ||
84 | MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21); | ||
85 | MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6); | ||
86 | MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); | ||
87 | MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15); | ||
88 | MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21); | ||
89 | |||
90 | hash[0] += a; | ||
91 | hash[1] += b; | ||
92 | hash[2] += c; | ||
93 | hash[3] += d; | ||
94 | } | ||
95 | EXPORT_SYMBOL(md5_transform); | ||
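[Editor's note] md5_transform() is deliberately bare: it compresses exactly one 64-byte block into the four-word state, leaving padding, length encoding, and byte-order handling to the caller (in-tree users feed it for things like sequence-number hashing rather than as a general MD5 API). A hedged sketch of driving it once over a zeroed block, using the standard MD5 initial values:

    #include <linux/cryptohash.h>
    #include <linux/string.h>

    void example_md5_one_block(void)
    {
            __u32 hash[4];          /* MD5 state: A, B, C, D */
            __u32 block[16];        /* one 512-bit input block */

            hash[0] = 0x67452301;   /* standard MD5 initialization vector */
            hash[1] = 0xefcdab89;
            hash[2] = 0x98badcfe;
            hash[3] = 0x10325476;

            memset(block, 0, sizeof(block));
            md5_transform(hash, block);
            /* hash[] now holds chained state, not a padded MD5 digest */
    }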
diff --git a/lib/nlattr.c b/lib/nlattr.c index ac09f2226dc..a8408b6cacd 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c | |||
@@ -20,6 +20,7 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = { | |||
20 | [NLA_U16] = sizeof(u16), | 20 | [NLA_U16] = sizeof(u16), |
21 | [NLA_U32] = sizeof(u32), | 21 | [NLA_U32] = sizeof(u32), |
22 | [NLA_U64] = sizeof(u64), | 22 | [NLA_U64] = sizeof(u64), |
23 | [NLA_MSECS] = sizeof(u64), | ||
23 | [NLA_NESTED] = NLA_HDRLEN, | 24 | [NLA_NESTED] = NLA_HDRLEN, |
24 | }; | 25 | }; |
25 | 26 | ||
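[Editor's note] Registering a minimum length for NLA_MSECS lets netlink attribute validation reject payloads shorter than a u64 before any getter runs. A hedged sketch of a policy using the new type; the attribute enum is hypothetical:

    static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
            [EXAMPLE_ATTR_TIMEOUT] = { .type = NLA_MSECS },
    };

    /* On the read side, nla_get_msecs() converts the u64 millisecond
     * payload into jiffies for the caller. */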
diff --git a/lib/plist.c b/lib/plist.c index 0ae7e643172..a0a4da489c2 100644 --- a/lib/plist.c +++ b/lib/plist.c | |||
@@ -56,11 +56,6 @@ static void plist_check_list(struct list_head *top) | |||
56 | 56 | ||
57 | static void plist_check_head(struct plist_head *head) | 57 | static void plist_check_head(struct plist_head *head) |
58 | { | 58 | { |
59 | WARN_ON(head != &test_head && !head->rawlock && !head->spinlock); | ||
60 | if (head->rawlock) | ||
61 | WARN_ON_SMP(!raw_spin_is_locked(head->rawlock)); | ||
62 | if (head->spinlock) | ||
63 | WARN_ON_SMP(!spin_is_locked(head->spinlock)); | ||
64 | if (!plist_head_empty(head)) | 59 | if (!plist_head_empty(head)) |
65 | plist_check_list(&plist_first(head)->prio_list); | 60 | plist_check_list(&plist_first(head)->prio_list); |
66 | plist_check_list(&head->node_list); | 61 | plist_check_list(&head->node_list); |
@@ -180,7 +175,7 @@ static int __init plist_test(void) | |||
180 | unsigned int r = local_clock(); | 175 | unsigned int r = local_clock(); |
181 | 176 | ||
182 | printk(KERN_INFO "start plist test\n"); | 177 | printk(KERN_INFO "start plist test\n"); |
183 | plist_head_init(&test_head, NULL); | 178 | plist_head_init(&test_head); |
184 | for (i = 0; i < ARRAY_SIZE(test_node); i++) | 179 | for (i = 0; i < ARRAY_SIZE(test_node); i++) |
185 | plist_node_init(test_node + i, 0); | 180 | plist_node_init(test_node + i, 0); |
186 | 181 | ||
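[Editor's note] With the lock-pointer debugging gone, plist_head_init() shrinks to a single argument, as the updated self-test shows. A sketch of the resulting API surface; the priority value is arbitrary:

    struct plist_head head;
    struct plist_node node;

    plist_head_init(&head);         /* no lock argument any more */
    plist_node_init(&node, 10);     /* priority 10 */
    plist_add(&node, &head);
    plist_del(&node, &head);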
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 7ea2e033d71..a2f9da59c19 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -823,8 +823,8 @@ unsigned long radix_tree_prev_hole(struct radix_tree_root *root, | |||
823 | EXPORT_SYMBOL(radix_tree_prev_hole); | 823 | EXPORT_SYMBOL(radix_tree_prev_hole); |
824 | 824 | ||
825 | static unsigned int | 825 | static unsigned int |
826 | __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, | 826 | __lookup(struct radix_tree_node *slot, void ***results, unsigned long *indices, |
827 | unsigned int max_items, unsigned long *next_index) | 827 | unsigned long index, unsigned int max_items, unsigned long *next_index) |
828 | { | 828 | { |
829 | unsigned int nr_found = 0; | 829 | unsigned int nr_found = 0; |
830 | unsigned int shift, height; | 830 | unsigned int shift, height; |
@@ -857,12 +857,16 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, | |||
857 | 857 | ||
858 | /* Bottom level: grab some items */ | 858 | /* Bottom level: grab some items */ |
859 | for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { | 859 | for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { |
860 | index++; | ||
861 | if (slot->slots[i]) { | 860 | if (slot->slots[i]) { |
862 | results[nr_found++] = &(slot->slots[i]); | 861 | results[nr_found] = &(slot->slots[i]); |
863 | if (nr_found == max_items) | 862 | if (indices) |
863 | indices[nr_found] = index; | ||
864 | if (++nr_found == max_items) { | ||
865 | index++; | ||
864 | goto out; | 866 | goto out; |
867 | } | ||
865 | } | 868 | } |
869 | index++; | ||
866 | } | 870 | } |
867 | out: | 871 | out: |
868 | *next_index = index; | 872 | *next_index = index; |
@@ -918,8 +922,8 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
918 | 922 | ||
919 | if (cur_index > max_index) | 923 | if (cur_index > max_index) |
920 | break; | 924 | break; |
921 | slots_found = __lookup(node, (void ***)results + ret, cur_index, | 925 | slots_found = __lookup(node, (void ***)results + ret, NULL, |
922 | max_items - ret, &next_index); | 926 | cur_index, max_items - ret, &next_index); |
923 | nr_found = 0; | 927 | nr_found = 0; |
924 | for (i = 0; i < slots_found; i++) { | 928 | for (i = 0; i < slots_found; i++) { |
925 | struct radix_tree_node *slot; | 929 | struct radix_tree_node *slot; |
@@ -944,6 +948,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup); | |||
944 | * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree | 948 | * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree |
945 | * @root: radix tree root | 949 | * @root: radix tree root |
946 | * @results: where the results of the lookup are placed | 950 | * @results: where the results of the lookup are placed |
951 | * @indices: where their indices should be placed (but usually NULL) | ||
947 | * @first_index: start the lookup from this key | 952 | * @first_index: start the lookup from this key |
948 | * @max_items: place up to this many items at *results | 953 | * @max_items: place up to this many items at *results |
949 | * | 954 | * |
@@ -958,7 +963,8 @@ EXPORT_SYMBOL(radix_tree_gang_lookup); | |||
958 | * protection, radix_tree_deref_slot may fail requiring a retry. | 963 | * protection, radix_tree_deref_slot may fail requiring a retry. |
959 | */ | 964 | */ |
960 | unsigned int | 965 | unsigned int |
961 | radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | 966 | radix_tree_gang_lookup_slot(struct radix_tree_root *root, |
967 | void ***results, unsigned long *indices, | ||
962 | unsigned long first_index, unsigned int max_items) | 968 | unsigned long first_index, unsigned int max_items) |
963 | { | 969 | { |
964 | unsigned long max_index; | 970 | unsigned long max_index; |
@@ -974,6 +980,8 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | |||
974 | if (first_index > 0) | 980 | if (first_index > 0) |
975 | return 0; | 981 | return 0; |
976 | results[0] = (void **)&root->rnode; | 982 | results[0] = (void **)&root->rnode; |
983 | if (indices) | ||
984 | indices[0] = 0; | ||
977 | return 1; | 985 | return 1; |
978 | } | 986 | } |
979 | node = indirect_to_ptr(node); | 987 | node = indirect_to_ptr(node); |
@@ -987,8 +995,9 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | |||
987 | 995 | ||
988 | if (cur_index > max_index) | 996 | if (cur_index > max_index) |
989 | break; | 997 | break; |
990 | slots_found = __lookup(node, results + ret, cur_index, | 998 | slots_found = __lookup(node, results + ret, |
991 | max_items - ret, &next_index); | 999 | indices ? indices + ret : NULL, |
1000 | cur_index, max_items - ret, &next_index); | ||
992 | ret += slots_found; | 1001 | ret += slots_found; |
993 | if (next_index == 0) | 1002 | if (next_index == 0) |
994 | break; | 1003 | break; |
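[Editor's note] The widened signature lets one pass collect both the slots and the indices they live at; existing callers pass NULL for @indices and behave exactly as before, as the radix_tree_gang_lookup() hunk above shows. A sketch of a caller using the new parameter (tree and batch size hypothetical):

    void **slots[16];
    unsigned long indices[16];
    unsigned int i, n;

    n = radix_tree_gang_lookup_slot(&tree, slots, indices, 0, 16);
    for (i = 0; i < n; i++)
            pr_info("found a slot at index %lu\n", indices[i]);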
@@ -1194,6 +1203,98 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | |||
1194 | } | 1203 | } |
1195 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); | 1204 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); |
1196 | 1205 | ||
1206 | #if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP) | ||
1207 | #include <linux/sched.h> /* for cond_resched() */ | ||
1208 | |||
1209 | /* | ||
1210 | * This linear search is at present only useful to shmem_unuse_inode(). | ||
1211 | */ | ||
1212 | static unsigned long __locate(struct radix_tree_node *slot, void *item, | ||
1213 | unsigned long index, unsigned long *found_index) | ||
1214 | { | ||
1215 | unsigned int shift, height; | ||
1216 | unsigned long i; | ||
1217 | |||
1218 | height = slot->height; | ||
1219 | shift = (height-1) * RADIX_TREE_MAP_SHIFT; | ||
1220 | |||
1221 | for ( ; height > 1; height--) { | ||
1222 | i = (index >> shift) & RADIX_TREE_MAP_MASK; | ||
1223 | for (;;) { | ||
1224 | if (slot->slots[i] != NULL) | ||
1225 | break; | ||
1226 | index &= ~((1UL << shift) - 1); | ||
1227 | index += 1UL << shift; | ||
1228 | if (index == 0) | ||
1229 | goto out; /* 32-bit wraparound */ | ||
1230 | i++; | ||
1231 | if (i == RADIX_TREE_MAP_SIZE) | ||
1232 | goto out; | ||
1233 | } | ||
1234 | |||
1235 | shift -= RADIX_TREE_MAP_SHIFT; | ||
1236 | slot = rcu_dereference_raw(slot->slots[i]); | ||
1237 | if (slot == NULL) | ||
1238 | goto out; | ||
1239 | } | ||
1240 | |||
1241 | /* Bottom level: check items */ | ||
1242 | for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { | ||
1243 | if (slot->slots[i] == item) { | ||
1244 | *found_index = index + i; | ||
1245 | index = 0; | ||
1246 | goto out; | ||
1247 | } | ||
1248 | } | ||
1249 | index += RADIX_TREE_MAP_SIZE; | ||
1250 | out: | ||
1251 | return index; | ||
1252 | } | ||
1253 | |||
1254 | /** | ||
1255 | * radix_tree_locate_item - search through radix tree for item | ||
1256 | * @root: radix tree root | ||
1257 | * @item: item to be found | ||
1258 | * | ||
1259 | * Returns index where item was found, or -1 if not found. | ||
1260 | * Caller must hold no lock (since this time-consuming function needs | ||
1261 | * to be preemptible), and must check afterwards if item is still there. | ||
1262 | */ | ||
1263 | unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) | ||
1264 | { | ||
1265 | struct radix_tree_node *node; | ||
1266 | unsigned long max_index; | ||
1267 | unsigned long cur_index = 0; | ||
1268 | unsigned long found_index = -1; | ||
1269 | |||
1270 | do { | ||
1271 | rcu_read_lock(); | ||
1272 | node = rcu_dereference_raw(root->rnode); | ||
1273 | if (!radix_tree_is_indirect_ptr(node)) { | ||
1274 | rcu_read_unlock(); | ||
1275 | if (node == item) | ||
1276 | found_index = 0; | ||
1277 | break; | ||
1278 | } | ||
1279 | |||
1280 | node = indirect_to_ptr(node); | ||
1281 | max_index = radix_tree_maxindex(node->height); | ||
1282 | if (cur_index > max_index) | ||
1283 | break; | ||
1284 | |||
1285 | cur_index = __locate(node, item, cur_index, &found_index); | ||
1286 | rcu_read_unlock(); | ||
1287 | cond_resched(); | ||
1288 | } while (cur_index != 0 && cur_index <= max_index); | ||
1289 | |||
1290 | return found_index; | ||
1291 | } | ||
1292 | #else | ||
1293 | unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) | ||
1294 | { | ||
1295 | return -1; | ||
1296 | } | ||
1297 | #endif /* CONFIG_SHMEM && CONFIG_SWAP */ | ||
1197 | 1298 | ||
1198 | /** | 1299 | /** |
1199 | * radix_tree_shrink - shrink height of a radix tree to minimal | 1300 | * radix_tree_shrink - shrink height of a radix tree to minimal |
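[Editor's note] A sketch of the intended caller pattern for radix_tree_locate_item(): since the walk drops all locks between passes to stay preemptible, the result is only a hint that must be revalidated. The mapping and item here follow the shmem use case and are assumptions:

    unsigned long index;

    index = radix_tree_locate_item(&mapping->page_tree, item);
    if (index == (unsigned long)-1)
            return 0;       /* not present in this mapping */

    /* Re-check under the tree lock that 'item' is still at 'index'
     * before acting on it; it may have moved or gone meanwhile. */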
diff --git a/lib/sha1.c b/lib/sha1.c index 4c45fd50e91..1de509a159c 100644 --- a/lib/sha1.c +++ b/lib/sha1.c | |||
@@ -1,31 +1,73 @@ | |||
1 | /* | 1 | /* |
2 | * SHA transform algorithm, originally taken from code written by | 2 | * SHA1 routine optimized to do word accesses rather than byte accesses, |
3 | * Peter Gutmann, and placed in the public domain. | 3 | * and to avoid unnecessary copies into the context array. |
4 | * | ||
5 | * This was based on the git SHA1 implementation. | ||
4 | */ | 6 | */ |
5 | 7 | ||
6 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
7 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/bitops.h> | ||
8 | #include <linux/cryptohash.h> | 11 | #include <linux/cryptohash.h> |
12 | #include <asm/unaligned.h> | ||
9 | 13 | ||
10 | /* The SHA f()-functions. */ | 14 | /* |
15 | * If you have 32 registers or more, the compiler can (and should) | ||
16 | * try to change the array[] accesses into registers. However, on | ||
17 | * machines with less than ~25 registers, that won't really work, | ||
18 | * and at least gcc will make an unholy mess of it. | ||
19 | * | ||
20 | * So to avoid that mess which just slows things down, we force | ||
21 | * the stores to memory to actually happen (we might be better off | ||
22 | * with a 'W(t)=(val);asm("":"+m" (W(t))' there instead, as | ||
23 | * suggested by Artur Skawina - that will also make gcc unable to | ||
24 | * try to do the silly "optimize away loads" part because it won't | ||
25 | * see what the value will be). | ||
26 | * | ||
27 | * Ben Herrenschmidt reports that on PPC, the C version comes close | ||
28 | * to the optimized asm with this (ie on PPC you don't want that | ||
29 | * 'volatile', since there are lots of registers). | ||
30 | * | ||
31 | * On ARM we get the best code generation by forcing a full memory barrier | ||
32 | * between each SHA_ROUND; otherwise gcc happily gets wild with spilling and | ||
33 | * the stack frame size simply explodes and performance goes down the drain. | ||
34 | */ | ||
11 | 35 | ||
12 | #define f1(x,y,z) (z ^ (x & (y ^ z))) /* x ? y : z */ | 36 | #ifdef CONFIG_X86 |
13 | #define f2(x,y,z) (x ^ y ^ z) /* XOR */ | 37 | #define setW(x, val) (*(volatile __u32 *)&W(x) = (val)) |
14 | #define f3(x,y,z) ((x & y) + (z & (x ^ y))) /* majority */ | 38 | #elif defined(CONFIG_ARM) |
39 | #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0) | ||
40 | #else | ||
41 | #define setW(x, val) (W(x) = (val)) | ||
42 | #endif | ||
15 | 43 | ||
16 | /* The SHA Mysterious Constants */ | 44 | /* This "rolls" over the 512-bit array */ |
45 | #define W(x) (array[(x)&15]) | ||
17 | 46 | ||
18 | #define K1 0x5A827999L /* Rounds 0-19: sqrt(2) * 2^30 */ | 47 | /* |
19 | #define K2 0x6ED9EBA1L /* Rounds 20-39: sqrt(3) * 2^30 */ | 48 | * Where do we get the source from? The first 16 iterations get it from |
20 | #define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */ | 49 | * the input data, the next mix it from the 512-bit array. |
21 | #define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */ | 50 | */ |
51 | #define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t) | ||
52 | #define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1) | ||
53 | |||
54 | #define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \ | ||
55 | __u32 TEMP = input(t); setW(t, TEMP); \ | ||
56 | E += TEMP + rol32(A,5) + (fn) + (constant); \ | ||
57 | B = ror32(B, 2); } while (0) | ||
58 | |||
59 | #define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) | ||
60 | #define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) | ||
61 | #define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E ) | ||
62 | #define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E ) | ||
63 | #define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E ) | ||
22 | 64 | ||
23 | /** | 65 | /** |
24 | * sha_transform - single block SHA1 transform | 66 | * sha_transform - single block SHA1 transform |
25 | * | 67 | * |
26 | * @digest: 160 bit digest to update | 68 | * @digest: 160 bit digest to update |
27 | * @data: 512 bits of data to hash | 69 | * @data: 512 bits of data to hash |
28 | * @W: 80 words of workspace (see note) | 70 | * @array: 16 words of workspace (see note) |
29 | * | 71 | * |
30 | * This function generates a SHA1 digest for a single 512-bit block. | 72 | * This function generates a SHA1 digest for a single 512-bit block. |
31 | * Be warned, it does not handle padding and message digest, do not | 73 | * Be warned, it does not handle padding and message digest, do not |
@@ -36,47 +78,111 @@ | |||
36 | * to clear the workspace. This is left to the caller to avoid | 78 | * to clear the workspace. This is left to the caller to avoid |
37 | * unnecessary clears between chained hashing operations. | 79 | * unnecessary clears between chained hashing operations. |
38 | */ | 80 | */ |
39 | void sha_transform(__u32 *digest, const char *in, __u32 *W) | 81 | void sha_transform(__u32 *digest, const char *data, __u32 *array) |
40 | { | 82 | { |
41 | __u32 a, b, c, d, e, t, i; | 83 | __u32 A, B, C, D, E; |
42 | 84 | ||
43 | for (i = 0; i < 16; i++) | 85 | A = digest[0]; |
44 | W[i] = be32_to_cpu(((const __be32 *)in)[i]); | 86 | B = digest[1]; |
45 | 87 | C = digest[2]; | |
46 | for (i = 0; i < 64; i++) | 88 | D = digest[3]; |
47 | W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1); | 89 | E = digest[4]; |
48 | 90 | ||
49 | a = digest[0]; | 91 | /* Round 1 - iterations 0-16 take their input from 'data' */ |
50 | b = digest[1]; | 92 | T_0_15( 0, A, B, C, D, E); |
51 | c = digest[2]; | 93 | T_0_15( 1, E, A, B, C, D); |
52 | d = digest[3]; | 94 | T_0_15( 2, D, E, A, B, C); |
53 | e = digest[4]; | 95 | T_0_15( 3, C, D, E, A, B); |
54 | 96 | T_0_15( 4, B, C, D, E, A); | |
55 | for (i = 0; i < 20; i++) { | 97 | T_0_15( 5, A, B, C, D, E); |
56 | t = f1(b, c, d) + K1 + rol32(a, 5) + e + W[i]; | 98 | T_0_15( 6, E, A, B, C, D); |
57 | e = d; d = c; c = rol32(b, 30); b = a; a = t; | 99 | T_0_15( 7, D, E, A, B, C); |
58 | } | 100 | T_0_15( 8, C, D, E, A, B); |
59 | 101 | T_0_15( 9, B, C, D, E, A); | |
60 | for (; i < 40; i ++) { | 102 | T_0_15(10, A, B, C, D, E); |
61 | t = f2(b, c, d) + K2 + rol32(a, 5) + e + W[i]; | 103 | T_0_15(11, E, A, B, C, D); |
62 | e = d; d = c; c = rol32(b, 30); b = a; a = t; | 104 | T_0_15(12, D, E, A, B, C); |
63 | } | 105 | T_0_15(13, C, D, E, A, B); |
64 | 106 | T_0_15(14, B, C, D, E, A); | |
65 | for (; i < 60; i ++) { | 107 | T_0_15(15, A, B, C, D, E); |
66 | t = f3(b, c, d) + K3 + rol32(a, 5) + e + W[i]; | 108 | |
67 | e = d; d = c; c = rol32(b, 30); b = a; a = t; | 109 | /* Round 1 - tail. Input from 512-bit mixing array */ |
68 | } | 110 | T_16_19(16, E, A, B, C, D); |
69 | 111 | T_16_19(17, D, E, A, B, C); | |
70 | for (; i < 80; i ++) { | 112 | T_16_19(18, C, D, E, A, B); |
71 | t = f2(b, c, d) + K4 + rol32(a, 5) + e + W[i]; | 113 | T_16_19(19, B, C, D, E, A); |
72 | e = d; d = c; c = rol32(b, 30); b = a; a = t; | 114 | |
73 | } | 115 | /* Round 2 */ |
74 | 116 | T_20_39(20, A, B, C, D, E); | |
75 | digest[0] += a; | 117 | T_20_39(21, E, A, B, C, D); |
76 | digest[1] += b; | 118 | T_20_39(22, D, E, A, B, C); |
77 | digest[2] += c; | 119 | T_20_39(23, C, D, E, A, B); |
78 | digest[3] += d; | 120 | T_20_39(24, B, C, D, E, A); |
79 | digest[4] += e; | 121 | T_20_39(25, A, B, C, D, E); |
122 | T_20_39(26, E, A, B, C, D); | ||
123 | T_20_39(27, D, E, A, B, C); | ||
124 | T_20_39(28, C, D, E, A, B); | ||
125 | T_20_39(29, B, C, D, E, A); | ||
126 | T_20_39(30, A, B, C, D, E); | ||
127 | T_20_39(31, E, A, B, C, D); | ||
128 | T_20_39(32, D, E, A, B, C); | ||
129 | T_20_39(33, C, D, E, A, B); | ||
130 | T_20_39(34, B, C, D, E, A); | ||
131 | T_20_39(35, A, B, C, D, E); | ||
132 | T_20_39(36, E, A, B, C, D); | ||
133 | T_20_39(37, D, E, A, B, C); | ||
134 | T_20_39(38, C, D, E, A, B); | ||
135 | T_20_39(39, B, C, D, E, A); | ||
136 | |||
137 | /* Round 3 */ | ||
138 | T_40_59(40, A, B, C, D, E); | ||
139 | T_40_59(41, E, A, B, C, D); | ||
140 | T_40_59(42, D, E, A, B, C); | ||
141 | T_40_59(43, C, D, E, A, B); | ||
142 | T_40_59(44, B, C, D, E, A); | ||
143 | T_40_59(45, A, B, C, D, E); | ||
144 | T_40_59(46, E, A, B, C, D); | ||
145 | T_40_59(47, D, E, A, B, C); | ||
146 | T_40_59(48, C, D, E, A, B); | ||
147 | T_40_59(49, B, C, D, E, A); | ||
148 | T_40_59(50, A, B, C, D, E); | ||
149 | T_40_59(51, E, A, B, C, D); | ||
150 | T_40_59(52, D, E, A, B, C); | ||
151 | T_40_59(53, C, D, E, A, B); | ||
152 | T_40_59(54, B, C, D, E, A); | ||
153 | T_40_59(55, A, B, C, D, E); | ||
154 | T_40_59(56, E, A, B, C, D); | ||
155 | T_40_59(57, D, E, A, B, C); | ||
156 | T_40_59(58, C, D, E, A, B); | ||
157 | T_40_59(59, B, C, D, E, A); | ||
158 | |||
159 | /* Round 4 */ | ||
160 | T_60_79(60, A, B, C, D, E); | ||
161 | T_60_79(61, E, A, B, C, D); | ||
162 | T_60_79(62, D, E, A, B, C); | ||
163 | T_60_79(63, C, D, E, A, B); | ||
164 | T_60_79(64, B, C, D, E, A); | ||
165 | T_60_79(65, A, B, C, D, E); | ||
166 | T_60_79(66, E, A, B, C, D); | ||
167 | T_60_79(67, D, E, A, B, C); | ||
168 | T_60_79(68, C, D, E, A, B); | ||
169 | T_60_79(69, B, C, D, E, A); | ||
170 | T_60_79(70, A, B, C, D, E); | ||
171 | T_60_79(71, E, A, B, C, D); | ||
172 | T_60_79(72, D, E, A, B, C); | ||
173 | T_60_79(73, C, D, E, A, B); | ||
174 | T_60_79(74, B, C, D, E, A); | ||
175 | T_60_79(75, A, B, C, D, E); | ||
176 | T_60_79(76, E, A, B, C, D); | ||
177 | T_60_79(77, D, E, A, B, C); | ||
178 | T_60_79(78, C, D, E, A, B); | ||
179 | T_60_79(79, B, C, D, E, A); | ||
180 | |||
181 | digest[0] += A; | ||
182 | digest[1] += B; | ||
183 | digest[2] += C; | ||
184 | digest[3] += D; | ||
185 | digest[4] += E; | ||
80 | } | 186 | } |
81 | EXPORT_SYMBOL(sha_transform); | 187 | EXPORT_SYMBOL(sha_transform); |
82 | 188 | ||
@@ -92,4 +198,3 @@ void sha_init(__u32 *buf) | |||
92 | buf[3] = 0x10325476; | 198 | buf[3] = 0x10325476; |
93 | buf[4] = 0xc3d2e1f0; | 199 | buf[4] = 0xc3d2e1f0; |
94 | } | 200 | } |
95 | |||
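[Editor's note] The workspace contract changes with this rewrite: the caller still supplies the scratch array, but only 16 words of it are touched, since W() indexes modulo 16. A hedged usage sketch for one pre-padded 512-bit block; the 16-word size follows the updated kernel-doc above:

    #include <linux/cryptohash.h>
    #include <linux/string.h>

    void example_sha1_one_block(const char *data /* 64 bytes */)
    {
            __u32 digest[5];
            __u32 workspace[16];    /* was 80 words with the old transform */

            sha_init(digest);
            sha_transform(digest, data, workspace);
            memset(workspace, 0, sizeof(workspace)); /* caller clears scratch */
            /* digest[] holds chained state; padding is the caller's job */
    }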
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 4365df31a1d..d7222a9c826 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -31,13 +31,10 @@ | |||
31 | #include <asm/div64.h> | 31 | #include <asm/div64.h> |
32 | #include <asm/sections.h> /* for dereference_function_descriptor() */ | 32 | #include <asm/sections.h> /* for dereference_function_descriptor() */ |
33 | 33 | ||
34 | /* Works only for digits and letters, but small and fast */ | ||
35 | #define TOLOWER(x) ((x) | 0x20) | ||
36 | |||
37 | static unsigned int simple_guess_base(const char *cp) | 34 | static unsigned int simple_guess_base(const char *cp) |
38 | { | 35 | { |
39 | if (cp[0] == '0') { | 36 | if (cp[0] == '0') { |
40 | if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2])) | 37 | if (_tolower(cp[1]) == 'x' && isxdigit(cp[2])) |
41 | return 16; | 38 | return 16; |
42 | else | 39 | else |
43 | return 8; | 40 | return 8; |
@@ -59,13 +56,13 @@ unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int bas | |||
59 | if (!base) | 56 | if (!base) |
60 | base = simple_guess_base(cp); | 57 | base = simple_guess_base(cp); |
61 | 58 | ||
62 | if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x') | 59 | if (base == 16 && cp[0] == '0' && _tolower(cp[1]) == 'x') |
63 | cp += 2; | 60 | cp += 2; |
64 | 61 | ||
65 | while (isxdigit(*cp)) { | 62 | while (isxdigit(*cp)) { |
66 | unsigned int value; | 63 | unsigned int value; |
67 | 64 | ||
68 | value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10; | 65 | value = isdigit(*cp) ? *cp - '0' : _tolower(*cp) - 'a' + 10; |
69 | if (value >= base) | 66 | if (value >= base) |
70 | break; | 67 | break; |
71 | result = result * base + value; | 68 | result = result * base + value; |
@@ -1036,8 +1033,8 @@ precision: | |||
1036 | qualifier: | 1033 | qualifier: |
1037 | /* get the conversion qualifier */ | 1034 | /* get the conversion qualifier */ |
1038 | spec->qualifier = -1; | 1035 | spec->qualifier = -1; |
1039 | if (*fmt == 'h' || TOLOWER(*fmt) == 'l' || | 1036 | if (*fmt == 'h' || _tolower(*fmt) == 'l' || |
1040 | TOLOWER(*fmt) == 'z' || *fmt == 't') { | 1037 | _tolower(*fmt) == 'z' || *fmt == 't') { |
1041 | spec->qualifier = *fmt++; | 1038 | spec->qualifier = *fmt++; |
1042 | if (unlikely(spec->qualifier == *fmt)) { | 1039 | if (unlikely(spec->qualifier == *fmt)) { |
1043 | if (spec->qualifier == 'l') { | 1040 | if (spec->qualifier == 'l') { |
@@ -1104,7 +1101,7 @@ qualifier: | |||
1104 | spec->type = FORMAT_TYPE_LONG; | 1101 | spec->type = FORMAT_TYPE_LONG; |
1105 | else | 1102 | else |
1106 | spec->type = FORMAT_TYPE_ULONG; | 1103 | spec->type = FORMAT_TYPE_ULONG; |
1107 | } else if (TOLOWER(spec->qualifier) == 'z') { | 1104 | } else if (_tolower(spec->qualifier) == 'z') { |
1108 | spec->type = FORMAT_TYPE_SIZE_T; | 1105 | spec->type = FORMAT_TYPE_SIZE_T; |
1109 | } else if (spec->qualifier == 't') { | 1106 | } else if (spec->qualifier == 't') { |
1110 | spec->type = FORMAT_TYPE_PTRDIFF; | 1107 | spec->type = FORMAT_TYPE_PTRDIFF; |
@@ -1149,8 +1146,7 @@ qualifier: | |||
1149 | * %pi4 print an IPv4 address with leading zeros | 1146 | * %pi4 print an IPv4 address with leading zeros |
1150 | * %pI6 print an IPv6 address with colons | 1147 | * %pI6 print an IPv6 address with colons |
1151 | * %pi6 print an IPv6 address without colons | 1148 | * %pi6 print an IPv6 address without colons |
1152 | * %pI6c print an IPv6 address as specified by | 1149 | * %pI6c print an IPv6 address as specified by RFC 5952 |
1153 | * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 | ||
1154 | * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper | 1150 | * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper |
1155 | * case. | 1151 | * case. |
1156 | * %n is ignored | 1152 | * %n is ignored |
@@ -1263,7 +1259,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
1263 | if (qualifier == 'l') { | 1259 | if (qualifier == 'l') { |
1264 | long *ip = va_arg(args, long *); | 1260 | long *ip = va_arg(args, long *); |
1265 | *ip = (str - buf); | 1261 | *ip = (str - buf); |
1266 | } else if (TOLOWER(qualifier) == 'z') { | 1262 | } else if (_tolower(qualifier) == 'z') { |
1267 | size_t *ip = va_arg(args, size_t *); | 1263 | size_t *ip = va_arg(args, size_t *); |
1268 | *ip = (str - buf); | 1264 | *ip = (str - buf); |
1269 | } else { | 1265 | } else { |
@@ -1550,7 +1546,7 @@ do { \ | |||
1550 | void *skip_arg; | 1546 | void *skip_arg; |
1551 | if (qualifier == 'l') | 1547 | if (qualifier == 'l') |
1552 | skip_arg = va_arg(args, long *); | 1548 | skip_arg = va_arg(args, long *); |
1553 | else if (TOLOWER(qualifier) == 'z') | 1549 | else if (_tolower(qualifier) == 'z') |
1554 | skip_arg = va_arg(args, size_t *); | 1550 | skip_arg = va_arg(args, size_t *); |
1555 | else | 1551 | else |
1556 | skip_arg = va_arg(args, int *); | 1552 | skip_arg = va_arg(args, int *); |
@@ -1856,8 +1852,8 @@ int vsscanf(const char *buf, const char *fmt, va_list args) | |||
1856 | 1852 | ||
1857 | /* get conversion qualifier */ | 1853 | /* get conversion qualifier */ |
1858 | qualifier = -1; | 1854 | qualifier = -1; |
1859 | if (*fmt == 'h' || TOLOWER(*fmt) == 'l' || | 1855 | if (*fmt == 'h' || _tolower(*fmt) == 'l' || |
1860 | TOLOWER(*fmt) == 'z') { | 1856 | _tolower(*fmt) == 'z') { |
1861 | qualifier = *fmt++; | 1857 | qualifier = *fmt++; |
1862 | if (unlikely(qualifier == *fmt)) { | 1858 | if (unlikely(qualifier == *fmt)) { |
1863 | if (qualifier == 'h') { | 1859 | if (qualifier == 'h') { |
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c index e51e2558ca9..a768e6d28bb 100644 --- a/lib/xz/xz_dec_bcj.c +++ b/lib/xz/xz_dec_bcj.c | |||
@@ -441,8 +441,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, | |||
441 | * next filter in the chain. Apply the BCJ filter on the new data | 441 | * next filter in the chain. Apply the BCJ filter on the new data |
442 | * in the output buffer. If everything cannot be filtered, copy it | 442 | * in the output buffer. If everything cannot be filtered, copy it |
443 | * to temp and rewind the output buffer position accordingly. | 443 | * to temp and rewind the output buffer position accordingly. |
444 | * | ||
445 | * This always needs to be run when temp.size == 0 to handle a special | ||
446 | * case where the output buffer is full and the next filter has no | ||
447 | * more output coming but hasn't returned XZ_STREAM_END yet. | ||
444 | */ | 448 | */ |
445 | if (s->temp.size < b->out_size - b->out_pos) { | 449 | if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) { |
446 | out_start = b->out_pos; | 450 | out_start = b->out_pos; |
447 | memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); | 451 | memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); |
448 | b->out_pos += s->temp.size; | 452 | b->out_pos += s->temp.size; |
@@ -465,16 +469,25 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, | |||
465 | s->temp.size = b->out_pos - out_start; | 469 | s->temp.size = b->out_pos - out_start; |
466 | b->out_pos -= s->temp.size; | 470 | b->out_pos -= s->temp.size; |
467 | memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); | 471 | memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); |
472 | |||
473 | /* | ||
474 | * If there wasn't enough input to the next filter to fill | ||
475 | * the output buffer with unfiltered data, there's no point | ||
476 | * to try decoding more data to temp. | ||
477 | */ | ||
478 | if (b->out_pos + s->temp.size < b->out_size) | ||
479 | return XZ_OK; | ||
468 | } | 480 | } |
469 | 481 | ||
470 | /* | 482 | /* |
471 | * If we have unfiltered data in temp, try to fill by decoding more | 483 | * We have unfiltered data in temp. If the output buffer isn't full |
472 | * data from the next filter. Apply the BCJ filter on temp. Then we | 484 | * yet, try to fill the temp buffer by decoding more data from the |
473 | * hopefully can fill the actual output buffer by copying filtered | 485 | * next filter. Apply the BCJ filter on temp. Then we hopefully can |
474 | * data from temp. A mix of filtered and unfiltered data may be left | 486 | * fill the actual output buffer by copying filtered data from temp. |
475 | * in temp; it will be taken care on the next call to this function. | 487 | * A mix of filtered and unfiltered data may be left in temp; it will |
488 | * be taken care on the next call to this function. | ||
476 | */ | 489 | */ |
477 | if (s->temp.size > 0) { | 490 | if (b->out_pos < b->out_size) { |
478 | /* Make b->out{,_pos,_size} temporarily point to s->temp. */ | 491 | /* Make b->out{,_pos,_size} temporarily point to s->temp. */ |
479 | s->out = b->out; | 492 | s->out = b->out; |
480 | s->out_pos = b->out_pos; | 493 | s->out_pos = b->out_pos; |
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h index a65633e0696..482b90f363f 100644 --- a/lib/xz/xz_private.h +++ b/lib/xz/xz_private.h | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | #ifdef __KERNEL__ | 13 | #ifdef __KERNEL__ |
14 | # include <linux/xz.h> | 14 | # include <linux/xz.h> |
15 | # include <asm/byteorder.h> | 15 | # include <linux/kernel.h> |
16 | # include <asm/unaligned.h> | 16 | # include <asm/unaligned.h> |
17 | /* XZ_PREBOOT may be defined only via decompress_unxz.c. */ | 17 | /* XZ_PREBOOT may be defined only via decompress_unxz.c. */ |
18 | # ifndef XZ_PREBOOT | 18 | # ifndef XZ_PREBOOT |