author	Michal Marek <mmarek@suse.cz>	2010-08-04 07:59:13 -0400
committer	Michal Marek <mmarek@suse.cz>	2010-08-04 07:59:13 -0400
commit	772320e84588dcbe1600ffb83e5f328f2209ac2a (patch)
tree	a7de21b79340aeaa17c58126f6b801b82c77b53a /lib
parent	1ce53adf13a54375d2a5c7cdbe341b2558389615 (diff)
parent	9fe6206f400646a2322096b56c59891d530e8d51 (diff)
Merge commit 'v2.6.35' into kbuild/kbuild
Conflicts: arch/powerpc/Makefile
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig	6
-rw-r--r--	lib/Kconfig.debug	91
-rw-r--r--	lib/Kconfig.kgdb	24
-rw-r--r--	lib/Makefile	11
-rw-r--r--	lib/atomic64.c	4
-rw-r--r--	lib/atomic64_test.c	166
-rw-r--r--	lib/bitmap.c	19
-rw-r--r--	lib/btree.c	798
-rw-r--r--	lib/bug.c	2
-rw-r--r--	lib/cpu-notifier-error-inject.c	63
-rw-r--r--	lib/cpumask.c	1
-rw-r--r--	lib/crc32.c	55
-rw-r--r--	lib/debug_locks.c	1
-rw-r--r--	lib/debugobjects.c	64
-rw-r--r--	lib/decompress_unlzo.c	22
-rw-r--r--	lib/devres.c	1
-rw-r--r--	lib/dma-debug.c	4
-rw-r--r--	lib/dynamic_debug.c	5
-rw-r--r--	lib/flex_array.c	2
-rw-r--r--	lib/gen_crc32table.c	47
-rw-r--r--	lib/genalloc.c	2
-rw-r--r--	lib/hexdump.c	54
-rw-r--r--	lib/hweight.c	26
-rw-r--r--	lib/idr.c	23
-rw-r--r--	lib/inflate.c	1
-rw-r--r--	lib/kasprintf.c	1
-rw-r--r--	lib/kobject.c	121
-rw-r--r--	lib/kobject_uevent.c	115
-rw-r--r--	lib/kref.c	16
-rw-r--r--	lib/lcm.c	15
-rw-r--r--	lib/list_sort.c	263
-rw-r--r--	lib/lmb.c	532
-rw-r--r--	lib/radix-tree.c	41
-rw-r--r--	lib/random32.c	38
-rw-r--r--	lib/ratelimit.c	11
-rw-r--r--	lib/rbtree.c	68
-rw-r--r--	lib/rwsem-spinlock.c	14
-rw-r--r--	lib/rwsem.c	5
-rw-r--r--	lib/scatterlist.c	1
-rw-r--r--	lib/show_mem.c	14
-rw-r--r--	lib/string.c	40
-rw-r--r--	lib/swiotlb.c	32
-rw-r--r--	lib/textsearch.c	1
-rw-r--r--	lib/uuid.c	53
-rw-r--r--	lib/vsprintf.c	198
-rw-r--r--	lib/zlib_inflate/inffast.c	72
46 files changed, 2160 insertions(+), 983 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 97b136ff117e..5b916bc0fbae 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -160,6 +160,9 @@ config TEXTSEARCH_BM
 config TEXTSEARCH_FSM
 	tristate
 
+config BTREE
+	boolean
+
 config HAS_IOMEM
 	boolean
 	depends on !NO_IOMEM
@@ -178,9 +181,6 @@ config HAS_DMA
 config CHECK_SIGNATURE
 	bool
 
-config HAVE_LMB
-	boolean
-
 config CPUMASK_OFFSTACK
 	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
 	help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2af5d84ec824..083b23d211d6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -103,7 +103,8 @@ config HEADERS_CHECK
 
 config DEBUG_SECTION_MISMATCH
 	bool "Enable full Section mismatch analysis"
-	depends on UNDEFINED
+	depends on UNDEFINED || (BLACKFIN)
+	default y
 	# This option is on purpose disabled for now.
 	# It will be enabled when we are down to a reasonable number
 	# of section mismatch warnings (< 10 for an allyesconfig build)
@@ -355,7 +356,7 @@ config SLUB_STATS
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
 	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
-		(X86 || ARM || PPC || S390)
+		(X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
 
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
@@ -499,6 +500,30 @@ config PROVE_LOCKING
 
 	  For more details, see Documentation/lockdep-design.txt.
 
+config PROVE_RCU
+	bool "RCU debugging: prove RCU correctness"
+	depends on PROVE_LOCKING
+	default n
+	help
+	  This feature enables lockdep extensions that check for correct
+	  use of RCU APIs. This is currently under development. Say Y
+	  if you want to debug RCU usage or help work on the PROVE_RCU
+	  feature.
+
+	  Say N if you are unsure.
+
+config PROVE_RCU_REPEATEDLY
+	bool "RCU debugging: don't disable PROVE_RCU on first splat"
+	depends on PROVE_RCU
+	default n
+	help
+	  By itself, PROVE_RCU will disable checking upon issuing the
+	  first warning (or "splat"). This feature prevents such
+	  disabling, allowing multiple RCU-lockdep warnings to be printed
+	  on a single reboot.
+
+	  Say N if you are unsure.
+
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -520,6 +545,14 @@ config LOCK_STAT
 
 	  For more details, see Documentation/lockstat.txt
 
+	  This also enables lock events required by "perf lock",
+	  a subcommand of perf.
+	  If you want to use "perf lock", you also need to turn on
+	  CONFIG_EVENT_TRACING.
+
+	  CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
+	  (CONFIG_LOCKDEP defines "acquire" and "release" events.)
+
 config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
 	depends on DEBUG_KERNEL && LOCKDEP
@@ -778,10 +811,22 @@ config RCU_CPU_STALL_DETECTOR
 	  CPUs are delaying the current grace period, but only when
 	  the grace period extends for excessive time periods.
 
-	  Say Y if you want RCU to perform such checks.
+	  Say N if you want to disable such checks.
+
+	  Say Y if you are unsure.
+
+config RCU_CPU_STALL_VERBOSE
+	bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
+	depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
+	default y
+	help
+	  This option causes RCU to printk detailed per-task information
+	  for any tasks that are stalling the current RCU grace period.
 
 	  Say N if you are unsure.
 
+	  Say Y if you want to enable such checks.
+
 config KPROBES_SANITY_TEST
 	bool "Kprobes sanity tests"
 	depends on DEBUG_KERNEL
@@ -853,8 +898,7 @@ config DEBUG_FORCE_WEAK_PER_CPU
 
 config LKDTM
 	tristate "Linux Kernel Dump Test Tool Module"
-	depends on DEBUG_KERNEL
-	depends on KPROBES
+	depends on DEBUG_FS
 	depends on BLOCK
 	default n
 	help
@@ -865,7 +909,19 @@ config LKDTM
 	  called lkdtm.
 
 	  Documentation on how to use the module can be found in
-	  drivers/misc/lkdtm.c
+	  Documentation/fault-injection/provoke-crashes.txt
+
+config CPU_NOTIFIER_ERROR_INJECT
+	tristate "CPU notifier error injection module"
+	depends on HOTPLUG_CPU && DEBUG_KERNEL
+	help
+	  This option provides a kernel module that can be used to test
+	  the error handling of the cpu notifiers.
+
+	  To compile this code as a module, choose M here: the module will
+	  be called cpu-notifier-error-inject.
+
+	  If unsure, say N.
 
 config FAULT_INJECTION
 	bool "Fault-injection framework"
@@ -1008,10 +1064,10 @@ config DYNAMIC_DEBUG
 
 	  Usage:
 
-	  Dynamic debugging is controlled via the 'dynamic_debug/ddebug' file,
+	  Dynamic debugging is controlled via the 'dynamic_debug/control' file,
 	  which is contained in the 'debugfs' filesystem. Thus, the debugfs
 	  filesystem must first be mounted before making use of this feature.
-	  We refer the control file as: <debugfs>/dynamic_debug/ddebug. This
+	  We refer the control file as: <debugfs>/dynamic_debug/control. This
 	  file contains a list of the debug statements that can be enabled. The
 	  format for each line of the file is:
 
@@ -1026,7 +1082,7 @@ config DYNAMIC_DEBUG
 
 	  From a live system:
 
-		nullarbor:~ # cat <debugfs>/dynamic_debug/ddebug
+		nullarbor:~ # cat <debugfs>/dynamic_debug/control
 	  # filename:lineno [module]function flags format
 	  fs/aio.c:222 [aio]__put_ioctx - "__put_ioctx:\040freeing\040%p\012"
 	  fs/aio.c:248 [aio]ioctx_alloc - "ENOMEM:\040nr_events\040too\040high\012"
@@ -1036,23 +1092,23 @@ config DYNAMIC_DEBUG
 
 	  // enable the message at line 1603 of file svcsock.c
 	  nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
-					<debugfs>/dynamic_debug/ddebug
+					<debugfs>/dynamic_debug/control
 
 	  // enable all the messages in file svcsock.c
 	  nullarbor:~ # echo -n 'file svcsock.c +p' >
-					<debugfs>/dynamic_debug/ddebug
+					<debugfs>/dynamic_debug/control
 
 	  // enable all the messages in the NFS server module
 	  nullarbor:~ # echo -n 'module nfsd +p' >
-					<debugfs>/dynamic_debug/ddebug
+					<debugfs>/dynamic_debug/control
 
 	  // enable all 12 messages in the function svc_process()
 	  nullarbor:~ # echo -n 'func svc_process +p' >
-					<debugfs>/dynamic_debug/ddebug
+					<debugfs>/dynamic_debug/control
 
 	  // disable all 12 messages in the function svc_process()
 	  nullarbor:~ # echo -n 'func svc_process -p' >
-					<debugfs>/dynamic_debug/ddebug
+					<debugfs>/dynamic_debug/control
 
 	  See Documentation/dynamic-debug-howto.txt for additional information.
 
@@ -1067,6 +1123,13 @@ config DMA_API_DEBUG
 	  This option causes a performance degradation. Use only if you want
 	  to debug device drivers. If unsure, say N.
 
+config ATOMIC64_SELFTEST
+	bool "Perform an atomic64_t self-test at boot"
+	help
+	  Enable this option to test the atomic64_t functions at boot.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
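
The help-text changes above track the rename of the debugfs control file from dynamic_debug/ddebug to dynamic_debug/control. The statements it toggles are ordinary pr_debug()/dev_dbg() call sites; a minimal, hypothetical module sketch (the "demo" name and messages are illustrative, not part of the kernel):

#include <linux/module.h>
#include <linux/kernel.h>

static int __init demo_init(void)
{
	/* With CONFIG_DYNAMIC_DEBUG this call site is compiled in but
	 * stays silent until enabled at run time, e.g.:
	 *   echo -n 'module demo +p' > <debugfs>/dynamic_debug/control
	 */
	pr_debug("demo module loaded\n");
	return 0;
}

static void __exit demo_exit(void)
{
	pr_debug("demo module unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");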
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 9b5d1d7f2ef7..43cb93fa2651 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -3,7 +3,7 @@ config HAVE_ARCH_KGDB
 	bool
 
 menuconfig KGDB
-	bool "KGDB: kernel debugging with remote gdb"
+	bool "KGDB: kernel debugger"
 	depends on HAVE_ARCH_KGDB
 	depends on DEBUG_KERNEL && EXPERIMENTAL
 	help
@@ -57,4 +57,26 @@ config KGDB_TESTS_BOOT_STRING
 	  information about other strings you could use beyond the
 	  default of V1F100.
 
+config KGDB_LOW_LEVEL_TRAP
+	bool "KGDB: Allow debugging with traps in notifiers"
+	depends on X86 || MIPS
+	default n
+	help
+	  This will add an extra call back to kgdb for the breakpoint
+	  exception handler, which will allow kgdb to step through a
+	  notify handler.
+
+config KGDB_KDB
+	bool "KGDB_KDB: include kdb frontend for kgdb"
+	default n
+	help
+	  KDB frontend for kernel
+
+config KDB_KEYBOARD
+	bool "KGDB_KDB: keyboard as input device"
+	depends on VT && KGDB_KDB
+	default n
+	help
+	  KDB can use a PS/2 type keyboard for an input device
+
 endif # KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 3b0b4a696db9..0bfabba1bb32 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-	 string_helpers.o gcd.o list_sort.o
+	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -39,8 +39,12 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
+
+CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+obj-$(CONFIG_BTREE) += btree.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
@@ -81,11 +85,10 @@ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
 obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
-obj-$(CONFIG_HAVE_LMB) += lmb.o
-
 obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
 
 obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
@@ -100,6 +103,8 @@ obj-$(CONFIG_GENERIC_CSUM) += checksum.o
 
 obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
 
+obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 8bee16ec7524..a21c12bc727c 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -162,12 +162,12 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
 	spinlock_t *lock = lock_addr(v);
-	int ret = 1;
+	int ret = 0;
 
 	spin_lock_irqsave(lock, flags);
 	if (v->counter != u) {
 		v->counter += a;
-		ret = 0;
+		ret = 1;
 	}
 	spin_unlock_irqrestore(lock, flags);
 	return ret;
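
The atomic64.c fix above inverts atomic64_add_unless()'s return convention so that it returns non-zero when the addition was performed, matching atomic_add_unless() and the callers built on it (the generic atomic64_inc_not_zero() is defined in terms of it). A user-space model of the corrected semantics — the model_* names are ours, and the locking is omitted:

#include <assert.h>

/* Add a to *v unless *v == u; return 1 if the add happened, 0 if not. */
static int model_add_unless(long long *v, long long a, long long u)
{
	int ret = 0;

	if (*v != u) {
		*v += a;
		ret = 1;
	}
	return ret;
}

int main(void)
{
	long long v = 5;

	assert(model_add_unless(&v, 1, 5) == 0 && v == 5); /* blocked */
	assert(model_add_unless(&v, 1, 7) == 1 && v == 6); /* added */
	return 0;
}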
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
new file mode 100644
index 000000000000..250ed11d3ed2
--- /dev/null
+++ b/lib/atomic64_test.c
@@ -0,0 +1,166 @@
1/*
2 * Testsuite for atomic64_t functions
3 *
4 * Copyright © 2010 Luca Barbieri
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <asm/atomic.h>
14
15#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
16static __init int test_atomic64(void)
17{
18 long long v0 = 0xaaa31337c001d00dLL;
19 long long v1 = 0xdeadbeefdeafcafeLL;
20 long long v2 = 0xfaceabadf00df001LL;
21 long long onestwos = 0x1111111122222222LL;
22 long long one = 1LL;
23
24 atomic64_t v = ATOMIC64_INIT(v0);
25 long long r = v0;
26 BUG_ON(v.counter != r);
27
28 atomic64_set(&v, v1);
29 r = v1;
30 BUG_ON(v.counter != r);
31 BUG_ON(atomic64_read(&v) != r);
32
33 INIT(v0);
34 atomic64_add(onestwos, &v);
35 r += onestwos;
36 BUG_ON(v.counter != r);
37
38 INIT(v0);
39 atomic64_add(-one, &v);
40 r += -one;
41 BUG_ON(v.counter != r);
42
43 INIT(v0);
44 r += onestwos;
45 BUG_ON(atomic64_add_return(onestwos, &v) != r);
46 BUG_ON(v.counter != r);
47
48 INIT(v0);
49 r += -one;
50 BUG_ON(atomic64_add_return(-one, &v) != r);
51 BUG_ON(v.counter != r);
52
53 INIT(v0);
54 atomic64_sub(onestwos, &v);
55 r -= onestwos;
56 BUG_ON(v.counter != r);
57
58 INIT(v0);
59 atomic64_sub(-one, &v);
60 r -= -one;
61 BUG_ON(v.counter != r);
62
63 INIT(v0);
64 r -= onestwos;
65 BUG_ON(atomic64_sub_return(onestwos, &v) != r);
66 BUG_ON(v.counter != r);
67
68 INIT(v0);
69 r -= -one;
70 BUG_ON(atomic64_sub_return(-one, &v) != r);
71 BUG_ON(v.counter != r);
72
73 INIT(v0);
74 atomic64_inc(&v);
75 r += one;
76 BUG_ON(v.counter != r);
77
78 INIT(v0);
79 r += one;
80 BUG_ON(atomic64_inc_return(&v) != r);
81 BUG_ON(v.counter != r);
82
83 INIT(v0);
84 atomic64_dec(&v);
85 r -= one;
86 BUG_ON(v.counter != r);
87
88 INIT(v0);
89 r -= one;
90 BUG_ON(atomic64_dec_return(&v) != r);
91 BUG_ON(v.counter != r);
92
93 INIT(v0);
94 BUG_ON(atomic64_xchg(&v, v1) != v0);
95 r = v1;
96 BUG_ON(v.counter != r);
97
98 INIT(v0);
99 BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
100 r = v1;
101 BUG_ON(v.counter != r);
102
103 INIT(v0);
104 BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
105 BUG_ON(v.counter != r);
106
107 INIT(v0);
108 BUG_ON(atomic64_add_unless(&v, one, v0));
109 BUG_ON(v.counter != r);
110
111 INIT(v0);
112 BUG_ON(!atomic64_add_unless(&v, one, v1));
113 r += one;
114 BUG_ON(v.counter != r);
115
116#if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \
117 defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H)
118 INIT(onestwos);
119 BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
120 r -= one;
121 BUG_ON(v.counter != r);
122
123 INIT(0);
124 BUG_ON(atomic64_dec_if_positive(&v) != -one);
125 BUG_ON(v.counter != r);
126
127 INIT(-one);
128 BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
129 BUG_ON(v.counter != r);
130#else
131#warning Please implement atomic64_dec_if_positive for your architecture, and add it to the IF above
132#endif
133
134 INIT(onestwos);
135 BUG_ON(!atomic64_inc_not_zero(&v));
136 r += one;
137 BUG_ON(v.counter != r);
138
139 INIT(0);
140 BUG_ON(atomic64_inc_not_zero(&v));
141 BUG_ON(v.counter != r);
142
143 INIT(-one);
144 BUG_ON(!atomic64_inc_not_zero(&v));
145 r += one;
146 BUG_ON(v.counter != r);
147
148#ifdef CONFIG_X86
149 printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n",
150#ifdef CONFIG_X86_64
151 "x86-64",
152#elif defined(CONFIG_X86_CMPXCHG64)
153 "i586+",
154#else
155 "i386+",
156#endif
157 boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
158 boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
159#else
160 printk(KERN_INFO "atomic64 test passed\n");
161#endif
162
163 return 0;
164}
165
166core_initcall(test_atomic64);
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 11bf49750583..ffb78c916ccd 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -487,7 +487,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
 EXPORT_SYMBOL(__bitmap_parse);
 
 /**
- * bitmap_parse_user()
+ * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
  *
  * @ubuf: pointer to user buffer containing string.
  * @ulen: buffer size in bytes.  If string is smaller than this
@@ -619,7 +619,7 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
 EXPORT_SYMBOL(bitmap_parselist);
 
 /**
- * bitmap_pos_to_ord(buf, pos, bits)
+ * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
  *	@buf: pointer to a bitmap
  *	@pos: a bit position in @buf (0 <= @pos < @bits)
  *	@bits: number of valid bit positions in @buf
@@ -655,7 +655,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
 }
 
 /**
- * bitmap_ord_to_pos(buf, ord, bits)
+ * bitmap_ord_to_pos - find position of n-th set bit in bitmap
  *	@buf: pointer to bitmap
  *	@ord: ordinal bit position (n-th set bit, n >= 0)
  *	@bits: number of valid bit positions in @buf
@@ -733,10 +733,9 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src,
 	bitmap_zero(dst, bits);
 
 	w = bitmap_weight(new, bits);
-	for (oldbit = find_first_bit(src, bits);
-	     oldbit < bits;
-	     oldbit = find_next_bit(src, bits, oldbit + 1)) {
+	for_each_set_bit(oldbit, src, bits) {
 		int n = bitmap_pos_to_ord(old, oldbit, bits);
+
 		if (n < 0 || w == 0)
 			set_bit(oldbit, dst);	/* identity map */
 		else
@@ -903,9 +902,7 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig,
 	 */
 
 	m = 0;
-	for (n = find_first_bit(relmap, bits);
-	     n < bits;
-	     n = find_next_bit(relmap, bits, n + 1)) {
+	for_each_set_bit(n, relmap, bits) {
 		/* m == bitmap_pos_to_ord(relmap, n, bits) */
 		if (test_bit(m, orig))
 			set_bit(n, dst);
@@ -934,9 +931,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
 		return;
 	bitmap_zero(dst, bits);
 
-	for (oldbit = find_first_bit(orig, bits);
-	     oldbit < bits;
-	     oldbit = find_next_bit(orig, bits, oldbit + 1))
+	for_each_set_bit(oldbit, orig, bits)
 		set_bit(oldbit % sz, dst);
 }
 EXPORT_SYMBOL(bitmap_fold);
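
The three bitmap.c conversions above are purely mechanical: for_each_set_bit() expands to exactly the find_first_bit()/find_next_bit() idiom it replaces, so behavior is unchanged. Its definition in include/linux/bitops.h of this era:

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))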
diff --git a/lib/btree.c b/lib/btree.c
new file mode 100644
index 000000000000..c9c6f0351526
--- /dev/null
+++ b/lib/btree.c
@@ -0,0 +1,798 @@
1/*
2 * lib/btree.c - Simple In-memory B+Tree
3 *
4 * As should be obvious for Linux kernel code, license is GPLv2
5 *
6 * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
7 * Bits and pieces stolen from Peter Zijlstra's code, which is
8 * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
9 * GPLv2
10 *
11 * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
12 *
13 * A relatively simple B+Tree implementation. I have written it as a learning
14 * exercise to understand how B+Trees work. Turned out to be useful as well.
15 *
16 * B+Trees can be used similar to Linux radix trees (which don't have anything
17 * in common with textbook radix trees, beware). Prerequisite for them working
18 * well is that access to a random tree node is much faster than a large number
19 * of operations within each node.
20 *
21 * Disks have fulfilled the prerequisite for a long time. More recently DRAM
22 * has gained similar properties, as memory access times, when measured in cpu
23 * cycles, have increased. Cacheline sizes have increased as well, which also
24 * helps B+Trees.
25 *
26 * Compared to radix trees, B+Trees are more efficient when dealing with a
27 * sparsely populated address space. Between 25% and 50% of the memory is
28 * occupied with valid pointers. When densely populated, radix trees contain
29 * ~98% pointers - hard to beat. Very sparse radix trees contain only ~2%
30 * pointers.
31 *
32 * This particular implementation stores pointers identified by a long value.
33 * Storing NULL pointers is illegal, lookup will return NULL when no entry
34 * was found.
35 *
36 * A trick was used that is not commonly found in textbooks. The lowest
37 * values are to the right, not to the left. All used slots within a node
38 * are on the left, all unused slots contain NUL values. Most operations
39 * simply loop once over all slots and terminate on the first NUL.
40 */
41
42#include <linux/btree.h>
43#include <linux/cache.h>
44#include <linux/kernel.h>
45#include <linux/slab.h>
46#include <linux/module.h>
47
48#define MAX(a, b) ((a) > (b) ? (a) : (b))
49#define NODESIZE MAX(L1_CACHE_BYTES, 128)
50
51struct btree_geo {
52 int keylen;
53 int no_pairs;
54 int no_longs;
55};
56
57struct btree_geo btree_geo32 = {
58 .keylen = 1,
59 .no_pairs = NODESIZE / sizeof(long) / 2,
60 .no_longs = NODESIZE / sizeof(long) / 2,
61};
62EXPORT_SYMBOL_GPL(btree_geo32);
63
64#define LONG_PER_U64 (64 / BITS_PER_LONG)
65struct btree_geo btree_geo64 = {
66 .keylen = LONG_PER_U64,
67 .no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64),
68 .no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)),
69};
70EXPORT_SYMBOL_GPL(btree_geo64);
71
72struct btree_geo btree_geo128 = {
73 .keylen = 2 * LONG_PER_U64,
74 .no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64),
75 .no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)),
76};
77EXPORT_SYMBOL_GPL(btree_geo128);
78
79static struct kmem_cache *btree_cachep;
80
81void *btree_alloc(gfp_t gfp_mask, void *pool_data)
82{
83 return kmem_cache_alloc(btree_cachep, gfp_mask);
84}
85EXPORT_SYMBOL_GPL(btree_alloc);
86
87void btree_free(void *element, void *pool_data)
88{
89 kmem_cache_free(btree_cachep, element);
90}
91EXPORT_SYMBOL_GPL(btree_free);
92
93static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp)
94{
95 unsigned long *node;
96
97 node = mempool_alloc(head->mempool, gfp);
98 if (likely(node))
99 memset(node, 0, NODESIZE);
100 return node;
101}
102
103static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n)
104{
105 size_t i;
106
107 for (i = 0; i < n; i++) {
108 if (l1[i] < l2[i])
109 return -1;
110 if (l1[i] > l2[i])
111 return 1;
112 }
113 return 0;
114}
115
116static unsigned long *longcpy(unsigned long *dest, const unsigned long *src,
117 size_t n)
118{
119 size_t i;
120
121 for (i = 0; i < n; i++)
122 dest[i] = src[i];
123 return dest;
124}
125
126static unsigned long *longset(unsigned long *s, unsigned long c, size_t n)
127{
128 size_t i;
129
130 for (i = 0; i < n; i++)
131 s[i] = c;
132 return s;
133}
134
135static void dec_key(struct btree_geo *geo, unsigned long *key)
136{
137 unsigned long val;
138 int i;
139
140 for (i = geo->keylen - 1; i >= 0; i--) {
141 val = key[i];
142 key[i] = val - 1;
143 if (val)
144 break;
145 }
146}
147
148static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n)
149{
150 return &node[n * geo->keylen];
151}
152
153static void *bval(struct btree_geo *geo, unsigned long *node, int n)
154{
155 return (void *)node[geo->no_longs + n];
156}
157
158static void setkey(struct btree_geo *geo, unsigned long *node, int n,
159 unsigned long *key)
160{
161 longcpy(bkey(geo, node, n), key, geo->keylen);
162}
163
164static void setval(struct btree_geo *geo, unsigned long *node, int n,
165 void *val)
166{
167 node[geo->no_longs + n] = (unsigned long) val;
168}
169
170static void clearpair(struct btree_geo *geo, unsigned long *node, int n)
171{
172 longset(bkey(geo, node, n), 0, geo->keylen);
173 node[geo->no_longs + n] = 0;
174}
175
176static inline void __btree_init(struct btree_head *head)
177{
178 head->node = NULL;
179 head->height = 0;
180}
181
182void btree_init_mempool(struct btree_head *head, mempool_t *mempool)
183{
184 __btree_init(head);
185 head->mempool = mempool;
186}
187EXPORT_SYMBOL_GPL(btree_init_mempool);
188
189int btree_init(struct btree_head *head)
190{
191 __btree_init(head);
192 head->mempool = mempool_create(0, btree_alloc, btree_free, NULL);
193 if (!head->mempool)
194 return -ENOMEM;
195 return 0;
196}
197EXPORT_SYMBOL_GPL(btree_init);
198
199void btree_destroy(struct btree_head *head)
200{
201 mempool_destroy(head->mempool);
202 head->mempool = NULL;
203}
204EXPORT_SYMBOL_GPL(btree_destroy);
205
206void *btree_last(struct btree_head *head, struct btree_geo *geo,
207 unsigned long *key)
208{
209 int height = head->height;
210 unsigned long *node = head->node;
211
212 if (height == 0)
213 return NULL;
214
215 for ( ; height > 1; height--)
216 node = bval(geo, node, 0);
217
218 longcpy(key, bkey(geo, node, 0), geo->keylen);
219 return bval(geo, node, 0);
220}
221EXPORT_SYMBOL_GPL(btree_last);
222
223static int keycmp(struct btree_geo *geo, unsigned long *node, int pos,
224 unsigned long *key)
225{
226 return longcmp(bkey(geo, node, pos), key, geo->keylen);
227}
228
229static int keyzero(struct btree_geo *geo, unsigned long *key)
230{
231 int i;
232
233 for (i = 0; i < geo->keylen; i++)
234 if (key[i])
235 return 0;
236
237 return 1;
238}
239
240void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
241 unsigned long *key)
242{
243 int i, height = head->height;
244 unsigned long *node = head->node;
245
246 if (height == 0)
247 return NULL;
248
249 for ( ; height > 1; height--) {
250 for (i = 0; i < geo->no_pairs; i++)
251 if (keycmp(geo, node, i, key) <= 0)
252 break;
253 if (i == geo->no_pairs)
254 return NULL;
255 node = bval(geo, node, i);
256 if (!node)
257 return NULL;
258 }
259
260 if (!node)
261 return NULL;
262
263 for (i = 0; i < geo->no_pairs; i++)
264 if (keycmp(geo, node, i, key) == 0)
265 return bval(geo, node, i);
266 return NULL;
267}
268EXPORT_SYMBOL_GPL(btree_lookup);
269
270int btree_update(struct btree_head *head, struct btree_geo *geo,
271 unsigned long *key, void *val)
272{
273 int i, height = head->height;
274 unsigned long *node = head->node;
275
276 if (height == 0)
277 return -ENOENT;
278
279 for ( ; height > 1; height--) {
280 for (i = 0; i < geo->no_pairs; i++)
281 if (keycmp(geo, node, i, key) <= 0)
282 break;
283 if (i == geo->no_pairs)
284 return -ENOENT;
285 node = bval(geo, node, i);
286 if (!node)
287 return -ENOENT;
288 }
289
290 if (!node)
291 return -ENOENT;
292
293 for (i = 0; i < geo->no_pairs; i++)
294 if (keycmp(geo, node, i, key) == 0) {
295 setval(geo, node, i, val);
296 return 0;
297 }
298 return -ENOENT;
299}
300EXPORT_SYMBOL_GPL(btree_update);
301
302/*
303 * Usually this function is quite similar to normal lookup. But the key of
304 * a parent node may be smaller than the smallest key of all its siblings.
305 * In such a case we cannot just return NULL, as we have only proven that no
306 * key smaller than __key, but larger than this parent key exists.
307 * So we set __key to the parent key and retry. We have to use the smallest
308 * such parent key, which is the last parent key we encountered.
309 */
310void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
311 unsigned long *__key)
312{
313 int i, height;
314 unsigned long *node, *oldnode;
315 unsigned long *retry_key = NULL, key[geo->keylen];
316
317 if (keyzero(geo, __key))
318 return NULL;
319
320 if (head->height == 0)
321 return NULL;
322retry:
323 longcpy(key, __key, geo->keylen);
324 dec_key(geo, key);
325
326 node = head->node;
327 for (height = head->height ; height > 1; height--) {
328 for (i = 0; i < geo->no_pairs; i++)
329 if (keycmp(geo, node, i, key) <= 0)
330 break;
331 if (i == geo->no_pairs)
332 goto miss;
333 oldnode = node;
334 node = bval(geo, node, i);
335 if (!node)
336 goto miss;
337 retry_key = bkey(geo, oldnode, i);
338 }
339
340 if (!node)
341 goto miss;
342
343 for (i = 0; i < geo->no_pairs; i++) {
344 if (keycmp(geo, node, i, key) <= 0) {
345 if (bval(geo, node, i)) {
346 longcpy(__key, bkey(geo, node, i), geo->keylen);
347 return bval(geo, node, i);
348 } else
349 goto miss;
350 }
351 }
352miss:
353 if (retry_key) {
354 __key = retry_key;
355 retry_key = NULL;
356 goto retry;
357 }
358 return NULL;
359}
360
361static int getpos(struct btree_geo *geo, unsigned long *node,
362 unsigned long *key)
363{
364 int i;
365
366 for (i = 0; i < geo->no_pairs; i++) {
367 if (keycmp(geo, node, i, key) <= 0)
368 break;
369 }
370 return i;
371}
372
373static int getfill(struct btree_geo *geo, unsigned long *node, int start)
374{
375 int i;
376
377 for (i = start; i < geo->no_pairs; i++)
378 if (!bval(geo, node, i))
379 break;
380 return i;
381}
382
383/*
384 * locate the correct leaf node in the btree
385 */
386static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo,
387 unsigned long *key, int level)
388{
389 unsigned long *node = head->node;
390 int i, height;
391
392 for (height = head->height; height > level; height--) {
393 for (i = 0; i < geo->no_pairs; i++)
394 if (keycmp(geo, node, i, key) <= 0)
395 break;
396
397 if ((i == geo->no_pairs) || !bval(geo, node, i)) {
398 /* right-most key is too large, update it */
399 /* FIXME: If the right-most key on higher levels is
400 * always zero, this wouldn't be necessary. */
401 i--;
402 setkey(geo, node, i, key);
403 }
404 BUG_ON(i < 0);
405 node = bval(geo, node, i);
406 }
407 BUG_ON(!node);
408 return node;
409}
410
411static int btree_grow(struct btree_head *head, struct btree_geo *geo,
412 gfp_t gfp)
413{
414 unsigned long *node;
415 int fill;
416
417 node = btree_node_alloc(head, gfp);
418 if (!node)
419 return -ENOMEM;
420 if (head->node) {
421 fill = getfill(geo, head->node, 0);
422 setkey(geo, node, 0, bkey(geo, head->node, fill - 1));
423 setval(geo, node, 0, head->node);
424 }
425 head->node = node;
426 head->height++;
427 return 0;
428}
429
430static void btree_shrink(struct btree_head *head, struct btree_geo *geo)
431{
432 unsigned long *node;
433 int fill;
434
435 if (head->height <= 1)
436 return;
437
438 node = head->node;
439 fill = getfill(geo, node, 0);
440 BUG_ON(fill > 1);
441 head->node = bval(geo, node, 0);
442 head->height--;
443 mempool_free(node, head->mempool);
444}
445
446static int btree_insert_level(struct btree_head *head, struct btree_geo *geo,
447 unsigned long *key, void *val, int level,
448 gfp_t gfp)
449{
450 unsigned long *node;
451 int i, pos, fill, err;
452
453 BUG_ON(!val);
454 if (head->height < level) {
455 err = btree_grow(head, geo, gfp);
456 if (err)
457 return err;
458 }
459
460retry:
461 node = find_level(head, geo, key, level);
462 pos = getpos(geo, node, key);
463 fill = getfill(geo, node, pos);
464 /* two identical keys are not allowed */
465 BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0);
466
467 if (fill == geo->no_pairs) {
468 /* need to split node */
469 unsigned long *new;
470
471 new = btree_node_alloc(head, gfp);
472 if (!new)
473 return -ENOMEM;
474 err = btree_insert_level(head, geo,
475 bkey(geo, node, fill / 2 - 1),
476 new, level + 1, gfp);
477 if (err) {
478 mempool_free(new, head->mempool);
479 return err;
480 }
481 for (i = 0; i < fill / 2; i++) {
482 setkey(geo, new, i, bkey(geo, node, i));
483 setval(geo, new, i, bval(geo, node, i));
484 setkey(geo, node, i, bkey(geo, node, i + fill / 2));
485 setval(geo, node, i, bval(geo, node, i + fill / 2));
486 clearpair(geo, node, i + fill / 2);
487 }
488 if (fill & 1) {
489 setkey(geo, node, i, bkey(geo, node, fill - 1));
490 setval(geo, node, i, bval(geo, node, fill - 1));
491 clearpair(geo, node, fill - 1);
492 }
493 goto retry;
494 }
495 BUG_ON(fill >= geo->no_pairs);
496
497 /* shift and insert */
498 for (i = fill; i > pos; i--) {
499 setkey(geo, node, i, bkey(geo, node, i - 1));
500 setval(geo, node, i, bval(geo, node, i - 1));
501 }
502 setkey(geo, node, pos, key);
503 setval(geo, node, pos, val);
504
505 return 0;
506}
507
508int btree_insert(struct btree_head *head, struct btree_geo *geo,
509 unsigned long *key, void *val, gfp_t gfp)
510{
511 return btree_insert_level(head, geo, key, val, 1, gfp);
512}
513EXPORT_SYMBOL_GPL(btree_insert);
514
515static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
516 unsigned long *key, int level);
517static void merge(struct btree_head *head, struct btree_geo *geo, int level,
518 unsigned long *left, int lfill,
519 unsigned long *right, int rfill,
520 unsigned long *parent, int lpos)
521{
522 int i;
523
524 for (i = 0; i < rfill; i++) {
525 /* Move all keys to the left */
526 setkey(geo, left, lfill + i, bkey(geo, right, i));
527 setval(geo, left, lfill + i, bval(geo, right, i));
528 }
529 /* Exchange left and right child in parent */
530 setval(geo, parent, lpos, right);
531 setval(geo, parent, lpos + 1, left);
532 /* Remove left (formerly right) child from parent */
533 btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1);
534 mempool_free(right, head->mempool);
535}
536
537static void rebalance(struct btree_head *head, struct btree_geo *geo,
538 unsigned long *key, int level, unsigned long *child, int fill)
539{
540 unsigned long *parent, *left = NULL, *right = NULL;
541 int i, no_left, no_right;
542
543 if (fill == 0) {
544 /* Because we don't steal entries from a neighbour, this case
545 * can happen. Parent node contains a single child, this
546 * node, so merging with a sibling never happens.
547 */
548 btree_remove_level(head, geo, key, level + 1);
549 mempool_free(child, head->mempool);
550 return;
551 }
552
553 parent = find_level(head, geo, key, level + 1);
554 i = getpos(geo, parent, key);
555 BUG_ON(bval(geo, parent, i) != child);
556
557 if (i > 0) {
558 left = bval(geo, parent, i - 1);
559 no_left = getfill(geo, left, 0);
560 if (fill + no_left <= geo->no_pairs) {
561 merge(head, geo, level,
562 left, no_left,
563 child, fill,
564 parent, i - 1);
565 return;
566 }
567 }
568 if (i + 1 < getfill(geo, parent, i)) {
569 right = bval(geo, parent, i + 1);
570 no_right = getfill(geo, right, 0);
571 if (fill + no_right <= geo->no_pairs) {
572 merge(head, geo, level,
573 child, fill,
574 right, no_right,
575 parent, i);
576 return;
577 }
578 }
579 /*
580 * We could also try to steal one entry from the left or right
581 * neighbor. By not doing so we changed the invariant from
582 * "all nodes are at least half full" to "no two neighboring
583 * nodes can be merged". Which means that the average fill of
584 * all nodes is still half or better.
585 */
586}
587
588static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
589 unsigned long *key, int level)
590{
591 unsigned long *node;
592 int i, pos, fill;
593 void *ret;
594
595 if (level > head->height) {
596 /* we recursed all the way up */
597 head->height = 0;
598 head->node = NULL;
599 return NULL;
600 }
601
602 node = find_level(head, geo, key, level);
603 pos = getpos(geo, node, key);
604 fill = getfill(geo, node, pos);
605 if ((level == 1) && (keycmp(geo, node, pos, key) != 0))
606 return NULL;
607 ret = bval(geo, node, pos);
608
609 /* remove and shift */
610 for (i = pos; i < fill - 1; i++) {
611 setkey(geo, node, i, bkey(geo, node, i + 1));
612 setval(geo, node, i, bval(geo, node, i + 1));
613 }
614 clearpair(geo, node, fill - 1);
615
616 if (fill - 1 < geo->no_pairs / 2) {
617 if (level < head->height)
618 rebalance(head, geo, key, level, node, fill - 1);
619 else if (fill - 1 == 1)
620 btree_shrink(head, geo);
621 }
622
623 return ret;
624}
625
626void *btree_remove(struct btree_head *head, struct btree_geo *geo,
627 unsigned long *key)
628{
629 if (head->height == 0)
630 return NULL;
631
632 return btree_remove_level(head, geo, key, 1);
633}
634EXPORT_SYMBOL_GPL(btree_remove);
635
636int btree_merge(struct btree_head *target, struct btree_head *victim,
637 struct btree_geo *geo, gfp_t gfp)
638{
639 unsigned long key[geo->keylen];
640 unsigned long dup[geo->keylen];
641 void *val;
642 int err;
643
644 BUG_ON(target == victim);
645
646 if (!(target->node)) {
647 /* target is empty, just copy fields over */
648 target->node = victim->node;
649 target->height = victim->height;
650 __btree_init(victim);
651 return 0;
652 }
653
654 /* TODO: This needs some optimizations. Currently we do three tree
655 * walks to remove a single object from the victim.
656 */
657 for (;;) {
658 if (!btree_last(victim, geo, key))
659 break;
660 val = btree_lookup(victim, geo, key);
661 err = btree_insert(target, geo, key, val, gfp);
662 if (err)
663 return err;
664 /* We must make a copy of the key, as the original will get
665 * mangled inside btree_remove. */
666 longcpy(dup, key, geo->keylen);
667 btree_remove(victim, geo, dup);
668 }
669 return 0;
670}
671EXPORT_SYMBOL_GPL(btree_merge);
672
673static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo,
674 unsigned long *node, unsigned long opaque,
675 void (*func)(void *elem, unsigned long opaque,
676 unsigned long *key, size_t index,
677 void *func2),
678 void *func2, int reap, int height, size_t count)
679{
680 int i;
681 unsigned long *child;
682
683 for (i = 0; i < geo->no_pairs; i++) {
684 child = bval(geo, node, i);
685 if (!child)
686 break;
687 if (height > 1)
688 count = __btree_for_each(head, geo, child, opaque,
689 func, func2, reap, height - 1, count);
690 else
691 func(child, opaque, bkey(geo, node, i), count++,
692 func2);
693 }
694 if (reap)
695 mempool_free(node, head->mempool);
696 return count;
697}
698
699static void empty(void *elem, unsigned long opaque, unsigned long *key,
700 size_t index, void *func2)
701{
702}
703
704void visitorl(void *elem, unsigned long opaque, unsigned long *key,
705 size_t index, void *__func)
706{
707 visitorl_t func = __func;
708
709 func(elem, opaque, *key, index);
710}
711EXPORT_SYMBOL_GPL(visitorl);
712
713void visitor32(void *elem, unsigned long opaque, unsigned long *__key,
714 size_t index, void *__func)
715{
716 visitor32_t func = __func;
717 u32 *key = (void *)__key;
718
719 func(elem, opaque, *key, index);
720}
721EXPORT_SYMBOL_GPL(visitor32);
722
723void visitor64(void *elem, unsigned long opaque, unsigned long *__key,
724 size_t index, void *__func)
725{
726 visitor64_t func = __func;
727 u64 *key = (void *)__key;
728
729 func(elem, opaque, *key, index);
730}
731EXPORT_SYMBOL_GPL(visitor64);
732
733void visitor128(void *elem, unsigned long opaque, unsigned long *__key,
734 size_t index, void *__func)
735{
736 visitor128_t func = __func;
737 u64 *key = (void *)__key;
738
739 func(elem, opaque, key[0], key[1], index);
740}
741EXPORT_SYMBOL_GPL(visitor128);
742
743size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
744 unsigned long opaque,
745 void (*func)(void *elem, unsigned long opaque,
746 unsigned long *key,
747 size_t index, void *func2),
748 void *func2)
749{
750 size_t count = 0;
751
752 if (!func2)
753 func = empty;
754 if (head->node)
755 count = __btree_for_each(head, geo, head->node, opaque, func,
756 func2, 0, head->height, 0);
757 return count;
758}
759EXPORT_SYMBOL_GPL(btree_visitor);
760
761size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
762 unsigned long opaque,
763 void (*func)(void *elem, unsigned long opaque,
764 unsigned long *key,
765 size_t index, void *func2),
766 void *func2)
767{
768 size_t count = 0;
769
770 if (!func2)
771 func = empty;
772 if (head->node)
773 count = __btree_for_each(head, geo, head->node, opaque, func,
774 func2, 1, head->height, 0);
775 __btree_init(head);
776 return count;
777}
778EXPORT_SYMBOL_GPL(btree_grim_visitor);
779
780static int __init btree_module_init(void)
781{
782 btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0,
783 SLAB_HWCACHE_ALIGN, NULL);
784 return 0;
785}
786
787static void __exit btree_module_exit(void)
788{
789 kmem_cache_destroy(btree_cachep);
790}
791
792/* If core code starts using btree, initialization should happen even earlier */
793module_init(btree_module_init);
794module_exit(btree_module_exit);
795
796MODULE_AUTHOR("Joern Engel <joern@logfs.org>");
797MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
798MODULE_LICENSE("GPL");
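
Since lib/btree.c is new in this merge, a short usage sketch may help; it relies only on the interfaces defined above (btree_init(), btree_insert(), btree_lookup(), btree_remove() and the exported btree_geo32 geometry). The demo module and the key/value chosen are illustrative, not part of the kernel:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/btree.h>

static struct btree_head head;

static int __init btree_demo_init(void)
{
	unsigned long key = 42;	/* btree_geo32: keys are one long wide */
	void *val;
	int err;

	err = btree_init(&head);	/* creates the node mempool */
	if (err)
		return err;

	/* Storing NULL is illegal; use any non-NULL cookie as the value. */
	err = btree_insert(&head, &btree_geo32, &key, (void *)0xdead, GFP_KERNEL);
	if (err)
		goto out;

	val = btree_lookup(&head, &btree_geo32, &key);
	pr_info("btree demo: lookup(42) -> %p\n", val);

	val = btree_remove(&head, &btree_geo32, &key);	/* returns old value */
out:
	btree_destroy(&head);
	return err;
}

module_init(btree_demo_init);
MODULE_LICENSE("GPL");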
diff --git a/lib/bug.c b/lib/bug.c
index 300e41afbf97..f13daf435211 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -165,7 +165,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 			 (void *)bugaddr);
 
 		show_regs(regs);
-		add_taint(TAINT_WARN);
+		add_taint(BUG_GET_TAINT(bug));
 		return BUG_TRAP_TYPE_WARN;
 	}
 
diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c
new file mode 100644
index 000000000000..4dc20321b0d5
--- /dev/null
+++ b/lib/cpu-notifier-error-inject.c
@@ -0,0 +1,63 @@
1#include <linux/kernel.h>
2#include <linux/cpu.h>
3#include <linux/module.h>
4#include <linux/notifier.h>
5
6static int priority;
7static int cpu_up_prepare_error;
8static int cpu_down_prepare_error;
9
10module_param(priority, int, 0);
11MODULE_PARM_DESC(priority, "specify cpu notifier priority");
12
13module_param(cpu_up_prepare_error, int, 0644);
14MODULE_PARM_DESC(cpu_up_prepare_error,
15 "specify error code to inject CPU_UP_PREPARE action");
16
17module_param(cpu_down_prepare_error, int, 0644);
18MODULE_PARM_DESC(cpu_down_prepare_error,
19 "specify error code to inject CPU_DOWN_PREPARE action");
20
21static int err_inject_cpu_callback(struct notifier_block *nfb,
22 unsigned long action, void *hcpu)
23{
24 int err = 0;
25
26 switch (action) {
27 case CPU_UP_PREPARE:
28 case CPU_UP_PREPARE_FROZEN:
29 err = cpu_up_prepare_error;
30 break;
31 case CPU_DOWN_PREPARE:
32 case CPU_DOWN_PREPARE_FROZEN:
33 err = cpu_down_prepare_error;
34 break;
35 }
36 if (err)
37 printk(KERN_INFO "Injecting error (%d) at cpu notifier\n", err);
38
39 return notifier_from_errno(err);
40}
41
42static struct notifier_block err_inject_cpu_notifier = {
43 .notifier_call = err_inject_cpu_callback,
44};
45
46static int err_inject_init(void)
47{
48 err_inject_cpu_notifier.priority = priority;
49
50 return register_hotcpu_notifier(&err_inject_cpu_notifier);
51}
52
53static void err_inject_exit(void)
54{
55 unregister_hotcpu_notifier(&err_inject_cpu_notifier);
56}
57
58module_init(err_inject_init);
59module_exit(err_inject_exit);
60
61MODULE_DESCRIPTION("CPU notifier error injection module");
62MODULE_LICENSE("GPL");
63MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
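
The injected errno travels back to the hotplug core via notifier_from_errno(), and the caller recovers it with notifier_to_errno(). A stand-alone model of that encode/decode round trip — constants and logic mirrored from include/linux/notifier.h of this kernel, the model_* names are ours:

#include <assert.h>

#define NOTIFY_OK		0x0001
#define NOTIFY_STOP_MASK	0x8000

static int model_from_errno(int err)	/* notifier_from_errno() */
{
	return err ? (NOTIFY_STOP_MASK | (NOTIFY_OK - err)) : NOTIFY_OK;
}

static int model_to_errno(int ret)	/* notifier_to_errno() */
{
	ret &= ~NOTIFY_STOP_MASK;
	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}

int main(void)
{
	/* e.g. loaded with cpu_up_prepare_error=-12 (-ENOMEM) */
	assert(model_to_errno(model_from_errno(-12)) == -12);
	assert(model_to_errno(model_from_errno(0)) == 0);
	return 0;
}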
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 7bb4142a502f..05d6aca7fc19 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -1,3 +1,4 @@
+#include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/cpumask.h>
diff --git a/lib/crc32.c b/lib/crc32.c
index 02e3b31b3a79..4855995fcde9 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -25,16 +25,19 @@
 #include <linux/module.h>
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <linux/slab.h>
 #include <linux/init.h>
 #include <asm/atomic.h>
 #include "crc32defs.h"
 #if CRC_LE_BITS == 8
-#define tole(x) __constant_cpu_to_le32(x)
-#define tobe(x) __constant_cpu_to_be32(x)
+# define tole(x) __constant_cpu_to_le32(x)
 #else
-#define tole(x) (x)
-#define tobe(x) (x)
+# define tole(x) (x)
+#endif
+
+#if CRC_BE_BITS == 8
+# define tobe(x) __constant_cpu_to_be32(x)
+#else
+# define tobe(x) (x)
 #endif
 #include "crc32table.h"
 
@@ -45,33 +48,37 @@ MODULE_LICENSE("GPL");
 #if CRC_LE_BITS == 8 || CRC_BE_BITS == 8
 
 static inline u32
-crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab)
+crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
 {
 # ifdef __LITTLE_ENDIAN
-#  define DO_CRC(x) crc = tab[(crc ^ (x)) & 255 ] ^ (crc >> 8)
+#  define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8)
+#  define DO_CRC4 crc = tab[3][(crc) & 255] ^ \
+		tab[2][(crc >> 8) & 255] ^ \
+		tab[1][(crc >> 16) & 255] ^ \
+		tab[0][(crc >> 24) & 255]
 # else
-#  define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
+#  define DO_CRC(x) crc = tab[0][((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
+#  define DO_CRC4 crc = tab[0][(crc) & 255] ^ \
+		tab[1][(crc >> 8) & 255] ^ \
+		tab[2][(crc >> 16) & 255] ^ \
+		tab[3][(crc >> 24) & 255]
 # endif
-	const u32 *b = (const u32 *)buf;
+	const u32 *b;
 	size_t    rem_len;
 
 	/* Align it */
-	if (unlikely((long)b & 3 && len)) {
-		u8 *p = (u8 *)b;
+	if (unlikely((long)buf & 3 && len)) {
 		do {
-			DO_CRC(*p++);
-		} while ((--len) && ((long)p)&3);
-		b = (u32 *)p;
+			DO_CRC(*buf++);
+		} while ((--len) && ((long)buf)&3);
 	}
 	rem_len = len & 3;
 	/* load data 32 bits wide, xor data 32 bits wide. */
 	len = len >> 2;
+	b = (const u32 *)buf;
 	for (--b; len; --len) {
 		crc ^= *++b; /* use pre increment for speed */
-		DO_CRC(0);
-		DO_CRC(0);
-		DO_CRC(0);
-		DO_CRC(0);
+		DO_CRC4;
 	}
 	len = rem_len;
 	/* And the last few bytes */
@@ -82,6 +89,8 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab)
 		} while (--len);
 	}
 	return crc;
+#undef DO_CRC
+#undef DO_CRC4
 }
 #endif
 /**
@@ -114,14 +123,11 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
 u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
 {
 # if CRC_LE_BITS == 8
-	const u32      *tab = crc32table_le;
+	const u32      (*tab)[] = crc32table_le;
 
 	crc = __cpu_to_le32(crc);
 	crc = crc32_body(crc, p, len, tab);
 	return __le32_to_cpu(crc);
-#undef ENDIAN_SHIFT
-#undef DO_CRC
-
 # elif CRC_LE_BITS == 4
 	while (len--) {
 		crc ^= *p++;
@@ -174,14 +180,11 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
 u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
 {
 # if CRC_BE_BITS == 8
-	const u32      *tab = crc32table_be;
+	const u32      (*tab)[] = crc32table_be;
 
 	crc = __cpu_to_be32(crc);
 	crc = crc32_body(crc, p, len, tab);
 	return __be32_to_cpu(crc);
-#undef ENDIAN_SHIFT
-#undef DO_CRC
-
 # elif CRC_BE_BITS == 4
 	while (len--) {
 		crc ^= *p++ << 24;
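
The crc32_body() rewrite above replaces four byte-at-a-time DO_CRC(0) steps with a single DO_CRC4 that consumes a whole 32-bit word through four 256-entry tables ("slicing-by-4"). A self-contained little-endian model of the technique, with the tables the kernel generates into crc32table.h built at run time here — this is a sketch of the algorithm, not the kernel's API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CRCPOLY_LE 0xedb88320	/* same polynomial as lib/crc32.c */

static uint32_t tab[4][256];

static void init_tables(void)
{
	uint32_t crc;
	int i, j;

	for (i = 0; i < 256; i++) {		/* classic one-byte table */
		crc = i;
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
		tab[0][i] = crc;
	}
	for (i = 0; i < 256; i++)	/* tab[k][i]: byte i plus k zero bytes */
		for (j = 1; j < 4; j++)
			tab[j][i] = (tab[j - 1][i] >> 8) ^ tab[0][tab[j - 1][i] & 255];
}

/* Little-endian slice-by-4 body, mirroring DO_CRC4 above; assumes a
 * little-endian host, and handles the tail one byte at a time. */
static uint32_t crc32_le_slice4(uint32_t crc, const uint8_t *buf, size_t len)
{
	uint32_t q;

	crc = ~crc;
	while (len >= 4) {
		memcpy(&q, buf, 4);
		crc ^= q;
		crc = tab[3][crc & 255] ^ tab[2][(crc >> 8) & 255] ^
		      tab[1][(crc >> 16) & 255] ^ tab[0][(crc >> 24) & 255];
		buf += 4;
		len -= 4;
	}
	while (len--)
		crc = tab[0][(crc ^ *buf++) & 255] ^ (crc >> 8);
	return ~crc;
}

int main(void)
{
	init_tables();
	/* the standard CRC-32 check value for "123456789" is cbf43926 */
	printf("%08x\n", crc32_le_slice4(0, (const uint8_t *)"123456789", 9));
	return 0;
}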
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index bc3b11731b9c..5bf0020b9248 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -23,6 +23,7 @@
  * shut up after that.
  */
 int debug_locks = 1;
+EXPORT_SYMBOL_GPL(debug_locks);
 
 /*
  * The locking-testsuite uses <debug_locks_silent> to get a
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a9a8996d286a..deebcc57d4e6 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -12,6 +12,7 @@
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/slab.h>
 #include <linux/hash.h>
 
 #define ODEBUG_HASH_BITS	14
@@ -140,6 +141,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 	obj->object = addr;
 	obj->descr  = descr;
 	obj->state  = ODEBUG_STATE_NONE;
+	obj->astate = 0;
 	hlist_del(&obj->node);
 
 	hlist_add_head(&obj->node, &b->list);
@@ -251,8 +253,10 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
 
 	if (limit < 5 && obj->descr != descr_test) {
 		limit++;
-		WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
-		       obj_states[obj->state], obj->descr->name);
+		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
+		       "object type: %s\n",
+			msg, obj_states[obj->state], obj->astate,
+			obj->descr->name);
 	}
 	debug_objects_warnings++;
 }
@@ -446,7 +450,10 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
 	case ODEBUG_STATE_INIT:
 	case ODEBUG_STATE_INACTIVE:
 	case ODEBUG_STATE_ACTIVE:
-		obj->state = ODEBUG_STATE_INACTIVE;
+		if (!obj->astate)
+			obj->state = ODEBUG_STATE_INACTIVE;
+		else
+			debug_print_object(obj, "deactivate");
 		break;
 
 	case ODEBUG_STATE_DESTROYED:
@@ -552,6 +559,53 @@ out_unlock:
 	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 
+/**
+ * debug_object_active_state - debug checks object usage state machine
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ * @expect:	expected state
+ * @next:	state to move to if expected state is found
+ */
+void
+debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+			  unsigned int expect, unsigned int next)
+{
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	raw_spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (obj) {
+		switch (obj->state) {
+		case ODEBUG_STATE_ACTIVE:
+			if (obj->astate == expect)
+				obj->astate = next;
+			else
+				debug_print_object(obj, "active_state");
+			break;
+
+		default:
+			debug_print_object(obj, "active_state");
+			break;
+		}
+	} else {
+		struct debug_obj o = { .object = addr,
+				       .state = ODEBUG_STATE_NOTAVAILABLE,
+				       .descr = descr };
+
+		debug_print_object(&o, "active_state");
+	}
+
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+}
+
 #ifdef CONFIG_DEBUG_OBJECTS_FREE
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
@@ -773,7 +827,7 @@ static int __init fixup_free(void *addr, enum debug_obj_state state)
773 } 827 }
774} 828}
775 829
776static int 830static int __init
777check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) 831check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
778{ 832{
779 struct debug_bucket *db; 833 struct debug_bucket *db;
@@ -916,7 +970,7 @@ void __init debug_objects_early_init(void)
916/* 970/*
917 * Convert the statically allocated objects to dynamic ones: 971 * Convert the statically allocated objects to dynamic ones:
918 */ 972 */
919static int debug_objects_replace_static_objects(void) 973static int __init debug_objects_replace_static_objects(void)
920{ 974{
921 struct debug_bucket *db = obj_hash; 975 struct debug_bucket *db = obj_hash;
922 struct hlist_node *node, *tmp; 976 struct hlist_node *node, *tmp;
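
Annotation: the debugobjects changes above add a second, caller-defined state word (obj->astate) plus a checker, debug_object_active_state(), which verifies and advances that state under the bucket lock; debug_object_deactivate() now refuses to deactivate while astate is nonzero, and the warning text reports the extra state. A minimal sketch of a caller — every "example_*" name here is invented for illustration, only debug_object_active_state() itself comes from the hunk above:

    #include <linux/debugobjects.h>

    #define EXAMPLE_ASTATE_IDLE     0   /* astate values are caller-defined */
    #define EXAMPLE_ASTATE_QUEUED   1

    static struct debug_obj_descr example_debug_descr = {
            .name = "example_object",
    };

    static void example_queue(void *obj)
    {
            /* Warn (via debug_print_object) unless the object is ACTIVE
             * and still in IDLE; otherwise advance IDLE -> QUEUED. */
            debug_object_active_state(obj, &example_debug_descr,
                                      EXAMPLE_ASTATE_IDLE,
                                      EXAMPLE_ASTATE_QUEUED);
    }
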
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index db521f45626e..bcb3a4bd68ff 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -97,7 +97,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len,
97 u32 src_len, dst_len; 97 u32 src_len, dst_len;
98 size_t tmp; 98 size_t tmp;
99 u8 *in_buf, *in_buf_save, *out_buf; 99 u8 *in_buf, *in_buf_save, *out_buf;
100 int obytes_processed = 0; 100 int ret = -1;
101 101
102 set_error_fn(error_fn); 102 set_error_fn(error_fn);
103 103
@@ -174,15 +174,22 @@ STATIC inline int INIT unlzo(u8 *input, int in_len,
174 174
175 /* decompress */ 175 /* decompress */
176 tmp = dst_len; 176 tmp = dst_len;
177 r = lzo1x_decompress_safe((u8 *) in_buf, src_len, 177
178 /* When the input data is not compressed at all,
179 * lzo1x_decompress_safe will fail, so call memcpy()
180 * instead */
181 if (unlikely(dst_len == src_len))
182 memcpy(out_buf, in_buf, src_len);
183 else {
184 r = lzo1x_decompress_safe((u8 *) in_buf, src_len,
178 out_buf, &tmp); 185 out_buf, &tmp);
179 186
180 if (r != LZO_E_OK || dst_len != tmp) { 187 if (r != LZO_E_OK || dst_len != tmp) {
181 error("Compressed data violation"); 188 error("Compressed data violation");
182 goto exit_2; 189 goto exit_2;
190 }
183 } 191 }
184 192
185 obytes_processed += dst_len;
186 if (flush) 193 if (flush)
187 flush(out_buf, dst_len); 194 flush(out_buf, dst_len);
188 if (output) 195 if (output)
@@ -196,6 +203,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len,
196 in_buf += src_len; 203 in_buf += src_len;
197 } 204 }
198 205
206 ret = 0;
199exit_2: 207exit_2:
200 if (!input) 208 if (!input)
201 free(in_buf); 209 free(in_buf);
@@ -203,7 +211,7 @@ exit_1:
203 if (!output) 211 if (!output)
204 free(out_buf); 212 free(out_buf);
205exit: 213exit:
206 return obytes_processed; 214 return ret;
207} 215}
208 216
209#define decompress unlzo 217#define decompress unlzo
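
Annotation: the unlzo fix above has two parts. The function now returns 0/-1 instead of a byte count no caller used, and it special-cases blocks whose compressed and uncompressed lengths match — lzop stores incompressible blocks verbatim, and lzo1x_decompress_safe() would reject such a block. The dispatch, reduced to a sketch (lzo1x_decompress_safe() is the real entry point; the surrounding buffer names are placeholders):

    if (dst_len == src_len) {
            /* stored (incompressible) block: payload is verbatim */
            memcpy(out_buf, in_buf, src_len);
    } else {
            size_t tmp = dst_len;

            if (lzo1x_decompress_safe(in_buf, src_len, out_buf, &tmp) != LZO_E_OK
                || tmp != dst_len)
                    return -1;      /* corrupt block */
    }
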
diff --git a/lib/devres.c b/lib/devres.c
index 72c8909006da..49368608f988 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -1,5 +1,6 @@
1#include <linux/pci.h> 1#include <linux/pci.h>
2#include <linux/io.h> 2#include <linux/io.h>
3#include <linux/gfp.h>
3#include <linux/module.h> 4#include <linux/module.h>
4 5
5void devm_ioremap_release(struct device *dev, void *res) 6void devm_ioremap_release(struct device *dev, void *res)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 7d2f0b33e5a8..01e64270e246 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -570,7 +570,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf,
570 * Now parse out the first token and use it as the name for the 570 * Now parse out the first token and use it as the name for the
571 * driver to filter for. 571 * driver to filter for.
572 */ 572 */
573 for (i = 0; i < NAME_MAX_LEN; ++i) { 573 for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
574 current_driver_name[i] = buf[i]; 574 current_driver_name[i] = buf[i];
575 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) 575 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
576 break; 576 break;
@@ -587,7 +587,7 @@ out_unlock:
587 return count; 587 return count;
588} 588}
589 589
590const struct file_operations filter_fops = { 590static const struct file_operations filter_fops = {
591 .read = filter_read, 591 .read = filter_read,
592 .write = filter_write, 592 .write = filter_write,
593}; 593};
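
Annotation: both dma-debug hunks are hardening. filter_fops becomes static because nothing outside the file references it, and the copy loop bound drops to NAME_MAX_LEN - 1 so the NUL terminator always fits even when the input contains no delimiter. The shape of the fix, as a sketch with hypothetical buffer names:

    char name[NAME_MAX_LEN];
    size_t i;

    /* Copy at most sizeof(name) - 1 characters: if no delimiter is
     * ever hit, i stops with one byte to spare for the NUL. */
    for (i = 0; i < sizeof(name) - 1; ++i) {
            name[i] = buf[i];
            if (isspace(buf[i]) || buf[i] == '\0')
                    break;
    }
    name[i] = '\0';
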
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index f93502915988..02afc2533728 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -25,6 +25,7 @@
25#include <linux/uaccess.h> 25#include <linux/uaccess.h>
26#include <linux/dynamic_debug.h> 26#include <linux/dynamic_debug.h>
27#include <linux/debugfs.h> 27#include <linux/debugfs.h>
28#include <linux/slab.h>
28 29
29extern struct _ddebug __start___verbose[]; 30extern struct _ddebug __start___verbose[];
30extern struct _ddebug __stop___verbose[]; 31extern struct _ddebug __stop___verbose[];
@@ -455,7 +456,7 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
455 __func__, (int)len); 456 __func__, (int)len);
456 457
457 nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS); 458 nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS);
458 if (nwords < 0) 459 if (nwords <= 0)
459 return -EINVAL; 460 return -EINVAL;
460 if (ddebug_parse_query(words, nwords-1, &query)) 461 if (ddebug_parse_query(words, nwords-1, &query))
461 return -EINVAL; 462 return -EINVAL;
@@ -691,7 +692,7 @@ static void ddebug_table_free(struct ddebug_table *dt)
691 * Called in response to a module being unloaded. Removes 692 * Called in response to a module being unloaded. Removes
692 * any ddebug_table's which point at the module. 693 * any ddebug_table's which point at the module.
693 */ 694 */
694int ddebug_remove_module(char *mod_name) 695int ddebug_remove_module(const char *mod_name)
695{ 696{
696 struct ddebug_table *dt, *nextdt; 697 struct ddebug_table *dt, *nextdt;
697 int ret = -ENOENT; 698 int ret = -ENOENT;
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 66eef2e4483e..41b1804fa728 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
99 ret->element_size = element_size; 99 ret->element_size = element_size;
100 ret->total_nr_elements = total; 100 ret->total_nr_elements = total;
101 if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) 101 if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
102 memset(ret->parts[0], FLEX_ARRAY_FREE, 102 memset(&ret->parts[0], FLEX_ARRAY_FREE,
103 FLEX_ARRAY_BASE_BYTES_LEFT); 103 FLEX_ARRAY_BASE_BYTES_LEFT);
104 return ret; 104 return ret;
105} 105}
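
Annotation: the flex_array change is a one-character fix worth spelling out. ret->parts[0] reads an uninitialized pointer out of the freshly allocated structure, and memset() then scribbles through it; &ret->parts[0] is the address of the spare storage inside the base page that the free-marker bytes are meant to fill. In miniature, with hypothetical types and a stand-in marker value:

    #include <string.h>

    #define FREE_MARKER 0x6c        /* stand-in for FLEX_ARRAY_FREE */

    struct base {
            int meta;
            void *parts[1];         /* inline storage reused as a byte pool */
    };

    static void mark_free(struct base *b, size_t bytes_left)
    {
            /* Correct: clear the bytes occupied by parts[] itself. */
            memset(&b->parts[0], FREE_MARKER, bytes_left);
            /*
             * Buggy form: memset(b->parts[0], ...) would write through
             * whatever garbage pointer b->parts[0] happens to hold.
             */
    }
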
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index bea5d97df991..85d0e412a04f 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -7,8 +7,8 @@
7#define LE_TABLE_SIZE (1 << CRC_LE_BITS) 7#define LE_TABLE_SIZE (1 << CRC_LE_BITS)
8#define BE_TABLE_SIZE (1 << CRC_BE_BITS) 8#define BE_TABLE_SIZE (1 << CRC_BE_BITS)
9 9
10static uint32_t crc32table_le[LE_TABLE_SIZE]; 10static uint32_t crc32table_le[4][LE_TABLE_SIZE];
11static uint32_t crc32table_be[BE_TABLE_SIZE]; 11static uint32_t crc32table_be[4][BE_TABLE_SIZE];
12 12
13/** 13/**
14 * crc32init_le() - allocate and initialize LE table data 14 * crc32init_le() - allocate and initialize LE table data
@@ -22,12 +22,19 @@ static void crc32init_le(void)
22 unsigned i, j; 22 unsigned i, j;
23 uint32_t crc = 1; 23 uint32_t crc = 1;
24 24
25 crc32table_le[0] = 0; 25 crc32table_le[0][0] = 0;
26 26
27 for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) { 27 for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) {
28 crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0); 28 crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
29 for (j = 0; j < LE_TABLE_SIZE; j += 2 * i) 29 for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
30 crc32table_le[i + j] = crc ^ crc32table_le[j]; 30 crc32table_le[0][i + j] = crc ^ crc32table_le[0][j];
31 }
32 for (i = 0; i < LE_TABLE_SIZE; i++) {
33 crc = crc32table_le[0][i];
34 for (j = 1; j < 4; j++) {
35 crc = crc32table_le[0][crc & 0xff] ^ (crc >> 8);
36 crc32table_le[j][i] = crc;
37 }
31 } 38 }
32} 39}
33 40
@@ -39,25 +46,35 @@ static void crc32init_be(void)
39 unsigned i, j; 46 unsigned i, j;
40 uint32_t crc = 0x80000000; 47 uint32_t crc = 0x80000000;
41 48
42 crc32table_be[0] = 0; 49 crc32table_be[0][0] = 0;
43 50
44 for (i = 1; i < BE_TABLE_SIZE; i <<= 1) { 51 for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
45 crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0); 52 crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
46 for (j = 0; j < i; j++) 53 for (j = 0; j < i; j++)
47 crc32table_be[i + j] = crc ^ crc32table_be[j]; 54 crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
55 }
56 for (i = 0; i < BE_TABLE_SIZE; i++) {
57 crc = crc32table_be[0][i];
58 for (j = 1; j < 4; j++) {
59 crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8);
60 crc32table_be[j][i] = crc;
61 }
48 } 62 }
49} 63}
50 64
51static void output_table(uint32_t table[], int len, char *trans) 65static void output_table(uint32_t table[4][256], int len, char *trans)
52{ 66{
53 int i; 67 int i, j;
54 68
55 for (i = 0; i < len - 1; i++) { 69 for (j = 0 ; j < 4; j++) {
56 if (i % ENTRIES_PER_LINE == 0) 70 printf("{");
57 printf("\n"); 71 for (i = 0; i < len - 1; i++) {
58 printf("%s(0x%8.8xL), ", trans, table[i]); 72 if (i % ENTRIES_PER_LINE == 0)
73 printf("\n");
74 printf("%s(0x%8.8xL), ", trans, table[j][i]);
75 }
76 printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]);
59 } 77 }
60 printf("%s(0x%8.8xL)\n", trans, table[len - 1]);
61} 78}
62 79
63int main(int argc, char** argv) 80int main(int argc, char** argv)
@@ -66,14 +83,14 @@ int main(int argc, char** argv)
66 83
67 if (CRC_LE_BITS > 1) { 84 if (CRC_LE_BITS > 1) {
68 crc32init_le(); 85 crc32init_le();
69 printf("static const u32 crc32table_le[] = {"); 86 printf("static const u32 crc32table_le[4][256] = {");
70 output_table(crc32table_le, LE_TABLE_SIZE, "tole"); 87 output_table(crc32table_le, LE_TABLE_SIZE, "tole");
71 printf("};\n"); 88 printf("};\n");
72 } 89 }
73 90
74 if (CRC_BE_BITS > 1) { 91 if (CRC_BE_BITS > 1) {
75 crc32init_be(); 92 crc32init_be();
76 printf("static const u32 crc32table_be[] = {"); 93 printf("static const u32 crc32table_be[4][256] = {");
77 output_table(crc32table_be, BE_TABLE_SIZE, "tobe"); 94 output_table(crc32table_be, BE_TABLE_SIZE, "tobe");
78 printf("};\n"); 95 printf("};\n");
79 } 96 }
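
Annotation: the generator now emits four 256-entry tables per byte order instead of one. tab[0] is the classic per-byte table, and tab[j][i] is tab[0][i] advanced j further byte steps — exactly what the new inner loops above compute. That layout enables a "slicing-by-4" consumer that folds 32 input bits per iteration using four independent lookups. A sketch of the matching little-endian inner loop (this mirrors what lib/crc32.c does with these tables; the variable names are illustrative, and the 32-bit load assumes a little-endian, unaligned-tolerant platform):

    const uint32_t *t0 = tab[0], *t1 = tab[1], *t2 = tab[2], *t3 = tab[3];
    const uint32_t *b = (const uint32_t *)p;

    while (len >= 4) {
            crc ^= *b++;                    /* fold in 4 message bytes */
            crc  = t3[crc & 0xff] ^
                   t2[(crc >> 8) & 0xff] ^
                   t1[(crc >> 16) & 0xff] ^
                   t0[(crc >> 24) & 0xff];
            len -= 4;
    }
    /* the remaining 0..3 bytes go through tab[0] one at a time */
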
diff --git a/lib/genalloc.c b/lib/genalloc.c
index e67f97495dd5..1923f1490e72 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -10,6 +10,7 @@
10 * Version 2. See the file COPYING for more details. 10 * Version 2. See the file COPYING for more details.
11 */ 11 */
12 12
13#include <linux/slab.h>
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/bitmap.h> 15#include <linux/bitmap.h>
15#include <linux/genalloc.h> 16#include <linux/genalloc.h>
@@ -127,7 +128,6 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
127 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); 128 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
128 129
129 end_bit = (chunk->end_addr - chunk->start_addr) >> order; 130 end_bit = (chunk->end_addr - chunk->start_addr) >> order;
130 end_bit -= nbits + 1;
131 131
132 spin_lock_irqsave(&chunk->lock, flags); 132 spin_lock_irqsave(&chunk->lock, flags);
133 start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0, 133 start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 39af2560f765..5d7a4802c562 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -16,6 +16,24 @@ const char hex_asc[] = "0123456789abcdef";
16EXPORT_SYMBOL(hex_asc); 16EXPORT_SYMBOL(hex_asc);
17 17
18/** 18/**
19 * hex_to_bin - convert a hex digit to its real value
20 * @ch: ascii character represents hex digit
21 *
22 * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
23 * input.
24 */
25int hex_to_bin(char ch)
26{
27 if ((ch >= '0') && (ch <= '9'))
28 return ch - '0';
29 ch = tolower(ch);
30 if ((ch >= 'a') && (ch <= 'f'))
31 return ch - 'a' + 10;
32 return -1;
33}
34EXPORT_SYMBOL(hex_to_bin);
35
36/**
19 * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory 37 * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
20 * @buf: data blob to dump 38 * @buf: data blob to dump
21 * @len: number of bytes in the @buf 39 * @len: number of bytes in the @buf
@@ -34,7 +52,7 @@ EXPORT_SYMBOL(hex_asc);
34 * 52 *
35 * E.g.: 53 * E.g.:
36 * hex_dump_to_buffer(frame->data, frame->len, 16, 1, 54 * hex_dump_to_buffer(frame->data, frame->len, 16, 1,
37 * linebuf, sizeof(linebuf), 1); 55 * linebuf, sizeof(linebuf), true);
38 * 56 *
39 * example output buffer: 57 * example output buffer:
40 * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO 58 * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
@@ -65,8 +83,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
65 83
66 for (j = 0; j < ngroups; j++) 84 for (j = 0; j < ngroups; j++)
67 lx += scnprintf(linebuf + lx, linebuflen - lx, 85 lx += scnprintf(linebuf + lx, linebuflen - lx,
68 "%s%16.16llx", j ? " " : "", 86 "%s%16.16llx", j ? " " : "",
69 (unsigned long long)*(ptr8 + j)); 87 (unsigned long long)*(ptr8 + j));
70 ascii_column = 17 * ngroups + 2; 88 ascii_column = 17 * ngroups + 2;
71 break; 89 break;
72 } 90 }
@@ -77,7 +95,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
77 95
78 for (j = 0; j < ngroups; j++) 96 for (j = 0; j < ngroups; j++)
79 lx += scnprintf(linebuf + lx, linebuflen - lx, 97 lx += scnprintf(linebuf + lx, linebuflen - lx,
80 "%s%8.8x", j ? " " : "", *(ptr4 + j)); 98 "%s%8.8x", j ? " " : "", *(ptr4 + j));
81 ascii_column = 9 * ngroups + 2; 99 ascii_column = 9 * ngroups + 2;
82 break; 100 break;
83 } 101 }
@@ -88,7 +106,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
88 106
89 for (j = 0; j < ngroups; j++) 107 for (j = 0; j < ngroups; j++)
90 lx += scnprintf(linebuf + lx, linebuflen - lx, 108 lx += scnprintf(linebuf + lx, linebuflen - lx,
91 "%s%4.4x", j ? " " : "", *(ptr2 + j)); 109 "%s%4.4x", j ? " " : "", *(ptr2 + j));
92 ascii_column = 5 * ngroups + 2; 110 ascii_column = 5 * ngroups + 2;
93 break; 111 break;
94 } 112 }
@@ -111,9 +129,10 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
111 129
112 while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) 130 while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
113 linebuf[lx++] = ' '; 131 linebuf[lx++] = ' ';
114 for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) 132 for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) {
115 linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j] 133 ch = ptr[j];
116 : '.'; 134 linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
135 }
117nil: 136nil:
118 linebuf[lx++] = '\0'; 137 linebuf[lx++] = '\0';
119} 138}
@@ -143,7 +162,7 @@ EXPORT_SYMBOL(hex_dump_to_buffer);
143 * 162 *
144 * E.g.: 163 * E.g.:
145 * print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS, 164 * print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
146 * 16, 1, frame->data, frame->len, 1); 165 * 16, 1, frame->data, frame->len, true);
147 * 166 *
148 * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode: 167 * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
149 * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO 168 * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
@@ -151,12 +170,12 @@ EXPORT_SYMBOL(hex_dump_to_buffer);
151 * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~. 170 * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~.
152 */ 171 */
153void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, 172void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
154 int rowsize, int groupsize, 173 int rowsize, int groupsize,
155 const void *buf, size_t len, bool ascii) 174 const void *buf, size_t len, bool ascii)
156{ 175{
157 const u8 *ptr = buf; 176 const u8 *ptr = buf;
158 int i, linelen, remaining = len; 177 int i, linelen, remaining = len;
159 unsigned char linebuf[200]; 178 unsigned char linebuf[32 * 3 + 2 + 32 + 1];
160 179
161 if (rowsize != 16 && rowsize != 32) 180 if (rowsize != 16 && rowsize != 32)
162 rowsize = 16; 181 rowsize = 16;
@@ -164,13 +183,14 @@ void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
164 for (i = 0; i < len; i += rowsize) { 183 for (i = 0; i < len; i += rowsize) {
165 linelen = min(remaining, rowsize); 184 linelen = min(remaining, rowsize);
166 remaining -= rowsize; 185 remaining -= rowsize;
186
167 hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, 187 hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
168 linebuf, sizeof(linebuf), ascii); 188 linebuf, sizeof(linebuf), ascii);
169 189
170 switch (prefix_type) { 190 switch (prefix_type) {
171 case DUMP_PREFIX_ADDRESS: 191 case DUMP_PREFIX_ADDRESS:
172 printk("%s%s%*p: %s\n", level, prefix_str, 192 printk("%s%s%p: %s\n",
173 (int)(2 * sizeof(void *)), ptr + i, linebuf); 193 level, prefix_str, ptr + i, linebuf);
174 break; 194 break;
175 case DUMP_PREFIX_OFFSET: 195 case DUMP_PREFIX_OFFSET:
176 printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); 196 printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
@@ -196,9 +216,9 @@ EXPORT_SYMBOL(print_hex_dump);
196 * rowsize of 16, groupsize of 1, and ASCII output included. 216 * rowsize of 16, groupsize of 1, and ASCII output included.
197 */ 217 */
198void print_hex_dump_bytes(const char *prefix_str, int prefix_type, 218void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
199 const void *buf, size_t len) 219 const void *buf, size_t len)
200{ 220{
201 print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1, 221 print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
202 buf, len, 1); 222 buf, len, true);
203} 223}
204EXPORT_SYMBOL(print_hex_dump_bytes); 224EXPORT_SYMBOL(print_hex_dump_bytes);
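
Annotation: two things in the hexdump diff deserve a note. First, the new hex_to_bin() gives callers a single-digit parser; pairing two calls decodes a byte. Second, the line buffer shrinks from a guessed 200 bytes to the exact worst case: 32 bytes per row times 3 output characters each, plus the 2-space gap, plus 32 ASCII characters, plus the NUL — 131 bytes. A usage sketch for the new helper:

    u8 val;
    int hi = hex_to_bin('7');
    int lo = hex_to_bin('f');

    if (hi < 0 || lo < 0)
            return -EINVAL;         /* not hex digits */
    val = (hi << 4) | lo;           /* 0x7f */
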
diff --git a/lib/hweight.c b/lib/hweight.c
index 389424ecb129..3c79d50814cf 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -9,37 +9,45 @@
9 * The Hamming Weight of a number is the total number of bits set in it. 9 * The Hamming Weight of a number is the total number of bits set in it.
10 */ 10 */
11 11
12unsigned int hweight32(unsigned int w) 12unsigned int __sw_hweight32(unsigned int w)
13{ 13{
14#ifdef ARCH_HAS_FAST_MULTIPLIER
15 w -= (w >> 1) & 0x55555555;
16 w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
17 w = (w + (w >> 4)) & 0x0f0f0f0f;
18 return (w * 0x01010101) >> 24;
19#else
14 unsigned int res = w - ((w >> 1) & 0x55555555); 20 unsigned int res = w - ((w >> 1) & 0x55555555);
15 res = (res & 0x33333333) + ((res >> 2) & 0x33333333); 21 res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
16 res = (res + (res >> 4)) & 0x0F0F0F0F; 22 res = (res + (res >> 4)) & 0x0F0F0F0F;
17 res = res + (res >> 8); 23 res = res + (res >> 8);
18 return (res + (res >> 16)) & 0x000000FF; 24 return (res + (res >> 16)) & 0x000000FF;
25#endif
19} 26}
20EXPORT_SYMBOL(hweight32); 27EXPORT_SYMBOL(__sw_hweight32);
21 28
22unsigned int hweight16(unsigned int w) 29unsigned int __sw_hweight16(unsigned int w)
23{ 30{
24 unsigned int res = w - ((w >> 1) & 0x5555); 31 unsigned int res = w - ((w >> 1) & 0x5555);
25 res = (res & 0x3333) + ((res >> 2) & 0x3333); 32 res = (res & 0x3333) + ((res >> 2) & 0x3333);
26 res = (res + (res >> 4)) & 0x0F0F; 33 res = (res + (res >> 4)) & 0x0F0F;
27 return (res + (res >> 8)) & 0x00FF; 34 return (res + (res >> 8)) & 0x00FF;
28} 35}
29EXPORT_SYMBOL(hweight16); 36EXPORT_SYMBOL(__sw_hweight16);
30 37
31unsigned int hweight8(unsigned int w) 38unsigned int __sw_hweight8(unsigned int w)
32{ 39{
33 unsigned int res = w - ((w >> 1) & 0x55); 40 unsigned int res = w - ((w >> 1) & 0x55);
34 res = (res & 0x33) + ((res >> 2) & 0x33); 41 res = (res & 0x33) + ((res >> 2) & 0x33);
35 return (res + (res >> 4)) & 0x0F; 42 return (res + (res >> 4)) & 0x0F;
36} 43}
37EXPORT_SYMBOL(hweight8); 44EXPORT_SYMBOL(__sw_hweight8);
38 45
39unsigned long hweight64(__u64 w) 46unsigned long __sw_hweight64(__u64 w)
40{ 47{
41#if BITS_PER_LONG == 32 48#if BITS_PER_LONG == 32
42 return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); 49 return __sw_hweight32((unsigned int)(w >> 32)) +
50 __sw_hweight32((unsigned int)w);
43#elif BITS_PER_LONG == 64 51#elif BITS_PER_LONG == 64
44#ifdef ARCH_HAS_FAST_MULTIPLIER 52#ifdef ARCH_HAS_FAST_MULTIPLIER
45 w -= (w >> 1) & 0x5555555555555555ul; 53 w -= (w >> 1) & 0x5555555555555555ul;
@@ -56,4 +64,4 @@ unsigned long hweight64(__u64 w)
56#endif 64#endif
57#endif 65#endif
58} 66}
59EXPORT_SYMBOL(hweight64); 67EXPORT_SYMBOL(__sw_hweight64);
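
Annotation: the rename (hweightN to __sw_hweightN) keeps these as the software fallback, freeing the plain names for arch-optimized wrappers, and the 32-bit version gains the ARCH_HAS_FAST_MULTIPLIER variant that hweight64 already used. The SWAR ladder is worth seeing run: each step widens the per-field partial counts, and multiplying by 0x01010101 sums the four byte counts into the top byte. A standalone userspace check of the multiplier variant:

    #include <assert.h>
    #include <stdint.h>

    static unsigned int popcount32(uint32_t w)
    {
            w -= (w >> 1) & 0x55555555;                      /* 2-bit sums */
            w  = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* 4-bit sums */
            w  = (w + (w >> 4)) & 0x0f0f0f0f;                /* byte sums  */
            return (w * 0x01010101) >> 24;  /* add the 4 bytes into byte 3 */
    }

    int main(void)
    {
            assert(popcount32(0x00000000) == 0);
            assert(popcount32(0x80000001) == 2);
            assert(popcount32(0xffffffff) == 32);
            return 0;
    }
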
diff --git a/lib/idr.c b/lib/idr.c
index 1cac726c44bc..7f1a4f0acf50 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
156 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; 156 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
157 157
158 /* if already at the top layer, we need to grow */ 158 /* if already at the top layer, we need to grow */
159 if (!(p = pa[l])) { 159 if (id >= 1 << (idp->layers * IDR_BITS)) {
160 *starting_id = id; 160 *starting_id = id;
161 return IDR_NEED_TO_GROW; 161 return IDR_NEED_TO_GROW;
162 } 162 }
163 p = pa[l];
164 BUG_ON(!p);
163 165
164 /* If we need to go up one layer, continue the 166 /* If we need to go up one layer, continue the
165 * loop; otherwise, restart from the top. 167 * loop; otherwise, restart from the top.
@@ -443,6 +445,7 @@ EXPORT_SYMBOL(idr_remove);
443void idr_remove_all(struct idr *idp) 445void idr_remove_all(struct idr *idp)
444{ 446{
445 int n, id, max; 447 int n, id, max;
448 int bt_mask;
446 struct idr_layer *p; 449 struct idr_layer *p;
447 struct idr_layer *pa[MAX_LEVEL]; 450 struct idr_layer *pa[MAX_LEVEL];
448 struct idr_layer **paa = &pa[0]; 451 struct idr_layer **paa = &pa[0];
@@ -460,8 +463,10 @@ void idr_remove_all(struct idr *idp)
460 p = p->ary[(id >> n) & IDR_MASK]; 463 p = p->ary[(id >> n) & IDR_MASK];
461 } 464 }
462 465
466 bt_mask = id;
463 id += 1 << n; 467 id += 1 << n;
464 while (n < fls(id)) { 468 /* Get the highest bit that the above add changed from 0->1. */
469 while (n < fls(id ^ bt_mask)) {
465 if (p) 470 if (p)
466 free_layer(p); 471 free_layer(p);
467 n += IDR_BITS; 472 n += IDR_BITS;
@@ -502,7 +507,7 @@ void *idr_find(struct idr *idp, int id)
502 int n; 507 int n;
503 struct idr_layer *p; 508 struct idr_layer *p;
504 509
505 p = rcu_dereference(idp->top); 510 p = rcu_dereference_raw(idp->top);
506 if (!p) 511 if (!p)
507 return NULL; 512 return NULL;
508 n = (p->layer+1) * IDR_BITS; 513 n = (p->layer+1) * IDR_BITS;
@@ -517,7 +522,7 @@ void *idr_find(struct idr *idp, int id)
517 while (n > 0 && p) { 522 while (n > 0 && p) {
518 n -= IDR_BITS; 523 n -= IDR_BITS;
519 BUG_ON(n != p->layer*IDR_BITS); 524 BUG_ON(n != p->layer*IDR_BITS);
520 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); 525 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
521 } 526 }
522 return((void *)p); 527 return((void *)p);
523} 528}
@@ -550,7 +555,7 @@ int idr_for_each(struct idr *idp,
550 struct idr_layer **paa = &pa[0]; 555 struct idr_layer **paa = &pa[0];
551 556
552 n = idp->layers * IDR_BITS; 557 n = idp->layers * IDR_BITS;
553 p = rcu_dereference(idp->top); 558 p = rcu_dereference_raw(idp->top);
554 max = 1 << n; 559 max = 1 << n;
555 560
556 id = 0; 561 id = 0;
@@ -558,7 +563,7 @@ int idr_for_each(struct idr *idp,
558 while (n > 0 && p) { 563 while (n > 0 && p) {
559 n -= IDR_BITS; 564 n -= IDR_BITS;
560 *paa++ = p; 565 *paa++ = p;
561 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); 566 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
562 } 567 }
563 568
564 if (p) { 569 if (p) {
@@ -597,7 +602,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
597 /* find first ent */ 602 /* find first ent */
598 n = idp->layers * IDR_BITS; 603 n = idp->layers * IDR_BITS;
599 max = 1 << n; 604 max = 1 << n;
600 p = rcu_dereference(idp->top); 605 p = rcu_dereference_raw(idp->top);
601 if (!p) 606 if (!p)
602 return NULL; 607 return NULL;
603 608
@@ -605,7 +610,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
605 while (n > 0 && p) { 610 while (n > 0 && p) {
606 n -= IDR_BITS; 611 n -= IDR_BITS;
607 *paa++ = p; 612 *paa++ = p;
608 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); 613 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
609 } 614 }
610 615
611 if (p) { 616 if (p) {
@@ -621,7 +626,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
621 } 626 }
622 return NULL; 627 return NULL;
623} 628}
624 629EXPORT_SYMBOL(idr_get_next);
625 630
626 631
627/** 632/**
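
Annotation: the idr_remove_all() fix above is subtle. After id += 1 << n, the old loop tested n < fls(id), which misfires when the increment carries through already-set high bits. Saving the pre-increment value and testing fls(id ^ bt_mask) asks the right question — which bit positions actually flipped — so only the layers below the highest flipped bit are treated as complete and freed. A worked example (userspace; the kernel's fls() is 32 - __builtin_clz() for nonzero values, and __builtin_clz is a GCC/Clang builtin):

    #include <assert.h>

    int main(void)
    {
            unsigned int bt_mask = 0x3f;            /* id before: 0111111 */
            unsigned int id = bt_mask + 1;          /* id after:  1000000 */
            unsigned int changed = id ^ bt_mask;    /* 1111111: bits 0..6 */
            int fls = 32 - __builtin_clz(changed);

            assert(fls == 7);  /* layers indexed below bit 7 are done */
            return 0;
    }
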
diff --git a/lib/inflate.c b/lib/inflate.c
index d10255973a9f..677b738c2204 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -103,6 +103,7 @@
103 the two sets of lengths. 103 the two sets of lengths.
104 */ 104 */
105#include <linux/compiler.h> 105#include <linux/compiler.h>
106#include <linux/slab.h>
106 107
107#ifdef RCSID 108#ifdef RCSID
108static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #"; 109static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #";
diff --git a/lib/kasprintf.c b/lib/kasprintf.c
index c5ff1fd10030..9c4233b23783 100644
--- a/lib/kasprintf.c
+++ b/lib/kasprintf.c
@@ -6,6 +6,7 @@
6 6
7#include <stdarg.h> 7#include <stdarg.h>
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/slab.h>
9#include <linux/types.h> 10#include <linux/types.h>
10#include <linux/string.h> 11#include <linux/string.h>
11 12
diff --git a/lib/kobject.c b/lib/kobject.c
index b512b746d2af..f07c57252e82 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
700 return ret; 700 return ret;
701} 701}
702 702
703struct sysfs_ops kobj_sysfs_ops = { 703const struct sysfs_ops kobj_sysfs_ops = {
704 .show = kobj_attr_show, 704 .show = kobj_attr_show,
705 .store = kobj_attr_store, 705 .store = kobj_attr_store,
706}; 706};
@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
789 * If the kset was not able to be created, NULL will be returned. 789 * If the kset was not able to be created, NULL will be returned.
790 */ 790 */
791static struct kset *kset_create(const char *name, 791static struct kset *kset_create(const char *name,
792 struct kset_uevent_ops *uevent_ops, 792 const struct kset_uevent_ops *uevent_ops,
793 struct kobject *parent_kobj) 793 struct kobject *parent_kobj)
794{ 794{
795 struct kset *kset; 795 struct kset *kset;
@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
832 * If the kset was not able to be created, NULL will be returned. 832 * If the kset was not able to be created, NULL will be returned.
833 */ 833 */
834struct kset *kset_create_and_add(const char *name, 834struct kset *kset_create_and_add(const char *name,
835 struct kset_uevent_ops *uevent_ops, 835 const struct kset_uevent_ops *uevent_ops,
836 struct kobject *parent_kobj) 836 struct kobject *parent_kobj)
837{ 837{
838 struct kset *kset; 838 struct kset *kset;
@@ -850,6 +850,121 @@ struct kset *kset_create_and_add(const char *name,
850} 850}
851EXPORT_SYMBOL_GPL(kset_create_and_add); 851EXPORT_SYMBOL_GPL(kset_create_and_add);
852 852
853
854static DEFINE_SPINLOCK(kobj_ns_type_lock);
855static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
856
857int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
858{
859 enum kobj_ns_type type = ops->type;
860 int error;
861
862 spin_lock(&kobj_ns_type_lock);
863
864 error = -EINVAL;
865 if (type >= KOBJ_NS_TYPES)
866 goto out;
867
868 error = -EINVAL;
869 if (type <= KOBJ_NS_TYPE_NONE)
870 goto out;
871
872 error = -EBUSY;
873 if (kobj_ns_ops_tbl[type])
874 goto out;
875
876 error = 0;
877 kobj_ns_ops_tbl[type] = ops;
878
879out:
880 spin_unlock(&kobj_ns_type_lock);
881 return error;
882}
883
884int kobj_ns_type_registered(enum kobj_ns_type type)
885{
886 int registered = 0;
887
888 spin_lock(&kobj_ns_type_lock);
889 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES))
890 registered = kobj_ns_ops_tbl[type] != NULL;
891 spin_unlock(&kobj_ns_type_lock);
892
893 return registered;
894}
895
896const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent)
897{
898 const struct kobj_ns_type_operations *ops = NULL;
899
900 if (parent && parent->ktype->child_ns_type)
901 ops = parent->ktype->child_ns_type(parent);
902
903 return ops;
904}
905
906const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
907{
908 return kobj_child_ns_ops(kobj->parent);
909}
910
911
912const void *kobj_ns_current(enum kobj_ns_type type)
913{
914 const void *ns = NULL;
915
916 spin_lock(&kobj_ns_type_lock);
917 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
918 kobj_ns_ops_tbl[type])
919 ns = kobj_ns_ops_tbl[type]->current_ns();
920 spin_unlock(&kobj_ns_type_lock);
921
922 return ns;
923}
924
925const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
926{
927 const void *ns = NULL;
928
929 spin_lock(&kobj_ns_type_lock);
930 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
931 kobj_ns_ops_tbl[type])
932 ns = kobj_ns_ops_tbl[type]->netlink_ns(sk);
933 spin_unlock(&kobj_ns_type_lock);
934
935 return ns;
936}
937
938const void *kobj_ns_initial(enum kobj_ns_type type)
939{
940 const void *ns = NULL;
941
942 spin_lock(&kobj_ns_type_lock);
943 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
944 kobj_ns_ops_tbl[type])
945 ns = kobj_ns_ops_tbl[type]->initial_ns();
946 spin_unlock(&kobj_ns_type_lock);
947
948 return ns;
949}
950
951/*
952 * kobj_ns_exit - invalidate a namespace tag
953 *
954 * @type: the namespace type (i.e. KOBJ_NS_TYPE_NET)
955 * @ns: the actual namespace being invalidated
956 *
957 * This is called when a tag is no longer valid. For instance,
958 * when a network namespace exits, it uses this helper to
959 * make sure no sb's sysfs_info points to the now-invalidated
960 * netns.
961 */
962void kobj_ns_exit(enum kobj_ns_type type, const void *ns)
963{
964 sysfs_exit_ns(type, ns);
965}
966
967
853EXPORT_SYMBOL(kobject_get); 968EXPORT_SYMBOL(kobject_get);
854EXPORT_SYMBOL(kobject_put); 969EXPORT_SYMBOL(kobject_put);
855EXPORT_SYMBOL(kobject_del); 970EXPORT_SYMBOL(kobject_del);
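
Annotation: the block added to kobject.c is the registry side of namespace-tagged sysfs directories. A subsystem registers one kobj_ns_type_operations per namespace type, and the helpers translate a type into the current, initial, or socket-derived namespace tag under kobj_ns_type_lock. The expected caller shape, sketched for the network type — the callback implementations are assumed, not shown (in-tree this role is played by net/core/net-sysfs.c):

    static const struct kobj_ns_type_operations example_net_ns_ops = {
            .type       = KOBJ_NS_TYPE_NET,
            .current_ns = example_current_ns,  /* current task's net ns */
            .netlink_ns = example_netlink_ns,  /* net ns of a socket    */
            .initial_ns = example_initial_ns,  /* &init_net             */
    };

    static int __init example_ns_init(void)
    {
            /* -EINVAL for an out-of-range type, -EBUSY if already taken */
            return kobj_ns_type_register(&example_net_ns_ops);
    }
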
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 920a3ca6e259..b93579504dfa 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -18,18 +18,25 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/kobject.h> 19#include <linux/kobject.h>
20#include <linux/module.h> 20#include <linux/module.h>
21 21#include <linux/slab.h>
22#include <linux/user_namespace.h>
22#include <linux/socket.h> 23#include <linux/socket.h>
23#include <linux/skbuff.h> 24#include <linux/skbuff.h>
24#include <linux/netlink.h> 25#include <linux/netlink.h>
25#include <net/sock.h> 26#include <net/sock.h>
27#include <net/net_namespace.h>
26 28
27 29
28u64 uevent_seqnum; 30u64 uevent_seqnum;
29char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; 31char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
30static DEFINE_SPINLOCK(sequence_lock); 32static DEFINE_SPINLOCK(sequence_lock);
31#if defined(CONFIG_NET) 33#ifdef CONFIG_NET
32static struct sock *uevent_sock; 34struct uevent_sock {
35 struct list_head list;
36 struct sock *sk;
37};
38static LIST_HEAD(uevent_sock_list);
39static DEFINE_MUTEX(uevent_sock_mutex);
33#endif 40#endif
34 41
35/* the strings here must match the enum in include/linux/kobject.h */ 42/* the strings here must match the enum in include/linux/kobject.h */
@@ -76,6 +83,39 @@ out:
76 return ret; 83 return ret;
77} 84}
78 85
86#ifdef CONFIG_NET
87static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
88{
89 struct kobject *kobj = data;
90 const struct kobj_ns_type_operations *ops;
91
92 ops = kobj_ns_ops(kobj);
93 if (ops) {
94 const void *sock_ns, *ns;
95 ns = kobj->ktype->namespace(kobj);
96 sock_ns = ops->netlink_ns(dsk);
97 return sock_ns != ns;
98 }
99
100 return 0;
101}
102#endif
103
104static int kobj_usermode_filter(struct kobject *kobj)
105{
106 const struct kobj_ns_type_operations *ops;
107
108 ops = kobj_ns_ops(kobj);
109 if (ops) {
110 const void *init_ns, *ns;
111 ns = kobj->ktype->namespace(kobj);
112 init_ns = ops->initial_ns();
113 return ns != init_ns;
114 }
115
116 return 0;
117}
118
79/** 119/**
80 * kobject_uevent_env - send an uevent with environmental data 120 * kobject_uevent_env - send an uevent with environmental data
81 * 121 *
@@ -95,10 +135,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
95 const char *subsystem; 135 const char *subsystem;
96 struct kobject *top_kobj; 136 struct kobject *top_kobj;
97 struct kset *kset; 137 struct kset *kset;
98 struct kset_uevent_ops *uevent_ops; 138 const struct kset_uevent_ops *uevent_ops;
99 u64 seq; 139 u64 seq;
100 int i = 0; 140 int i = 0;
101 int retval = 0; 141 int retval = 0;
142#ifdef CONFIG_NET
143 struct uevent_sock *ue_sk;
144#endif
102 145
103 pr_debug("kobject: '%s' (%p): %s\n", 146 pr_debug("kobject: '%s' (%p): %s\n",
104 kobject_name(kobj), kobj, __func__); 147 kobject_name(kobj), kobj, __func__);
@@ -210,7 +253,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
210 253
211#if defined(CONFIG_NET) 254#if defined(CONFIG_NET)
212 /* send netlink message */ 255 /* send netlink message */
213 if (uevent_sock) { 256 mutex_lock(&uevent_sock_mutex);
257 list_for_each_entry(ue_sk, &uevent_sock_list, list) {
258 struct sock *uevent_sock = ue_sk->sk;
214 struct sk_buff *skb; 259 struct sk_buff *skb;
215 size_t len; 260 size_t len;
216 261
@@ -232,18 +277,21 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
232 } 277 }
233 278
234 NETLINK_CB(skb).dst_group = 1; 279 NETLINK_CB(skb).dst_group = 1;
235 retval = netlink_broadcast(uevent_sock, skb, 0, 1, 280 retval = netlink_broadcast_filtered(uevent_sock, skb,
236 GFP_KERNEL); 281 0, 1, GFP_KERNEL,
282 kobj_bcast_filter,
283 kobj);
237 /* ENOBUFS should be handled in userspace */ 284 /* ENOBUFS should be handled in userspace */
238 if (retval == -ENOBUFS) 285 if (retval == -ENOBUFS)
239 retval = 0; 286 retval = 0;
240 } else 287 } else
241 retval = -ENOMEM; 288 retval = -ENOMEM;
242 } 289 }
290 mutex_unlock(&uevent_sock_mutex);
243#endif 291#endif
244 292
245 /* call uevent_helper, usually only enabled during early boot */ 293 /* call uevent_helper, usually only enabled during early boot */
246 if (uevent_helper[0]) { 294 if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
247 char *argv [3]; 295 char *argv [3];
248 296
249 argv [0] = uevent_helper; 297 argv [0] = uevent_helper;
@@ -319,18 +367,59 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
319EXPORT_SYMBOL_GPL(add_uevent_var); 367EXPORT_SYMBOL_GPL(add_uevent_var);
320 368
321#if defined(CONFIG_NET) 369#if defined(CONFIG_NET)
322static int __init kobject_uevent_init(void) 370static int uevent_net_init(struct net *net)
323{ 371{
324 uevent_sock = netlink_kernel_create(&init_net, NETLINK_KOBJECT_UEVENT, 372 struct uevent_sock *ue_sk;
325 1, NULL, NULL, THIS_MODULE); 373
326 if (!uevent_sock) { 374 ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
375 if (!ue_sk)
376 return -ENOMEM;
377
378 ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT,
379 1, NULL, NULL, THIS_MODULE);
380 if (!ue_sk->sk) {
327 printk(KERN_ERR 381 printk(KERN_ERR
328 "kobject_uevent: unable to create netlink socket!\n"); 382 "kobject_uevent: unable to create netlink socket!\n");
383 kfree(ue_sk);
329 return -ENODEV; 384 return -ENODEV;
330 } 385 }
331 netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV); 386 mutex_lock(&uevent_sock_mutex);
387 list_add_tail(&ue_sk->list, &uevent_sock_list);
388 mutex_unlock(&uevent_sock_mutex);
332 return 0; 389 return 0;
333} 390}
334 391
392static void uevent_net_exit(struct net *net)
393{
394 struct uevent_sock *ue_sk;
395
396 mutex_lock(&uevent_sock_mutex);
397 list_for_each_entry(ue_sk, &uevent_sock_list, list) {
398 if (sock_net(ue_sk->sk) == net)
399 goto found;
400 }
401 mutex_unlock(&uevent_sock_mutex);
402 return;
403
404found:
405 list_del(&ue_sk->list);
406 mutex_unlock(&uevent_sock_mutex);
407
408 netlink_kernel_release(ue_sk->sk);
409 kfree(ue_sk);
410}
411
412static struct pernet_operations uevent_net_ops = {
413 .init = uevent_net_init,
414 .exit = uevent_net_exit,
415};
416
417static int __init kobject_uevent_init(void)
418{
419 netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
420 return register_pernet_subsys(&uevent_net_ops);
421}
422
423
335postcore_initcall(kobject_uevent_init); 424postcore_initcall(kobject_uevent_init);
336#endif 425#endif
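
Annotation: the uevent rework replaces the single init_net netlink socket with one socket per network namespace, kept on uevent_sock_list and created/destroyed by pernet hooks; kobj_bcast_filter() then drops each broadcast on sockets whose namespace tag does not match the kobject's, and the usermode helper is skipped outside the initial namespace. The registration pattern it adopts, in generic form — the "example" subsystem is hypothetical, while register_pernet_subsys() and the __net_init/__net_exit annotations are the real API:

    #include <net/net_namespace.h>

    static int __net_init example_net_init(struct net *net)
    {
            /* allocate per-namespace state, tie its lifetime to 'net' */
            return 0;
    }

    static void __net_exit example_net_exit(struct net *net)
    {
            /* tear that state down when the namespace dies */
    }

    static struct pernet_operations example_net_ops = {
            .init = example_net_init,
            .exit = example_net_exit,
    };

    static int __init example_init(void)
    {
            return register_pernet_subsys(&example_net_ops);
    }
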
diff --git a/lib/kref.c b/lib/kref.c
index 9ecd6e865610..d3d227a08a4b 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -13,17 +13,7 @@
13 13
14#include <linux/kref.h> 14#include <linux/kref.h>
15#include <linux/module.h> 15#include <linux/module.h>
16 16#include <linux/slab.h>
17/**
18 * kref_set - initialize object and set refcount to requested number.
19 * @kref: object in question.
20 * @num: initial reference counter
21 */
22void kref_set(struct kref *kref, int num)
23{
24 atomic_set(&kref->refcount, num);
25 smp_mb();
26}
27 17
28/** 18/**
29 * kref_init - initialize object. 19 * kref_init - initialize object.
@@ -31,7 +21,8 @@ void kref_set(struct kref *kref, int num)
31 */ 21 */
32void kref_init(struct kref *kref) 22void kref_init(struct kref *kref)
33{ 23{
34 kref_set(kref, 1); 24 atomic_set(&kref->refcount, 1);
25 smp_mb();
35} 26}
36 27
37/** 28/**
@@ -71,7 +62,6 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref))
71 return 0; 62 return 0;
72} 63}
73 64
74EXPORT_SYMBOL(kref_set);
75EXPORT_SYMBOL(kref_init); 65EXPORT_SYMBOL(kref_init);
76EXPORT_SYMBOL(kref_get); 66EXPORT_SYMBOL(kref_get);
77EXPORT_SYMBOL(kref_put); 67EXPORT_SYMBOL(kref_put);
diff --git a/lib/lcm.c b/lib/lcm.c
new file mode 100644
index 000000000000..157cd88a6ffc
--- /dev/null
+++ b/lib/lcm.c
@@ -0,0 +1,15 @@
1#include <linux/kernel.h>
2#include <linux/gcd.h>
3#include <linux/module.h>
4
5/* Lowest common multiple */
6unsigned long lcm(unsigned long a, unsigned long b)
7{
8 if (a && b)
9 return (a * b) / gcd(a, b);
10 else if (b)
11 return b;
12
13 return a;
14}
15EXPORT_SYMBOL_GPL(lcm);
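
Annotation: one caveat on the new helper — (a * b) can wrap in unsigned long before the division repairs it. Since gcd(a, b) divides a exactly, dividing first gives the same result with a much smaller intermediate, a worthwhile variant where inputs can be large. A sketch with a local Euclid gcd for self-containment (the kernel's own gcd() lives in lib/gcd.c):

    static unsigned long gcd(unsigned long a, unsigned long b)
    {
            while (b) {
                    unsigned long t = a % b;
                    a = b;
                    b = t;
            }
            return a;
    }

    static unsigned long lcm_nooverflow(unsigned long a, unsigned long b)
    {
            if (a && b)
                    return (a / gcd(a, b)) * b;  /* divide before multiply */
            return a ? a : b;
    }
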
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 19d11e0bb958..4b5cb794c38b 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -4,99 +4,214 @@
4#include <linux/slab.h> 4#include <linux/slab.h>
5#include <linux/list.h> 5#include <linux/list.h>
6 6
7#define MAX_LIST_LENGTH_BITS 20
8
9/*
10 * Returns a list organized in an intermediate format suited
11 * to chaining of merge() calls: null-terminated, no reserved or
12 * sentinel head node, "prev" links not maintained.
13 */
14static struct list_head *merge(void *priv,
15 int (*cmp)(void *priv, struct list_head *a,
16 struct list_head *b),
17 struct list_head *a, struct list_head *b)
18{
19 struct list_head head, *tail = &head;
20
21 while (a && b) {
22 /* if equal, take 'a' -- important for sort stability */
23 if ((*cmp)(priv, a, b) <= 0) {
24 tail->next = a;
25 a = a->next;
26 } else {
27 tail->next = b;
28 b = b->next;
29 }
30 tail = tail->next;
31 }
32 tail->next = a?:b;
33 return head.next;
34}
35
36/*
37 * Combine final list merge with restoration of standard doubly-linked
38 * list structure. This approach duplicates code from merge(), but
39 * runs faster than the tidier alternatives of either a separate final
40 * prev-link restoration pass, or maintaining the prev links
41 * throughout.
42 */
43static void merge_and_restore_back_links(void *priv,
44 int (*cmp)(void *priv, struct list_head *a,
45 struct list_head *b),
46 struct list_head *head,
47 struct list_head *a, struct list_head *b)
48{
49 struct list_head *tail = head;
50
51 while (a && b) {
52 /* if equal, take 'a' -- important for sort stability */
53 if ((*cmp)(priv, a, b) <= 0) {
54 tail->next = a;
55 a->prev = tail;
56 a = a->next;
57 } else {
58 tail->next = b;
59 b->prev = tail;
60 b = b->next;
61 }
62 tail = tail->next;
63 }
64 tail->next = a ? : b;
65
66 do {
67 /*
68 * In worst cases this loop may run many iterations.
69 * Continue callbacks to the client even though no
70 * element comparison is needed, so the client's cmp()
71 * routine can invoke cond_resched() periodically.
72 */
73 (*cmp)(priv, tail, tail);
74
75 tail->next->prev = tail;
76 tail = tail->next;
77 } while (tail->next);
78
79 tail->next = head;
80 head->prev = tail;
81}
82
7/** 83/**
8 * list_sort - sort a list. 84 * list_sort - sort a list
9 * @priv: private data, passed to @cmp 85 * @priv: private data, opaque to list_sort(), passed to @cmp
10 * @head: the list to sort 86 * @head: the list to sort
11 * @cmp: the elements comparison function 87 * @cmp: the elements comparison function
12 * 88 *
13 * This function has been implemented by Mark J Roberts <mjr@znex.org>. It 89 * This function implements "merge sort", which has O(nlog(n))
14 * implements "merge sort" which has O(nlog(n)) complexity. The list is sorted 90 * complexity.
15 * in ascending order.
16 * 91 *
17 * The comparison function @cmp is supposed to return a negative value if @a is 92 * The comparison function @cmp must return a negative value if @a
18 * less than @b, and a positive value if @a is greater than @b. If @a and @b 93 * should sort before @b, and a positive value if @a should sort after
19 * are equivalent, then it does not matter what this function returns. 94 * @b. If @a and @b are equivalent, and their original relative
95 * ordering is to be preserved, @cmp must return 0.
20 */ 96 */
21void list_sort(void *priv, struct list_head *head, 97void list_sort(void *priv, struct list_head *head,
22 int (*cmp)(void *priv, struct list_head *a, 98 int (*cmp)(void *priv, struct list_head *a,
23 struct list_head *b)) 99 struct list_head *b))
24{ 100{
25 struct list_head *p, *q, *e, *list, *tail, *oldhead; 101 struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
26 int insize, nmerges, psize, qsize, i; 102 -- last slot is a sentinel */
103 int lev; /* index into part[] */
104 int max_lev = 0;
105 struct list_head *list;
27 106
28 if (list_empty(head)) 107 if (list_empty(head))
29 return; 108 return;
30 109
110 memset(part, 0, sizeof(part));
111
112 head->prev->next = NULL;
31 list = head->next; 113 list = head->next;
32 list_del(head);
33 insize = 1;
34 for (;;) {
35 p = oldhead = list;
36 list = tail = NULL;
37 nmerges = 0;
38
39 while (p) {
40 nmerges++;
41 q = p;
42 psize = 0;
43 for (i = 0; i < insize; i++) {
44 psize++;
45 q = q->next == oldhead ? NULL : q->next;
46 if (!q)
47 break;
48 }
49 114
50 qsize = insize; 115 while (list) {
51 while (psize > 0 || (qsize > 0 && q)) { 116 struct list_head *cur = list;
52 if (!psize) { 117 list = list->next;
53 e = q; 118 cur->next = NULL;
54 q = q->next; 119
55 qsize--; 120 for (lev = 0; part[lev]; lev++) {
56 if (q == oldhead) 121 cur = merge(priv, cmp, part[lev], cur);
57 q = NULL; 122 part[lev] = NULL;
58 } else if (!qsize || !q) { 123 }
59 e = p; 124 if (lev > max_lev) {
60 p = p->next; 125 if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
61 psize--; 126 printk_once(KERN_DEBUG "list passed to"
62 if (p == oldhead) 127 " list_sort() too long for"
63 p = NULL; 128 " efficiency\n");
64 } else if (cmp(priv, p, q) <= 0) { 129 lev--;
65 e = p;
66 p = p->next;
67 psize--;
68 if (p == oldhead)
69 p = NULL;
70 } else {
71 e = q;
72 q = q->next;
73 qsize--;
74 if (q == oldhead)
75 q = NULL;
76 }
77 if (tail)
78 tail->next = e;
79 else
80 list = e;
81 e->prev = tail;
82 tail = e;
83 } 130 }
84 p = q; 131 max_lev = lev;
85 } 132 }
133 part[lev] = cur;
134 }
86 135
87 tail->next = list; 136 for (lev = 0; lev < max_lev; lev++)
88 list->prev = tail; 137 if (part[lev])
138 list = merge(priv, cmp, part[lev], list);
89 139
90 if (nmerges <= 1) 140 merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
91 break; 141}
142EXPORT_SYMBOL(list_sort);
92 143
93 insize *= 2; 144#ifdef DEBUG_LIST_SORT
94 } 145struct debug_el {
146 struct list_head l_h;
147 int value;
148 unsigned serial;
149};
95 150
96 head->next = list; 151static int cmp(void *priv, struct list_head *a, struct list_head *b)
97 head->prev = list->prev; 152{
98 list->prev->next = head; 153 return container_of(a, struct debug_el, l_h)->value
99 list->prev = head; 154 - container_of(b, struct debug_el, l_h)->value;
100} 155}
101 156
102EXPORT_SYMBOL(list_sort); 157/*
158 * The pattern of set bits in the list length determines which cases
159 * are hit in list_sort().
160 */
161#define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */
162
163static int __init list_sort_test(void)
164{
165 int i, r = 1, count;
166 struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL);
167 struct list_head *cur;
168
169 printk(KERN_WARNING "testing list_sort()\n");
170
171 cur = head;
172 for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) {
173 struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL);
174 BUG_ON(!el);
175 /* force some equivalencies */
176 el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3);
177 el->serial = i;
178
179 el->l_h.prev = cur;
180 cur->next = &el->l_h;
181 cur = cur->next;
182 }
183 head->prev = cur;
184
185 list_sort(NULL, head, cmp);
186
187 count = 1;
188 for (cur = head->next; cur->next != head; cur = cur->next) {
189 struct debug_el *el = container_of(cur, struct debug_el, l_h);
190 int cmp_result = cmp(NULL, cur, cur->next);
191 if (cur->next->prev != cur) {
192 printk(KERN_EMERG "list_sort() returned "
193 "a corrupted list!\n");
194 return 1;
195 } else if (cmp_result > 0) {
196 printk(KERN_EMERG "list_sort() failed to sort!\n");
197 return 1;
198 } else if (cmp_result == 0 &&
199 el->serial >= container_of(cur->next,
200 struct debug_el, l_h)->serial) {
201 printk(KERN_EMERG "list_sort() failed to preserve order"
202 " of equivalent elements!\n");
203 return 1;
204 }
205 kfree(cur->prev);
206 count++;
207 }
208 kfree(cur);
209 if (count != LIST_SORT_TEST_LENGTH) {
210 printk(KERN_EMERG "list_sort() returned list of "
211 "different length!\n");
212 return 1;
213 }
214 return 0;
215}
216module_init(list_sort_test);
217#endif
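
Annotation: the list_sort() rewrite swaps Mark J Roberts' in-place pass-doubling scheme for a binary-counter mergesort: part[k] holds a sorted run of exactly 2^k nodes, each incoming node carries upward like a binary increment, a final fold merges the surviving runs, and merge_and_restore_back_links() rebuilds the prev pointers only once at the end. A self-contained userspace rendition of the same scheme on a singly linked list — names invented; the kernel caps the counter at MAX_LIST_LENGTH_BITS and warns rather than growing:

    #include <stdio.h>

    struct node {
            int v;
            struct node *next;
    };

    /* Stable merge: on ties take from 'a', which holds the older run. */
    static struct node *merge(struct node *a, struct node *b)
    {
            struct node head, *tail = &head;

            while (a && b) {
                    if (a->v <= b->v) {
                            tail->next = a;
                            a = a->next;
                    } else {
                            tail->next = b;
                            b = b->next;
                    }
                    tail = tail->next;
            }
            tail->next = a ? a : b;
            return head.next;
    }

    static struct node *sort(struct node *list)
    {
            struct node *part[32] = { NULL };  /* part[k]: run of 2^k nodes */
            int k, max = 0;

            while (list) {
                    struct node *cur = list;

                    list = list->next;
                    cur->next = NULL;
                    for (k = 0; part[k]; k++) {  /* binary-increment carry */
                            cur = merge(part[k], cur);
                            part[k] = NULL;
                    }
                    part[k] = cur;
                    if (k > max)
                            max = k;
            }
            for (k = 0, list = NULL; k <= max; k++)  /* final fold */
                    if (part[k])
                            list = merge(part[k], list);
            return list;
    }

    int main(void)
    {
            struct node n[5] = { {3}, {1}, {4}, {1}, {5} }, *l = NULL;
            int i;

            for (i = 0; i < 5; i++) {       /* build 5->1->4->1->3 */
                    n[i].next = l;
                    l = &n[i];
            }
            for (l = sort(l); l; l = l->next)
                    printf("%d ", l->v);    /* prints: 1 1 3 4 5 */
            printf("\n");
            return 0;
    }

Taking the element from part[k] on ties in both the carry loop and the final fold is what preserves stability: the run at the higher counter slot always contains the older elements.
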
diff --git a/lib/lmb.c b/lib/lmb.c
deleted file mode 100644
index 9cee17142b2c..000000000000
--- a/lib/lmb.c
+++ /dev/null
@@ -1,532 +0,0 @@
1/*
2 * Procedures for maintaining information about logical memory blocks.
3 *
4 * Peter Bergner, IBM Corp. June 2001.
5 * Copyright (C) 2001 Peter Bergner.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/bitops.h>
16#include <linux/lmb.h>
17
18#define LMB_ALLOC_ANYWHERE 0
19
20struct lmb lmb;
21
22static int lmb_debug;
23
24static int __init early_lmb(char *p)
25{
26 if (p && strstr(p, "debug"))
27 lmb_debug = 1;
28 return 0;
29}
30early_param("lmb", early_lmb);
31
32static void lmb_dump(struct lmb_region *region, char *name)
33{
34 unsigned long long base, size;
35 int i;
36
37 pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
38
39 for (i = 0; i < region->cnt; i++) {
40 base = region->region[i].base;
41 size = region->region[i].size;
42
43 pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
44 name, i, base, base + size - 1, size);
45 }
46}
47
48void lmb_dump_all(void)
49{
50 if (!lmb_debug)
51 return;
52
53 pr_info("LMB configuration:\n");
54 pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size);
55 pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);
56
57 lmb_dump(&lmb.memory, "memory");
58 lmb_dump(&lmb.reserved, "reserved");
59}
60
61static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
62 u64 size2)
63{
64 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
65}
66
67static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
68{
69 if (base2 == base1 + size1)
70 return 1;
71 else if (base1 == base2 + size2)
72 return -1;
73
74 return 0;
75}
76
77static long lmb_regions_adjacent(struct lmb_region *rgn,
78 unsigned long r1, unsigned long r2)
79{
80 u64 base1 = rgn->region[r1].base;
81 u64 size1 = rgn->region[r1].size;
82 u64 base2 = rgn->region[r2].base;
83 u64 size2 = rgn->region[r2].size;
84
85 return lmb_addrs_adjacent(base1, size1, base2, size2);
86}
87
88static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
89{
90 unsigned long i;
91
92 for (i = r; i < rgn->cnt - 1; i++) {
93 rgn->region[i].base = rgn->region[i + 1].base;
94 rgn->region[i].size = rgn->region[i + 1].size;
95 }
96 rgn->cnt--;
97}
98
99/* Assumption: base addr of region 1 < base addr of region 2 */
100static void lmb_coalesce_regions(struct lmb_region *rgn,
101 unsigned long r1, unsigned long r2)
102{
103 rgn->region[r1].size += rgn->region[r2].size;
104 lmb_remove_region(rgn, r2);
105}
106
107void __init lmb_init(void)
108{
109 /* Create a dummy zero size LMB which will get coalesced away later.
110 * This simplifies the lmb_add() code below...
111 */
112 lmb.memory.region[0].base = 0;
113 lmb.memory.region[0].size = 0;
114 lmb.memory.cnt = 1;
115
116 /* Ditto. */
117 lmb.reserved.region[0].base = 0;
118 lmb.reserved.region[0].size = 0;
119 lmb.reserved.cnt = 1;
120}
121
122void __init lmb_analyze(void)
123{
124 int i;
125
126 lmb.memory.size = 0;
127
128 for (i = 0; i < lmb.memory.cnt; i++)
129 lmb.memory.size += lmb.memory.region[i].size;
130}
131
132static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
133{
134 unsigned long coalesced = 0;
135 long adjacent, i;
136
137 if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
138 rgn->region[0].base = base;
139 rgn->region[0].size = size;
140 return 0;
141 }
142
143 /* First try and coalesce this LMB with another. */
144 for (i = 0; i < rgn->cnt; i++) {
145 u64 rgnbase = rgn->region[i].base;
146 u64 rgnsize = rgn->region[i].size;
147
148 if ((rgnbase == base) && (rgnsize == size))
149 /* Already have this region, so we're done */
150 return 0;
151
152 adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
153 if (adjacent > 0) {
154 rgn->region[i].base -= size;
155 rgn->region[i].size += size;
156 coalesced++;
157 break;
158 } else if (adjacent < 0) {
159 rgn->region[i].size += size;
160 coalesced++;
161 break;
162 }
163 }
164
165 if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
166 lmb_coalesce_regions(rgn, i, i+1);
167 coalesced++;
168 }
169
170 if (coalesced)
171 return coalesced;
172 if (rgn->cnt >= MAX_LMB_REGIONS)
173 return -1;
174
175 /* Couldn't coalesce the LMB, so add it to the sorted table. */
176 for (i = rgn->cnt - 1; i >= 0; i--) {
177 if (base < rgn->region[i].base) {
178 rgn->region[i+1].base = rgn->region[i].base;
179 rgn->region[i+1].size = rgn->region[i].size;
180 } else {
181 rgn->region[i+1].base = base;
182 rgn->region[i+1].size = size;
183 break;
184 }
185 }
186
187 if (base < rgn->region[0].base) {
188 rgn->region[0].base = base;
189 rgn->region[0].size = size;
190 }
191 rgn->cnt++;
192
193 return 0;
194}
195
196long lmb_add(u64 base, u64 size)
197{
198 struct lmb_region *_rgn = &lmb.memory;
199
200 /* On pSeries LPAR systems, the first LMB is our RMO region. */
201 if (base == 0)
202 lmb.rmo_size = size;
203
204 return lmb_add_region(_rgn, base, size);
205
206}
207
208long lmb_remove(u64 base, u64 size)
209{
210 struct lmb_region *rgn = &(lmb.memory);
211 u64 rgnbegin, rgnend;
212 u64 end = base + size;
213 int i;
214
215 rgnbegin = rgnend = 0; /* supress gcc warnings */
216
217 /* Find the region where (base, size) belongs to */
218 for (i=0; i < rgn->cnt; i++) {
219 rgnbegin = rgn->region[i].base;
220 rgnend = rgnbegin + rgn->region[i].size;
221
222 if ((rgnbegin <= base) && (end <= rgnend))
223 break;
224 }
225
226 /* Didn't find the region */
227 if (i == rgn->cnt)
228 return -1;
229
230 /* Check to see if we are removing entire region */
231 if ((rgnbegin == base) && (rgnend == end)) {
232 lmb_remove_region(rgn, i);
233 return 0;
234 }
235
236 /* Check to see if region is matching at the front */
237 if (rgnbegin == base) {
238 rgn->region[i].base = end;
239 rgn->region[i].size -= size;
240 return 0;
241 }
242
243 /* Check to see if the region is matching at the end */
244 if (rgnend == end) {
245 rgn->region[i].size -= size;
246 return 0;
247 }
248
249 /*
250 * We need to split the entry - adjust the current one to the
251 * beginging of the hole and add the region after hole.
252 */
253 rgn->region[i].size = base - rgn->region[i].base;
254 return lmb_add_region(rgn, end, rgnend - end);
255}
256
257long __init lmb_reserve(u64 base, u64 size)
258{
259 struct lmb_region *_rgn = &lmb.reserved;
260
261 BUG_ON(0 == size);
262
263 return lmb_add_region(_rgn, base, size);
264}
265
266long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
267{
268 unsigned long i;
269
270 for (i = 0; i < rgn->cnt; i++) {
271 u64 rgnbase = rgn->region[i].base;
272 u64 rgnsize = rgn->region[i].size;
273 if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
274 break;
275 }
276
277 return (i < rgn->cnt) ? i : -1;
278}
279
280static u64 lmb_align_down(u64 addr, u64 size)
281{
282 return addr & ~(size - 1);
283}
284
285static u64 lmb_align_up(u64 addr, u64 size)
286{
287 return (addr + (size - 1)) & ~(size - 1);
288}
289
290static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
291 u64 size, u64 align)
292{
293 u64 base, res_base;
294 long j;
295
296 base = lmb_align_down((end - size), align);
297 while (start <= base) {
298 j = lmb_overlaps_region(&lmb.reserved, base, size);
299 if (j < 0) {
300 /* this area isn't reserved, take it */
301 if (lmb_add_region(&lmb.reserved, base, size) < 0)
302 base = ~(u64)0;
303 return base;
304 }
305 res_base = lmb.reserved.region[j].base;
306 if (res_base < size)
307 break;
308 base = lmb_align_down(res_base - size, align);
309 }
310
311 return ~(u64)0;
312}
313
314static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
315 u64 (*nid_range)(u64, u64, int *),
316 u64 size, u64 align, int nid)
317{
318 u64 start, end;
319
320 start = mp->base;
321 end = start + mp->size;
322
323 start = lmb_align_up(start, align);
324 while (start < end) {
325 u64 this_end;
326 int this_nid;
327
328 this_end = nid_range(start, end, &this_nid);
329 if (this_nid == nid) {
330 u64 ret = lmb_alloc_nid_unreserved(start, this_end,
331 size, align);
332 if (ret != ~(u64)0)
333 return ret;
334 }
335 start = this_end;
336 }
337
338 return ~(u64)0;
339}
340
341u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
342 u64 (*nid_range)(u64 start, u64 end, int *nid))
343{
344 struct lmb_region *mem = &lmb.memory;
345 int i;
346
347 BUG_ON(0 == size);
348
349 size = lmb_align_up(size, align);
350
351 for (i = 0; i < mem->cnt; i++) {
352 u64 ret = lmb_alloc_nid_region(&mem->region[i],
353 nid_range,
354 size, align, nid);
355 if (ret != ~(u64)0)
356 return ret;
357 }
358
359 return lmb_alloc(size, align);
360}
361
362u64 __init lmb_alloc(u64 size, u64 align)
363{
364 return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
365}
366
367u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
368{
369 u64 alloc;
370
371 alloc = __lmb_alloc_base(size, align, max_addr);
372
373 if (alloc == 0)
374 panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
375 (unsigned long long) size, (unsigned long long) max_addr);
376
377 return alloc;
378}
379
380u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
381{
382 long i, j;
383 u64 base = 0;
384 u64 res_base;
385
386 BUG_ON(0 == size);
387
388 size = lmb_align_up(size, align);
389
390 /* On some platforms, make sure we allocate lowmem */
391 /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
392 if (max_addr == LMB_ALLOC_ANYWHERE)
393 max_addr = LMB_REAL_LIMIT;
394
395 for (i = lmb.memory.cnt - 1; i >= 0; i--) {
396 u64 lmbbase = lmb.memory.region[i].base;
397 u64 lmbsize = lmb.memory.region[i].size;
398
399 if (lmbsize < size)
400 continue;
401 if (max_addr == LMB_ALLOC_ANYWHERE)
402 base = lmb_align_down(lmbbase + lmbsize - size, align);
403 else if (lmbbase < max_addr) {
404 base = min(lmbbase + lmbsize, max_addr);
405 base = lmb_align_down(base - size, align);
406 } else
407 continue;
408
409 while (base && lmbbase <= base) {
410 j = lmb_overlaps_region(&lmb.reserved, base, size);
411 if (j < 0) {
412 /* this area isn't reserved, take it */
413 if (lmb_add_region(&lmb.reserved, base, size) < 0)
414 return 0;
415 return base;
416 }
417 res_base = lmb.reserved.region[j].base;
418 if (res_base < size)
419 break;
420 base = lmb_align_down(res_base - size, align);
421 }
422 }
423 return 0;
424}
425
426/* You must call lmb_analyze() before this. */
427u64 __init lmb_phys_mem_size(void)
428{
429 return lmb.memory.size;
430}
431
432u64 lmb_end_of_DRAM(void)
433{
434 int idx = lmb.memory.cnt - 1;
435
436 return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
437}
438
439/* You must call lmb_analyze() after this. */
440void __init lmb_enforce_memory_limit(u64 memory_limit)
441{
442 unsigned long i;
443 u64 limit;
444 struct lmb_property *p;
445
446 if (!memory_limit)
447 return;
448
449 /* Truncate the lmb regions to satisfy the memory limit. */
450 limit = memory_limit;
451 for (i = 0; i < lmb.memory.cnt; i++) {
452 if (limit > lmb.memory.region[i].size) {
453 limit -= lmb.memory.region[i].size;
454 continue;
455 }
456
457 lmb.memory.region[i].size = limit;
458 lmb.memory.cnt = i + 1;
459 break;
460 }
461
462 if (lmb.memory.region[0].size < lmb.rmo_size)
463 lmb.rmo_size = lmb.memory.region[0].size;
464
465 memory_limit = lmb_end_of_DRAM();
466
467 /* And truncate any reserves above the limit also. */
468 for (i = 0; i < lmb.reserved.cnt; i++) {
469 p = &lmb.reserved.region[i];
470
471 if (p->base > memory_limit)
472 p->size = 0;
473 else if ((p->base + p->size) > memory_limit)
474 p->size = memory_limit - p->base;
475
476 if (p->size == 0) {
477 lmb_remove_region(&lmb.reserved, i);
478 i--;
479 }
480 }
481}
482
483int __init lmb_is_reserved(u64 addr)
484{
485 int i;
486
487 for (i = 0; i < lmb.reserved.cnt; i++) {
488 u64 upper = lmb.reserved.region[i].base +
489 lmb.reserved.region[i].size - 1;
490 if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
491 return 1;
492 }
493 return 0;
494}
495
496int lmb_is_region_reserved(u64 base, u64 size)
497{
498 return lmb_overlaps_region(&lmb.reserved, base, size);
499}
500
501/*
502 * Given a <base, len>, find the memory region this range falls in,
503 * clamp the request to that region, and return the resulting contiguous chunk.
504 */
505int lmb_find(struct lmb_property *res)
506{
507 int i;
508 u64 rstart, rend;
509
510 rstart = res->base;
511 rend = rstart + res->size - 1;
512
513 for (i = 0; i < lmb.memory.cnt; i++) {
514 u64 start = lmb.memory.region[i].base;
515 u64 end = start + lmb.memory.region[i].size - 1;
516
517 if (start > rend)
518 return -1;
519
520 if ((end >= rstart) && (start < rend)) {
521 /* adjust the request */
522 if (rstart < start)
523 rstart = start;
524 if (rend > end)
525 rend = end;
526 res->base = rstart;
527 res->size = rend - rstart + 1;
528 return 0;
529 }
530 }
531 return -1;
532}
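
All of the lmb allocators above share one search strategy: round the size up to the requested alignment, scan candidate memory top-down, and whenever the candidate overlaps a reserved region, drop the candidate to just below that region and retry until it falls out of the range. Below is a minimal standalone sketch of that loop, with a hypothetical reserved[] array and overlaps() helper standing in for lmb.reserved and lmb_overlaps_region(), and with the alignment assumed to be a power of two, as lmb requires:

#include <stdint.h>
#include <stdio.h>

struct region { uint64_t base, size; };

/* hypothetical reserved list standing in for lmb.reserved */
static struct region reserved[] = { { 0xc000, 0x4000 } };

static uint64_t align_down(uint64_t addr, uint64_t align)
{
	return addr & ~(align - 1);
}

/* index of the first overlapping reserved region, or -1 */
static long overlaps(uint64_t base, uint64_t size)
{
	unsigned long i;

	for (i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++)
		if (base < reserved[i].base + reserved[i].size &&
		    reserved[i].base < base + size)
			return i;
	return -1;
}

/* top-down first fit between start and end, as in lmb_alloc_nid_unreserved */
static uint64_t alloc_range(uint64_t start, uint64_t end,
			    uint64_t size, uint64_t align)
{
	uint64_t base = align_down(end - size, align);

	while (start <= base) {
		long j = overlaps(base, size);

		if (j < 0)
			return base;	/* free: lmb would reserve it here */
		if (reserved[j].base < size)
			break;
		base = align_down(reserved[j].base - size, align);
	}
	return ~(uint64_t)0;
}

int main(void)
{
	/* steps below the reserved block at 0xc000..0xffff, prints 0xb000 */
	printf("0x%llx\n",
	       (unsigned long long)alloc_range(0x0, 0x10000, 0x1000, 0x1000));
	return 0;
}

Scanning from the top keeps low memory free for callers with addressing limits, which is also why __lmb_alloc_base() walks lmb.memory from the last region downwards.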
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 92cdd9936e3d..05da38bcc298 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -28,7 +28,6 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/notifier.h> 29#include <linux/notifier.h>
30#include <linux/cpu.h> 30#include <linux/cpu.h>
31#include <linux/gfp.h>
32#include <linux/string.h> 31#include <linux/string.h>
33#include <linux/bitops.h> 32#include <linux/bitops.h>
34#include <linux/rcupdate.h> 33#include <linux/rcupdate.h>
@@ -364,7 +363,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
364 unsigned int height, shift; 363 unsigned int height, shift;
365 struct radix_tree_node *node, **slot; 364 struct radix_tree_node *node, **slot;
366 365
367 node = rcu_dereference(root->rnode); 366 node = rcu_dereference_raw(root->rnode);
368 if (node == NULL) 367 if (node == NULL)
369 return NULL; 368 return NULL;
370 369
@@ -384,7 +383,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
384 do { 383 do {
385 slot = (struct radix_tree_node **) 384 slot = (struct radix_tree_node **)
386 (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); 385 (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
387 node = rcu_dereference(*slot); 386 node = rcu_dereference_raw(*slot);
388 if (node == NULL) 387 if (node == NULL)
389 return NULL; 388 return NULL;
390 389
@@ -556,6 +555,10 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
556 * 555 *
557 * 0: tag not present or not set 556 * 0: tag not present or not set
558 * 1: tag set 557 * 1: tag set
558 *
559 * Note that the return value of this function may not be relied on, even if
560 * the RCU lock is held, unless tag modification and node deletion are excluded
561 * from concurrency.
559 */ 562 */
560int radix_tree_tag_get(struct radix_tree_root *root, 563int radix_tree_tag_get(struct radix_tree_root *root,
561 unsigned long index, unsigned int tag) 564 unsigned long index, unsigned int tag)
@@ -568,7 +571,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
568 if (!root_tag_get(root, tag)) 571 if (!root_tag_get(root, tag))
569 return 0; 572 return 0;
570 573
571 node = rcu_dereference(root->rnode); 574 node = rcu_dereference_raw(root->rnode);
572 if (node == NULL) 575 if (node == NULL)
573 return 0; 576 return 0;
574 577
@@ -596,13 +599,9 @@ int radix_tree_tag_get(struct radix_tree_root *root,
596 */ 599 */
597 if (!tag_get(node, tag, offset)) 600 if (!tag_get(node, tag, offset))
598 saw_unset_tag = 1; 601 saw_unset_tag = 1;
599 if (height == 1) { 602 if (height == 1)
600 int ret = tag_get(node, tag, offset); 603 return !!tag_get(node, tag, offset);
601 604 node = rcu_dereference_raw(node->slots[offset]);
602 BUG_ON(ret && saw_unset_tag);
603 return !!ret;
604 }
605 node = rcu_dereference(node->slots[offset]);
606 shift -= RADIX_TREE_MAP_SHIFT; 605 shift -= RADIX_TREE_MAP_SHIFT;
607 height--; 606 height--;
608 } 607 }
@@ -657,7 +656,7 @@ EXPORT_SYMBOL(radix_tree_next_hole);
657 * 656 *
658 * Returns: the index of the hole if found, otherwise returns an index 657 * Returns: the index of the hole if found, otherwise returns an index
659 * outside of the set specified (in which case 'index - return >= max_scan' 658 * outside of the set specified (in which case 'index - return >= max_scan'
660 * will be true). In rare cases of wrap-around, LONG_MAX will be returned. 659 * will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
661 * 660 *
662 * radix_tree_next_hole may be called under rcu_read_lock. However, like 661 * radix_tree_next_hole may be called under rcu_read_lock. However, like
663 * radix_tree_gang_lookup, this will not atomically search a snapshot of 662 * radix_tree_gang_lookup, this will not atomically search a snapshot of
@@ -675,7 +674,7 @@ unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
675 if (!radix_tree_lookup(root, index)) 674 if (!radix_tree_lookup(root, index))
676 break; 675 break;
677 index--; 676 index--;
678 if (index == LONG_MAX) 677 if (index == ULONG_MAX)
679 break; 678 break;
680 } 679 }
681 680
@@ -711,7 +710,7 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
711 } 710 }
712 711
713 shift -= RADIX_TREE_MAP_SHIFT; 712 shift -= RADIX_TREE_MAP_SHIFT;
714 slot = rcu_dereference(slot->slots[i]); 713 slot = rcu_dereference_raw(slot->slots[i]);
715 if (slot == NULL) 714 if (slot == NULL)
716 goto out; 715 goto out;
717 } 716 }
@@ -758,7 +757,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
758 unsigned long cur_index = first_index; 757 unsigned long cur_index = first_index;
759 unsigned int ret; 758 unsigned int ret;
760 759
761 node = rcu_dereference(root->rnode); 760 node = rcu_dereference_raw(root->rnode);
762 if (!node) 761 if (!node)
763 return 0; 762 return 0;
764 763
@@ -787,7 +786,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
787 slot = *(((void ***)results)[ret + i]); 786 slot = *(((void ***)results)[ret + i]);
788 if (!slot) 787 if (!slot)
789 continue; 788 continue;
790 results[ret + nr_found] = rcu_dereference(slot); 789 results[ret + nr_found] = rcu_dereference_raw(slot);
791 nr_found++; 790 nr_found++;
792 } 791 }
793 ret += nr_found; 792 ret += nr_found;
@@ -826,7 +825,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
826 unsigned long cur_index = first_index; 825 unsigned long cur_index = first_index;
827 unsigned int ret; 826 unsigned int ret;
828 827
829 node = rcu_dereference(root->rnode); 828 node = rcu_dereference_raw(root->rnode);
830 if (!node) 829 if (!node)
831 return 0; 830 return 0;
832 831
@@ -915,7 +914,7 @@ __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index,
915 } 914 }
916 } 915 }
917 shift -= RADIX_TREE_MAP_SHIFT; 916 shift -= RADIX_TREE_MAP_SHIFT;
918 slot = rcu_dereference(slot->slots[i]); 917 slot = rcu_dereference_raw(slot->slots[i]);
919 if (slot == NULL) 918 if (slot == NULL)
920 break; 919 break;
921 } 920 }
@@ -951,7 +950,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
951 if (!root_tag_get(root, tag)) 950 if (!root_tag_get(root, tag))
952 return 0; 951 return 0;
953 952
954 node = rcu_dereference(root->rnode); 953 node = rcu_dereference_raw(root->rnode);
955 if (!node) 954 if (!node)
956 return 0; 955 return 0;
957 956
@@ -980,7 +979,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
980 slot = *(((void ***)results)[ret + i]); 979 slot = *(((void ***)results)[ret + i]);
981 if (!slot) 980 if (!slot)
982 continue; 981 continue;
983 results[ret + nr_found] = rcu_dereference(slot); 982 results[ret + nr_found] = rcu_dereference_raw(slot);
984 nr_found++; 983 nr_found++;
985 } 984 }
986 ret += nr_found; 985 ret += nr_found;
@@ -1020,7 +1019,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
1020 if (!root_tag_get(root, tag)) 1019 if (!root_tag_get(root, tag))
1021 return 0; 1020 return 0;
1022 1021
1023 node = rcu_dereference(root->rnode); 1022 node = rcu_dereference_raw(root->rnode);
1024 if (!node) 1023 if (!node)
1025 return 0; 1024 return 0;
1026 1025
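
The rcu_dereference() to rcu_dereference_raw() conversion is not a behavioural change: both forms issue the same dependency barrier, but the lockdep-checked rcu_dereference() complains when used outside an RCU read-side critical section, and these lookups are documented as callable with the caller supplying its own protection. A hedged sketch of the calling convention, with an illustrative tree: lookups run under rcu_read_lock() while updates are serialized by the caller's own lock.

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);		/* illustrative tree */
static DEFINE_SPINLOCK(my_tree_lock);

static void *lookup_item(unsigned long index)
{
	void *item;

	rcu_read_lock();			/* readers need only RCU */
	item = radix_tree_lookup(&my_tree, index);
	/* a real caller would take a reference on item before unlocking */
	rcu_read_unlock();
	return item;
}

static int insert_item(unsigned long index, void *item)
{
	int err;

	spin_lock(&my_tree_lock);		/* updates need real locking */
	err = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_tree_lock);
	return err;
}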
diff --git a/lib/random32.c b/lib/random32.c
index 217d5c4b666d..870dc3fc0f0f 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -39,13 +39,16 @@
39#include <linux/jiffies.h> 39#include <linux/jiffies.h>
40#include <linux/random.h> 40#include <linux/random.h>
41 41
42struct rnd_state {
43 u32 s1, s2, s3;
44};
45
46static DEFINE_PER_CPU(struct rnd_state, net_rand_state); 42static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
47 43
48static u32 __random32(struct rnd_state *state) 44/**
45 * prandom32 - seeded pseudo-random number generator.
46 * @state: pointer to state structure holding seeded state.
47 *
48 * This is used for pseudo-randomness with no outside seeding.
49 * For more random results, use random32().
50 */
51u32 prandom32(struct rnd_state *state)
49{ 52{
50#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b) 53#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
51 54
@@ -55,14 +58,7 @@ static u32 __random32(struct rnd_state *state)
55 58
56 return (state->s1 ^ state->s2 ^ state->s3); 59 return (state->s1 ^ state->s2 ^ state->s3);
57} 60}
58 61EXPORT_SYMBOL(prandom32);
59/*
60 * Handle minimum values for seeds
61 */
62static inline u32 __seed(u32 x, u32 m)
63{
64 return (x < m) ? x + m : x;
65}
66 62
67/** 63/**
68 * random32 - pseudo random number generator 64 * random32 - pseudo random number generator
@@ -75,7 +71,7 @@ u32 random32(void)
75{ 71{
76 unsigned long r; 72 unsigned long r;
77 struct rnd_state *state = &get_cpu_var(net_rand_state); 73 struct rnd_state *state = &get_cpu_var(net_rand_state);
78 r = __random32(state); 74 r = prandom32(state);
79 put_cpu_var(state); 75 put_cpu_var(state);
80 return r; 76 return r;
81} 77}
@@ -118,12 +114,12 @@ static int __init random32_init(void)
118 state->s3 = __seed(LCG(state->s2), 15); 114 state->s3 = __seed(LCG(state->s2), 15);
119 115
120 /* "warm it up" */ 116 /* "warm it up" */
121 __random32(state); 117 prandom32(state);
122 __random32(state); 118 prandom32(state);
123 __random32(state); 119 prandom32(state);
124 __random32(state); 120 prandom32(state);
125 __random32(state); 121 prandom32(state);
126 __random32(state); 122 prandom32(state);
127 } 123 }
128 return 0; 124 return 0;
129} 125}
@@ -147,7 +143,7 @@ static int __init random32_reseed(void)
147 state->s3 = __seed(seeds[2], 15); 143 state->s3 = __seed(seeds[2], 15);
148 144
149 /* mix it in */ 145 /* mix it in */
150 __random32(state); 146 prandom32(state);
151 } 147 }
152 return 0; 148 return 0;
153} 149}
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 09f5ce1810dc..027a03f4c56d 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -16,9 +16,14 @@
16/* 16/*
17 * __ratelimit - rate limiting 17 * __ratelimit - rate limiting
18 * @rs: ratelimit_state data 18 * @rs: ratelimit_state data
19 * @func: name of calling function
19 * 20 *
20 * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks 21 * This enforces a rate limit: not more than @rs->burst callbacks
21 * in every @rs->ratelimit_jiffies 22 * in every @rs->interval
23 *
24 * RETURNS:
25 * 0 means callbacks will be suppressed.
26 * 1 means go ahead and do it.
22 */ 27 */
23int ___ratelimit(struct ratelimit_state *rs, const char *func) 28int ___ratelimit(struct ratelimit_state *rs, const char *func)
24{ 29{
@@ -35,7 +40,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
35 * the entity that is holding the lock already: 40 * the entity that is holding the lock already:
36 */ 41 */
37 if (!spin_trylock_irqsave(&rs->lock, flags)) 42 if (!spin_trylock_irqsave(&rs->lock, flags))
38 return 1; 43 return 0;
39 44
40 if (!rs->begin) 45 if (!rs->begin)
41 rs->begin = jiffies; 46 rs->begin = jiffies;
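
Two things change in ___ratelimit(): the kernel-doc now states the contract explicitly (nonzero means go ahead, zero means suppress), and the trylock-failure path is fixed to suppress the callback rather than allow an unlimited stream of them whenever the lock is contended. A typical caller, using the stock helpers from <linux/ratelimit.h>; the state name and message are illustrative:

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* at most 10 messages per 5-second window */
static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);

static void report_error(int err)
{
	if (__ratelimit(&my_rs))	/* expands to ___ratelimit(&my_rs, __func__) */
		printk(KERN_WARNING "device error %d\n", err);
	/* on 0 the message is suppressed; ___ratelimit logs how many
	 * callbacks were suppressed once the interval expires */
}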
diff --git a/lib/rbtree.c b/lib/rbtree.c
index e2aa3be29858..4693f79195d3 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -283,6 +283,74 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
283} 283}
284EXPORT_SYMBOL(rb_erase); 284EXPORT_SYMBOL(rb_erase);
285 285
286static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data)
287{
288 struct rb_node *parent;
289
290up:
291 func(node, data);
292 parent = rb_parent(node);
293 if (!parent)
294 return;
295
296 if (node == parent->rb_left && parent->rb_right)
297 func(parent->rb_right, data);
298 else if (parent->rb_left)
299 func(parent->rb_left, data);
300
301 node = parent;
302 goto up;
303}
304
305/*
306 * after inserting @node into the tree, update the tree to account for
307 * both the new entry and any damage done by rebalance
308 */
309void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
310{
311 if (node->rb_left)
312 node = node->rb_left;
313 else if (node->rb_right)
314 node = node->rb_right;
315
316 rb_augment_path(node, func, data);
317}
318
319/*
320 * before removing the node, find the deepest node on the rebalance path
321 * that will still be there after @node gets removed
322 */
323struct rb_node *rb_augment_erase_begin(struct rb_node *node)
324{
325 struct rb_node *deepest;
326
327 if (!node->rb_right && !node->rb_left)
328 deepest = rb_parent(node);
329 else if (!node->rb_right)
330 deepest = node->rb_left;
331 else if (!node->rb_left)
332 deepest = node->rb_right;
333 else {
334 deepest = rb_next(node);
335 if (deepest->rb_right)
336 deepest = deepest->rb_right;
337 else if (rb_parent(deepest) != node)
338 deepest = rb_parent(deepest);
339 }
340
341 return deepest;
342}
343
344/*
345 * after removal, update the tree to account for the removed entry
346 * and any rebalance damage.
347 */
348void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
349{
350 if (node)
351 rb_augment_path(node, func, data);
352}
353
286/* 354/*
287 * This function returns the first node (in sort order) of the tree. 355 * This function returns the first node (in sort order) of the tree.
288 */ 356 */
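
The new augmentation hooks let a user keep per-node metadata (say, the maximum value in a node's subtree) consistent across inserts and erases: rb_augment_insert() propagates from a newly linked node back to the root, and the erase pair brackets rb_erase() so propagation restarts from the deepest node that survives the removal. A hedged usage sketch with a hypothetical subtree_max field:

#include <linux/rbtree.h>

struct my_node {
	struct rb_node rb;
	unsigned long value;
	unsigned long subtree_max;	/* hypothetical augmented field */
};

/* rb_augment_f callback: recompute one node from its children */
static void update_max(struct rb_node *rb, void *data)
{
	struct my_node *n = rb_entry(rb, struct my_node, rb);
	unsigned long m = n->value;

	if (rb->rb_left) {
		struct my_node *l = rb_entry(rb->rb_left, struct my_node, rb);
		if (l->subtree_max > m)
			m = l->subtree_max;
	}
	if (rb->rb_right) {
		struct my_node *r = rb_entry(rb->rb_right, struct my_node, rb);
		if (r->subtree_max > m)
			m = r->subtree_max;
	}
	n->subtree_max = m;
}

static void my_insert_done(struct my_node *n)
{
	/* after rb_link_node() + rb_insert_color() ... */
	rb_augment_insert(&n->rb, update_max, NULL);
}

static void my_erase(struct my_node *n, struct rb_root *root)
{
	struct rb_node *deepest = rb_augment_erase_begin(&n->rb);

	rb_erase(&n->rb, root);
	rb_augment_erase_end(deepest, update_max, NULL);
}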
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index ccf95bff7984..ffc9fc7f3b05 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -143,13 +143,14 @@ void __sched __down_read(struct rw_semaphore *sem)
143{ 143{
144 struct rwsem_waiter waiter; 144 struct rwsem_waiter waiter;
145 struct task_struct *tsk; 145 struct task_struct *tsk;
146 unsigned long flags;
146 147
147 spin_lock_irq(&sem->wait_lock); 148 spin_lock_irqsave(&sem->wait_lock, flags);
148 149
149 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 150 if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
150 /* granted */ 151 /* granted */
151 sem->activity++; 152 sem->activity++;
152 spin_unlock_irq(&sem->wait_lock); 153 spin_unlock_irqrestore(&sem->wait_lock, flags);
153 goto out; 154 goto out;
154 } 155 }
155 156
@@ -164,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
164 list_add_tail(&waiter.list, &sem->wait_list); 165 list_add_tail(&waiter.list, &sem->wait_list);
165 166
166 /* we don't need to touch the semaphore struct anymore */ 167 /* we don't need to touch the semaphore struct anymore */
167 spin_unlock_irq(&sem->wait_lock); 168 spin_unlock_irqrestore(&sem->wait_lock, flags);
168 169
169 /* wait to be given the lock */ 170 /* wait to be given the lock */
170 for (;;) { 171 for (;;) {
@@ -209,13 +210,14 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
209{ 210{
210 struct rwsem_waiter waiter; 211 struct rwsem_waiter waiter;
211 struct task_struct *tsk; 212 struct task_struct *tsk;
213 unsigned long flags;
212 214
213 spin_lock_irq(&sem->wait_lock); 215 spin_lock_irqsave(&sem->wait_lock, flags);
214 216
215 if (sem->activity == 0 && list_empty(&sem->wait_list)) { 217 if (sem->activity == 0 && list_empty(&sem->wait_list)) {
216 /* granted */ 218 /* granted */
217 sem->activity = -1; 219 sem->activity = -1;
218 spin_unlock_irq(&sem->wait_lock); 220 spin_unlock_irqrestore(&sem->wait_lock, flags);
219 goto out; 221 goto out;
220 } 222 }
221 223
@@ -230,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
230 list_add_tail(&waiter.list, &sem->wait_list); 232 list_add_tail(&waiter.list, &sem->wait_list);
231 233
232 /* we don't need to touch the semaphore struct anymore */ 234 /* we don't need to touch the semaphore struct anymore */
233 spin_unlock_irq(&sem->wait_lock); 235 spin_unlock_irqrestore(&sem->wait_lock, flags);
234 236
235 /* wait to be given the lock */ 237 /* wait to be given the lock */
236 for (;;) { 238 for (;;) {
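
The switch from spin_lock_irq() to spin_lock_irqsave() matters because spin_unlock_irq() unconditionally re-enables interrupts: if a caller took the semaphore with interrupts already disabled, the old slow path would silently turn them back on. The saved-flags form restores whatever state the caller had, as in the general pattern:

	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);	/* safe with IRQs on or off */
	/* ... critical section ... */
	spin_unlock_irqrestore(&sem->wait_lock, flags);	/* restores the prior IRQ
							 * state instead of forcing
							 * IRQs on, as the _irq
							 * variant would */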
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 3e3365e5665e..ceba8e28807a 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -136,9 +136,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
136 out: 136 out:
137 return sem; 137 return sem;
138 138
139 /* undo the change to count, but check for a transition 1->0 */ 139 /* undo the change to the active count, but check for a transition
140 * 1->0 */
140 undo: 141 undo:
141 if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0) 142 if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
142 goto out; 143 goto out;
143 goto try_again; 144 goto try_again;
144} 145}
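
The undo fix hinges on how the rwsem count is encoded: active readers and writers occupy the low bits (RWSEM_ACTIVE_MASK) while each queued waiter contributes a separate bias, so after backing out our RWSEM_ACTIVE_BIAS the count can be nonzero purely because waiters are queued. Testing the whole count against zero therefore missed genuine 1 to 0 transitions of the active part; masking first does not. A standalone arithmetic check, assuming the common 32-bit encoding (the constants are per-architecture):

#include <stdint.h>
#include <stdio.h>

#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)

int main(void)
{
	/* one active locker (us) plus one queued waiter */
	int32_t count = RWSEM_ACTIVE_BIAS + RWSEM_WAITING_BIAS;

	count -= RWSEM_ACTIVE_BIAS;		/* undo our bias */

	printf("count != 0          -> %d (old test: wrongly bails out)\n",
	       count != 0);
	printf("count & ACTIVE_MASK -> %d (new test: retries the wakeup)\n",
	       (int)(count & RWSEM_ACTIVE_MASK));
	return 0;
}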
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 0d475d8167bf..9afa25b52a83 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -7,6 +7,7 @@
7 * Version 2. See the file COPYING for more details. 7 * Version 2. See the file COPYING for more details.
8 */ 8 */
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/slab.h>
10#include <linux/scatterlist.h> 11#include <linux/scatterlist.h>
11#include <linux/highmem.h> 12#include <linux/highmem.h>
12 13
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 238e72a18ce1..fdc77c82f922 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -15,7 +15,7 @@ void show_mem(void)
15 unsigned long total = 0, reserved = 0, shared = 0, 15 unsigned long total = 0, reserved = 0, shared = 0,
16 nonshared = 0, highmem = 0; 16 nonshared = 0, highmem = 0;
17 17
18 printk(KERN_INFO "Mem-Info:\n"); 18 printk("Mem-Info:\n");
19 show_free_areas(); 19 show_free_areas();
20 20
21 for_each_online_pgdat(pgdat) { 21 for_each_online_pgdat(pgdat) {
@@ -49,15 +49,15 @@ void show_mem(void)
49 pgdat_resize_unlock(pgdat, &flags); 49 pgdat_resize_unlock(pgdat, &flags);
50 } 50 }
51 51
52 printk(KERN_INFO "%lu pages RAM\n", total); 52 printk("%lu pages RAM\n", total);
53#ifdef CONFIG_HIGHMEM 53#ifdef CONFIG_HIGHMEM
54 printk(KERN_INFO "%lu pages HighMem\n", highmem); 54 printk("%lu pages HighMem\n", highmem);
55#endif 55#endif
56 printk(KERN_INFO "%lu pages reserved\n", reserved); 56 printk("%lu pages reserved\n", reserved);
57 printk(KERN_INFO "%lu pages shared\n", shared); 57 printk("%lu pages shared\n", shared);
58 printk(KERN_INFO "%lu pages non-shared\n", nonshared); 58 printk("%lu pages non-shared\n", nonshared);
59#ifdef CONFIG_QUICKLIST 59#ifdef CONFIG_QUICKLIST
60 printk(KERN_INFO "%lu pages in pagetable cache\n", 60 printk("%lu pages in pagetable cache\n",
61 quicklist_total_size()); 61 quicklist_total_size());
62#endif 62#endif
63} 63}
diff --git a/lib/string.c b/lib/string.c
index a1cdcfcc42d0..f71bead1be3e 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -36,25 +36,21 @@ int strnicmp(const char *s1, const char *s2, size_t len)
36 /* Yes, Virginia, it had better be unsigned */ 36 /* Yes, Virginia, it had better be unsigned */
37 unsigned char c1, c2; 37 unsigned char c1, c2;
38 38
39 c1 = c2 = 0; 39 if (!len)
40 if (len) { 40 return 0;
41 do { 41
42 c1 = *s1; 42 do {
43 c2 = *s2; 43 c1 = *s1++;
44 s1++; 44 c2 = *s2++;
45 s2++; 45 if (!c1 || !c2)
46 if (!c1) 46 break;
47 break; 47 if (c1 == c2)
48 if (!c2) 48 continue;
49 break; 49 c1 = tolower(c1);
50 if (c1 == c2) 50 c2 = tolower(c2);
51 continue; 51 if (c1 != c2)
52 c1 = tolower(c1); 52 break;
53 c2 = tolower(c2); 53 } while (--len);
54 if (c1 != c2)
55 break;
56 } while (--len);
57 }
58 return (int)c1 - (int)c2; 54 return (int)c1 - (int)c2;
59} 55}
60EXPORT_SYMBOL(strnicmp); 56EXPORT_SYMBOL(strnicmp);
@@ -693,13 +689,13 @@ EXPORT_SYMBOL(strstr);
693 */ 689 */
694char *strnstr(const char *s1, const char *s2, size_t len) 690char *strnstr(const char *s1, const char *s2, size_t len)
695{ 691{
696 size_t l1 = len, l2; 692 size_t l2;
697 693
698 l2 = strlen(s2); 694 l2 = strlen(s2);
699 if (!l2) 695 if (!l2)
700 return (char *)s1; 696 return (char *)s1;
701 while (l1 >= l2) { 697 while (len >= l2) {
702 l1--; 698 len--;
703 if (!memcmp(s1, s2, l2)) 699 if (!memcmp(s1, s2, l2))
704 return (char *)s1; 700 return (char *)s1;
705 s1++; 701 s1++;
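
The strnstr() rewrite drops the redundant l1 copy and counts len down directly; the behaviour is unchanged: search at most the first len bytes of s1 for s2, returning a pointer into s1 or NULL. A standalone transcription of the fixed function with a couple of checks (the trailing return NULL falls outside the hunk shown above):

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* the fixed kernel strnstr(), transcribed for userspace */
static char *my_strnstr(const char *s1, const char *s2, size_t len)
{
	size_t l2 = strlen(s2);

	if (!l2)
		return (char *)s1;
	while (len >= l2) {
		len--;
		if (!memcmp(s1, s2, l2))
			return (char *)s1;
		s1++;
	}
	return NULL;
}

int main(void)
{
	const char *buf = "kernel string";

	assert(my_strnstr(buf, "string", 13) == buf + 7);
	assert(my_strnstr(buf, "string", 8) == NULL);	/* window too short */
	assert(my_strnstr(buf, "", 0) == buf);		/* empty needle matches */
	return 0;
}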
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 437eedb5a53b..a009055140ec 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -28,6 +28,7 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/ctype.h> 29#include <linux/ctype.h>
30#include <linux/highmem.h> 30#include <linux/highmem.h>
31#include <linux/gfp.h>
31 32
32#include <asm/io.h> 33#include <asm/io.h>
33#include <asm/dma.h> 34#include <asm/dma.h>
@@ -756,37 +757,6 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
756EXPORT_SYMBOL(swiotlb_sync_single_for_device); 757EXPORT_SYMBOL(swiotlb_sync_single_for_device);
757 758
758/* 759/*
759 * Same as above, but for a sub-range of the mapping.
760 */
761static void
762swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
763 unsigned long offset, size_t size,
764 int dir, int target)
765{
766 swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
767}
768
769void
770swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
771 unsigned long offset, size_t size,
772 enum dma_data_direction dir)
773{
774 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
775 SYNC_FOR_CPU);
776}
777EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
778
779void
780swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
781 unsigned long offset, size_t size,
782 enum dma_data_direction dir)
783{
784 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
785 SYNC_FOR_DEVICE);
786}
787EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
788
789/*
790 * Map a set of buffers described by scatterlist in streaming mode for DMA. 760 * Map a set of buffers described by scatterlist in streaming mode for DMA.
791 * This is the scatter-gather version of the above swiotlb_map_page 761 * This is the scatter-gather version of the above swiotlb_map_page
792 * interface. Here the scatter gather list elements are each tagged with the 762 * interface. Here the scatter gather list elements are each tagged with the
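
With the sub-range sync helpers removed, a driver that used to pass an offset now folds it into the DMA address itself; the remaining *_for_cpu/*_for_device entry points already accept an arbitrary address and size. A hedged before/after sketch through the generic DMA API, which is how drivers reach swiotlb (dev, dma_handle and the sizes are illustrative):

#include <linux/dma-mapping.h>

static void sync_header_for_cpu(struct device *dev, dma_addr_t dma_handle)
{
	size_t offset = 64, len = 128;		/* illustrative sub-range */

	/* formerly:
	 *   swiotlb_sync_single_range_for_cpu(dev, dma_handle,
	 *                                     offset, len, DMA_FROM_DEVICE);
	 * now the offset simply becomes part of the address:
	 */
	dma_sync_single_for_cpu(dev, dma_handle + offset, len,
				DMA_FROM_DEVICE);
}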
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 9fbcb44c554f..d608331b3e47 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -103,6 +103,7 @@
103#include <linux/rcupdate.h> 103#include <linux/rcupdate.h>
104#include <linux/err.h> 104#include <linux/err.h>
105#include <linux/textsearch.h> 105#include <linux/textsearch.h>
106#include <linux/slab.h>
106 107
107static LIST_HEAD(ts_ops); 108static LIST_HEAD(ts_ops);
108static DEFINE_SPINLOCK(ts_mod_lock); 109static DEFINE_SPINLOCK(ts_mod_lock);
diff --git a/lib/uuid.c b/lib/uuid.c
new file mode 100644
index 000000000000..8fadd7cef46c
--- /dev/null
+++ b/lib/uuid.c
@@ -0,0 +1,53 @@
1/*
2 * Unified UUID/GUID definition
3 *
4 * Copyright (C) 2009, Intel Corp.
5 * Huang Ying <ying.huang@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation;
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/uuid.h>
24#include <linux/random.h>
25
26static void __uuid_gen_common(__u8 b[16])
27{
28 int i;
29 u32 r;
30
31 for (i = 0; i < 4; i++) {
32 r = random32();
33 memcpy(b + i * 4, &r, 4);
34 }
35 /* variant 0b10 */
36 b[8] = (b[8] & 0x3F) | 0x80;
37}
38
39void uuid_le_gen(uuid_le *lu)
40{
41 __uuid_gen_common(lu->b);
42 /* version 4 : random generation */
43 lu->b[7] = (lu->b[7] & 0x0F) | 0x40;
44}
45EXPORT_SYMBOL_GPL(uuid_le_gen);
46
47void uuid_be_gen(uuid_be *bu)
48{
49 __uuid_gen_common(bu->b);
50 /* version 4 : random generation */
51 bu->b[6] = (bu->b[6] & 0x0F) | 0x40;
52}
53EXPORT_SYMBOL_GPL(uuid_be_gen);
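
Both generators fill 16 random bytes and then stamp the RFC 4122 variant (top two bits of byte 8 set to 10) and version (high nibble of the version byte set to 4, random generation); the only difference between uuid_le_gen() and uuid_be_gen() is which byte holds the version field, because the first three groups are stored in opposite byte orders. A standalone userspace sketch of the big-endian case, using the C library's rand() as a stand-in for random32():

#include <stdio.h>
#include <stdlib.h>

static void uuid_be_gen(unsigned char b[16])
{
	int i;

	for (i = 0; i < 16; i++)
		b[i] = rand() & 0xff;	/* stand-in for random32() */
	b[8] = (b[8] & 0x3f) | 0x80;	/* variant 0b10 (RFC 4122) */
	b[6] = (b[6] & 0x0f) | 0x40;	/* version 4: random generation */
}

int main(void)
{
	unsigned char b[16];
	int i;

	srand(42);
	uuid_be_gen(b);
	for (i = 0; i < 16; i++)
		printf("%02x%s", b[i],
		       (i == 3 || i == 5 || i == 7 || i == 9) ? "-" : "");
	printf("\n");	/* xxxxxxxx-xxxx-4xxx-[89ab]xxx-xxxxxxxxxxxx */
	return 0;
}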
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3b8aeec4e327..b8a2f549ab0e 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -118,6 +118,7 @@ long long simple_strtoll(const char *cp, char **endp, unsigned int base)
118 118
119 return simple_strtoull(cp, endp, base); 119 return simple_strtoull(cp, endp, base);
120} 120}
121EXPORT_SYMBOL(simple_strtoll);
121 122
122/** 123/**
123 * strict_strtoul - convert a string to an unsigned long strictly 124 * strict_strtoul - convert a string to an unsigned long strictly
@@ -266,7 +267,8 @@ int strict_strtoll(const char *cp, unsigned int base, long long *res)
266} 267}
267EXPORT_SYMBOL(strict_strtoll); 268EXPORT_SYMBOL(strict_strtoll);
268 269
269static int skip_atoi(const char **s) 270static noinline_for_stack
271int skip_atoi(const char **s)
270{ 272{
271 int i = 0; 273 int i = 0;
272 274
@@ -286,7 +288,8 @@ static int skip_atoi(const char **s)
286/* Formats correctly any integer in [0,99999]. 288/* Formats correctly any integer in [0,99999].
287 * Outputs from one to five digits depending on input. 289 * Outputs from one to five digits depending on input.
288 * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */ 290 * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
289static char *put_dec_trunc(char *buf, unsigned q) 291static noinline_for_stack
292char *put_dec_trunc(char *buf, unsigned q)
290{ 293{
291 unsigned d3, d2, d1, d0; 294 unsigned d3, d2, d1, d0;
292 d1 = (q>>4) & 0xf; 295 d1 = (q>>4) & 0xf;
@@ -323,7 +326,8 @@ static char *put_dec_trunc(char *buf, unsigned q)
323 return buf; 326 return buf;
324} 327}
325/* Same with if's removed. Always emits five digits */ 328/* Same with if's removed. Always emits five digits */
326static char *put_dec_full(char *buf, unsigned q) 329static noinline_for_stack
330char *put_dec_full(char *buf, unsigned q)
327{ 331{
328 /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ 332 /* BTW, if q is in [0,9999], 8-bit ints will be enough, */
329 /* but anyway, gcc produces better code with full-sized ints */ 333 /* but anyway, gcc produces better code with full-sized ints */
@@ -365,7 +369,8 @@ static char *put_dec_full(char *buf, unsigned q)
365 return buf; 369 return buf;
366} 370}
367/* No inlining helps gcc to use registers better */ 371/* No inlining helps gcc to use registers better */
368static noinline char *put_dec(char *buf, unsigned long long num) 372static noinline_for_stack
373char *put_dec(char *buf, unsigned long long num)
369{ 374{
370 while (1) { 375 while (1) {
371 unsigned rem; 376 unsigned rem;
@@ -381,8 +386,8 @@ static noinline char *put_dec(char *buf, unsigned long long num)
381#define PLUS 4 /* show plus */ 386#define PLUS 4 /* show plus */
382#define SPACE 8 /* space if plus */ 387#define SPACE 8 /* space if plus */
383#define LEFT 16 /* left justified */ 388#define LEFT 16 /* left justified */
384#define SMALL 32 /* Must be 32 == 0x20 */ 389#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */
385#define SPECIAL 64 /* 0x */ 390#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */
386 391
387enum format_type { 392enum format_type {
388 FORMAT_TYPE_NONE, /* Just a string part */ 393 FORMAT_TYPE_NONE, /* Just a string part */
@@ -408,16 +413,17 @@ enum format_type {
408}; 413};
409 414
410struct printf_spec { 415struct printf_spec {
411 enum format_type type; 416 u8 type; /* format_type enum */
412 int flags; /* flags to number() */ 417 u8 flags; /* flags to number() */
413 int field_width; /* width of output field */ 418 u8 base; /* number base, 8, 10 or 16 only */
414 int base; 419 u8 qualifier; /* number qualifier, one of 'hHlLtzZ' */
415 int precision; /* # of digits/chars */ 420 s16 field_width; /* width of output field */
416 int qualifier; 421 s16 precision; /* # of digits/chars */
417}; 422};
418 423
419static char *number(char *buf, char *end, unsigned long long num, 424static noinline_for_stack
420 struct printf_spec spec) 425char *number(char *buf, char *end, unsigned long long num,
426 struct printf_spec spec)
421{ 427{
422 /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ 428 /* we are called with base 8, 10 or 16, only, thus don't need "G..." */
423 static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ 429 static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
@@ -536,7 +542,8 @@ static char *number(char *buf, char *end, unsigned long long num,
536 return buf; 542 return buf;
537} 543}
538 544
539static char *string(char *buf, char *end, const char *s, struct printf_spec spec) 545static noinline_for_stack
546char *string(char *buf, char *end, const char *s, struct printf_spec spec)
540{ 547{
541 int len, i; 548 int len, i;
542 549
@@ -566,8 +573,9 @@ static char *string(char *buf, char *end, const char *s, struct printf_spec spec
566 return buf; 573 return buf;
567} 574}
568 575
569static char *symbol_string(char *buf, char *end, void *ptr, 576static noinline_for_stack
570 struct printf_spec spec, char ext) 577char *symbol_string(char *buf, char *end, void *ptr,
578 struct printf_spec spec, char ext)
571{ 579{
572 unsigned long value = (unsigned long) ptr; 580 unsigned long value = (unsigned long) ptr;
573#ifdef CONFIG_KALLSYMS 581#ifdef CONFIG_KALLSYMS
@@ -587,8 +595,9 @@ static char *symbol_string(char *buf, char *end, void *ptr,
587#endif 595#endif
588} 596}
589 597
590static char *resource_string(char *buf, char *end, struct resource *res, 598static noinline_for_stack
591 struct printf_spec spec, const char *fmt) 599char *resource_string(char *buf, char *end, struct resource *res,
600 struct printf_spec spec, const char *fmt)
592{ 601{
593#ifndef IO_RSRC_PRINTK_SIZE 602#ifndef IO_RSRC_PRINTK_SIZE
594#define IO_RSRC_PRINTK_SIZE 6 603#define IO_RSRC_PRINTK_SIZE 6
@@ -597,22 +606,35 @@ static char *resource_string(char *buf, char *end, struct resource *res,
597#ifndef MEM_RSRC_PRINTK_SIZE 606#ifndef MEM_RSRC_PRINTK_SIZE
598#define MEM_RSRC_PRINTK_SIZE 10 607#define MEM_RSRC_PRINTK_SIZE 10
599#endif 608#endif
600 struct printf_spec hex_spec = { 609 static const struct printf_spec io_spec = {
601 .base = 16, 610 .base = 16,
611 .field_width = IO_RSRC_PRINTK_SIZE,
602 .precision = -1, 612 .precision = -1,
603 .flags = SPECIAL | SMALL | ZEROPAD, 613 .flags = SPECIAL | SMALL | ZEROPAD,
604 }; 614 };
605 struct printf_spec dec_spec = { 615 static const struct printf_spec mem_spec = {
616 .base = 16,
617 .field_width = MEM_RSRC_PRINTK_SIZE,
618 .precision = -1,
619 .flags = SPECIAL | SMALL | ZEROPAD,
620 };
621 static const struct printf_spec bus_spec = {
622 .base = 16,
623 .field_width = 2,
624 .precision = -1,
625 .flags = SMALL | ZEROPAD,
626 };
627 static const struct printf_spec dec_spec = {
606 .base = 10, 628 .base = 10,
607 .precision = -1, 629 .precision = -1,
608 .flags = 0, 630 .flags = 0,
609 }; 631 };
610 struct printf_spec str_spec = { 632 static const struct printf_spec str_spec = {
611 .field_width = -1, 633 .field_width = -1,
612 .precision = 10, 634 .precision = 10,
613 .flags = LEFT, 635 .flags = LEFT,
614 }; 636 };
615 struct printf_spec flag_spec = { 637 static const struct printf_spec flag_spec = {
616 .base = 16, 638 .base = 16,
617 .precision = -1, 639 .precision = -1,
618 .flags = SPECIAL | SMALL, 640 .flags = SPECIAL | SMALL,
@@ -622,47 +644,48 @@ static char *resource_string(char *buf, char *end, struct resource *res,
622 * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */ 644 * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
623#define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4) 645#define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4)
624#define FLAG_BUF_SIZE (2 * sizeof(res->flags)) 646#define FLAG_BUF_SIZE (2 * sizeof(res->flags))
625#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref disabled]") 647#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]")
626#define RAW_BUF_SIZE sizeof("[mem - flags 0x]") 648#define RAW_BUF_SIZE sizeof("[mem - flags 0x]")
627 char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE, 649 char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
628 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)]; 650 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
629 651
630 char *p = sym, *pend = sym + sizeof(sym); 652 char *p = sym, *pend = sym + sizeof(sym);
631 int size = -1, addr = 0;
632 int decode = (fmt[0] == 'R') ? 1 : 0; 653 int decode = (fmt[0] == 'R') ? 1 : 0;
633 654 const struct printf_spec *specp;
634 if (res->flags & IORESOURCE_IO) {
635 size = IO_RSRC_PRINTK_SIZE;
636 addr = 1;
637 } else if (res->flags & IORESOURCE_MEM) {
638 size = MEM_RSRC_PRINTK_SIZE;
639 addr = 1;
640 }
641 655
642 *p++ = '['; 656 *p++ = '[';
643 if (res->flags & IORESOURCE_IO) 657 if (res->flags & IORESOURCE_IO) {
644 p = string(p, pend, "io ", str_spec); 658 p = string(p, pend, "io ", str_spec);
645 else if (res->flags & IORESOURCE_MEM) 659 specp = &io_spec;
660 } else if (res->flags & IORESOURCE_MEM) {
646 p = string(p, pend, "mem ", str_spec); 661 p = string(p, pend, "mem ", str_spec);
647 else if (res->flags & IORESOURCE_IRQ) 662 specp = &mem_spec;
663 } else if (res->flags & IORESOURCE_IRQ) {
648 p = string(p, pend, "irq ", str_spec); 664 p = string(p, pend, "irq ", str_spec);
649 else if (res->flags & IORESOURCE_DMA) 665 specp = &dec_spec;
666 } else if (res->flags & IORESOURCE_DMA) {
650 p = string(p, pend, "dma ", str_spec); 667 p = string(p, pend, "dma ", str_spec);
651 else { 668 specp = &dec_spec;
669 } else if (res->flags & IORESOURCE_BUS) {
670 p = string(p, pend, "bus ", str_spec);
671 specp = &bus_spec;
672 } else {
652 p = string(p, pend, "??? ", str_spec); 673 p = string(p, pend, "??? ", str_spec);
674 specp = &mem_spec;
653 decode = 0; 675 decode = 0;
654 } 676 }
655 hex_spec.field_width = size; 677 p = number(p, pend, res->start, *specp);
656 p = number(p, pend, res->start, addr ? hex_spec : dec_spec);
657 if (res->start != res->end) { 678 if (res->start != res->end) {
658 *p++ = '-'; 679 *p++ = '-';
659 p = number(p, pend, res->end, addr ? hex_spec : dec_spec); 680 p = number(p, pend, res->end, *specp);
660 } 681 }
661 if (decode) { 682 if (decode) {
662 if (res->flags & IORESOURCE_MEM_64) 683 if (res->flags & IORESOURCE_MEM_64)
663 p = string(p, pend, " 64bit", str_spec); 684 p = string(p, pend, " 64bit", str_spec);
664 if (res->flags & IORESOURCE_PREFETCH) 685 if (res->flags & IORESOURCE_PREFETCH)
665 p = string(p, pend, " pref", str_spec); 686 p = string(p, pend, " pref", str_spec);
687 if (res->flags & IORESOURCE_WINDOW)
688 p = string(p, pend, " window", str_spec);
666 if (res->flags & IORESOURCE_DISABLED) 689 if (res->flags & IORESOURCE_DISABLED)
667 p = string(p, pend, " disabled", str_spec); 690 p = string(p, pend, " disabled", str_spec);
668 } else { 691 } else {
@@ -675,30 +698,63 @@ static char *resource_string(char *buf, char *end, struct resource *res,
675 return string(buf, end, sym, spec); 698 return string(buf, end, sym, spec);
676} 699}
677 700
678static char *mac_address_string(char *buf, char *end, u8 *addr, 701static noinline_for_stack
679 struct printf_spec spec, const char *fmt) 702char *mac_address_string(char *buf, char *end, u8 *addr,
703 struct printf_spec spec, const char *fmt)
680{ 704{
681 char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; 705 char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
682 char *p = mac_addr; 706 char *p = mac_addr;
683 int i; 707 int i;
708 char separator;
709
710 if (fmt[1] == 'F') { /* FDDI canonical format */
711 separator = '-';
712 } else {
713 separator = ':';
714 }
684 715
685 for (i = 0; i < 6; i++) { 716 for (i = 0; i < 6; i++) {
686 p = pack_hex_byte(p, addr[i]); 717 p = pack_hex_byte(p, addr[i]);
687 if (fmt[0] == 'M' && i != 5) 718 if (fmt[0] == 'M' && i != 5)
688 *p++ = ':'; 719 *p++ = separator;
689 } 720 }
690 *p = '\0'; 721 *p = '\0';
691 722
692 return string(buf, end, mac_addr, spec); 723 return string(buf, end, mac_addr, spec);
693} 724}
694 725
695static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) 726static noinline_for_stack
727char *ip4_string(char *p, const u8 *addr, const char *fmt)
696{ 728{
697 int i; 729 int i;
698 730 bool leading_zeros = (fmt[0] == 'i');
731 int index;
732 int step;
733
734 switch (fmt[2]) {
735 case 'h':
736#ifdef __BIG_ENDIAN
737 index = 0;
738 step = 1;
739#else
740 index = 3;
741 step = -1;
742#endif
743 break;
744 case 'l':
745 index = 3;
746 step = -1;
747 break;
748 case 'n':
749 case 'b':
750 default:
751 index = 0;
752 step = 1;
753 break;
754 }
699 for (i = 0; i < 4; i++) { 755 for (i = 0; i < 4; i++) {
700 char temp[3]; /* hold each IP quad in reverse order */ 756 char temp[3]; /* hold each IP quad in reverse order */
701 int digits = put_dec_trunc(temp, addr[i]) - temp; 757 int digits = put_dec_trunc(temp, addr[index]) - temp;
702 if (leading_zeros) { 758 if (leading_zeros) {
703 if (digits < 3) 759 if (digits < 3)
704 *p++ = '0'; 760 *p++ = '0';
@@ -710,13 +766,15 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
710 *p++ = temp[digits]; 766 *p++ = temp[digits];
711 if (i < 3) 767 if (i < 3)
712 *p++ = '.'; 768 *p++ = '.';
769 index += step;
713 } 770 }
714 *p = '\0'; 771 *p = '\0';
715 772
716 return p; 773 return p;
717} 774}
718 775
719static char *ip6_compressed_string(char *p, const char *addr) 776static noinline_for_stack
777char *ip6_compressed_string(char *p, const char *addr)
720{ 778{
721 int i, j, range; 779 int i, j, range;
722 unsigned char zerolength[8]; 780 unsigned char zerolength[8];
@@ -789,14 +847,15 @@ static char *ip6_compressed_string(char *p, const char *addr)
789 if (useIPv4) { 847 if (useIPv4) {
790 if (needcolon) 848 if (needcolon)
791 *p++ = ':'; 849 *p++ = ':';
792 p = ip4_string(p, &in6.s6_addr[12], false); 850 p = ip4_string(p, &in6.s6_addr[12], "I4");
793 } 851 }
794 *p = '\0'; 852 *p = '\0';
795 853
796 return p; 854 return p;
797} 855}
798 856
799static char *ip6_string(char *p, const char *addr, const char *fmt) 857static noinline_for_stack
858char *ip6_string(char *p, const char *addr, const char *fmt)
800{ 859{
801 int i; 860 int i;
802 861
@@ -811,8 +870,9 @@ static char *ip6_string(char *p, const char *addr, const char *fmt)
811 return p; 870 return p;
812} 871}
813 872
814static char *ip6_addr_string(char *buf, char *end, const u8 *addr, 873static noinline_for_stack
815 struct printf_spec spec, const char *fmt) 874char *ip6_addr_string(char *buf, char *end, const u8 *addr,
875 struct printf_spec spec, const char *fmt)
816{ 876{
817 char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")]; 877 char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
818 878
@@ -824,18 +884,20 @@ static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
824 return string(buf, end, ip6_addr, spec); 884 return string(buf, end, ip6_addr, spec);
825} 885}
826 886
827static char *ip4_addr_string(char *buf, char *end, const u8 *addr, 887static noinline_for_stack
828 struct printf_spec spec, const char *fmt) 888char *ip4_addr_string(char *buf, char *end, const u8 *addr,
889 struct printf_spec spec, const char *fmt)
829{ 890{
830 char ip4_addr[sizeof("255.255.255.255")]; 891 char ip4_addr[sizeof("255.255.255.255")];
831 892
832 ip4_string(ip4_addr, addr, fmt[0] == 'i'); 893 ip4_string(ip4_addr, addr, fmt);
833 894
834 return string(buf, end, ip4_addr, spec); 895 return string(buf, end, ip4_addr, spec);
835} 896}
836 897
837static char *uuid_string(char *buf, char *end, const u8 *addr, 898static noinline_for_stack
838 struct printf_spec spec, const char *fmt) 899char *uuid_string(char *buf, char *end, const u8 *addr,
900 struct printf_spec spec, const char *fmt)
839{ 901{
840 char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")]; 902 char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")];
841 char *p = uuid; 903 char *p = uuid;
@@ -896,12 +958,15 @@ static char *uuid_string(char *buf, char *end, const u8 *addr,
896 * - 'M' For a 6-byte MAC address, it prints the address in the 958 * - 'M' For a 6-byte MAC address, it prints the address in the
897 * usual colon-separated hex notation 959 * usual colon-separated hex notation
898 * - 'm' For a 6-byte MAC address, it prints the hex address without colons 960 * - 'm' For a 6-byte MAC address, it prints the hex address without colons
961 * - 'MF' For a 6-byte MAC FDDI address, it prints the address
962 * with a dash-separated hex notation
899 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way 963 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
900 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) 964 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
901 * IPv6 uses colon separated network-order 16 bit hex with leading 0's 965 * IPv6 uses colon separated network-order 16 bit hex with leading 0's
902 * - 'i' [46] for 'raw' IPv4/IPv6 addresses 966 * - 'i' [46] for 'raw' IPv4/IPv6 addresses
903 * IPv6 omits the colons (01020304...0f) 967 * IPv6 omits the colons (01020304...0f)
904 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) 968 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
969 * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order
905 * - 'I6c' for IPv6 addresses printed as specified by 970 * - 'I6c' for IPv6 addresses printed as specified by
906 * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 971 * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00
907 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form 972 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
@@ -920,8 +985,9 @@ static char *uuid_string(char *buf, char *end, const u8 *addr,
920 * function pointers are really function descriptors, which contain a 985 * function pointers are really function descriptors, which contain a
921 * pointer to the real address. 986 * pointer to the real address.
922 */ 987 */
923static char *pointer(const char *fmt, char *buf, char *end, void *ptr, 988static noinline_for_stack
924 struct printf_spec spec) 989char *pointer(const char *fmt, char *buf, char *end, void *ptr,
990 struct printf_spec spec)
925{ 991{
926 if (!ptr) 992 if (!ptr)
927 return string(buf, end, "(null)", spec); 993 return string(buf, end, "(null)", spec);
@@ -939,6 +1005,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
939 return resource_string(buf, end, ptr, spec, fmt); 1005 return resource_string(buf, end, ptr, spec, fmt);
940 case 'M': /* Colon separated: 00:01:02:03:04:05 */ 1006 case 'M': /* Colon separated: 00:01:02:03:04:05 */
941 case 'm': /* Contiguous: 000102030405 */ 1007 case 'm': /* Contiguous: 000102030405 */
1008 /* [mM]F (FDDI, bit reversed) */
942 return mac_address_string(buf, end, ptr, spec, fmt); 1009 return mac_address_string(buf, end, ptr, spec, fmt);
943 case 'I': /* Formatted IP supported 1010 case 'I': /* Formatted IP supported
944 * 4: 1.2.3.4 1011 * 4: 1.2.3.4
@@ -989,7 +1056,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
989 * @precision: precision of a number 1056 * @precision: precision of a number
990 * @qualifier: qualifier of a number (long, size_t, ...) 1057 * @qualifier: qualifier of a number (long, size_t, ...)
991 */ 1058 */
992static int format_decode(const char *fmt, struct printf_spec *spec) 1059static noinline_for_stack
1060int format_decode(const char *fmt, struct printf_spec *spec)
993{ 1061{
994 const char *start = fmt; 1062 const char *start = fmt;
995 1063
@@ -1297,7 +1365,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1297 break; 1365 break;
1298 1366
1299 case FORMAT_TYPE_NRCHARS: { 1367 case FORMAT_TYPE_NRCHARS: {
1300 int qualifier = spec.qualifier; 1368 u8 qualifier = spec.qualifier;
1301 1369
1302 if (qualifier == 'l') { 1370 if (qualifier == 'l') {
1303 long *ip = va_arg(args, long *); 1371 long *ip = va_arg(args, long *);
@@ -1583,7 +1651,7 @@ do { \
1583 1651
1584 case FORMAT_TYPE_NRCHARS: { 1652 case FORMAT_TYPE_NRCHARS: {
1585 /* skip %n 's argument */ 1653 /* skip %n 's argument */
1586 int qualifier = spec.qualifier; 1654 u8 qualifier = spec.qualifier;
1587 void *skip_arg; 1655 void *skip_arg;
1588 if (qualifier == 'l') 1656 if (qualifier == 'l')
1589 skip_arg = va_arg(args, long *); 1657 skip_arg = va_arg(args, long *);
@@ -1849,7 +1917,9 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
1849 char *next; 1917 char *next;
1850 char digit; 1918 char digit;
1851 int num = 0; 1919 int num = 0;
1852 int qualifier, base, field_width; 1920 u8 qualifier;
1921 u8 base;
1922 s16 field_width;
1853 bool is_sign; 1923 bool is_sign;
1854 1924
1855 while (*fmt && *str) { 1925 while (*fmt && *str) {
@@ -1927,7 +1997,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
1927 { 1997 {
1928 char *s = (char *)va_arg(args, char *); 1998 char *s = (char *)va_arg(args, char *);
1929 if (field_width == -1) 1999 if (field_width == -1)
1930 field_width = INT_MAX; 2000 field_width = SHRT_MAX;
1931 /* first, skip leading white space in buffer */ 2001 /* first, skip leading white space in buffer */
1932 str = skip_spaces(str); 2002 str = skip_spaces(str);
1933 2003
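
Several changes ride together in the vsprintf diff: struct printf_spec shrinks from five ints plus an enum to 8 packed bytes (it is passed by value on every formatting call, so the narrower fields cut stack traffic throughout the path, and the s16 field_width is why vsscanf's default width becomes SHRT_MAX); noinline_for_stack keeps the helpers off vsnprintf's own frame; and new pointer extensions appear: %pMF for dash-separated FDDI MACs, %pI4[hnbl] for IPv4 with explicit endianness, and bus ranges plus the "window" flag for %pR. Hedged usage examples of the new specifiers (all values illustrative):

#include <linux/kernel.h>

static void demo(void)
{
	u8 mac[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
	__be32 naddr = cpu_to_be32(0xc0a80001);	/* network byte order */
	u32 haddr = 0xc0a80001;			/* host byte order */

	printk("%pM\n",   mac);		/* 00:01:02:03:04:05 */
	printk("%pMF\n",  mac);		/* 00-01-02-03-04-05 (FDDI canonical) */
	printk("%pI4\n",  &naddr);	/* 192.168.0.1 */
	printk("%pI4h\n", &haddr);	/* 192.168.0.1, host-endian input */
}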
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 215447c55261..2c13ecc5bb2c 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -8,21 +8,6 @@
8#include "inflate.h" 8#include "inflate.h"
9#include "inffast.h" 9#include "inffast.h"
10 10
11/* Only do the unaligned "Faster" variant when
12 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set
13 *
14 * On powerpc, it won't be as we don't include autoconf.h
15 * automatically for the boot wrapper, which is intended as
16 * we run in an environment where we may not be able to deal
17 * with (even rare) alignment faults. In addition, we do not
18 * define __KERNEL__ for arch/powerpc/boot unlike x86
19 */
20
21#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
22#include <asm/unaligned.h>
23#include <asm/byteorder.h>
24#endif
25
26#ifndef ASMINF 11#ifndef ASMINF
27 12
28/* Allow machine dependent optimization for post-increment or pre-increment. 13/* Allow machine dependent optimization for post-increment or pre-increment.
@@ -36,14 +21,31 @@
36 - Pentium III (Anderson) 21 - Pentium III (Anderson)
37 - M68060 (Nikl) 22 - M68060 (Nikl)
38 */ 23 */
24union uu {
25 unsigned short us;
26 unsigned char b[2];
27};
28
29/* Endian independent version */
30static inline unsigned short
31get_unaligned16(const unsigned short *p)
32{
33 union uu mm;
34 unsigned char *b = (unsigned char *)p;
35
36 mm.b[0] = b[0];
37 mm.b[1] = b[1];
38 return mm.us;
39}
40
39#ifdef POSTINC 41#ifdef POSTINC
40# define OFF 0 42# define OFF 0
41# define PUP(a) *(a)++ 43# define PUP(a) *(a)++
42# define UP_UNALIGNED(a) get_unaligned((a)++) 44# define UP_UNALIGNED(a) get_unaligned16((a)++)
43#else 45#else
44# define OFF 1 46# define OFF 1
45# define PUP(a) *++(a) 47# define PUP(a) *++(a)
46# define UP_UNALIGNED(a) get_unaligned(++(a)) 48# define UP_UNALIGNED(a) get_unaligned16(++(a))
47#endif 49#endif
48 50
49/* 51/*
@@ -256,7 +258,6 @@ void inflate_fast(z_streamp strm, unsigned start)
256 } 258 }
257 } 259 }
258 else { 260 else {
259#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
260 unsigned short *sout; 261 unsigned short *sout;
261 unsigned long loops; 262 unsigned long loops;
262 263
@@ -274,22 +275,25 @@ void inflate_fast(z_streamp strm, unsigned start)
274 sfrom = (unsigned short *)(from - OFF); 275 sfrom = (unsigned short *)(from - OFF);
275 loops = len >> 1; 276 loops = len >> 1;
276 do 277 do
278#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
279 PUP(sout) = PUP(sfrom);
280#else
277 PUP(sout) = UP_UNALIGNED(sfrom); 281 PUP(sout) = UP_UNALIGNED(sfrom);
282#endif
278 while (--loops); 283 while (--loops);
279 out = (unsigned char *)sout + OFF; 284 out = (unsigned char *)sout + OFF;
280 from = (unsigned char *)sfrom + OFF; 285 from = (unsigned char *)sfrom + OFF;
281 } else { /* dist == 1 or dist == 2 */ 286 } else { /* dist == 1 or dist == 2 */
282 unsigned short pat16; 287 unsigned short pat16;
283 288
284 pat16 = *(sout-2+2*OFF); 289 pat16 = *(sout-1+OFF);
285 if (dist == 1) 290 if (dist == 1) {
286#if defined(__BIG_ENDIAN) 291 union uu mm;
287 pat16 = (pat16 & 0xff) | ((pat16 & 0xff) << 8); 292 /* copy one char pattern to both bytes */
288#elif defined(__LITTLE_ENDIAN) 293 mm.us = pat16;
289 pat16 = (pat16 & 0xff00) | ((pat16 & 0xff00) >> 8); 294 mm.b[0] = mm.b[1];
290#else 295 pat16 = mm.us;
291#error __BIG_ENDIAN nor __LITTLE_ENDIAN is defined 296 }
292#endif
293 loops = len >> 1; 297 loops = len >> 1;
294 do 298 do
295 PUP(sout) = pat16; 299 PUP(sout) = pat16;
@@ -298,20 +302,6 @@ void inflate_fast(z_streamp strm, unsigned start)
298 } 302 }
299 if (len & 1) 303 if (len & 1)
300 PUP(out) = PUP(from); 304 PUP(out) = PUP(from);
301#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
302 from = out - dist; /* copy direct from output */
303 do { /* minimum length is three */
304 PUP(out) = PUP(from);
305 PUP(out) = PUP(from);
306 PUP(out) = PUP(from);
307 len -= 3;
308 } while (len > 2);
309 if (len) {
310 PUP(out) = PUP(from);
311 if (len > 1)
312 PUP(out) = PUP(from);
313 }
314#endif /* !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
315 } 305 }
316 } 306 }
317 else if ((op & 64) == 0) { /* 2nd level distance code */ 307 else if ((op & 64) == 0) { /* 2nd level distance code */
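
The inffast rewrite makes the 16-bit copy path usable on every architecture: get_unaligned16() assembles the value byte by byte through a union, so it is both alignment- and endian-safe, and the dist == 1 pattern is built by duplicating one byte through the same union instead of the endian-conditional shifts (whose #error fallback disappears because nothing here depends on byte order any more); the read of the previous short is also corrected to *(sout-1+OFF). A standalone check of the two union tricks:

#include <assert.h>

union uu {
	unsigned short us;
	unsigned char b[2];
};

/* endian-independent unaligned 16-bit load, as in the patch */
static unsigned short get_unaligned16(const unsigned short *p)
{
	union uu mm;
	const unsigned char *b = (const unsigned char *)p;

	mm.b[0] = b[0];
	mm.b[1] = b[1];
	return mm.us;
}

int main(void)
{
	unsigned char buf[3] = { 0x00, 0x34, 0x12 };
	union uu mm;
	unsigned short pat16;

	/* load from an odd (unaligned) address without faulting */
	pat16 = get_unaligned16((const unsigned short *)(buf + 1));

	/* dist == 1: replicate one byte into both halves, with no
	 * __BIG_ENDIAN/__LITTLE_ENDIAN conditionals */
	mm.us = pat16;
	mm.b[0] = mm.b[1];
	pat16 = mm.us;
	assert(mm.b[0] == mm.b[1]);
	return 0;
}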