Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                      |   3
-rw-r--r--  lib/Kconfig.debug                |  56
-rw-r--r--  lib/Kconfig.kgdb                 |  24
-rw-r--r--  lib/Makefile                     |  10
-rw-r--r--  lib/atomic64.c                   |   4
-rw-r--r--  lib/atomic64_test.c              | 166
-rw-r--r--  lib/btree.c                      |   3
-rw-r--r--  lib/bug.c                        |   2
-rw-r--r--  lib/cpu-notifier-error-inject.c  |  63
-rw-r--r--  lib/cpumask.c                    |   1
-rw-r--r--  lib/crc32.c                      |  25
-rw-r--r--  lib/debugobjects.c               |  64
-rw-r--r--  lib/decompress_unlzo.c           |  22
-rw-r--r--  lib/devres.c                     |   3
-rw-r--r--  lib/dma-debug.c                  |   2
-rw-r--r--  lib/dynamic_debug.c              |   5
-rw-r--r--  lib/flex_array.c                 |   2
-rw-r--r--  lib/gen_crc32table.c             |  47
-rw-r--r--  lib/genalloc.c                   |   2
-rw-r--r--  lib/hexdump.c                    |  54
-rw-r--r--  lib/hweight.c                    |  19
-rw-r--r--  lib/idr.c                        |  11
-rw-r--r--  lib/inflate.c                    |   1
-rw-r--r--  lib/kasprintf.c                  |   1
-rw-r--r--  lib/kobject.c                    | 115
-rw-r--r--  lib/kobject_uevent.c             | 113
-rw-r--r--  lib/kref.c                       |  16
-rw-r--r--  lib/lcm.c                        |  15
-rw-r--r--  lib/lmb.c                        | 541
-rw-r--r--  lib/radix-tree.c                 |  17
-rw-r--r--  lib/random32.c                   |  40
-rw-r--r--  lib/ratelimit.c                  |  11
-rw-r--r--  lib/rbtree.c                     |  68
-rw-r--r--  lib/rwsem-spinlock.c             |  14
-rw-r--r--  lib/rwsem.c                      |   5
-rw-r--r--  lib/scatterlist.c                |   1
-rw-r--r--  lib/swiotlb.c                    | 169
-rw-r--r--  lib/textsearch.c                 |   1
-rw-r--r--  lib/uuid.c                       |  53
-rw-r--r--  lib/vsprintf.c                   |  89
40 files changed, 1036 insertions, 822 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 170d8ca901d8..5b916bc0fbae 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -181,9 +181,6 @@ config HAS_DMA
 config CHECK_SIGNATURE
 	bool
 
-config HAVE_LMB
-	boolean
-
 config CPUMASK_OFFSTACK
 	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
 	help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8e5ec5e1ab91..dfdc0347b05d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -103,7 +103,8 @@ config HEADERS_CHECK
 
 config DEBUG_SECTION_MISMATCH
 	bool "Enable full Section mismatch analysis"
-	depends on UNDEFINED
+	depends on UNDEFINED || (BLACKFIN)
+	default y
 	# This option is on purpose disabled for now.
 	# It will be enabled when we are down to a reasonable number
 	# of section mismatch warnings (< 10 for an allyesconfig build)
@@ -355,7 +356,7 @@ config SLUB_STATS
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
 	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
-		(X86 || ARM || PPC || S390 || SUPERH)
+		(X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
 
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
@@ -511,6 +512,18 @@ config PROVE_RCU
 
 	  Say N if you are unsure.
 
+config PROVE_RCU_REPEATEDLY
+	bool "RCU debugging: don't disable PROVE_RCU on first splat"
+	depends on PROVE_RCU
+	default n
+	help
+	  By itself, PROVE_RCU will disable checking upon issuing the
+	  first warning (or "splat").  This feature prevents such
+	  disabling, allowing multiple RCU-lockdep warnings to be printed
+	  on a single reboot.
+
+	  Say N if you are unsure.
+
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -792,7 +805,7 @@ config RCU_CPU_STALL_DETECTOR
 config RCU_CPU_STALL_VERBOSE
 	bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
 	depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
-	default n
+	default y
 	help
 	  This option causes RCU to printk detailed per-task information
 	  for any tasks that are stalling the current RCU grace period.
@@ -885,6 +898,18 @@ config LKDTM
 	  Documentation on how to use the module can be found in
 	  Documentation/fault-injection/provoke-crashes.txt
 
+config CPU_NOTIFIER_ERROR_INJECT
+	tristate "CPU notifier error injection module"
+	depends on HOTPLUG_CPU && DEBUG_KERNEL
+	help
+	  This option provides a kernel module that can be used to test
+	  the error handling of the cpu notifiers.
+
+	  To compile this code as a module, choose M here: the module will
+	  be called cpu-notifier-error-inject.
+
+	  If unsure, say N.
+
 config FAULT_INJECTION
 	bool "Fault-injection framework"
 	depends on DEBUG_KERNEL
@@ -912,7 +937,7 @@ config FAIL_MAKE_REQUEST
 	  Provide fault-injection capability for disk IO.
 
 config FAIL_IO_TIMEOUT
-	bool "Faul-injection capability for faking disk interrupts"
+	bool "Fault-injection capability for faking disk interrupts"
 	depends on FAULT_INJECTION && BLOCK
 	help
 	  Provide fault-injection capability on end IO handling. This
@@ -1026,10 +1051,10 @@ config DYNAMIC_DEBUG
 
 	  Usage:
 
-	  Dynamic debugging is controlled via the 'dynamic_debug/ddebug' file,
+	  Dynamic debugging is controlled via the 'dynamic_debug/control' file,
 	  which is contained in the 'debugfs' filesystem. Thus, the debugfs
 	  filesystem must first be mounted before making use of this feature.
-	  We refer the control file as: <debugfs>/dynamic_debug/ddebug. This
+	  We refer the control file as: <debugfs>/dynamic_debug/control. This
 	  file contains a list of the debug statements that can be enabled. The
 	  format for each line of the file is:
 
@@ -1044,7 +1069,7 @@ config DYNAMIC_DEBUG
 
 	  From a live system:
 
-		nullarbor:~ # cat <debugfs>/dynamic_debug/ddebug
+		nullarbor:~ # cat <debugfs>/dynamic_debug/control
 		# filename:lineno [module]function flags format
 		fs/aio.c:222 [aio]__put_ioctx - "__put_ioctx:\040freeing\040%p\012"
 		fs/aio.c:248 [aio]ioctx_alloc - "ENOMEM:\040nr_events\040too\040high\012"
@@ -1054,23 +1079,23 @@ config DYNAMIC_DEBUG
 
 		// enable the message at line 1603 of file svcsock.c
 		nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// enable all the messages in file svcsock.c
 		nullarbor:~ # echo -n 'file svcsock.c +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// enable all the messages in the NFS server module
 		nullarbor:~ # echo -n 'module nfsd +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// enable all 12 messages in the function svc_process()
 		nullarbor:~ # echo -n 'func svc_process +p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 		// disable all 12 messages in the function svc_process()
 		nullarbor:~ # echo -n 'func svc_process -p' >
-						<debugfs>/dynamic_debug/ddebug
+						<debugfs>/dynamic_debug/control
 
 	  See Documentation/dynamic-debug-howto.txt for additional information.
 
@@ -1085,6 +1110,13 @@ config DMA_API_DEBUG
 	  This option causes a performance degradation.  Use only if you want
 	  to debug device drivers. If unsure, say N.
 
+config ATOMIC64_SELFTEST
+	bool "Perform an atomic64_t self-test at boot"
+	help
+	  Enable this option to test the atomic64_t functions at boot.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 9b5d1d7f2ef7..43cb93fa2651 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -3,7 +3,7 @@ config HAVE_ARCH_KGDB
 	bool
 
 menuconfig KGDB
-	bool "KGDB: kernel debugging with remote gdb"
+	bool "KGDB: kernel debugger"
 	depends on HAVE_ARCH_KGDB
 	depends on DEBUG_KERNEL && EXPERIMENTAL
 	help
@@ -57,4 +57,26 @@ config KGDB_TESTS_BOOT_STRING
 	  information about other strings you could use beyond the
 	  default of V1F100.
 
+config KGDB_LOW_LEVEL_TRAP
+	bool "KGDB: Allow debugging with traps in notifiers"
+	depends on X86 || MIPS
+	default n
+	help
+	  This will add an extra call back to kgdb for the breakpoint
+	  exception handler, which will allow kgdb to step through a
+	  notify handler.
+
+config KGDB_KDB
+	bool "KGDB_KDB: include kdb frontend for kgdb"
+	default n
+	help
+	  KDB frontend for kernel
+
+config KDB_KEYBOARD
+	bool "KGDB_KDB: keyboard as input device"
+	depends on VT && KGDB_KDB
+	default n
+	help
+	  KDB can use a PS/2 type keyboard for an input device
+
 endif # KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 2e152aed7198..0bfabba1bb32 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-	 string_helpers.o gcd.o list_sort.o
+	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -39,7 +39,10 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
+
+CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_BTREE) += btree.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
@@ -82,11 +85,10 @@ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
 obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
-obj-$(CONFIG_HAVE_LMB) += lmb.o
-
 obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
 
 obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
@@ -101,6 +103,8 @@ obj-$(CONFIG_GENERIC_CSUM) += checksum.o
 
 obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
 
+obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 8bee16ec7524..a21c12bc727c 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -162,12 +162,12 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
 	spinlock_t *lock = lock_addr(v);
-	int ret = 1;
+	int ret = 0;
 
 	spin_lock_irqsave(lock, flags);
 	if (v->counter != u) {
 		v->counter += a;
-		ret = 0;
+		ret = 1;
 	}
 	spin_unlock_irqrestore(lock, flags);
 	return ret;
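
The atomic64_add_unless() hunk above is a semantic fix: the function must return nonzero when it performed the addition and zero when the counter already held u, matching the 32-bit atomic_add_unless() convention. A minimal caller-side sketch of the corrected behaviour (the refcount-style use is illustrative, not part of this patch):

	if (atomic64_add_unless(&v, 1, 0)) {
		/* v was not 0: the add happened, reference taken */
	} else {
		/* v was 0: nothing was added */
	}
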
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
new file mode 100644
index 000000000000..44524cc8c32a
--- /dev/null
+++ b/lib/atomic64_test.c
@@ -0,0 +1,166 @@
+/*
+ * Testsuite for atomic64_t functions
+ *
+ * Copyright © 2010 Luca Barbieri
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/atomic.h>
+
+#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
+static __init int test_atomic64(void)
+{
+	long long v0 = 0xaaa31337c001d00dLL;
+	long long v1 = 0xdeadbeefdeafcafeLL;
+	long long v2 = 0xfaceabadf00df001LL;
+	long long onestwos = 0x1111111122222222LL;
+	long long one = 1LL;
+
+	atomic64_t v = ATOMIC64_INIT(v0);
+	long long r = v0;
+	BUG_ON(v.counter != r);
+
+	atomic64_set(&v, v1);
+	r = v1;
+	BUG_ON(v.counter != r);
+	BUG_ON(atomic64_read(&v) != r);
+
+	INIT(v0);
+	atomic64_add(onestwos, &v);
+	r += onestwos;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_add(-one, &v);
+	r += -one;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r += onestwos;
+	BUG_ON(atomic64_add_return(onestwos, &v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r += -one;
+	BUG_ON(atomic64_add_return(-one, &v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_sub(onestwos, &v);
+	r -= onestwos;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_sub(-one, &v);
+	r -= -one;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r -= onestwos;
+	BUG_ON(atomic64_sub_return(onestwos, &v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r -= -one;
+	BUG_ON(atomic64_sub_return(-one, &v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_inc(&v);
+	r += one;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r += one;
+	BUG_ON(atomic64_inc_return(&v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	atomic64_dec(&v);
+	r -= one;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	r -= one;
+	BUG_ON(atomic64_dec_return(&v) != r);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(atomic64_xchg(&v, v1) != v0);
+	r = v1;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
+	r = v1;
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(atomic64_add_unless(&v, one, v0));
+	BUG_ON(v.counter != r);
+
+	INIT(v0);
+	BUG_ON(!atomic64_add_unless(&v, one, v1));
+	r += one;
+	BUG_ON(v.counter != r);
+
+#if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \
+    defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) || defined(CONFIG_ARM)
+	INIT(onestwos);
+	BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
+	r -= one;
+	BUG_ON(v.counter != r);
+
+	INIT(0);
+	BUG_ON(atomic64_dec_if_positive(&v) != -one);
+	BUG_ON(v.counter != r);
+
+	INIT(-one);
+	BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
+	BUG_ON(v.counter != r);
+#else
+#warning Please implement atomic64_dec_if_positive for your architecture, and add it to the IF above
+#endif
+
+	INIT(onestwos);
+	BUG_ON(!atomic64_inc_not_zero(&v));
+	r += one;
+	BUG_ON(v.counter != r);
+
+	INIT(0);
+	BUG_ON(atomic64_inc_not_zero(&v));
+	BUG_ON(v.counter != r);
+
+	INIT(-one);
+	BUG_ON(!atomic64_inc_not_zero(&v));
+	r += one;
+	BUG_ON(v.counter != r);
+
+#ifdef CONFIG_X86
+	printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n",
+#ifdef CONFIG_X86_64
+	       "x86-64",
+#elif defined(CONFIG_X86_CMPXCHG64)
+	       "i586+",
+#else
+	       "i386+",
+#endif
+	       boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
+	       boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
+#else
+	printk(KERN_INFO "atomic64 test passed\n");
+#endif
+
+	return 0;
+}
+
+core_initcall(test_atomic64);
diff --git a/lib/btree.c b/lib/btree.c
index 41859a820218..c9c6f0351526 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -95,7 +95,8 @@ static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp)
95 unsigned long *node; 95 unsigned long *node;
96 96
97 node = mempool_alloc(head->mempool, gfp); 97 node = mempool_alloc(head->mempool, gfp);
98 memset(node, 0, NODESIZE); 98 if (likely(node))
99 memset(node, 0, NODESIZE);
99 return node; 100 return node;
100} 101}
101 102
diff --git a/lib/bug.c b/lib/bug.c
index 300e41afbf97..f13daf435211 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -165,7 +165,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
165 (void *)bugaddr); 165 (void *)bugaddr);
166 166
167 show_regs(regs); 167 show_regs(regs);
168 add_taint(TAINT_WARN); 168 add_taint(BUG_GET_TAINT(bug));
169 return BUG_TRAP_TYPE_WARN; 169 return BUG_TRAP_TYPE_WARN;
170 } 170 }
171 171
diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c
new file mode 100644
index 000000000000..4dc20321b0d5
--- /dev/null
+++ b/lib/cpu-notifier-error-inject.c
@@ -0,0 +1,63 @@
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+
+static int priority;
+static int cpu_up_prepare_error;
+static int cpu_down_prepare_error;
+
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "specify cpu notifier priority");
+
+module_param(cpu_up_prepare_error, int, 0644);
+MODULE_PARM_DESC(cpu_up_prepare_error,
+		"specify error code to inject CPU_UP_PREPARE action");
+
+module_param(cpu_down_prepare_error, int, 0644);
+MODULE_PARM_DESC(cpu_down_prepare_error,
+		"specify error code to inject CPU_DOWN_PREPARE action");
+
+static int err_inject_cpu_callback(struct notifier_block *nfb,
+				unsigned long action, void *hcpu)
+{
+	int err = 0;
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		err = cpu_up_prepare_error;
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		err = cpu_down_prepare_error;
+		break;
+	}
+	if (err)
+		printk(KERN_INFO "Injecting error (%d) at cpu notifier\n", err);
+
+	return notifier_from_errno(err);
+}
+
+static struct notifier_block err_inject_cpu_notifier = {
+	.notifier_call = err_inject_cpu_callback,
+};
+
+static int err_inject_init(void)
+{
+	err_inject_cpu_notifier.priority = priority;
+
+	return register_hotcpu_notifier(&err_inject_cpu_notifier);
+}
+
+static void err_inject_exit(void)
+{
+	unregister_hotcpu_notifier(&err_inject_cpu_notifier);
+}
+
+module_init(err_inject_init);
+module_exit(err_inject_exit);
+
+MODULE_DESCRIPTION("CPU notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
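
The module hands its injected failure back through notifier_from_errno(), so the CPU hotplug core sees an ordinary notifier veto and aborts the transition. For reference, notifier_from_errno() in include/linux/notifier.h encodes the errno into a NOTIFY_STOP_MASK result roughly as follows (paraphrased from memory, not part of this patch):

	static inline int notifier_from_errno(int err)
	{
		if (err)
			return NOTIFY_STOP_MASK | (NOTIFY_BAD - err);
		return NOTIFY_OK;
	}

Loading the module with, e.g., cpu_up_prepare_error=-12 (-ENOMEM) should then make subsequent cpu_up() attempts fail with that code; the parameter value here is illustrative.
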
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 7bb4142a502f..05d6aca7fc19 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -1,3 +1,4 @@
1#include <linux/slab.h>
1#include <linux/kernel.h> 2#include <linux/kernel.h>
2#include <linux/bitops.h> 3#include <linux/bitops.h>
3#include <linux/cpumask.h> 4#include <linux/cpumask.h>
diff --git a/lib/crc32.c b/lib/crc32.c
index 0f45fbff34cb..4855995fcde9 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -25,7 +25,6 @@
 #include <linux/module.h>
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <linux/slab.h>
 #include <linux/init.h>
 #include <asm/atomic.h>
 #include "crc32defs.h"
@@ -49,12 +48,20 @@ MODULE_LICENSE("GPL");
 #if CRC_LE_BITS == 8 || CRC_BE_BITS == 8
 
 static inline u32
-crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab)
+crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
 {
 # ifdef __LITTLE_ENDIAN
-#  define DO_CRC(x) crc = tab[(crc ^ (x)) & 255 ] ^ (crc >> 8)
+#  define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8)
+#  define DO_CRC4 crc = tab[3][(crc) & 255] ^ \
+		tab[2][(crc >> 8) & 255] ^ \
+		tab[1][(crc >> 16) & 255] ^ \
+		tab[0][(crc >> 24) & 255]
 # else
-#  define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
+#  define DO_CRC(x) crc = tab[0][((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
+#  define DO_CRC4 crc = tab[0][(crc) & 255] ^ \
+		tab[1][(crc >> 8) & 255] ^ \
+		tab[2][(crc >> 16) & 255] ^ \
+		tab[3][(crc >> 24) & 255]
 # endif
 	const u32 *b;
 	size_t rem_len;
@@ -71,10 +78,7 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab)
 	b = (const u32 *)buf;
 	for (--b; len; --len) {
 		crc ^= *++b; /* use pre increment for speed */
-		DO_CRC(0);
-		DO_CRC(0);
-		DO_CRC(0);
-		DO_CRC(0);
+		DO_CRC4;
 	}
 	len = rem_len;
 	/* And the last few bytes */
@@ -86,6 +90,7 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab)
 	}
 	return crc;
 #undef DO_CRC
+#undef DO_CRC4
 }
 #endif
 /**
@@ -118,7 +123,7 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
 u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
 {
 # if CRC_LE_BITS == 8
-	const u32 *tab = crc32table_le;
+	const u32 (*tab)[] = crc32table_le;
 
 	crc = __cpu_to_le32(crc);
 	crc = crc32_body(crc, p, len, tab);
@@ -175,7 +180,7 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
 u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
 {
 # if CRC_BE_BITS == 8
-	const u32 *tab = crc32table_be;
+	const u32 (*tab)[] = crc32table_be;
 
 	crc = __cpu_to_be32(crc);
 	crc = crc32_body(crc, p, len, tab);
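
The crc32_body() rework above is the classic "slice-by-4" optimization: instead of one table lookup per byte, four 256-entry tables are derived from the base table so that a whole aligned 32-bit word is folded per loop iteration (the DO_CRC4 macro). A standalone sketch of the little-endian inner step, assuming tables built the way gen_crc32table.c now emits them:

	#include <stdint.h>

	/* Fold one little-endian 32-bit word into the running CRC
	 * using four 256-entry tables (slice-by-4). */
	static uint32_t crc32_fold_word(uint32_t crc, uint32_t word,
					const uint32_t tab[4][256])
	{
		crc ^= word;	/* absorb 4 message bytes at once */
		return tab[3][crc & 0xff] ^
		       tab[2][(crc >> 8) & 0xff] ^
		       tab[1][(crc >> 16) & 0xff] ^
		       tab[0][(crc >> 24) & 0xff];
	}

The four lookups are independent, so superscalar CPUs can overlap them; that is where most of the speedup over the byte-at-a-time loop comes from.
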
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a9a8996d286a..deebcc57d4e6 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -12,6 +12,7 @@
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/slab.h>
 #include <linux/hash.h>
 
 #define ODEBUG_HASH_BITS	14
@@ -140,6 +141,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 		obj->object = addr;
 		obj->descr  = descr;
 		obj->state  = ODEBUG_STATE_NONE;
+		obj->astate = 0;
 		hlist_del(&obj->node);
 
 		hlist_add_head(&obj->node, &b->list);
@@ -251,8 +253,10 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
 
 	if (limit < 5 && obj->descr != descr_test) {
 		limit++;
-		WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
-		     obj_states[obj->state], obj->descr->name);
+		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
+		     "object type: %s\n",
+		     msg, obj_states[obj->state], obj->astate,
+		     obj->descr->name);
 	}
 	debug_objects_warnings++;
 }
@@ -446,7 +450,10 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
 	case ODEBUG_STATE_INIT:
 	case ODEBUG_STATE_INACTIVE:
 	case ODEBUG_STATE_ACTIVE:
-		obj->state = ODEBUG_STATE_INACTIVE;
+		if (!obj->astate)
+			obj->state = ODEBUG_STATE_INACTIVE;
+		else
+			debug_print_object(obj, "deactivate");
 		break;
 
 	case ODEBUG_STATE_DESTROYED:
@@ -552,6 +559,53 @@ out_unlock:
 	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 
+/**
+ * debug_object_active_state - debug checks object usage state machine
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ * @expect:	expected state
+ * @next:	state to move to if expected state is found
+ */
+void
+debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+			  unsigned int expect, unsigned int next)
+{
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	raw_spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (obj) {
+		switch (obj->state) {
+		case ODEBUG_STATE_ACTIVE:
+			if (obj->astate == expect)
+				obj->astate = next;
+			else
+				debug_print_object(obj, "active_state");
+			break;
+
+		default:
+			debug_print_object(obj, "active_state");
+			break;
+		}
+	} else {
+		struct debug_obj o = { .object = addr,
+				       .state = ODEBUG_STATE_NOTAVAILABLE,
+				       .descr = descr };
+
+		debug_print_object(&o, "active_state");
+	}
+
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+}
+
 #ifdef CONFIG_DEBUG_OBJECTS_FREE
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
@@ -773,7 +827,7 @@ static int __init fixup_free(void *addr, enum debug_obj_state state)
 	}
 }
 
-static int
+static int __init
 check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
 {
 	struct debug_bucket *db;
@@ -916,7 +970,7 @@ void __init debug_objects_early_init(void)
 /*
  * Convert the statically allocated objects to dynamic ones:
  */
-static int debug_objects_replace_static_objects(void)
+static int __init debug_objects_replace_static_objects(void)
 {
 	struct debug_bucket *db = obj_hash;
 	struct hlist_node *node, *tmp;
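
debug_object_active_state() extends debugobjects with a caller-defined sub-state (astate) that may only change while the object is ODEBUG_STATE_ACTIVE, and debug_object_deactivate() now refuses to deactivate an object whose astate is nonzero. A hedged usage sketch — the descriptor and the numeric states are hypothetical, not from this patch:

	/* Move a tracked object from hypothetical sub-state 0 to 1;
	 * debugobjects warns if the object is not active or its
	 * astate is not the expected value. */
	debug_object_active_state(obj, &my_obj_debug_descr, 0, 1);
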
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index db521f45626e..bcb3a4bd68ff 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -97,7 +97,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len,
 	u32 src_len, dst_len;
 	size_t tmp;
 	u8 *in_buf, *in_buf_save, *out_buf;
-	int obytes_processed = 0;
+	int ret = -1;
 
 	set_error_fn(error_fn);
 
@@ -174,15 +174,22 @@ STATIC inline int INIT unlzo(u8 *input, int in_len,
 
 		/* decompress */
 		tmp = dst_len;
-		r = lzo1x_decompress_safe((u8 *) in_buf, src_len,
+
+		/* When the input data is not compressed at all,
+		 * lzo1x_decompress_safe will fail, so call memcpy()
+		 * instead */
+		if (unlikely(dst_len == src_len))
+			memcpy(out_buf, in_buf, src_len);
+		else {
+			r = lzo1x_decompress_safe((u8 *) in_buf, src_len,
 					out_buf, &tmp);
 
 		if (r != LZO_E_OK || dst_len != tmp) {
 			error("Compressed data violation");
 			goto exit_2;
+			}
 		}
 
-		obytes_processed += dst_len;
 		if (flush)
 			flush(out_buf, dst_len);
 		if (output)
@@ -196,6 +203,7 @@ STATIC inline int INIT unlzo(u8 *input, int in_len,
 		in_buf += src_len;
 	}
 
+	ret = 0;
exit_2:
 	if (!input)
 		free(in_buf);
@@ -203,7 +211,7 @@ exit_1:
 	if (!output)
 		free(out_buf);
exit:
-	return obytes_processed;
+	return ret;
 }
 
 #define decompress unlzo
diff --git a/lib/devres.c b/lib/devres.c
index 72c8909006da..6efddf53b90c 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -1,5 +1,6 @@
 #include <linux/pci.h>
 #include <linux/io.h>
+#include <linux/gfp.h>
 #include <linux/module.h>
 
 void devm_ioremap_release(struct device *dev, void *res)
@@ -327,7 +328,7 @@ EXPORT_SYMBOL(pcim_iomap_regions_request_all);
  * @pdev: PCI device to map IO resources for
  * @mask: Mask of BARs to unmap and release
  *
- * Unamp and release regions specified by @mask.
+ * Unmap and release regions specified by @mask.
  */
 void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask)
 {
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ba8b67039d13..01e64270e246 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -570,7 +570,7 @@ static ssize_t filter_write(struct file *file, const char __user *userbuf,
 	 * Now parse out the first token and use it as the name for the
 	 * driver to filter for.
 	 */
-	for (i = 0; i < NAME_MAX_LEN; ++i) {
+	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
 		current_driver_name[i] = buf[i];
 		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
 			break;
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index f93502915988..02afc2533728 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -25,6 +25,7 @@
 #include <linux/uaccess.h>
 #include <linux/dynamic_debug.h>
 #include <linux/debugfs.h>
+#include <linux/slab.h>
 
 extern struct _ddebug __start___verbose[];
 extern struct _ddebug __stop___verbose[];
@@ -455,7 +456,7 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
 			__func__, (int)len);
 
 	nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS);
-	if (nwords < 0)
+	if (nwords <= 0)
 		return -EINVAL;
 	if (ddebug_parse_query(words, nwords-1, &query))
 		return -EINVAL;
@@ -691,7 +692,7 @@ static void ddebug_table_free(struct ddebug_table *dt)
  * Called in response to a module being unloaded.  Removes
  * any ddebug_table's which point at the module.
  */
-int ddebug_remove_module(char *mod_name)
+int ddebug_remove_module(const char *mod_name)
 {
 	struct ddebug_table *dt, *nextdt;
 	int ret = -ENOENT;
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 66eef2e4483e..41b1804fa728 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
 	ret->element_size = element_size;
 	ret->total_nr_elements = total;
 	if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
-		memset(ret->parts[0], FLEX_ARRAY_FREE,
+		memset(&ret->parts[0], FLEX_ARRAY_FREE,
 		       FLEX_ARRAY_BASE_BYTES_LEFT);
 	return ret;
 }
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index bea5d97df991..85d0e412a04f 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -7,8 +7,8 @@
 #define LE_TABLE_SIZE (1 << CRC_LE_BITS)
 #define BE_TABLE_SIZE (1 << CRC_BE_BITS)
 
-static uint32_t crc32table_le[LE_TABLE_SIZE];
-static uint32_t crc32table_be[BE_TABLE_SIZE];
+static uint32_t crc32table_le[4][LE_TABLE_SIZE];
+static uint32_t crc32table_be[4][BE_TABLE_SIZE];
 
 /**
  * crc32init_le() - allocate and initialize LE table data
@@ -22,12 +22,19 @@ static void crc32init_le(void)
 	unsigned i, j;
 	uint32_t crc = 1;
 
-	crc32table_le[0] = 0;
+	crc32table_le[0][0] = 0;
 
 	for (i = 1 << (CRC_LE_BITS - 1); i; i >>= 1) {
 		crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
 		for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
-			crc32table_le[i + j] = crc ^ crc32table_le[j];
+			crc32table_le[0][i + j] = crc ^ crc32table_le[0][j];
+	}
+	for (i = 0; i < LE_TABLE_SIZE; i++) {
+		crc = crc32table_le[0][i];
+		for (j = 1; j < 4; j++) {
+			crc = crc32table_le[0][crc & 0xff] ^ (crc >> 8);
+			crc32table_le[j][i] = crc;
+		}
 	}
 }
 
@@ -39,25 +46,35 @@ static void crc32init_be(void)
 	unsigned i, j;
 	uint32_t crc = 0x80000000;
 
-	crc32table_be[0] = 0;
+	crc32table_be[0][0] = 0;
 
 	for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
 		crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
 		for (j = 0; j < i; j++)
-			crc32table_be[i + j] = crc ^ crc32table_be[j];
+			crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
+	}
+	for (i = 0; i < BE_TABLE_SIZE; i++) {
+		crc = crc32table_be[0][i];
+		for (j = 1; j < 4; j++) {
+			crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8);
+			crc32table_be[j][i] = crc;
+		}
 	}
 }
 
-static void output_table(uint32_t table[], int len, char *trans)
+static void output_table(uint32_t table[4][256], int len, char *trans)
 {
-	int i;
+	int i, j;
 
-	for (i = 0; i < len - 1; i++) {
-		if (i % ENTRIES_PER_LINE == 0)
-			printf("\n");
-		printf("%s(0x%8.8xL), ", trans, table[i]);
+	for (j = 0 ; j < 4; j++) {
+		printf("{");
+		for (i = 0; i < len - 1; i++) {
+			if (i % ENTRIES_PER_LINE == 0)
+				printf("\n");
+			printf("%s(0x%8.8xL), ", trans, table[j][i]);
+		}
+		printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]);
 	}
-	printf("%s(0x%8.8xL)\n", trans, table[len - 1]);
 }
 
@@ -66,14 +83,14 @@ int main(int argc, char** argv)
 
 	if (CRC_LE_BITS > 1) {
 		crc32init_le();
-		printf("static const u32 crc32table_le[] = {");
+		printf("static const u32 crc32table_le[4][256] = {");
 		output_table(crc32table_le, LE_TABLE_SIZE, "tole");
 		printf("};\n");
 	}
 
 	if (CRC_BE_BITS > 1) {
 		crc32init_be();
-		printf("static const u32 crc32table_be[] = {");
+		printf("static const u32 crc32table_be[4][256] = {");
 		output_table(crc32table_be, BE_TABLE_SIZE, "tobe");
 		printf("};\n");
 	}
diff --git a/lib/genalloc.c b/lib/genalloc.c
index e67f97495dd5..1923f1490e72 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -10,6 +10,7 @@
  * Version 2. See the file COPYING for more details.
  */
 
+#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/bitmap.h>
 #include <linux/genalloc.h>
@@ -127,7 +128,6 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
 
 		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
-		end_bit -= nbits + 1;
 
 		spin_lock_irqsave(&chunk->lock, flags);
 		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 39af2560f765..5d7a4802c562 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -16,6 +16,24 @@ const char hex_asc[] = "0123456789abcdef";
 EXPORT_SYMBOL(hex_asc);
 
 /**
+ * hex_to_bin - convert a hex digit to its real value
+ * @ch: ascii character represents hex digit
+ *
+ * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
+ * input.
+ */
+int hex_to_bin(char ch)
+{
+	if ((ch >= '0') && (ch <= '9'))
+		return ch - '0';
+	ch = tolower(ch);
+	if ((ch >= 'a') && (ch <= 'f'))
+		return ch - 'a' + 10;
+	return -1;
+}
+EXPORT_SYMBOL(hex_to_bin);
+
+/**
  * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
  * @buf: data blob to dump
  * @len: number of bytes in the @buf
@@ -34,7 +52,7 @@ EXPORT_SYMBOL(hex_asc);
  *
  * E.g.:
  *   hex_dump_to_buffer(frame->data, frame->len, 16, 1,
- *			linebuf, sizeof(linebuf), 1);
+ *			linebuf, sizeof(linebuf), true);
  *
  * example output buffer:
  * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
@@ -65,8 +83,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 		for (j = 0; j < ngroups; j++)
-			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%s%16.16llx", j ? " " : "",
-				(unsigned long long)*(ptr8 + j));
+			lx += scnprintf(linebuf + lx, linebuflen - lx,
+					"%s%16.16llx", j ? " " : "",
+					(unsigned long long)*(ptr8 + j));
 		ascii_column = 17 * ngroups + 2;
 		break;
 	}
@@ -77,7 +95,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 		for (j = 0; j < ngroups; j++)
 			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%s%8.8x", j ? " " : "", *(ptr4 + j));
+					"%s%8.8x", j ? " " : "", *(ptr4 + j));
 		ascii_column = 9 * ngroups + 2;
 		break;
 	}
@@ -88,7 +106,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 		for (j = 0; j < ngroups; j++)
 			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%s%4.4x", j ? " " : "", *(ptr2 + j));
+					"%s%4.4x", j ? " " : "", *(ptr2 + j));
 		ascii_column = 5 * ngroups + 2;
 		break;
 	}
@@ -111,9 +129,10 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 	while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
 		linebuf[lx++] = ' ';
-	for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
-		linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
-				: '.';
+	for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) {
+		ch = ptr[j];
+		linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
+	}
 nil:
 	linebuf[lx++] = '\0';
 }
@@ -143,7 +162,7 @@ EXPORT_SYMBOL(hex_dump_to_buffer);
  *
 * E.g.:
 *   print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
-*		    16, 1, frame->data, frame->len, 1);
+*		    16, 1, frame->data, frame->len, true);
 *
 * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
 * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f  @ABCDEFGHIJKLMNO
@@ -151,12 +170,12 @@ EXPORT_SYMBOL(hex_dump_to_buffer);
 * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c  pqrstuvwxyz{|}~.
 */
 void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
-	            int rowsize, int groupsize,
-	            const void *buf, size_t len, bool ascii)
+		    int rowsize, int groupsize,
+		    const void *buf, size_t len, bool ascii)
 {
 	const u8 *ptr = buf;
 	int i, linelen, remaining = len;
-	unsigned char linebuf[200];
+	unsigned char linebuf[32 * 3 + 2 + 32 + 1];
 
 	if (rowsize != 16 && rowsize != 32)
 		rowsize = 16;
@@ -164,13 +183,14 @@ void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
 	for (i = 0; i < len; i += rowsize) {
 		linelen = min(remaining, rowsize);
 		remaining -= rowsize;
+
 		hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
 				   linebuf, sizeof(linebuf), ascii);
 
 		switch (prefix_type) {
 		case DUMP_PREFIX_ADDRESS:
-			printk("%s%s%*p: %s\n", level, prefix_str,
-			       (int)(2 * sizeof(void *)), ptr + i, linebuf);
+			printk("%s%s%p: %s\n",
+			       level, prefix_str, ptr + i, linebuf);
 			break;
 		case DUMP_PREFIX_OFFSET:
 			printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
@@ -196,9 +216,9 @@ EXPORT_SYMBOL(print_hex_dump);
 * rowsize of 16, groupsize of 1, and ASCII output included.
 */
 void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
-	                  const void *buf, size_t len)
+			  const void *buf, size_t len)
 {
 	print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
-		       buf, len, 1);
+		       buf, len, true);
 }
 EXPORT_SYMBOL(print_hex_dump_bytes);
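
The new hex_to_bin() helper gives callers a one-digit primitive; two calls combine into a byte. A minimal caller-side sketch (the surrounding parsing code is illustrative, not from this patch):

	int hi = hex_to_bin(buf[0]);
	int lo = hex_to_bin(buf[1]);

	if (hi < 0 || lo < 0)
		return -EINVAL;	/* not a hex digit */
	byte = (hi << 4) | lo;
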
diff --git a/lib/hweight.c b/lib/hweight.c
index 63ee4eb1228d..3c79d50814cf 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -9,7 +9,7 @@
  * The Hamming Weight of a number is the total number of bits set in it.
  */
 
-unsigned int hweight32(unsigned int w)
+unsigned int __sw_hweight32(unsigned int w)
 {
 #ifdef ARCH_HAS_FAST_MULTIPLIER
 	w -= (w >> 1) & 0x55555555;
@@ -24,29 +24,30 @@ unsigned int hweight32(unsigned int w)
 	return (res + (res >> 16)) & 0x000000FF;
 #endif
 }
-EXPORT_SYMBOL(hweight32);
+EXPORT_SYMBOL(__sw_hweight32);
 
-unsigned int hweight16(unsigned int w)
+unsigned int __sw_hweight16(unsigned int w)
 {
 	unsigned int res = w - ((w >> 1) & 0x5555);
 	res = (res & 0x3333) + ((res >> 2) & 0x3333);
 	res = (res + (res >> 4)) & 0x0F0F;
 	return (res + (res >> 8)) & 0x00FF;
 }
-EXPORT_SYMBOL(hweight16);
+EXPORT_SYMBOL(__sw_hweight16);
 
-unsigned int hweight8(unsigned int w)
+unsigned int __sw_hweight8(unsigned int w)
 {
 	unsigned int res = w - ((w >> 1) & 0x55);
 	res = (res & 0x33) + ((res >> 2) & 0x33);
 	return (res + (res >> 4)) & 0x0F;
 }
-EXPORT_SYMBOL(hweight8);
+EXPORT_SYMBOL(__sw_hweight8);
 
-unsigned long hweight64(__u64 w)
+unsigned long __sw_hweight64(__u64 w)
 {
 #if BITS_PER_LONG == 32
-	return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+	return __sw_hweight32((unsigned int)(w >> 32)) +
+	       __sw_hweight32((unsigned int)w);
 #elif BITS_PER_LONG == 64
 #ifdef ARCH_HAS_FAST_MULTIPLIER
 	w -= (w >> 1) & 0x5555555555555555ul;
@@ -63,4 +64,4 @@ unsigned long hweight64(__u64 w)
 #endif
 #endif
 }
-EXPORT_SYMBOL(hweight64);
+EXPORT_SYMBOL(__sw_hweight64);
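
The rename to __sw_hweight*() keeps these portable SWAR (SIMD-within-a-register) bit counts as the software fallback, so an architecture can route hweight*() to an optimized version instead (the CONFIG_ARCH_HWEIGHT_CFLAGS hook added to lib/Makefile above serves that arrangement). Worked example of the fast-multiplier path for w = 0xF0F0F0F0, which has 16 set bits:

	w -= (w >> 1) & 0x55555555;	/* 0xA0A0A0A0: 2-bit pair counts  */
	w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);
					/* 0x40404040: per-nibble counts  */
	w  = (w + (w >> 4)) & 0x0f0f0f0f;
					/* 0x04040404: per-byte counts    */
	return (w * 0x01010101) >> 24;	/* sums the four bytes: 16        */
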
diff --git a/lib/idr.c b/lib/idr.c
index 2eb1dca03681..7f1a4f0acf50 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -445,6 +445,7 @@ EXPORT_SYMBOL(idr_remove);
 void idr_remove_all(struct idr *idp)
 {
 	int n, id, max;
+	int bt_mask;
 	struct idr_layer *p;
 	struct idr_layer *pa[MAX_LEVEL];
 	struct idr_layer **paa = &pa[0];
@@ -462,8 +463,10 @@ void idr_remove_all(struct idr *idp)
 			p = p->ary[(id >> n) & IDR_MASK];
 		}
 
+		bt_mask = id;
 		id += 1 << n;
-		while (n < fls(id)) {
+		/* Get the highest bit that the above add changed from 0->1. */
+		while (n < fls(id ^ bt_mask)) {
 			if (p)
 				free_layer(p);
 			n += IDR_BITS;
@@ -599,7 +602,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
 	/* find first ent */
 	n = idp->layers * IDR_BITS;
 	max = 1 << n;
-	p = rcu_dereference(idp->top);
+	p = rcu_dereference_raw(idp->top);
 	if (!p)
 		return NULL;
 
@@ -607,7 +610,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
 	while (n > 0 && p) {
 		n -= IDR_BITS;
 		*paa++ = p;
-		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
+		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
 	}
 
 	if (p) {
@@ -623,7 +626,7 @@ void *idr_get_next(struct idr *idp, int *nextidp)
 	}
 	return NULL;
 }
-
+EXPORT_SYMBOL(idr_get_next);
 
 
 /**
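
The bt_mask change in idr_remove_all() is easiest to see with numbers (the values below are illustrative; fls() returns the 1-based position of the most significant set bit):

	/* Suppose id = 0x2ff and n = 0.
	 * bt_mask = 0x2ff; id += 1  ->  id = 0x300.
	 * id ^ bt_mask = 0x1ff, fls(0x1ff) = 9: only layers whose
	 * index bits actually flipped 0->1 during the add are freed.
	 * The old test fls(id) = fls(0x300) = 10 also matched bit 9,
	 * which had been set all along, so a still-referenced layer
	 * could be freed prematurely. */
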
diff --git a/lib/inflate.c b/lib/inflate.c
index d10255973a9f..677b738c2204 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -103,6 +103,7 @@
    the two sets of lengths.
  */
 #include <linux/compiler.h>
+#include <linux/slab.h>
 
 #ifdef RCSID
 static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #";
diff --git a/lib/kasprintf.c b/lib/kasprintf.c
index c5ff1fd10030..9c4233b23783 100644
--- a/lib/kasprintf.c
+++ b/lib/kasprintf.c
@@ -6,6 +6,7 @@
 
 #include <stdarg.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/string.h>
 
diff --git a/lib/kobject.c b/lib/kobject.c
index 8115eb1bbf4d..f07c57252e82 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -850,6 +850,121 @@ struct kset *kset_create_and_add(const char *name,
 }
 EXPORT_SYMBOL_GPL(kset_create_and_add);
 
+
+static DEFINE_SPINLOCK(kobj_ns_type_lock);
+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
+
+int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
+{
+	enum kobj_ns_type type = ops->type;
+	int error;
+
+	spin_lock(&kobj_ns_type_lock);
+
+	error = -EINVAL;
+	if (type >= KOBJ_NS_TYPES)
+		goto out;
+
+	error = -EINVAL;
+	if (type <= KOBJ_NS_TYPE_NONE)
+		goto out;
+
+	error = -EBUSY;
+	if (kobj_ns_ops_tbl[type])
+		goto out;
+
+	error = 0;
+	kobj_ns_ops_tbl[type] = ops;
+
+out:
+	spin_unlock(&kobj_ns_type_lock);
+	return error;
+}
+
+int kobj_ns_type_registered(enum kobj_ns_type type)
+{
+	int registered = 0;
+
+	spin_lock(&kobj_ns_type_lock);
+	if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES))
+		registered = kobj_ns_ops_tbl[type] != NULL;
+	spin_unlock(&kobj_ns_type_lock);
+
+	return registered;
+}
+
+const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent)
+{
+	const struct kobj_ns_type_operations *ops = NULL;
+
+	if (parent && parent->ktype->child_ns_type)
+		ops = parent->ktype->child_ns_type(parent);
+
+	return ops;
+}
+
+const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
+{
+	return kobj_child_ns_ops(kobj->parent);
+}
+
+
+const void *kobj_ns_current(enum kobj_ns_type type)
+{
+	const void *ns = NULL;
+
+	spin_lock(&kobj_ns_type_lock);
+	if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
+	    kobj_ns_ops_tbl[type])
+		ns = kobj_ns_ops_tbl[type]->current_ns();
+	spin_unlock(&kobj_ns_type_lock);
+
+	return ns;
+}
+
+const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
+{
+	const void *ns = NULL;
+
+	spin_lock(&kobj_ns_type_lock);
+	if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
+	    kobj_ns_ops_tbl[type])
+		ns = kobj_ns_ops_tbl[type]->netlink_ns(sk);
+	spin_unlock(&kobj_ns_type_lock);
+
+	return ns;
+}
+
+const void *kobj_ns_initial(enum kobj_ns_type type)
+{
+	const void *ns = NULL;
+
+	spin_lock(&kobj_ns_type_lock);
+	if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
+	    kobj_ns_ops_tbl[type])
+		ns = kobj_ns_ops_tbl[type]->initial_ns();
+	spin_unlock(&kobj_ns_type_lock);
+
+	return ns;
+}
+
+/*
+ * kobj_ns_exit - invalidate a namespace tag
+ *
+ * @type: the namespace type (i.e. KOBJ_NS_TYPE_NET)
+ * @ns: the actual namespace being invalidated
+ *
+ * This is called when a tag is no longer valid.  For instance,
+ * when a network namespace exits, it uses this helper to
+ * make sure no sb's sysfs_info points to the now-invalidated
+ * netns.
+ */
+void kobj_ns_exit(enum kobj_ns_type type, const void *ns)
+{
+	sysfs_exit_ns(type, ns);
+}
+
+
 EXPORT_SYMBOL(kobject_get);
 EXPORT_SYMBOL(kobject_put);
 EXPORT_SYMBOL(kobject_del);
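
The new kobj_ns_* layer is a small registry: a subsystem registers a kobj_ns_type_operations table for its namespace type, and the sysfs/uevent code then resolves namespace tags through it. A hedged registration sketch — KOBJ_NS_TYPE_NET comes from the comment above, the ops fields are exactly the ones invoked by the code above, and the helper functions are hypothetical:

	static const struct kobj_ns_type_operations net_ns_type_ops = {
		.type		= KOBJ_NS_TYPE_NET,
		.current_ns	= net_current_ns,	/* hypothetical */
		.netlink_ns	= net_netlink_ns,	/* hypothetical */
		.initial_ns	= net_initial_ns,	/* hypothetical */
	};

	static int __init net_kobj_ns_init(void)
	{
		return kobj_ns_type_register(&net_ns_type_ops);
	}
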
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index c9d3a3e8405d..b93579504dfa 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -18,18 +18,25 @@
 #include <linux/string.h>
 #include <linux/kobject.h>
 #include <linux/module.h>
-
+#include <linux/slab.h>
+#include <linux/user_namespace.h>
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
 #include <net/sock.h>
+#include <net/net_namespace.h>
 
 
 u64 uevent_seqnum;
 char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
 static DEFINE_SPINLOCK(sequence_lock);
-#if defined(CONFIG_NET)
-static struct sock *uevent_sock;
+#ifdef CONFIG_NET
+struct uevent_sock {
+	struct list_head list;
+	struct sock *sk;
+};
+static LIST_HEAD(uevent_sock_list);
+static DEFINE_MUTEX(uevent_sock_mutex);
 #endif
 
 /* the strings here must match the enum in include/linux/kobject.h */
@@ -76,6 +83,39 @@ out:
 	return ret;
 }
 
+#ifdef CONFIG_NET
+static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
+{
+	struct kobject *kobj = data;
+	const struct kobj_ns_type_operations *ops;
+
+	ops = kobj_ns_ops(kobj);
+	if (ops) {
+		const void *sock_ns, *ns;
+		ns = kobj->ktype->namespace(kobj);
+		sock_ns = ops->netlink_ns(dsk);
+		return sock_ns != ns;
+	}
+
+	return 0;
+}
+#endif
+
+static int kobj_usermode_filter(struct kobject *kobj)
+{
+	const struct kobj_ns_type_operations *ops;
+
+	ops = kobj_ns_ops(kobj);
+	if (ops) {
+		const void *init_ns, *ns;
+		ns = kobj->ktype->namespace(kobj);
+		init_ns = ops->initial_ns();
+		return ns != init_ns;
+	}
+
+	return 0;
+}
+
 /**
  * kobject_uevent_env - send an uevent with environmental data
  *
@@ -99,6 +139,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 	u64 seq;
 	int i = 0;
 	int retval = 0;
+#ifdef CONFIG_NET
+	struct uevent_sock *ue_sk;
+#endif
 
 	pr_debug("kobject: '%s' (%p): %s\n",
 		 kobject_name(kobj), kobj, __func__);
@@ -210,7 +253,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 
 #if defined(CONFIG_NET)
 	/* send netlink message */
-	if (uevent_sock) {
+	mutex_lock(&uevent_sock_mutex);
+	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
+		struct sock *uevent_sock = ue_sk->sk;
 		struct sk_buff *skb;
 		size_t len;
 
@@ -232,18 +277,21 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 		}
 
 		NETLINK_CB(skb).dst_group = 1;
-		retval = netlink_broadcast(uevent_sock, skb, 0, 1,
-					   GFP_KERNEL);
+		retval = netlink_broadcast_filtered(uevent_sock, skb,
+						    0, 1, GFP_KERNEL,
+						    kobj_bcast_filter,
+						    kobj);
 		/* ENOBUFS should be handled in userspace */
 		if (retval == -ENOBUFS)
 			retval = 0;
 		} else
 			retval = -ENOMEM;
 	}
+	mutex_unlock(&uevent_sock_mutex);
 #endif
 
 	/* call uevent_helper, usually only enabled during early boot */
-	if (uevent_helper[0]) {
+	if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
 		char *argv [3];
 
 		argv [0] = uevent_helper;
@@ -319,18 +367,59 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
 EXPORT_SYMBOL_GPL(add_uevent_var);
 
 #if defined(CONFIG_NET)
-static int __init kobject_uevent_init(void)
+static int uevent_net_init(struct net *net)
 {
-	uevent_sock = netlink_kernel_create(&init_net, NETLINK_KOBJECT_UEVENT,
-					    1, NULL, NULL, THIS_MODULE);
-	if (!uevent_sock) {
+	struct uevent_sock *ue_sk;
+
+	ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
+	if (!ue_sk)
+		return -ENOMEM;
+
+	ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT,
+					  1, NULL, NULL, THIS_MODULE);
+	if (!ue_sk->sk) {
 		printk(KERN_ERR
 		       "kobject_uevent: unable to create netlink socket!\n");
+		kfree(ue_sk);
 		return -ENODEV;
 	}
-	netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
+	mutex_lock(&uevent_sock_mutex);
+	list_add_tail(&ue_sk->list, &uevent_sock_list);
+	mutex_unlock(&uevent_sock_mutex);
 	return 0;
333} 390}
334 391
392static void uevent_net_exit(struct net *net)
393{
394 struct uevent_sock *ue_sk;
395
396 mutex_lock(&uevent_sock_mutex);
397 list_for_each_entry(ue_sk, &uevent_sock_list, list) {
398 if (sock_net(ue_sk->sk) == net)
399 goto found;
400 }
401 mutex_unlock(&uevent_sock_mutex);
402 return;
403
404found:
405 list_del(&ue_sk->list);
406 mutex_unlock(&uevent_sock_mutex);
407
408 netlink_kernel_release(ue_sk->sk);
409 kfree(ue_sk);
410}
411
412static struct pernet_operations uevent_net_ops = {
413 .init = uevent_net_init,
414 .exit = uevent_net_exit,
415};
416
417static int __init kobject_uevent_init(void)
418{
419 netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
420 return register_pernet_subsys(&uevent_net_ops);
421}
422
423
335postcore_initcall(kobject_uevent_init); 424postcore_initcall(kobject_uevent_init);
336#endif 425#endif
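
kobj_bcast_filter() and kobj_usermode_filter() only take effect when kobj_ns_ops() finds namespace operations, which requires the kobject's parent ktype to report a child namespace type and the kobject's own ktype to map each object to a tag. A hedged sketch of that wiring; every demo_* name is hypothetical:

/* Hypothetical ktype whose kobjects are tagged with a struct net *,
 * so uevents are only broadcast to netlink listeners in the same
 * namespace, and the usermode helper is skipped outside the initial
 * namespace. The parent's ktype would additionally need to expose
 * the child_ns_type() hook introduced by this series. */
static const void *demo_namespace(struct kobject *kobj)
{
	return demo_to_net(kobj);		/* hypothetical lookup */
}

static struct kobj_type demo_ktype = {
	.release   = demo_release,		/* hypothetical */
	.namespace = demo_namespace,
};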
diff --git a/lib/kref.c b/lib/kref.c
index 9ecd6e865610..d3d227a08a4b 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -13,17 +13,7 @@
13 13
14#include <linux/kref.h> 14#include <linux/kref.h>
15#include <linux/module.h> 15#include <linux/module.h>
16 16#include <linux/slab.h>
17/**
18 * kref_set - initialize object and set refcount to requested number.
19 * @kref: object in question.
20 * @num: initial reference counter
21 */
22void kref_set(struct kref *kref, int num)
23{
24 atomic_set(&kref->refcount, num);
25 smp_mb();
26}
27 17
28/** 18/**
29 * kref_init - initialize object. 19 * kref_init - initialize object.
@@ -31,7 +21,8 @@ void kref_set(struct kref *kref, int num)
31 */ 21 */
32void kref_init(struct kref *kref) 22void kref_init(struct kref *kref)
33{ 23{
34 kref_set(kref, 1); 24 atomic_set(&kref->refcount, 1);
25 smp_mb();
35} 26}
36 27
37/** 28/**
@@ -71,7 +62,6 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref))
71 return 0; 62 return 0;
72} 63}
73 64
74EXPORT_SYMBOL(kref_set);
75EXPORT_SYMBOL(kref_init); 65EXPORT_SYMBOL(kref_init);
76EXPORT_SYMBOL(kref_get); 66EXPORT_SYMBOL(kref_get);
77EXPORT_SYMBOL(kref_put); 67EXPORT_SYMBOL(kref_put);
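
With kref_set() removed, kref_init() is the only sanctioned way to start a reference count. The standard lifecycle, for reference (demo names are hypothetical):

struct demo {
	struct kref ref;
};

static void demo_release(struct kref *kref)
{
	struct demo *d = container_of(kref, struct demo, ref);

	kfree(d);
}

	/* d = kzalloc(sizeof(*d), GFP_KERNEL); ... */
	kref_init(&d->ref);			/* refcount starts at 1 */
	kref_get(&d->ref);			/* take an extra reference */
	kref_put(&d->ref, demo_release);	/* drop it; release runs at 0 */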
diff --git a/lib/lcm.c b/lib/lcm.c
new file mode 100644
index 000000000000..157cd88a6ffc
--- /dev/null
+++ b/lib/lcm.c
@@ -0,0 +1,15 @@
1#include <linux/kernel.h>
2#include <linux/gcd.h>
3#include <linux/module.h>
4
5/* Lowest common multiple */
6unsigned long lcm(unsigned long a, unsigned long b)
7{
8 if (a && b)
9 return (a * b) / gcd(a, b);
10 else if (b)
11 return b;
12
13 return a;
14}
15EXPORT_SYMBOL_GPL(lcm);
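
Since gcd(a, b) divides both operands, (a * b) / gcd(a, b) is exact, though the intermediate product can still overflow unsigned long for large inputs. A worked call, in the block-layer limit-stacking style this helper targets:

	/* gcd(512, 620) == 4, so lcm == (512 * 620) / 4 == 79360.
	 * The zero cases fall through to the remaining branches:
	 * lcm(0, n) == n and lcm(n, 0) == n. */
	unsigned long granularity = lcm(512, 620);	/* 79360 */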
diff --git a/lib/lmb.c b/lib/lmb.c
deleted file mode 100644
index b1fc52606524..000000000000
--- a/lib/lmb.c
+++ /dev/null
@@ -1,541 +0,0 @@
1/*
2 * Procedures for maintaining information about logical memory blocks.
3 *
4 * Peter Bergner, IBM Corp. June 2001.
5 * Copyright (C) 2001 Peter Bergner.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/bitops.h>
16#include <linux/lmb.h>
17
18#define LMB_ALLOC_ANYWHERE 0
19
20struct lmb lmb;
21
22static int lmb_debug;
23
24static int __init early_lmb(char *p)
25{
26 if (p && strstr(p, "debug"))
27 lmb_debug = 1;
28 return 0;
29}
30early_param("lmb", early_lmb);
31
32static void lmb_dump(struct lmb_region *region, char *name)
33{
34 unsigned long long base, size;
35 int i;
36
37 pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
38
39 for (i = 0; i < region->cnt; i++) {
40 base = region->region[i].base;
41 size = region->region[i].size;
42
43 pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
44 name, i, base, base + size - 1, size);
45 }
46}
47
48void lmb_dump_all(void)
49{
50 if (!lmb_debug)
51 return;
52
53 pr_info("LMB configuration:\n");
54 pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size);
55 pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size);
56
57 lmb_dump(&lmb.memory, "memory");
58 lmb_dump(&lmb.reserved, "reserved");
59}
60
61static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
62 u64 size2)
63{
64 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
65}
66
67static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
68{
69 if (base2 == base1 + size1)
70 return 1;
71 else if (base1 == base2 + size2)
72 return -1;
73
74 return 0;
75}
76
77static long lmb_regions_adjacent(struct lmb_region *rgn,
78 unsigned long r1, unsigned long r2)
79{
80 u64 base1 = rgn->region[r1].base;
81 u64 size1 = rgn->region[r1].size;
82 u64 base2 = rgn->region[r2].base;
83 u64 size2 = rgn->region[r2].size;
84
85 return lmb_addrs_adjacent(base1, size1, base2, size2);
86}
87
88static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
89{
90 unsigned long i;
91
92 for (i = r; i < rgn->cnt - 1; i++) {
93 rgn->region[i].base = rgn->region[i + 1].base;
94 rgn->region[i].size = rgn->region[i + 1].size;
95 }
96 rgn->cnt--;
97}
98
99/* Assumption: base addr of region 1 < base addr of region 2 */
100static void lmb_coalesce_regions(struct lmb_region *rgn,
101 unsigned long r1, unsigned long r2)
102{
103 rgn->region[r1].size += rgn->region[r2].size;
104 lmb_remove_region(rgn, r2);
105}
106
107void __init lmb_init(void)
108{
109 /* Create a dummy zero size LMB which will get coalesced away later.
110 * This simplifies the lmb_add() code below...
111 */
112 lmb.memory.region[0].base = 0;
113 lmb.memory.region[0].size = 0;
114 lmb.memory.cnt = 1;
115
116 /* Ditto. */
117 lmb.reserved.region[0].base = 0;
118 lmb.reserved.region[0].size = 0;
119 lmb.reserved.cnt = 1;
120}
121
122void __init lmb_analyze(void)
123{
124 int i;
125
126 lmb.memory.size = 0;
127
128 for (i = 0; i < lmb.memory.cnt; i++)
129 lmb.memory.size += lmb.memory.region[i].size;
130}
131
132static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
133{
134 unsigned long coalesced = 0;
135 long adjacent, i;
136
137 if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
138 rgn->region[0].base = base;
139 rgn->region[0].size = size;
140 return 0;
141 }
142
143 /* First try and coalesce this LMB with another. */
144 for (i = 0; i < rgn->cnt; i++) {
145 u64 rgnbase = rgn->region[i].base;
146 u64 rgnsize = rgn->region[i].size;
147
148 if ((rgnbase == base) && (rgnsize == size))
149 /* Already have this region, so we're done */
150 return 0;
151
152 adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
153 if (adjacent > 0) {
154 rgn->region[i].base -= size;
155 rgn->region[i].size += size;
156 coalesced++;
157 break;
158 } else if (adjacent < 0) {
159 rgn->region[i].size += size;
160 coalesced++;
161 break;
162 }
163 }
164
165 if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i+1)) {
166 lmb_coalesce_regions(rgn, i, i+1);
167 coalesced++;
168 }
169
170 if (coalesced)
171 return coalesced;
172 if (rgn->cnt >= MAX_LMB_REGIONS)
173 return -1;
174
175 /* Couldn't coalesce the LMB, so add it to the sorted table. */
176 for (i = rgn->cnt - 1; i >= 0; i--) {
177 if (base < rgn->region[i].base) {
178 rgn->region[i+1].base = rgn->region[i].base;
179 rgn->region[i+1].size = rgn->region[i].size;
180 } else {
181 rgn->region[i+1].base = base;
182 rgn->region[i+1].size = size;
183 break;
184 }
185 }
186
187 if (base < rgn->region[0].base) {
188 rgn->region[0].base = base;
189 rgn->region[0].size = size;
190 }
191 rgn->cnt++;
192
193 return 0;
194}
195
196long lmb_add(u64 base, u64 size)
197{
198 struct lmb_region *_rgn = &lmb.memory;
199
200 /* On pSeries LPAR systems, the first LMB is our RMO region. */
201 if (base == 0)
202 lmb.rmo_size = size;
203
204 return lmb_add_region(_rgn, base, size);
205
206}
207
208static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size)
209{
210 u64 rgnbegin, rgnend;
211 u64 end = base + size;
212 int i;
213
214 rgnbegin = rgnend = 0; /* suppress gcc warnings */
215
216 /* Find the region where (base, size) belongs to */
217 for (i=0; i < rgn->cnt; i++) {
218 rgnbegin = rgn->region[i].base;
219 rgnend = rgnbegin + rgn->region[i].size;
220
221 if ((rgnbegin <= base) && (end <= rgnend))
222 break;
223 }
224
225 /* Didn't find the region */
226 if (i == rgn->cnt)
227 return -1;
228
229 /* Check to see if we are removing entire region */
230 if ((rgnbegin == base) && (rgnend == end)) {
231 lmb_remove_region(rgn, i);
232 return 0;
233 }
234
235 /* Check to see if region is matching at the front */
236 if (rgnbegin == base) {
237 rgn->region[i].base = end;
238 rgn->region[i].size -= size;
239 return 0;
240 }
241
242 /* Check to see if the region is matching at the end */
243 if (rgnend == end) {
244 rgn->region[i].size -= size;
245 return 0;
246 }
247
248 /*
249 * We need to split the entry - adjust the current one to the
250 * beginning of the hole and add the region after the hole.
251 */
252 rgn->region[i].size = base - rgn->region[i].base;
253 return lmb_add_region(rgn, end, rgnend - end);
254}
255
256long lmb_remove(u64 base, u64 size)
257{
258 return __lmb_remove(&lmb.memory, base, size);
259}
260
261long __init lmb_free(u64 base, u64 size)
262{
263 return __lmb_remove(&lmb.reserved, base, size);
264}
265
266long __init lmb_reserve(u64 base, u64 size)
267{
268 struct lmb_region *_rgn = &lmb.reserved;
269
270 BUG_ON(0 == size);
271
272 return lmb_add_region(_rgn, base, size);
273}
274
275long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
276{
277 unsigned long i;
278
279 for (i = 0; i < rgn->cnt; i++) {
280 u64 rgnbase = rgn->region[i].base;
281 u64 rgnsize = rgn->region[i].size;
282 if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
283 break;
284 }
285
286 return (i < rgn->cnt) ? i : -1;
287}
288
289static u64 lmb_align_down(u64 addr, u64 size)
290{
291 return addr & ~(size - 1);
292}
293
294static u64 lmb_align_up(u64 addr, u64 size)
295{
296 return (addr + (size - 1)) & ~(size - 1);
297}
298
299static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
300 u64 size, u64 align)
301{
302 u64 base, res_base;
303 long j;
304
305 base = lmb_align_down((end - size), align);
306 while (start <= base) {
307 j = lmb_overlaps_region(&lmb.reserved, base, size);
308 if (j < 0) {
309 /* this area isn't reserved, take it */
310 if (lmb_add_region(&lmb.reserved, base, size) < 0)
311 base = ~(u64)0;
312 return base;
313 }
314 res_base = lmb.reserved.region[j].base;
315 if (res_base < size)
316 break;
317 base = lmb_align_down(res_base - size, align);
318 }
319
320 return ~(u64)0;
321}
322
323static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
324 u64 (*nid_range)(u64, u64, int *),
325 u64 size, u64 align, int nid)
326{
327 u64 start, end;
328
329 start = mp->base;
330 end = start + mp->size;
331
332 start = lmb_align_up(start, align);
333 while (start < end) {
334 u64 this_end;
335 int this_nid;
336
337 this_end = nid_range(start, end, &this_nid);
338 if (this_nid == nid) {
339 u64 ret = lmb_alloc_nid_unreserved(start, this_end,
340 size, align);
341 if (ret != ~(u64)0)
342 return ret;
343 }
344 start = this_end;
345 }
346
347 return ~(u64)0;
348}
349
350u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
351 u64 (*nid_range)(u64 start, u64 end, int *nid))
352{
353 struct lmb_region *mem = &lmb.memory;
354 int i;
355
356 BUG_ON(0 == size);
357
358 size = lmb_align_up(size, align);
359
360 for (i = 0; i < mem->cnt; i++) {
361 u64 ret = lmb_alloc_nid_region(&mem->region[i],
362 nid_range,
363 size, align, nid);
364 if (ret != ~(u64)0)
365 return ret;
366 }
367
368 return lmb_alloc(size, align);
369}
370
371u64 __init lmb_alloc(u64 size, u64 align)
372{
373 return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
374}
375
376u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
377{
378 u64 alloc;
379
380 alloc = __lmb_alloc_base(size, align, max_addr);
381
382 if (alloc == 0)
383 panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
384 (unsigned long long) size, (unsigned long long) max_addr);
385
386 return alloc;
387}
388
389u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
390{
391 long i, j;
392 u64 base = 0;
393 u64 res_base;
394
395 BUG_ON(0 == size);
396
397 size = lmb_align_up(size, align);
398
399 /* On some platforms, make sure we allocate lowmem */
400 /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
401 if (max_addr == LMB_ALLOC_ANYWHERE)
402 max_addr = LMB_REAL_LIMIT;
403
404 for (i = lmb.memory.cnt - 1; i >= 0; i--) {
405 u64 lmbbase = lmb.memory.region[i].base;
406 u64 lmbsize = lmb.memory.region[i].size;
407
408 if (lmbsize < size)
409 continue;
410 if (max_addr == LMB_ALLOC_ANYWHERE)
411 base = lmb_align_down(lmbbase + lmbsize - size, align);
412 else if (lmbbase < max_addr) {
413 base = min(lmbbase + lmbsize, max_addr);
414 base = lmb_align_down(base - size, align);
415 } else
416 continue;
417
418 while (base && lmbbase <= base) {
419 j = lmb_overlaps_region(&lmb.reserved, base, size);
420 if (j < 0) {
421 /* this area isn't reserved, take it */
422 if (lmb_add_region(&lmb.reserved, base, size) < 0)
423 return 0;
424 return base;
425 }
426 res_base = lmb.reserved.region[j].base;
427 if (res_base < size)
428 break;
429 base = lmb_align_down(res_base - size, align);
430 }
431 }
432 return 0;
433}
434
435/* You must call lmb_analyze() before this. */
436u64 __init lmb_phys_mem_size(void)
437{
438 return lmb.memory.size;
439}
440
441u64 lmb_end_of_DRAM(void)
442{
443 int idx = lmb.memory.cnt - 1;
444
445 return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
446}
447
448/* You must call lmb_analyze() after this. */
449void __init lmb_enforce_memory_limit(u64 memory_limit)
450{
451 unsigned long i;
452 u64 limit;
453 struct lmb_property *p;
454
455 if (!memory_limit)
456 return;
457
458 /* Truncate the lmb regions to satisfy the memory limit. */
459 limit = memory_limit;
460 for (i = 0; i < lmb.memory.cnt; i++) {
461 if (limit > lmb.memory.region[i].size) {
462 limit -= lmb.memory.region[i].size;
463 continue;
464 }
465
466 lmb.memory.region[i].size = limit;
467 lmb.memory.cnt = i + 1;
468 break;
469 }
470
471 if (lmb.memory.region[0].size < lmb.rmo_size)
472 lmb.rmo_size = lmb.memory.region[0].size;
473
474 memory_limit = lmb_end_of_DRAM();
475
476 /* And truncate any reserves above the limit also. */
477 for (i = 0; i < lmb.reserved.cnt; i++) {
478 p = &lmb.reserved.region[i];
479
480 if (p->base > memory_limit)
481 p->size = 0;
482 else if ((p->base + p->size) > memory_limit)
483 p->size = memory_limit - p->base;
484
485 if (p->size == 0) {
486 lmb_remove_region(&lmb.reserved, i);
487 i--;
488 }
489 }
490}
491
492int __init lmb_is_reserved(u64 addr)
493{
494 int i;
495
496 for (i = 0; i < lmb.reserved.cnt; i++) {
497 u64 upper = lmb.reserved.region[i].base +
498 lmb.reserved.region[i].size - 1;
499 if ((addr >= lmb.reserved.region[i].base) && (addr <= upper))
500 return 1;
501 }
502 return 0;
503}
504
505int lmb_is_region_reserved(u64 base, u64 size)
506{
507 return lmb_overlaps_region(&lmb.reserved, base, size);
508}
509
510/*
511 * Given a <base, len>, find which memory regions belong to this range.
512 * Adjust the request and return a contiguous chunk.
513 */
514int lmb_find(struct lmb_property *res)
515{
516 int i;
517 u64 rstart, rend;
518
519 rstart = res->base;
520 rend = rstart + res->size - 1;
521
522 for (i = 0; i < lmb.memory.cnt; i++) {
523 u64 start = lmb.memory.region[i].base;
524 u64 end = start + lmb.memory.region[i].size - 1;
525
526 if (start > rend)
527 return -1;
528
529 if ((end >= rstart) && (start < rend)) {
530 /* adjust the request */
531 if (rstart < start)
532 rstart = start;
533 if (rend > end)
534 rend = end;
535 res->base = rstart;
536 res->size = rend - rstart + 1;
537 return 0;
538 }
539 }
540 return -1;
541}
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6b9670d6bbf9..05da38bcc298 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -28,7 +28,6 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/notifier.h> 29#include <linux/notifier.h>
30#include <linux/cpu.h> 30#include <linux/cpu.h>
31#include <linux/gfp.h>
32#include <linux/string.h> 31#include <linux/string.h>
33#include <linux/bitops.h> 32#include <linux/bitops.h>
34#include <linux/rcupdate.h> 33#include <linux/rcupdate.h>
@@ -556,6 +555,10 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
556 * 555 *
557 * 0: tag not present or not set 556 * 0: tag not present or not set
558 * 1: tag set 557 * 1: tag set
558 *
559 * Note that the return value of this function may not be relied on, even if
560 * the RCU lock is held, unless tag modification and node deletion are excluded
561 * from concurrency.
559 */ 562 */
560int radix_tree_tag_get(struct radix_tree_root *root, 563int radix_tree_tag_get(struct radix_tree_root *root,
561 unsigned long index, unsigned int tag) 564 unsigned long index, unsigned int tag)
@@ -596,12 +599,8 @@ int radix_tree_tag_get(struct radix_tree_root *root,
596 */ 599 */
597 if (!tag_get(node, tag, offset)) 600 if (!tag_get(node, tag, offset))
598 saw_unset_tag = 1; 601 saw_unset_tag = 1;
599 if (height == 1) { 602 if (height == 1)
600 int ret = tag_get(node, tag, offset); 603 return !!tag_get(node, tag, offset);
601
602 BUG_ON(ret && saw_unset_tag);
603 return !!ret;
604 }
605 node = rcu_dereference_raw(node->slots[offset]); 604 node = rcu_dereference_raw(node->slots[offset]);
606 shift -= RADIX_TREE_MAP_SHIFT; 605 shift -= RADIX_TREE_MAP_SHIFT;
607 height--; 606 height--;
@@ -657,7 +656,7 @@ EXPORT_SYMBOL(radix_tree_next_hole);
657 * 656 *
658 * Returns: the index of the hole if found, otherwise returns an index 657 * Returns: the index of the hole if found, otherwise returns an index
659 * outside of the set specified (in which case 'index - return >= max_scan' 658 * outside of the set specified (in which case 'index - return >= max_scan'
660 * will be true). In rare cases of wrap-around, LONG_MAX will be returned. 659 * will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
661 * 660 *
662 * radix_tree_next_hole may be called under rcu_read_lock. However, like 661 * radix_tree_next_hole may be called under rcu_read_lock. However, like
663 * radix_tree_gang_lookup, this will not atomically search a snapshot of 662 * radix_tree_gang_lookup, this will not atomically search a snapshot of
@@ -675,7 +674,7 @@ unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
675 if (!radix_tree_lookup(root, index)) 674 if (!radix_tree_lookup(root, index))
676 break; 675 break;
677 index--; 676 index--;
678 if (index == LONG_MAX) 677 if (index == ULONG_MAX)
679 break; 678 break;
680 } 679 }
681 680
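
The new kernel-doc note is the substantive change here: with the BUG_ON gone, radix_tree_tag_get() no longer asserts tag coherence, so callers acting on the result must themselves exclude tag updates and deletions. A hedged sketch of the safe pattern; the lock and tag names are hypothetical:

	/* The returned value is only meaningful if tag changes and node
	 * deletion are excluded, e.g. by the mapping's own lock; under
	 * bare rcu_read_lock() it may already be stale. */
	spin_lock(&mapping_lock);
	if (radix_tree_tag_get(&tree, index, DEMO_DIRTY_TAG))
		handle_dirty(index);		/* hypothetical */
	spin_unlock(&mapping_lock);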
diff --git a/lib/random32.c b/lib/random32.c
index 217d5c4b666d..fc3545a32771 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -39,13 +39,16 @@
39#include <linux/jiffies.h> 39#include <linux/jiffies.h>
40#include <linux/random.h> 40#include <linux/random.h>
41 41
42struct rnd_state {
43 u32 s1, s2, s3;
44};
45
46static DEFINE_PER_CPU(struct rnd_state, net_rand_state); 42static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
47 43
48static u32 __random32(struct rnd_state *state) 44/**
45 * prandom32 - seeded pseudo-random number generator.
46 * @state: pointer to state structure holding seeded state.
47 *
48 * This is used for pseudo-randomness with no outside seeding.
49 * For more random results, use random32().
50 */
51u32 prandom32(struct rnd_state *state)
49{ 52{
50#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b) 53#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
51 54
@@ -55,14 +58,7 @@ static u32 __random32(struct rnd_state *state)
55 58
56 return (state->s1 ^ state->s2 ^ state->s3); 59 return (state->s1 ^ state->s2 ^ state->s3);
57} 60}
58 61EXPORT_SYMBOL(prandom32);
59/*
60 * Handle minimum values for seeds
61 */
62static inline u32 __seed(u32 x, u32 m)
63{
64 return (x < m) ? x + m : x;
65}
66 62
67/** 63/**
68 * random32 - pseudo random number generator 64 * random32 - pseudo random number generator
@@ -75,7 +71,7 @@ u32 random32(void)
75{ 71{
76 unsigned long r; 72 unsigned long r;
77 struct rnd_state *state = &get_cpu_var(net_rand_state); 73 struct rnd_state *state = &get_cpu_var(net_rand_state);
78 r = __random32(state); 74 r = prandom32(state);
79 put_cpu_var(state); 75 put_cpu_var(state);
80 return r; 76 return r;
81} 77}
@@ -118,12 +114,12 @@ static int __init random32_init(void)
118 state->s3 = __seed(LCG(state->s2), 15); 114 state->s3 = __seed(LCG(state->s2), 15);
119 115
120 /* "warm it up" */ 116 /* "warm it up" */
121 __random32(state); 117 prandom32(state);
122 __random32(state); 118 prandom32(state);
123 __random32(state); 119 prandom32(state);
124 __random32(state); 120 prandom32(state);
125 __random32(state); 121 prandom32(state);
126 __random32(state); 122 prandom32(state);
127 } 123 }
128 return 0; 124 return 0;
129} 125}
@@ -131,7 +127,7 @@ core_initcall(random32_init);
131 127
132/* 128/*
133 * Generate better values after random number generator 129 * Generate better values after random number generator
134 * is fully initalized. 130 * is fully initialized.
135 */ 131 */
136static int __init random32_reseed(void) 132static int __init random32_reseed(void)
137{ 133{
@@ -147,7 +143,7 @@ static int __init random32_reseed(void)
147 state->s3 = __seed(seeds[2], 15); 143 state->s3 = __seed(seeds[2], 15);
148 144
149 /* mix it in */ 145 /* mix it in */
150 __random32(state); 146 prandom32(state);
151 } 147 }
152 return 0; 148 return 0;
153} 149}
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 09f5ce1810dc..027a03f4c56d 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -16,9 +16,14 @@
16/* 16/*
17 * __ratelimit - rate limiting 17 * __ratelimit - rate limiting
18 * @rs: ratelimit_state data 18 * @rs: ratelimit_state data
19 * @func: name of calling function
19 * 20 *
20 * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks 21 * This enforces a rate limit: not more than @rs->burst callbacks
21 * in every @rs->ratelimit_jiffies 22 * in every @rs->interval
23 *
24 * RETURNS:
25 * 0 means callbacks will be suppressed.
26 * 1 means go ahead and do it.
22 */ 27 */
23int ___ratelimit(struct ratelimit_state *rs, const char *func) 28int ___ratelimit(struct ratelimit_state *rs, const char *func)
24{ 29{
@@ -35,7 +40,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
35 * the entity that is holding the lock already: 40 * the entity that is holding the lock already:
36 */ 41 */
37 if (!spin_trylock_irqsave(&rs->lock, flags)) 42 if (!spin_trylock_irqsave(&rs->lock, flags))
38 return 1; 43 return 0;
39 44
40 if (!rs->begin) 45 if (!rs->begin)
41 rs->begin = jiffies; 46 rs->begin = jiffies;
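
The trylock fix matters to callers: returning 1 on contention meant "go ahead and print", defeating the limit exactly when the console path was busiest, whereas 0 now suppresses the callback. Typical usage, for reference:

/* At most 10 messages per 5-second window. */
static DEFINE_RATELIMIT_STATE(demo_rs, 5 * HZ, 10);

	if (__ratelimit(&demo_rs))
		printk(KERN_WARNING "demo: something noisy happened\n");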
diff --git a/lib/rbtree.c b/lib/rbtree.c
index e2aa3be29858..4693f79195d3 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -283,6 +283,74 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
283} 283}
284EXPORT_SYMBOL(rb_erase); 284EXPORT_SYMBOL(rb_erase);
285 285
286static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data)
287{
288 struct rb_node *parent;
289
290up:
291 func(node, data);
292 parent = rb_parent(node);
293 if (!parent)
294 return;
295
296 if (node == parent->rb_left && parent->rb_right)
297 func(parent->rb_right, data);
298 else if (parent->rb_left)
299 func(parent->rb_left, data);
300
301 node = parent;
302 goto up;
303}
304
305/*
306 * after inserting @node into the tree, update the tree to account for
307 * both the new entry and any damage done by rebalance
308 */
309void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
310{
311 if (node->rb_left)
312 node = node->rb_left;
313 else if (node->rb_right)
314 node = node->rb_right;
315
316 rb_augment_path(node, func, data);
317}
318
319/*
320 * before removing the node, find the deepest node on the rebalance path
321 * that will still be there after @node gets removed
322 */
323struct rb_node *rb_augment_erase_begin(struct rb_node *node)
324{
325 struct rb_node *deepest;
326
327 if (!node->rb_right && !node->rb_left)
328 deepest = rb_parent(node);
329 else if (!node->rb_right)
330 deepest = node->rb_left;
331 else if (!node->rb_left)
332 deepest = node->rb_right;
333 else {
334 deepest = rb_next(node);
335 if (deepest->rb_right)
336 deepest = deepest->rb_right;
337 else if (rb_parent(deepest) != node)
338 deepest = rb_parent(deepest);
339 }
340
341 return deepest;
342}
343
344/*
345 * after removal, update the tree to account for the removed entry
346 * and any rebalance damage.
347 */
348void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
349{
350 if (node)
351 rb_augment_path(node, func, data);
352}
353
286/* 354/*
287 * This function returns the first node (in sort order) of the tree. 355 * This function returns the first node (in sort order) of the tree.
288 */ 356 */
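
The four helpers added above support augmented rbtrees: after an insert or erase, a caller-supplied callback is rerun along the path that rebalancing may have dirtied. A hedged usage sketch maintaining a per-node subtree maximum; the demo_* names and fields are illustrative:

static void demo_augment_cb(struct rb_node *node, void *data)
{
	struct demo_node *d = rb_entry(node, struct demo_node, rb);

	/* recompute from this node and its children (hypothetical) */
	d->subtree_max = demo_recompute_max(d);
}

	/* insert path */
	rb_link_node(&d->rb, parent, link);
	rb_insert_color(&d->rb, &root);
	rb_augment_insert(&d->rb, demo_augment_cb, NULL);

	/* erase path */
	deepest = rb_augment_erase_begin(&d->rb);
	rb_erase(&d->rb, &root);
	rb_augment_erase_end(deepest, demo_augment_cb, NULL);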
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index ccf95bff7984..ffc9fc7f3b05 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -143,13 +143,14 @@ void __sched __down_read(struct rw_semaphore *sem)
143{ 143{
144 struct rwsem_waiter waiter; 144 struct rwsem_waiter waiter;
145 struct task_struct *tsk; 145 struct task_struct *tsk;
146 unsigned long flags;
146 147
147 spin_lock_irq(&sem->wait_lock); 148 spin_lock_irqsave(&sem->wait_lock, flags);
148 149
149 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 150 if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
150 /* granted */ 151 /* granted */
151 sem->activity++; 152 sem->activity++;
152 spin_unlock_irq(&sem->wait_lock); 153 spin_unlock_irqrestore(&sem->wait_lock, flags);
153 goto out; 154 goto out;
154 } 155 }
155 156
@@ -164,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
164 list_add_tail(&waiter.list, &sem->wait_list); 165 list_add_tail(&waiter.list, &sem->wait_list);
165 166
166 /* we don't need to touch the semaphore struct anymore */ 167 /* we don't need to touch the semaphore struct anymore */
167 spin_unlock_irq(&sem->wait_lock); 168 spin_unlock_irqrestore(&sem->wait_lock, flags);
168 169
169 /* wait to be given the lock */ 170 /* wait to be given the lock */
170 for (;;) { 171 for (;;) {
@@ -209,13 +210,14 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
209{ 210{
210 struct rwsem_waiter waiter; 211 struct rwsem_waiter waiter;
211 struct task_struct *tsk; 212 struct task_struct *tsk;
213 unsigned long flags;
212 214
213 spin_lock_irq(&sem->wait_lock); 215 spin_lock_irqsave(&sem->wait_lock, flags);
214 216
215 if (sem->activity == 0 && list_empty(&sem->wait_list)) { 217 if (sem->activity == 0 && list_empty(&sem->wait_list)) {
216 /* granted */ 218 /* granted */
217 sem->activity = -1; 219 sem->activity = -1;
218 spin_unlock_irq(&sem->wait_lock); 220 spin_unlock_irqrestore(&sem->wait_lock, flags);
219 goto out; 221 goto out;
220 } 222 }
221 223
@@ -230,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
230 list_add_tail(&waiter.list, &sem->wait_list); 232 list_add_tail(&waiter.list, &sem->wait_list);
231 233
232 /* we don't need to touch the semaphore struct anymore */ 234 /* we don't need to touch the semaphore struct anymore */
233 spin_unlock_irq(&sem->wait_lock); 235 spin_unlock_irqrestore(&sem->wait_lock, flags);
234 236
235 /* wait to be given the lock */ 237 /* wait to be given the lock */
236 for (;;) { 238 for (;;) {
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 3e3365e5665e..ceba8e28807a 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -136,9 +136,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
136 out: 136 out:
137 return sem; 137 return sem;
138 138
139 /* undo the change to count, but check for a transition 1->0 */ 139 /* undo the change to the active count, but check for a transition
140 * 1->0 */
140 undo: 141 undo:
141 if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0) 142 if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
142 goto out; 143 goto out;
143 goto try_again; 144 goto try_again;
144} 145}
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 0d475d8167bf..9afa25b52a83 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -7,6 +7,7 @@
7 * Version 2. See the file COPYING for more details. 7 * Version 2. See the file COPYING for more details.
8 */ 8 */
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/slab.h>
10#include <linux/scatterlist.h> 11#include <linux/scatterlist.h>
11#include <linux/highmem.h> 12#include <linux/highmem.h>
12 13
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 437eedb5a53b..34e3082632d8 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -28,6 +28,7 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/ctype.h> 29#include <linux/ctype.h>
30#include <linux/highmem.h> 30#include <linux/highmem.h>
31#include <linux/gfp.h>
31 32
32#include <asm/io.h> 33#include <asm/io.h>
33#include <asm/dma.h> 34#include <asm/dma.h>
@@ -49,19 +50,11 @@
49 */ 50 */
50#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) 51#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
51 52
52/*
53 * Enumeration for sync targets
54 */
55enum dma_sync_target {
56 SYNC_FOR_CPU = 0,
57 SYNC_FOR_DEVICE = 1,
58};
59
60int swiotlb_force; 53int swiotlb_force;
61 54
62/* 55/*
63 * Used to do a quick range check in unmap_single and 56 * Used to do a quick range check in swiotlb_tbl_unmap_single and
64 * sync_single_*, to see if the memory was in fact allocated by this 57 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
65 * API. 58 * API.
66 */ 59 */
67static char *io_tlb_start, *io_tlb_end; 60static char *io_tlb_start, *io_tlb_end;
@@ -139,28 +132,14 @@ void swiotlb_print_info(void)
139 (unsigned long long)pend); 132 (unsigned long long)pend);
140} 133}
141 134
142/* 135void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
143 * Statically reserve bounce buffer space and initialize bounce buffer data
144 * structures for the software IO TLB used to implement the DMA API.
145 */
146void __init
147swiotlb_init_with_default_size(size_t default_size, int verbose)
148{ 136{
149 unsigned long i, bytes; 137 unsigned long i, bytes;
150 138
151 if (!io_tlb_nslabs) { 139 bytes = nslabs << IO_TLB_SHIFT;
152 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
153 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
154 }
155
156 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
157 140
158 /* 141 io_tlb_nslabs = nslabs;
159 * Get IO TLB memory from the low pages 142 io_tlb_start = tlb;
160 */
161 io_tlb_start = alloc_bootmem_low_pages(bytes);
162 if (!io_tlb_start)
163 panic("Cannot allocate SWIOTLB buffer");
164 io_tlb_end = io_tlb_start + bytes; 143 io_tlb_end = io_tlb_start + bytes;
165 144
166 /* 145 /*
@@ -184,6 +163,32 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
184 swiotlb_print_info(); 163 swiotlb_print_info();
185} 164}
186 165
166/*
167 * Statically reserve bounce buffer space and initialize bounce buffer data
168 * structures for the software IO TLB used to implement the DMA API.
169 */
170void __init
171swiotlb_init_with_default_size(size_t default_size, int verbose)
172{
173 unsigned long bytes;
174
175 if (!io_tlb_nslabs) {
176 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
177 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
178 }
179
180 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
181
182 /*
183 * Get IO TLB memory from the low pages
184 */
185 io_tlb_start = alloc_bootmem_low_pages(bytes);
186 if (!io_tlb_start)
187 panic("Cannot allocate SWIOTLB buffer");
188
189 swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
190}
191
187void __init 192void __init
188swiotlb_init(int verbose) 193swiotlb_init(int verbose)
189{ 194{
@@ -322,8 +327,8 @@ static int is_swiotlb_buffer(phys_addr_t paddr)
322/* 327/*
323 * Bounce: copy the swiotlb buffer back to the original dma location 328 * Bounce: copy the swiotlb buffer back to the original dma location
324 */ 329 */
325static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, 330void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
326 enum dma_data_direction dir) 331 enum dma_data_direction dir)
327{ 332{
328 unsigned long pfn = PFN_DOWN(phys); 333 unsigned long pfn = PFN_DOWN(phys);
329 334
@@ -359,26 +364,25 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
359 memcpy(phys_to_virt(phys), dma_addr, size); 364 memcpy(phys_to_virt(phys), dma_addr, size);
360 } 365 }
361} 366}
367EXPORT_SYMBOL_GPL(swiotlb_bounce);
362 368
363/* 369void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
364 * Allocates bounce buffer and returns its kernel virtual address. 370 phys_addr_t phys, size_t size,
365 */ 371 enum dma_data_direction dir)
366static void *
367map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
368{ 372{
369 unsigned long flags; 373 unsigned long flags;
370 char *dma_addr; 374 char *dma_addr;
371 unsigned int nslots, stride, index, wrap; 375 unsigned int nslots, stride, index, wrap;
372 int i; 376 int i;
373 unsigned long start_dma_addr;
374 unsigned long mask; 377 unsigned long mask;
375 unsigned long offset_slots; 378 unsigned long offset_slots;
376 unsigned long max_slots; 379 unsigned long max_slots;
377 380
378 mask = dma_get_seg_boundary(hwdev); 381 mask = dma_get_seg_boundary(hwdev);
379 start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
380 382
381 offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 383 tbl_dma_addr &= mask;
384
385 offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
382 386
383 /* 387 /*
384 * Carefully handle integer overflow which can occur when mask == ~0UL. 388 * Carefully handle integer overflow which can occur when mask == ~0UL.
@@ -465,12 +469,27 @@ found:
465 469
466 return dma_addr; 470 return dma_addr;
467} 471}
472EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
473
474/*
475 * Allocates bounce buffer and returns its kernel virtual address.
476 */
477
478static void *
479map_single(struct device *hwdev, phys_addr_t phys, size_t size,
480 enum dma_data_direction dir)
481{
482 dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
483
484 return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
485}
468 486
469/* 487/*
470 * dma_addr is the kernel virtual address of the bounce buffer to unmap. 488 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
471 */ 489 */
472static void 490void
473do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) 491swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
492 enum dma_data_direction dir)
474{ 493{
475 unsigned long flags; 494 unsigned long flags;
476 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 495 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -508,10 +527,12 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
508 } 527 }
509 spin_unlock_irqrestore(&io_tlb_lock, flags); 528 spin_unlock_irqrestore(&io_tlb_lock, flags);
510} 529}
530EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
511 531
512static void 532void
513sync_single(struct device *hwdev, char *dma_addr, size_t size, 533swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
514 int dir, int target) 534 enum dma_data_direction dir,
535 enum dma_sync_target target)
515{ 536{
516 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 537 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
517 phys_addr_t phys = io_tlb_orig_addr[index]; 538 phys_addr_t phys = io_tlb_orig_addr[index];
@@ -535,6 +556,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
535 BUG(); 556 BUG();
536 } 557 }
537} 558}
559EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
538 560
539void * 561void *
540swiotlb_alloc_coherent(struct device *hwdev, size_t size, 562swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -558,8 +580,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
558 } 580 }
559 if (!ret) { 581 if (!ret) {
560 /* 582 /*
561 * We are either out of memory or the device can't DMA 583 * We are either out of memory or the device can't DMA to
562 * to GFP_DMA memory; fall back on map_single(), which 584 * GFP_DMA memory; fall back on map_single(), which
563 * will grab memory from the lowest available address range. 585 * will grab memory from the lowest available address range.
564 */ 586 */
565 ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE); 587 ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
@@ -577,7 +599,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
577 (unsigned long long)dev_addr); 599 (unsigned long long)dev_addr);
578 600
579 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 601 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
580 do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE); 602 swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
581 return NULL; 603 return NULL;
582 } 604 }
583 *dma_handle = dev_addr; 605 *dma_handle = dev_addr;
@@ -595,13 +617,14 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
595 if (!is_swiotlb_buffer(paddr)) 617 if (!is_swiotlb_buffer(paddr))
596 free_pages((unsigned long)vaddr, get_order(size)); 618 free_pages((unsigned long)vaddr, get_order(size));
597 else 619 else
598 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 620 /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
599 do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); 621 swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
600} 622}
601EXPORT_SYMBOL(swiotlb_free_coherent); 623EXPORT_SYMBOL(swiotlb_free_coherent);
602 624
603static void 625static void
604swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) 626swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
627 int do_panic)
605{ 628{
606 /* 629 /*
607 * Ran out of IOMMU space for this operation. This is very bad. 630 * Ran out of IOMMU space for this operation. This is very bad.
@@ -679,14 +702,14 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
679 * whatever the device wrote there. 702 * whatever the device wrote there.
680 */ 703 */
681static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, 704static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
682 size_t size, int dir) 705 size_t size, enum dma_data_direction dir)
683{ 706{
684 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); 707 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
685 708
686 BUG_ON(dir == DMA_NONE); 709 BUG_ON(dir == DMA_NONE);
687 710
688 if (is_swiotlb_buffer(paddr)) { 711 if (is_swiotlb_buffer(paddr)) {
689 do_unmap_single(hwdev, phys_to_virt(paddr), size, dir); 712 swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
690 return; 713 return;
691 } 714 }
692 715
@@ -722,14 +745,16 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
722 */ 745 */
723static void 746static void
724swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, 747swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
725 size_t size, int dir, int target) 748 size_t size, enum dma_data_direction dir,
749 enum dma_sync_target target)
726{ 750{
727 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); 751 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
728 752
729 BUG_ON(dir == DMA_NONE); 753 BUG_ON(dir == DMA_NONE);
730 754
731 if (is_swiotlb_buffer(paddr)) { 755 if (is_swiotlb_buffer(paddr)) {
732 sync_single(hwdev, phys_to_virt(paddr), size, dir, target); 756 swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
757 target);
733 return; 758 return;
734 } 759 }
735 760
@@ -756,37 +781,6 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
756EXPORT_SYMBOL(swiotlb_sync_single_for_device); 781EXPORT_SYMBOL(swiotlb_sync_single_for_device);
757 782
758/* 783/*
759 * Same as above, but for a sub-range of the mapping.
760 */
761static void
762swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
763 unsigned long offset, size_t size,
764 int dir, int target)
765{
766 swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
767}
768
769void
770swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
771 unsigned long offset, size_t size,
772 enum dma_data_direction dir)
773{
774 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
775 SYNC_FOR_CPU);
776}
777EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
778
779void
780swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
781 unsigned long offset, size_t size,
782 enum dma_data_direction dir)
783{
784 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
785 SYNC_FOR_DEVICE);
786}
787EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
788
789/*
790 * Map a set of buffers described by scatterlist in streaming mode for DMA. 784 * Map a set of buffers described by scatterlist in streaming mode for DMA.
791 * This is the scatter-gather version of the above swiotlb_map_page 785 * This is the scatter-gather version of the above swiotlb_map_page
792 * interface. Here the scatter gather list elements are each tagged with the 786 * interface. Here the scatter gather list elements are each tagged with the
@@ -839,7 +833,7 @@ EXPORT_SYMBOL(swiotlb_map_sg_attrs);
839 833
840int 834int
841swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, 835swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
842 int dir) 836 enum dma_data_direction dir)
843{ 837{
844 return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); 838 return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
845} 839}
@@ -866,7 +860,7 @@ EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
866 860
867void 861void
868swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, 862swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
869 int dir) 863 enum dma_data_direction dir)
870{ 864{
871 return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); 865 return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
872} 866}
@@ -881,7 +875,8 @@ EXPORT_SYMBOL(swiotlb_unmap_sg);
881 */ 875 */
882static void 876static void
883swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, 877swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
884 int nelems, int dir, int target) 878 int nelems, enum dma_data_direction dir,
879 enum dma_sync_target target)
885{ 880{
886 struct scatterlist *sg; 881 struct scatterlist *sg;
887 int i; 882 int i;
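
The swiotlb refactor splits the bounce-buffer engine from its backing store so another user (Xen's swiotlb is the stated motivation) can supply its own table and bus address. A hedged sketch of the two new entry points; the my_* names are hypothetical:

	/* Boot time: install a caller-allocated table instead of the
	 * bootmem default. */
	swiotlb_init_with_tbl(my_tlb_vaddr, my_nslabs, 1 /* verbose */);

	/* Per mapping: bounce through the table, copy back on unmap. */
	char *bounce = swiotlb_tbl_map_single(hwdev, my_tbl_dma_addr,
					      phys, size, DMA_TO_DEVICE);
	if (bounce)
		swiotlb_tbl_unmap_single(hwdev, bounce, size,
					 DMA_TO_DEVICE);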
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 9fbcb44c554f..d608331b3e47 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -103,6 +103,7 @@
103#include <linux/rcupdate.h> 103#include <linux/rcupdate.h>
104#include <linux/err.h> 104#include <linux/err.h>
105#include <linux/textsearch.h> 105#include <linux/textsearch.h>
106#include <linux/slab.h>
106 107
107static LIST_HEAD(ts_ops); 108static LIST_HEAD(ts_ops);
108static DEFINE_SPINLOCK(ts_mod_lock); 109static DEFINE_SPINLOCK(ts_mod_lock);
diff --git a/lib/uuid.c b/lib/uuid.c
new file mode 100644
index 000000000000..8fadd7cef46c
--- /dev/null
+++ b/lib/uuid.c
@@ -0,0 +1,53 @@
1/*
2 * Unified UUID/GUID definition
3 *
4 * Copyright (C) 2009, Intel Corp.
5 * Huang Ying <ying.huang@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation;
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/uuid.h>
24#include <linux/random.h>
25
26static void __uuid_gen_common(__u8 b[16])
27{
28 int i;
29 u32 r;
30
31 for (i = 0; i < 4; i++) {
32 r = random32();
33 memcpy(b + i * 4, &r, 4);
34 }
35 /* variant 0b10 */
36 b[8] = (b[8] & 0x3F) | 0x80;
37}
38
39void uuid_le_gen(uuid_le *lu)
40{
41 __uuid_gen_common(lu->b);
42 /* version 4 : random generation */
43 lu->b[7] = (lu->b[7] & 0x0F) | 0x40;
44}
45EXPORT_SYMBOL_GPL(uuid_le_gen);
46
47void uuid_be_gen(uuid_be *bu)
48{
49 __uuid_gen_common(bu->b);
50 /* version 4 : random generation */
51 bu->b[6] = (bu->b[6] & 0x0F) | 0x40;
52}
53EXPORT_SYMBOL_GPL(uuid_be_gen);
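
The generators pair naturally with vsprintf's %pU extension: 16 random bytes, variant bits stamped into byte 8, version bits into byte 7 (little-endian) or byte 6 (big-endian), then printed directly. For reference:

	uuid_le le;
	uuid_be be;

	uuid_le_gen(&le);
	uuid_be_gen(&be);
	printk(KERN_INFO "guid: %pUl\n", le.b);	/* little-endian groups */
	printk(KERN_INFO "uuid: %pUb\n", be.b);	/* big-endian groups */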
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 24112e5a5780..4ee19d0d3910 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -118,6 +118,7 @@ long long simple_strtoll(const char *cp, char **endp, unsigned int base)
118 118
119 return simple_strtoull(cp, endp, base); 119 return simple_strtoull(cp, endp, base);
120} 120}
121EXPORT_SYMBOL(simple_strtoll);
121 122
122/** 123/**
123 * strict_strtoul - convert a string to an unsigned long strictly 124 * strict_strtoul - convert a string to an unsigned long strictly
@@ -266,7 +267,8 @@ int strict_strtoll(const char *cp, unsigned int base, long long *res)
266} 267}
267EXPORT_SYMBOL(strict_strtoll); 268EXPORT_SYMBOL(strict_strtoll);
268 269
269static int skip_atoi(const char **s) 270static noinline_for_stack
271int skip_atoi(const char **s)
270{ 272{
271 int i = 0; 273 int i = 0;
272 274
@@ -286,7 +288,8 @@ static int skip_atoi(const char **s)
286/* Formats correctly any integer in [0,99999]. 288/* Formats correctly any integer in [0,99999].
287 * Outputs from one to five digits depending on input. 289 * Outputs from one to five digits depending on input.
288 * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */ 290 * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
289static char *put_dec_trunc(char *buf, unsigned q) 291static noinline_for_stack
292char *put_dec_trunc(char *buf, unsigned q)
290{ 293{
291 unsigned d3, d2, d1, d0; 294 unsigned d3, d2, d1, d0;
292 d1 = (q>>4) & 0xf; 295 d1 = (q>>4) & 0xf;
@@ -323,7 +326,8 @@ static char *put_dec_trunc(char *buf, unsigned q)
323 return buf; 326 return buf;
324} 327}
325/* Same with if's removed. Always emits five digits */ 328/* Same with if's removed. Always emits five digits */
326static char *put_dec_full(char *buf, unsigned q) 329static noinline_for_stack
330char *put_dec_full(char *buf, unsigned q)
327{ 331{
328 /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ 332 /* BTW, if q is in [0,9999], 8-bit ints will be enough, */
329 /* but anyway, gcc produces better code with full-sized ints */ 333 /* but anyway, gcc produces better code with full-sized ints */
@@ -365,7 +369,8 @@ static char *put_dec_full(char *buf, unsigned q)
365 return buf; 369 return buf;
366} 370}
367/* No inlining helps gcc to use registers better */ 371/* No inlining helps gcc to use registers better */
368static noinline char *put_dec(char *buf, unsigned long long num) 372static noinline_for_stack
373char *put_dec(char *buf, unsigned long long num)
369{ 374{
370 while (1) { 375 while (1) {
371 unsigned rem; 376 unsigned rem;
@@ -408,16 +413,17 @@ enum format_type {
408}; 413};
409 414
410struct printf_spec { 415struct printf_spec {
411 u16 type; 416 u8 type; /* format_type enum */
412 s16 field_width; /* width of output field */
413 u8 flags; /* flags to number() */ 417 u8 flags; /* flags to number() */
414 u8 base; 418 u8 base; /* number base, 8, 10 or 16 only */
415 s8 precision; /* # of digits/chars */ 419 u8 qualifier; /* number qualifier, one of 'hHlLtzZ' */
416 u8 qualifier; 420 s16 field_width; /* width of output field */
421 s16 precision; /* # of digits/chars */
417}; 422};
418 423
419static char *number(char *buf, char *end, unsigned long long num, 424static noinline_for_stack
420 struct printf_spec spec) 425char *number(char *buf, char *end, unsigned long long num,
426 struct printf_spec spec)
421{ 427{
422 /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ 428 /* we are called with base 8, 10 or 16, only, thus don't need "G..." */
423 static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ 429 static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
@@ -536,7 +542,8 @@ static char *number(char *buf, char *end, unsigned long long num,
536 return buf; 542 return buf;
537} 543}
538 544
539static char *string(char *buf, char *end, const char *s, struct printf_spec spec) 545static noinline_for_stack
546char *string(char *buf, char *end, const char *s, struct printf_spec spec)
540{ 547{
541 int len, i; 548 int len, i;
542 549
@@ -566,8 +573,9 @@ static char *string(char *buf, char *end, const char *s, struct printf_spec spec
566 return buf; 573 return buf;
567} 574}
568 575
569static char *symbol_string(char *buf, char *end, void *ptr, 576static noinline_for_stack
570 struct printf_spec spec, char ext) 577char *symbol_string(char *buf, char *end, void *ptr,
578 struct printf_spec spec, char ext)
571{ 579{
572 unsigned long value = (unsigned long) ptr; 580 unsigned long value = (unsigned long) ptr;
573#ifdef CONFIG_KALLSYMS 581#ifdef CONFIG_KALLSYMS
@@ -587,8 +595,9 @@ static char *symbol_string(char *buf, char *end, void *ptr,
587#endif 595#endif
588} 596}
589 597
590static char *resource_string(char *buf, char *end, struct resource *res, 598static noinline_for_stack
591 struct printf_spec spec, const char *fmt) 599char *resource_string(char *buf, char *end, struct resource *res,
600 struct printf_spec spec, const char *fmt)
592{ 601{
593#ifndef IO_RSRC_PRINTK_SIZE 602#ifndef IO_RSRC_PRINTK_SIZE
594#define IO_RSRC_PRINTK_SIZE 6 603#define IO_RSRC_PRINTK_SIZE 6
@@ -689,8 +698,9 @@ static char *resource_string(char *buf, char *end, struct resource *res,
689 return string(buf, end, sym, spec); 698 return string(buf, end, sym, spec);
690} 699}
691 700
692static char *mac_address_string(char *buf, char *end, u8 *addr, 701static noinline_for_stack
693 struct printf_spec spec, const char *fmt) 702char *mac_address_string(char *buf, char *end, u8 *addr,
703 struct printf_spec spec, const char *fmt)
694{ 704{
695 char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; 705 char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
696 char *p = mac_addr; 706 char *p = mac_addr;
@@ -713,7 +723,8 @@ static char *mac_address_string(char *buf, char *end, u8 *addr,
713 return string(buf, end, mac_addr, spec); 723 return string(buf, end, mac_addr, spec);
714} 724}
715 725
716static char *ip4_string(char *p, const u8 *addr, const char *fmt) 726static noinline_for_stack
727char *ip4_string(char *p, const u8 *addr, const char *fmt)
717{ 728{
718 int i; 729 int i;
719 bool leading_zeros = (fmt[0] == 'i'); 730 bool leading_zeros = (fmt[0] == 'i');
@@ -762,7 +773,8 @@ static char *ip4_string(char *p, const u8 *addr, const char *fmt)
762 return p; 773 return p;
763} 774}
764 775
765static char *ip6_compressed_string(char *p, const char *addr) 776static noinline_for_stack
777char *ip6_compressed_string(char *p, const char *addr)
766{ 778{
767 int i, j, range; 779 int i, j, range;
768 unsigned char zerolength[8]; 780 unsigned char zerolength[8];
@@ -842,7 +854,8 @@ static char *ip6_compressed_string(char *p, const char *addr)
842 return p; 854 return p;
843} 855}
844 856
845static char *ip6_string(char *p, const char *addr, const char *fmt) 857static noinline_for_stack
858char *ip6_string(char *p, const char *addr, const char *fmt)
846{ 859{
847 int i; 860 int i;
848 861
@@ -857,8 +870,9 @@ static char *ip6_string(char *p, const char *addr, const char *fmt)
857 return p; 870 return p;
858} 871}
859 872
860static char *ip6_addr_string(char *buf, char *end, const u8 *addr, 873static noinline_for_stack
861 struct printf_spec spec, const char *fmt) 874char *ip6_addr_string(char *buf, char *end, const u8 *addr,
875 struct printf_spec spec, const char *fmt)
862{ 876{
863 char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")]; 877 char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
864 878
@@ -870,8 +884,9 @@ static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
870 return string(buf, end, ip6_addr, spec); 884 return string(buf, end, ip6_addr, spec);
871} 885}
872 886
873static char *ip4_addr_string(char *buf, char *end, const u8 *addr, 887static noinline_for_stack
874 struct printf_spec spec, const char *fmt) 888char *ip4_addr_string(char *buf, char *end, const u8 *addr,
889 struct printf_spec spec, const char *fmt)
875{ 890{
876 char ip4_addr[sizeof("255.255.255.255")]; 891 char ip4_addr[sizeof("255.255.255.255")];
877 892
@@ -880,8 +895,9 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
880 return string(buf, end, ip4_addr, spec); 895 return string(buf, end, ip4_addr, spec);
881} 896}
882 897
883static char *uuid_string(char *buf, char *end, const u8 *addr, 898static noinline_for_stack
884 struct printf_spec spec, const char *fmt) 899char *uuid_string(char *buf, char *end, const u8 *addr,
900 struct printf_spec spec, const char *fmt)
885{ 901{
886 char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")]; 902 char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")];
887 char *p = uuid; 903 char *p = uuid;
@@ -964,13 +980,19 @@ static char *uuid_string(char *buf, char *end, const u8 *addr,
964 * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15] 980 * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15]
965 * little endian output byte order is: 981 * little endian output byte order is:
966 * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15] 982 * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15]
983 * - 'V' For a struct va_format which contains a format string * and va_list *,
984 * call vsnprintf(->format, *->va_list).
985 * Implements a "recursive vsnprintf".
986 * Do not use this feature without some mechanism to verify the
987 * correctness of the format string and va_list arguments.
967 * 988 *
968 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 989 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
969 * function pointers are really function descriptors, which contain a 990 * function pointers are really function descriptors, which contain a
970 * pointer to the real address. 991 * pointer to the real address.
971 */ 992 */
972static char *pointer(const char *fmt, char *buf, char *end, void *ptr, 993static noinline_for_stack
973 struct printf_spec spec) 994char *pointer(const char *fmt, char *buf, char *end, void *ptr,
995 struct printf_spec spec)
974{ 996{
975 if (!ptr) 997 if (!ptr)
976 return string(buf, end, "(null)", spec); 998 return string(buf, end, "(null)", spec);
@@ -1008,6 +1030,10 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
1008 break; 1030 break;
1009 case 'U': 1031 case 'U':
1010 return uuid_string(buf, end, ptr, spec, fmt); 1032 return uuid_string(buf, end, ptr, spec, fmt);
1033 case 'V':
1034 return buf + vsnprintf(buf, end - buf,
1035 ((struct va_format *)ptr)->fmt,
1036 *(((struct va_format *)ptr)->va));
1011 } 1037 }
1012 spec.flags |= SMALL; 1038 spec.flags |= SMALL;
1013 if (spec.field_width == -1) { 1039 if (spec.field_width == -1) {
@@ -1039,7 +1065,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
1039 * @precision: precision of a number 1065 * @precision: precision of a number
1040 * @qualifier: qualifier of a number (long, size_t, ...) 1066 * @qualifier: qualifier of a number (long, size_t, ...)
1041 */ 1067 */
1042static int format_decode(const char *fmt, struct printf_spec *spec) 1068static noinline_for_stack
1069int format_decode(const char *fmt, struct printf_spec *spec)
1043{ 1070{
1044 const char *start = fmt; 1071 const char *start = fmt;
1045 1072
@@ -1979,7 +2006,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
1979 { 2006 {
1980 char *s = (char *)va_arg(args, char *); 2007 char *s = (char *)va_arg(args, char *);
1981 if (field_width == -1) 2008 if (field_width == -1)
1982 field_width = SHORT_MAX; 2009 field_width = SHRT_MAX;
1983 /* first, skip leading white space in buffer */ 2010 /* first, skip leading white space in buffer */
1984 str = skip_spaces(str); 2011 str = skip_spaces(str);
1985 2012
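
The new %pV case enables the "recursive vsnprintf" described in the kernel-doc: the caller packages a format string and its va_list in a struct va_format and hands a pointer to it through an outer format. A minimal sketch of the intended calling convention:

void demo_printk(const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_ERR "demo: %pV", &vaf);
	va_end(args);
}

As the documentation above warns, only trusted, verified format strings should ever reach %pV.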