Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig              |   3
-rw-r--r--  lib/Kconfig.debug        |  44
-rw-r--r--  lib/Makefile             |   6
-rw-r--r--  lib/atomic64.c           |  66
-rw-r--r--  lib/dynamic_debug.c      | 174
-rw-r--r--  lib/hexdump.c            |  15
-rw-r--r--  lib/idr.c                |   2
-rw-r--r--  lib/kobject_uevent.c     |   2
-rw-r--r--  lib/llist.c              |  74
-rw-r--r--  lib/percpu_counter.c     |  18
-rw-r--r--  lib/proportions.c        |  12
-rw-r--r--  lib/ratelimit.c          |   4
-rw-r--r--  lib/rwsem-spinlock.c     |  38
-rw-r--r--  lib/rwsem.c              |  14
-rw-r--r--  lib/sha1.c               |   1
-rw-r--r--  lib/smp_processor_id.c   |   2
-rw-r--r--  lib/xz/xz_dec_bcj.c      |  27
17 files changed, 271 insertions(+), 231 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 6c695ff9caba..32f3e5ae2be5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -276,7 +276,4 @@ config CORDIC
 	  so its calculations are in fixed point. Modules can select this
 	  when they require this function. Module will be called cordic.
 
-config LLIST
-	bool
-
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c0cb9c4bc46d..75330bd87565 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -117,31 +117,31 @@ config DEBUG_SECTION_MISMATCH
 	help
 	  The section mismatch analysis checks if there are illegal
 	  references from one section to another section.
-	  Linux will during link or during runtime drop some sections
-	  and any use of code/data previously in these sections will
+	  During linktime or runtime, some sections are dropped;
+	  any use of code/data previously in these sections would
 	  most likely result in an oops.
-	  In the code functions and variables are annotated with
-	  __init, __devinit etc. (see full list in include/linux/init.h)
+	  In the code, functions and variables are annotated with
+	  __init, __devinit, etc. (see the full list in include/linux/init.h),
 	  which results in the code/data being placed in specific sections.
-	  The section mismatch analysis is always done after a full
-	  kernel build but enabling this option will in addition
-	  do the following:
-	  - Add the option -fno-inline-functions-called-once to gcc
-	    When inlining a function annotated __init in a non-init
-	    function we would lose the section information and thus
+	  The section mismatch analysis is always performed after a full
+	  kernel build, and enabling this option causes the following
+	  additional steps to occur:
+	  - Add the option -fno-inline-functions-called-once to gcc commands.
+	    When inlining a function annotated with __init in a non-init
+	    function, we would lose the section information and thus
 	    the analysis would not catch the illegal reference.
-	    This option tells gcc to inline less but will also
-	    result in a larger kernel.
-	  - Run the section mismatch analysis for each module/built-in.o
-	    When we run the section mismatch analysis on vmlinux.o we
+	    This option tells gcc to inline less (but it does result in
+	    a larger kernel).
+	  - Run the section mismatch analysis for each module/built-in.o file.
+	    When we run the section mismatch analysis on vmlinux.o, we
 	    lose valueble information about where the mismatch was
 	    introduced.
 	    Running the analysis for each module/built-in.o file
-	    will tell where the mismatch happens much closer to the
-	    source. The drawback is that we will report the same
-	    mismatch at least twice.
-	  - Enable verbose reporting from modpost to help solving
-	    the section mismatches reported.
+	    tells where the mismatch happens much closer to the
+	    source. The drawback is that the same mismatch is
+	    reported at least twice.
+	  - Enable verbose reporting from modpost in order to help resolve
+	    the section mismatches that are reported.
 
 config DEBUG_KERNEL
 	bool "Kernel debugging"
@@ -835,7 +835,7 @@ config DEBUG_CREDENTIALS
 
 #
 # Select this config option from the architecture Kconfig, if it
-# it is preferred to always offer frame pointers as a config
+# is preferred to always offer frame pointers as a config
 # option on the architecture (regardless of KERNEL_DEBUG):
 #
 config ARCH_WANT_FRAME_POINTERS
@@ -1081,7 +1081,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE
+	select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
@@ -1091,7 +1091,7 @@ config LATENCYTOP
 	depends on DEBUG_KERNEL
 	depends on STACKTRACE_SUPPORT
 	depends on PROC_FS
-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
 	select KALLSYMS
 	select KALLSYMS_ALL
 	select STACKTRACE
diff --git a/lib/Makefile b/lib/Makefile
index d5d175c8a6ca..a4da283f5dc0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o find_next_bit.o
+	 is_single_threaded.o plist.o decompress.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -22,7 +22,7 @@ lib-y += kobject.o kref.o klist.o
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
-	 bsearch.o find_last_bit.o
+	 bsearch.o find_last_bit.o find_next_bit.o llist.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 
@@ -115,8 +115,6 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
 
 obj-$(CONFIG_CORDIC) += cordic.o
 
-obj-$(CONFIG_LLIST) += llist.o
-
 hostprogs-y := gen_crc32table
 clean-files := crc32table.h
 
diff --git a/lib/atomic64.c b/lib/atomic64.c
index e12ae0dd08a8..3975470caf4f 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -29,11 +29,11 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
 
-static inline spinlock_t *lock_addr(const atomic64_t *v)
+static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 {
 	unsigned long addr = (unsigned long) v;
 
@@ -45,12 +45,12 @@ static inline spinlock_t *lock_addr(const atomic64_t *v)
 long long atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_read);
@@ -58,34 +58,34 @@ EXPORT_SYMBOL(atomic64_read);
 void atomic64_set(atomic64_t *v, long long i)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter = i;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_set);
 
 void atomic64_add(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter += a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_add);
 
 long long atomic64_add_return(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter += a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_add_return);
@@ -93,23 +93,23 @@ EXPORT_SYMBOL(atomic64_add_return);
 void atomic64_sub(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter -= a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_sub);
 
 long long atomic64_sub_return(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter -= a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_sub_return);
@@ -117,14 +117,14 @@ EXPORT_SYMBOL(atomic64_sub_return);
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter - 1;
 	if (val >= 0)
 		v->counter = val;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_dec_if_positive);
@@ -132,14 +132,14 @@ EXPORT_SYMBOL(atomic64_dec_if_positive);
 long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
 	if (val == o)
 		v->counter = n;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_cmpxchg);
@@ -147,13 +147,13 @@ EXPORT_SYMBOL(atomic64_cmpxchg);
 long long atomic64_xchg(atomic64_t *v, long long new)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
 	v->counter = new;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_xchg);
@@ -161,15 +161,15 @@ EXPORT_SYMBOL(atomic64_xchg);
 int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	int ret = 0;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	if (v->counter != u) {
 		v->counter += a;
 		ret = 1;
 	}
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(atomic64_add_unless);
@@ -179,7 +179,7 @@ static int init_atomic64_lock(void)
 	int i;
 
 	for (i = 0; i < NR_LOCKS; ++i)
-		spin_lock_init(&atomic64_lock[i].lock);
+		raw_spin_lock_init(&atomic64_lock[i].lock);
 	return 0;
 }
 
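The conversion above is mechanical: every sleeping-lock call becomes its raw_ counterpart while the hashed-lock scheme stays the same. Callers of the generic atomic64 API are unaffected because the lock never leaves lib/atomic64.c; the usual motivation for such conversions is that these low-level locks must keep spinning even in configurations where spinlock_t becomes preemptible. A small hypothetical caller, for illustration only (not part of this patch):

	#include <linux/atomic.h>

	static atomic64_t hyp_events = ATOMIC64_INIT(0);	/* illustrative name */

	static void hyp_record_event(void)
	{
		/* takes and drops the hashed raw lock internally */
		atomic64_add(1, &hyp_events);
	}
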
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 75ca78f3a8c9..dcdade39e47f 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -10,11 +10,12 @@
  * Copyright (C) 2011 Bart Van Assche. All Rights Reserved.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kallsyms.h>
-#include <linux/version.h>
 #include <linux/types.h>
 #include <linux/mutex.h>
 #include <linux/proc_fs.h>
@@ -30,6 +31,8 @@
 #include <linux/jump_label.h>
 #include <linux/hardirq.h>
 #include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
 
 extern struct _ddebug __start___verbose[];
 extern struct _ddebug __stop___verbose[];
@@ -38,7 +41,6 @@ struct ddebug_table {
 	struct list_head link;
 	char *mod_name;
 	unsigned int num_ddebugs;
-	unsigned int num_enabled;
 	struct _ddebug *ddebugs;
 };
 
@@ -148,19 +150,13 @@ static void ddebug_change(const struct ddebug_query *query,
 		newflags = (dp->flags & mask) | flags;
 		if (newflags == dp->flags)
 			continue;
-
-		if (!newflags)
-			dt->num_enabled--;
-		else if (!dp->flags)
-			dt->num_enabled++;
 		dp->flags = newflags;
 		if (newflags)
 			dp->enabled = 1;
 		else
 			dp->enabled = 0;
 		if (verbose)
-			printk(KERN_INFO
-				"ddebug: changed %s:%d [%s]%s %s\n",
+			pr_info("changed %s:%d [%s]%s %s\n",
 				dp->filename, dp->lineno,
 				dt->mod_name, dp->function,
 				ddebug_describe_flags(dp, flagbuf,
@@ -170,7 +166,7 @@ static void ddebug_change(const struct ddebug_query *query,
 	mutex_unlock(&ddebug_lock);
 
 	if (!nfound && verbose)
-		printk(KERN_INFO "ddebug: no matches for query\n");
+		pr_info("no matches for query\n");
 }
 
 /*
@@ -215,10 +211,10 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
 
 	if (verbose) {
 		int i;
-		printk(KERN_INFO "%s: split into words:", __func__);
+		pr_info("split into words:");
 		for (i = 0 ; i < nwords ; i++)
-			printk(" \"%s\"", words[i]);
-		printk("\n");
+			pr_cont(" \"%s\"", words[i]);
+		pr_cont("\n");
 	}
 
 	return nwords;
@@ -330,16 +326,15 @@ static int ddebug_parse_query(char *words[], int nwords,
 			}
 		} else {
 			if (verbose)
-				printk(KERN_ERR "%s: unknown keyword \"%s\"\n",
-					__func__, words[i]);
+				pr_err("unknown keyword \"%s\"\n", words[i]);
 			return -EINVAL;
 		}
 	}
 
 	if (verbose)
-		printk(KERN_INFO "%s: q->function=\"%s\" q->filename=\"%s\" "
+		pr_info("q->function=\"%s\" q->filename=\"%s\" "
 			"q->module=\"%s\" q->format=\"%s\" q->lineno=%u-%u\n",
-			__func__, query->function, query->filename,
+			query->function, query->filename,
 			query->module, query->format, query->first_lineno,
 			query->last_lineno);
 
@@ -368,7 +363,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 		return -EINVAL;
 	}
 	if (verbose)
-		printk(KERN_INFO "%s: op='%c'\n", __func__, op);
+		pr_info("op='%c'\n", op);
 
 	for ( ; *str ; ++str) {
 		for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
@@ -383,7 +378,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 	if (flags == 0)
 		return -EINVAL;
 	if (verbose)
-		printk(KERN_INFO "%s: flags=0x%x\n", __func__, flags);
+		pr_info("flags=0x%x\n", flags);
 
 	/* calculate final *flagsp, *maskp according to mask and op */
 	switch (op) {
@@ -401,8 +396,7 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 		break;
 	}
 	if (verbose)
-		printk(KERN_INFO "%s: *flagsp=0x%x *maskp=0x%x\n",
-			__func__, *flagsp, *maskp);
+		pr_info("*flagsp=0x%x *maskp=0x%x\n", *flagsp, *maskp);
 	return 0;
 }
 
@@ -427,40 +421,117 @@ static int ddebug_exec_query(char *query_string)
 	return 0;
 }
 
+#define PREFIX_SIZE 64
+
+static int remaining(int wrote)
+{
+	if (PREFIX_SIZE - wrote > 0)
+		return PREFIX_SIZE - wrote;
+	return 0;
+}
+
+static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
+{
+	int pos_after_tid;
+	int pos = 0;
+
+	pos += snprintf(buf + pos, remaining(pos), "%s", KERN_DEBUG);
+	if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
+		if (in_interrupt())
+			pos += snprintf(buf + pos, remaining(pos), "%s ",
+						"<intr>");
+		else
+			pos += snprintf(buf + pos, remaining(pos), "[%d] ",
+						task_pid_vnr(current));
+	}
+	pos_after_tid = pos;
+	if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME)
+		pos += snprintf(buf + pos, remaining(pos), "%s:",
+					desc->modname);
+	if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
+		pos += snprintf(buf + pos, remaining(pos), "%s:",
+					desc->function);
+	if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO)
+		pos += snprintf(buf + pos, remaining(pos), "%d:", desc->lineno);
+	if (pos - pos_after_tid)
+		pos += snprintf(buf + pos, remaining(pos), " ");
+	if (pos >= PREFIX_SIZE)
+		buf[PREFIX_SIZE - 1] = '\0';
+
+	return buf;
+}
+
 int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
 {
 	va_list args;
 	int res;
+	struct va_format vaf;
+	char buf[PREFIX_SIZE];
 
 	BUG_ON(!descriptor);
 	BUG_ON(!fmt);
 
 	va_start(args, fmt);
-	res = printk(KERN_DEBUG);
-	if (descriptor->flags & _DPRINTK_FLAGS_INCL_TID) {
-		if (in_interrupt())
-			res += printk(KERN_CONT "<intr> ");
-		else
-			res += printk(KERN_CONT "[%d] ", task_pid_vnr(current));
-	}
-	if (descriptor->flags & _DPRINTK_FLAGS_INCL_MODNAME)
-		res += printk(KERN_CONT "%s:", descriptor->modname);
-	if (descriptor->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
-		res += printk(KERN_CONT "%s:", descriptor->function);
-	if (descriptor->flags & _DPRINTK_FLAGS_INCL_LINENO)
-		res += printk(KERN_CONT "%d ", descriptor->lineno);
-	res += vprintk(fmt, args);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	res = printk("%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
 	va_end(args);
 
 	return res;
 }
 EXPORT_SYMBOL(__dynamic_pr_debug);
 
+int __dynamic_dev_dbg(struct _ddebug *descriptor,
+		      const struct device *dev, const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	int res;
+	char buf[PREFIX_SIZE];
+
+	BUG_ON(!descriptor);
+	BUG_ON(!fmt);
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	res = __dev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+	va_end(args);
+
+	return res;
+}
+EXPORT_SYMBOL(__dynamic_dev_dbg);
+
+#ifdef CONFIG_NET
+
+int __dynamic_netdev_dbg(struct _ddebug *descriptor,
+			 const struct net_device *dev, const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	int res;
+	char buf[PREFIX_SIZE];
+
+	BUG_ON(!descriptor);
+	BUG_ON(!fmt);
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	res = __netdev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf);
+	va_end(args);
+
+	return res;
+}
+EXPORT_SYMBOL(__dynamic_netdev_dbg);
+
+#endif
+
 static __initdata char ddebug_setup_string[1024];
 static __init int ddebug_setup_query(char *str)
 {
 	if (strlen(str) >= 1024) {
-		pr_warning("ddebug boot param string too large\n");
+		pr_warn("ddebug boot param string too large\n");
 		return 0;
 	}
 	strcpy(ddebug_setup_string, str);
@@ -488,8 +559,7 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
 		return -EFAULT;
 	tmpbuf[len] = '\0';
 	if (verbose)
-		printk(KERN_INFO "%s: read %d bytes from userspace\n",
-			__func__, (int)len);
+		pr_info("read %d bytes from userspace\n", (int)len);
 
 	ret = ddebug_exec_query(tmpbuf);
 	if (ret)
@@ -552,8 +622,7 @@ static void *ddebug_proc_start(struct seq_file *m, loff_t *pos)
 	int n = *pos;
 
 	if (verbose)
-		printk(KERN_INFO "%s: called m=%p *pos=%lld\n",
-			__func__, m, (unsigned long long)*pos);
+		pr_info("called m=%p *pos=%lld\n", m, (unsigned long long)*pos);
 
 	mutex_lock(&ddebug_lock);
 
@@ -578,8 +647,8 @@ static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
 	struct _ddebug *dp;
 
 	if (verbose)
-		printk(KERN_INFO "%s: called m=%p p=%p *pos=%lld\n",
-			__func__, m, p, (unsigned long long)*pos);
+		pr_info("called m=%p p=%p *pos=%lld\n",
+			m, p, (unsigned long long)*pos);
 
 	if (p == SEQ_START_TOKEN)
 		dp = ddebug_iter_first(iter);
@@ -602,8 +671,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
 	char flagsbuf[8];
 
 	if (verbose)
-		printk(KERN_INFO "%s: called m=%p p=%p\n",
-			__func__, m, p);
+		pr_info("called m=%p p=%p\n", m, p);
 
 	if (p == SEQ_START_TOKEN) {
 		seq_puts(m,
@@ -628,8 +696,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
 static void ddebug_proc_stop(struct seq_file *m, void *p)
 {
 	if (verbose)
-		printk(KERN_INFO "%s: called m=%p p=%p\n",
-			__func__, m, p);
+		pr_info("called m=%p p=%p\n", m, p);
 	mutex_unlock(&ddebug_lock);
 }
 
@@ -652,7 +719,7 @@ static int ddebug_proc_open(struct inode *inode, struct file *file)
 	int err;
 
 	if (verbose)
-		printk(KERN_INFO "%s: called\n", __func__);
+		pr_info("called\n");
 
 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 	if (iter == NULL)
@@ -696,7 +763,6 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
 	}
 	dt->mod_name = new_name;
 	dt->num_ddebugs = n;
-	dt->num_enabled = 0;
 	dt->ddebugs = tab;
 
 	mutex_lock(&ddebug_lock);
@@ -704,8 +770,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
 	mutex_unlock(&ddebug_lock);
 
 	if (verbose)
-		printk(KERN_INFO "%u debug prints in module %s\n",
-			n, dt->mod_name);
+		pr_info("%u debug prints in module %s\n", n, dt->mod_name);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ddebug_add_module);
@@ -727,8 +792,7 @@ int ddebug_remove_module(const char *mod_name)
 	int ret = -ENOENT;
 
 	if (verbose)
-		printk(KERN_INFO "%s: removing module \"%s\"\n",
-			__func__, mod_name);
+		pr_info("removing module \"%s\"\n", mod_name);
 
 	mutex_lock(&ddebug_lock);
 	list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
@@ -804,8 +868,8 @@ static int __init dynamic_debug_init(void)
 	if (ddebug_setup_string[0] != '\0') {
 		ret = ddebug_exec_query(ddebug_setup_string);
 		if (ret)
-			pr_warning("Invalid ddebug boot param %s",
-				ddebug_setup_string);
+			pr_warn("Invalid ddebug boot param %s",
+				ddebug_setup_string);
 		else
 			pr_info("ddebug initialized with string %s",
 				ddebug_setup_string);
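The pr_fmt() definition added at the top of the file is what lets all of the printk(KERN_INFO "%s: ...", __func__, ...) calls above collapse into plain pr_info(): the macro is pasted into every pr_*() format string at compile time. A sketch of the mechanism in a hypothetical file (illustrative, not from this patch):

	/* must be defined before the first include that pulls in printk */
	#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

	#include <linux/kernel.h>

	static void hyp_demo(void)
	{
		/* logs e.g. "mymodule:hyp_demo: no matches for query" */
		pr_info("no matches for query\n");
	}
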
diff --git a/lib/hexdump.c b/lib/hexdump.c
index f5fe6ba7a3ab..51d5ae210244 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -38,14 +38,21 @@ EXPORT_SYMBOL(hex_to_bin);
  * @dst: binary result
  * @src: ascii hexadecimal string
  * @count: result length
+ *
+ * Return 0 on success, -1 in case of bad input.
  */
-void hex2bin(u8 *dst, const char *src, size_t count)
+int hex2bin(u8 *dst, const char *src, size_t count)
 {
 	while (count--) {
-		*dst = hex_to_bin(*src++) << 4;
-		*dst += hex_to_bin(*src++);
-		dst++;
+		int hi = hex_to_bin(*src++);
+		int lo = hex_to_bin(*src++);
+
+		if ((hi < 0) || (lo < 0))
+			return -1;
+
+		*dst++ = (hi << 4) | lo;
 	}
+	return 0;
 }
 EXPORT_SYMBOL(hex2bin);
 
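With the new contract, hex2bin() reports bad input instead of silently encoding garbage, so callers are expected to check the result. A minimal hypothetical caller (names are illustrative, not from this patch):

	u8 mac[6];

	if (hex2bin(mac, "001b44dead01", sizeof(mac)) < 0)
		return -EINVAL;	/* string contained a non-hex digit */
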
diff --git a/lib/idr.c b/lib/idr.c
index db040ce3fa73..5acf9bb10968 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -860,7 +860,7 @@ EXPORT_SYMBOL(ida_get_new_above);
  * and go back to the idr_pre_get() call.  If the idr is full, it will
  * return %-ENOSPC.
  *
- * @id returns a value in the range %0 ... %0x7fffffff.
+ * @p_id returns a value in the range %0 ... %0x7fffffff.
  */
 int ida_get_new(struct ida *ida, int *p_id)
 {
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 70af0a7f97c0..ad72a03ce5e9 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -282,7 +282,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
282 kobj_bcast_filter, 282 kobj_bcast_filter,
283 kobj); 283 kobj);
284 /* ENOBUFS should be handled in userspace */ 284 /* ENOBUFS should be handled in userspace */
285 if (retval == -ENOBUFS) 285 if (retval == -ENOBUFS || retval == -ESRCH)
286 retval = 0; 286 retval = 0;
287 } else 287 } else
288 retval = -ENOMEM; 288 retval = -ENOMEM;
diff --git a/lib/llist.c b/lib/llist.c
index da445724fa1f..700cff77a387 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -3,8 +3,8 @@
  *
  * The basic atomic operation of this list is cmpxchg on long.  On
  * architectures that don't have NMI-safe cmpxchg implementation, the
- * list can NOT be used in NMI handler.  So code uses the list in NMI
- * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ * list can NOT be used in NMI handlers.  So code that uses the list in
+ * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
  *
  * Copyright 2010,2011 Intel Corp.
  *   Author: Huang Ying <ying.huang@intel.com>
@@ -30,48 +30,28 @@
 #include <asm/system.h>
 
 /**
- * llist_add - add a new entry
- * @new:	new entry to be added
- * @head:	the head for your lock-less list
- */
-void llist_add(struct llist_node *new, struct llist_head *head)
-{
-	struct llist_node *entry, *old_entry;
-
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
-	BUG_ON(in_nmi());
-#endif
-
-	entry = head->first;
-	do {
-		old_entry = entry;
-		new->next = entry;
-		cpu_relax();
-	} while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry);
-}
-EXPORT_SYMBOL_GPL(llist_add);
-
-/**
  * llist_add_batch - add several linked entries in batch
  * @new_first:	first entry in batch to be added
  * @new_last:	last entry in batch to be added
  * @head:	the head for your lock-less list
+ *
+ * Return whether list is empty before adding.
  */
-void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
 		     struct llist_head *head)
 {
 	struct llist_node *entry, *old_entry;
 
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
-	BUG_ON(in_nmi());
-#endif
-
 	entry = head->first;
-	do {
+	for (;;) {
 		old_entry = entry;
 		new_last->next = entry;
-		cpu_relax();
-	} while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry);
+		entry = cmpxchg(&head->first, old_entry, new_first);
+		if (entry == old_entry)
+			break;
+	}
+
+	return old_entry == NULL;
 }
 EXPORT_SYMBOL_GPL(llist_add_batch);
 
@@ -93,37 +73,17 @@ struct llist_node *llist_del_first(struct llist_head *head)
 {
 	struct llist_node *entry, *old_entry, *next;
 
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
-	BUG_ON(in_nmi());
-#endif
-
 	entry = head->first;
-	do {
+	for (;;) {
 		if (entry == NULL)
 			return NULL;
 		old_entry = entry;
 		next = entry->next;
-		cpu_relax();
-	} while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry);
+		entry = cmpxchg(&head->first, old_entry, next);
+		if (entry == old_entry)
+			break;
+	}
 
 	return entry;
 }
 EXPORT_SYMBOL_GPL(llist_del_first);
-
-/**
- * llist_del_all - delete all entries from lock-less list
- * @head:	the head of lock-less list to delete all entries
- *
- * If list is empty, return NULL, otherwise, delete all entries and
- * return the pointer to the first entry.  The order of entries
- * deleted is from the newest to the oldest added one.
- */
-struct llist_node *llist_del_all(struct llist_head *head)
-{
-#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
-	BUG_ON(in_nmi());
-#endif
-
-	return xchg(&head->first, NULL);
-}
-EXPORT_SYMBOL_GPL(llist_del_all);
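The bool now returned by llist_add_batch() reports whether the list was empty before the insertion, which lets a producer wake its consumer only on the empty-to-non-empty transition instead of on every add. A hedged sketch with illustrative names (hyp_q is hypothetical, not from this patch):

	if (llist_add_batch(first, last, &hyp_q->head))
		wake_up_process(hyp_q->worker);	/* was empty: worker may be idle */
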
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 28f2c33c6b53..f087105ed914 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -59,13 +59,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
 	int cpu;
 
-	spin_lock(&fbc->lock);
+	raw_spin_lock(&fbc->lock);
 	for_each_possible_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		*pcount = 0;
 	}
 	fbc->count = amount;
-	spin_unlock(&fbc->lock);
+	raw_spin_unlock(&fbc->lock);
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
@@ -76,10 +76,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	preempt_disable();
 	count = __this_cpu_read(*fbc->counters) + amount;
 	if (count >= batch || count <= -batch) {
-		spin_lock(&fbc->lock);
+		raw_spin_lock(&fbc->lock);
 		fbc->count += count;
 		__this_cpu_write(*fbc->counters, 0);
-		spin_unlock(&fbc->lock);
+		raw_spin_unlock(&fbc->lock);
 	} else {
 		__this_cpu_write(*fbc->counters, count);
 	}
@@ -96,13 +96,13 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 	s64 ret;
 	int cpu;
 
-	spin_lock(&fbc->lock);
+	raw_spin_lock(&fbc->lock);
 	ret = fbc->count;
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}
-	spin_unlock(&fbc->lock);
+	raw_spin_unlock(&fbc->lock);
 	return ret;
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
@@ -110,7 +110,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 			  struct lock_class_key *key)
 {
-	spin_lock_init(&fbc->lock);
+	raw_spin_lock_init(&fbc->lock);
 	lockdep_set_class(&fbc->lock, key);
 	fbc->count = amount;
 	fbc->counters = alloc_percpu(s32);
@@ -173,11 +173,11 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		s32 *pcount;
 		unsigned long flags;
 
-		spin_lock_irqsave(&fbc->lock, flags);
+		raw_spin_lock_irqsave(&fbc->lock, flags);
 		pcount = per_cpu_ptr(fbc->counters, cpu);
 		fbc->count += *pcount;
 		*pcount = 0;
-		spin_unlock_irqrestore(&fbc->lock, flags);
+		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	}
 	mutex_unlock(&percpu_counters_lock);
 #endif
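For reference, the batching that __percpu_counter_add() implements above: per-CPU deltas accumulate with only preemption disabled until they reach +/-batch, and only then fold into fbc->count under the (now raw) lock. A hedged usage sketch, with an illustrative counter name and error handling omitted:

	struct percpu_counter hyp_nr_items;

	percpu_counter_init(&hyp_nr_items, 0);
	percpu_counter_add(&hyp_nr_items, 1);	/* usually lock-free */
	pr_info("approx %lld\n", percpu_counter_read(&hyp_nr_items));	/* cheap */
	pr_info("exact  %lld\n", percpu_counter_sum(&hyp_nr_items));	/* sums all CPUs */
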
diff --git a/lib/proportions.c b/lib/proportions.c
index d50746a79de2..05df84801b56 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
 
 int prop_local_init_percpu(struct prop_local_percpu *pl)
 {
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	return percpu_counter_init(&pl->events, 0);
@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 
 	/*
@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
 	percpu_counter_set(&pl->events, 0);
 
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*
@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd,
 
 int prop_local_init_single(struct prop_local_single *pl)
 {
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	pl->events = 0;
@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 	/*
 	 * For each missed period, we half the local counter.
@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
 	else
 		pl->events = 0;
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 027a03f4c56d..c96d500577de 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
 	 * in addition to the one that will be printed by
 	 * the entity that is holding the lock already:
 	 */
-	if (!spin_trylock_irqsave(&rs->lock, flags))
+	if (!raw_spin_trylock_irqsave(&rs->lock, flags))
 		return 0;
 
 	if (!rs->begin)
@@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
 		rs->missed++;
 		ret = 0;
 	}
-	spin_unlock_irqrestore(&rs->lock, flags);
+	raw_spin_unlock_irqrestore(&rs->lock, flags);
 
 	return ret;
 }
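Since ___ratelimit() only trylocks its (now raw) lock, it stays callable even while the printing path it protects already holds that lock, as the quoted comment explains. Typical use goes through a ratelimit state, as in this hypothetical call site (not part of this patch):

	static DEFINE_RATELIMIT_STATE(hyp_rs, 5 * HZ, 10);	/* 10 msgs per 5s */

	if (__ratelimit(&hyp_rs))
		pr_warn("noisy condition hit\n");
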
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index ffc9fc7f3b05..f2393c21fe85 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -22,9 +22,9 @@ int rwsem_is_locked(struct rw_semaphore *sem)
 	int ret = 1;
 	unsigned long flags;
 
-	if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
+	if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
 		ret = (sem->activity != 0);
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 	}
 	return ret;
 }
@@ -44,7 +44,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
 	sem->activity = 0;
-	spin_lock_init(&sem->wait_lock);
+	raw_spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
 }
 EXPORT_SYMBOL(__init_rwsem);
@@ -145,12 +145,12 @@ void __sched __down_read(struct rw_semaphore *sem)
 	struct task_struct *tsk;
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
 
@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -189,7 +189,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
 	int ret = 0;
 
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
 		ret = 1;
 	}
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return ret;
 }
@@ -212,12 +212,12 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	struct task_struct *tsk;
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
 
@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -260,7 +260,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
 		ret = 1;
 	}
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return ret;
 }
@@ -280,12 +280,12 @@ void __up_read(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
@@ -295,13 +295,13 @@ void __up_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 0;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 1);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
@@ -312,12 +312,12 @@ void __downgrade_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 1;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 0);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
diff --git a/lib/rwsem.c b/lib/rwsem.c
index aa7c3052261f..410aa1189b13 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -22,7 +22,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
 	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
+	raw_spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
 }
 
@@ -180,7 +180,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
-	spin_lock_irq(&sem->wait_lock);
+	raw_spin_lock_irq(&sem->wait_lock);
 	waiter.task = tsk;
 	waiter.flags = flags;
 	get_task_struct(tsk);
@@ -204,7 +204,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	    adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
 		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
-	spin_unlock_irq(&sem->wait_lock);
+	raw_spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -245,13 +245,13 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return sem;
 }
@@ -265,13 +265,13 @@ struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return sem;
 }
diff --git a/lib/sha1.c b/lib/sha1.c
index f33271dd00cb..1de509a159c8 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/bitops.h>
+#include <linux/cryptohash.h>
 #include <asm/unaligned.h>
 
 /*
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 4689cb073da4..503f087382a4 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
+	if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
 		goto out;
 
 	/*
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index e51e2558ca9d..a768e6d28bbb 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -441,8 +441,12 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
 	 * next filter in the chain. Apply the BCJ filter on the new data
 	 * in the output buffer. If everything cannot be filtered, copy it
 	 * to temp and rewind the output buffer position accordingly.
+	 *
+	 * This needs to be always run when temp.size == 0 to handle a special
+	 * case where the output buffer is full and the next filter has no
+	 * more output coming but hasn't returned XZ_STREAM_END yet.
 	 */
-	if (s->temp.size < b->out_size - b->out_pos) {
+	if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) {
 		out_start = b->out_pos;
 		memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
 		b->out_pos += s->temp.size;
@@ -465,16 +469,25 @@ XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
 		s->temp.size = b->out_pos - out_start;
 		b->out_pos -= s->temp.size;
 		memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
+
+		/*
+		 * If there wasn't enough input to the next filter to fill
+		 * the output buffer with unfiltered data, there's no point
+		 * to try decoding more data to temp.
+		 */
+		if (b->out_pos + s->temp.size < b->out_size)
+			return XZ_OK;
 	}
 
 	/*
-	 * If we have unfiltered data in temp, try to fill by decoding more
-	 * data from the next filter. Apply the BCJ filter on temp. Then we
-	 * hopefully can fill the actual output buffer by copying filtered
-	 * data from temp. A mix of filtered and unfiltered data may be left
-	 * in temp; it will be taken care on the next call to this function.
+	 * We have unfiltered data in temp. If the output buffer isn't full
+	 * yet, try to fill the temp buffer by decoding more data from the
+	 * next filter. Apply the BCJ filter on temp. Then we hopefully can
+	 * fill the actual output buffer by copying filtered data from temp.
+	 * A mix of filtered and unfiltered data may be left in temp; it will
+	 * be taken care on the next call to this function.
 	 */
-	if (s->temp.size > 0) {
+	if (b->out_pos < b->out_size) {
 		/* Make b->out{,_pos,_size} temporarily point to s->temp. */
 		s->out = b->out;
 		s->out_pos = b->out_pos;