author	Ingo Molnar <mingo@elte.hu>	2008-07-26 09:30:40 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-26 09:30:40 -0400
commit	1503af661947b7a4a09355cc2ae6aa0d43f16776 (patch)
tree	5bfcfadf2dd2d98c2ad251c96d7ee43a6903819a /lib
parent	a31863168660c6b6f6c7ffe05bb6a38e97803326 (diff)
parent	024e8ac04453b3525448c31ef39848cf675ba6db (diff)
Merge branch 'linus' into x86/header-guards
Conflicts:

	include/asm-x86/gpio.h
	include/asm-x86/ide.h

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig.debug	41
-rw-r--r--	lib/Kconfig.kgdb	3
-rw-r--r--	lib/Makefile	2
-rw-r--r--	lib/bcd.c	14
-rw-r--r--	lib/cmdline.c	16
-rw-r--r--	lib/cpumask.c	9
-rw-r--r--	lib/debugobjects.c	4
-rw-r--r--	lib/idr.c	140
-rw-r--r--	lib/inflate.c	52
-rw-r--r--	lib/kobject.c	19
-rw-r--r--	lib/kobject_uevent.c	3
-rw-r--r--	lib/list_debug.c	50
-rw-r--r--	lib/lzo/lzo1x_decompress.c	6
-rw-r--r--	lib/ratelimit.c	55
-rw-r--r--	lib/scatterlist.c	176
-rw-r--r--	lib/smp_processor_id.c	6
-rw-r--r--	lib/textsearch.c	2
17 files changed, 396 insertions, 202 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index df27132a56f4..e1d4764435ed 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -74,6 +74,9 @@ config DEBUG_FS
 	  debugging files into.  Enable this option to be able to read and
 	  write to these files.
 
+	  For detailed documentation on the debugfs API, see
+	  Documentation/DocBook/filesystems.
+
 	  If unsure, say N.
 
 config HEADERS_CHECK
@@ -147,7 +150,7 @@ config DETECT_SOFTLOCKUP
 	help
 	  Say Y here to enable the kernel to detect "soft lockups",
 	  which are bugs that cause the kernel to loop in kernel
-	  mode for more than 10 seconds, without giving other tasks a
+	  mode for more than 60 seconds, without giving other tasks a
 	  chance to run.
 
 	  When a soft-lockup is detected, the kernel will print the
@@ -159,6 +162,30 @@ config DETECT_SOFTLOCKUP
 	  can be detected via the NMI-watchdog, on platforms that
 	  support it.)
 
+config BOOTPARAM_SOFTLOCKUP_PANIC
+	bool "Panic (Reboot) On Soft Lockups"
+	depends on DETECT_SOFTLOCKUP
+	help
+	  Say Y here to enable the kernel to panic on "soft lockups",
+	  which are bugs that cause the kernel to loop in kernel
+	  mode for more than 60 seconds, without giving other tasks a
+	  chance to run.
+
+	  The panic can be used in combination with panic_timeout,
+	  to cause the system to reboot automatically after a
+	  lockup has been detected. This feature is useful for
+	  high-availability systems that have uptime guarantees and
+	  where a lockup must be resolved ASAP.
+
+	  Say N if unsure.
+
+config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
+	int
+	depends on DETECT_SOFTLOCKUP
+	range 0 1
+	default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
+	default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
+
 config SCHED_DEBUG
 	bool "Collect scheduler debugging info"
 	depends on DEBUG_KERNEL && PROC_FS
@@ -478,6 +505,18 @@ config DEBUG_WRITECOUNT
 
 	  If unsure, say N.
 
+config DEBUG_MEMORY_INIT
+	bool "Debug memory initialisation" if EMBEDDED
+	default !EMBEDDED
+	help
+	  Enable this for additional checks during memory initialisation.
+	  The sanity checks verify aspects of the VM such as the memory model
+	  and other information provided by the architecture. Verbose
+	  information will be printed at KERN_DEBUG loglevel depending
+	  on the mminit_loglevel= command-line option.
+
+	  If unsure, say Y
+
 config DEBUG_LIST
 	bool "Debug linked list manipulation"
 	depends on DEBUG_KERNEL
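
Note: the default these options select can also be overridden at boot time; assuming the softlockup_panic parameter wired up by the matching kernel/softlockup.c change, a command line might look like:

	softlockup_panic=1 panic=30	# panic on a soft lockup, reboot 30s later
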
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index a5d4b1dac2a5..2cfd2721f7ed 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,7 +1,4 @@
 
-config HAVE_ARCH_KGDB_SHADOW_INFO
-	bool
-
 config HAVE_ARCH_KGDB
 	bool
 
diff --git a/lib/Makefile b/lib/Makefile
index 818c4d455518..9085ad6fa53d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -18,7 +18,7 @@ lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y += kobject.o kref.o klist.o
 
-obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
+obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
diff --git a/lib/bcd.c b/lib/bcd.c
new file mode 100644
index 000000000000..d74257fd0fe7
--- /dev/null
+++ b/lib/bcd.c
@@ -0,0 +1,14 @@
+#include <linux/bcd.h>
+#include <linux/module.h>
+
+unsigned bcd2bin(unsigned char val)
+{
+	return (val & 0x0f) + (val >> 4) * 10;
+}
+EXPORT_SYMBOL(bcd2bin);
+
+unsigned char bin2bcd(unsigned val)
+{
+	return ((val / 10) << 4) + val % 10;
+}
+EXPORT_SYMBOL(bin2bcd);
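
A minimal usage sketch of the two new helpers (values are illustrative; RTC drivers reading BCD-encoded registers are the intended users):

	#include <linux/bcd.h>

	unsigned char raw = 0x42;		/* BCD encoding of decimal 42 */
	unsigned sec = bcd2bin(raw);		/* -> 42 */
	unsigned char back = bin2bcd(sec);	/* -> 0x42 again */
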
diff --git a/lib/cmdline.c b/lib/cmdline.c
index f596c08d213a..5ba8a942a478 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -116,7 +116,7 @@ char *get_options(const char *str, int nints, int *ints)
 /**
  * memparse - parse a string with mem suffixes into a number
  * @ptr: Where parse begins
- * @retptr: (output) Pointer to next char after parse completes
+ * @retptr: (output) Optional pointer to next char after parse completes
  *
  * Parses a string into a number.  The number stored at @ptr is
  * potentially suffixed with %K (for kilobytes, or 1024 bytes),
@@ -126,11 +126,13 @@ char *get_options(const char *str, int nints, int *ints)
  * megabyte, or one gigabyte, respectively.
  */
 
-unsigned long long memparse (char *ptr, char **retptr)
+unsigned long long memparse(char *ptr, char **retptr)
 {
-	unsigned long long ret = simple_strtoull (ptr, retptr, 0);
+	char *endptr;	/* local pointer to end of parsed string */
 
-	switch (**retptr) {
+	unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
+
+	switch (*endptr) {
 	case 'G':
 	case 'g':
 		ret <<= 10;
@@ -140,10 +142,14 @@ unsigned long long memparse (char *ptr, char **retptr)
 	case 'K':
 	case 'k':
 		ret <<= 10;
-		(*retptr)++;
+		endptr++;
 	default:
 		break;
 	}
+
+	if (retptr)
+		*retptr = endptr;
+
 	return ret;
 }
 
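
With @retptr now optional, a caller that only wants the value can pass NULL; a hypothetical caller parsing a size option:

	char *rest;
	unsigned long long sz;

	sz = memparse("512M", NULL);		/* 512 << 20; NULL retptr is now legal */
	sz = memparse("16k,rest", &rest);	/* sz == 16384, rest points at ",rest" */
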
diff --git a/lib/cpumask.c b/lib/cpumask.c
index bb4f76d3c3e7..5f97dc25ef9c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp)
 }
 EXPORT_SYMBOL(__next_cpu);
 
+#if NR_CPUS > 64
+int __next_cpu_nr(int n, const cpumask_t *srcp)
+{
+	return min_t(int, nr_cpu_ids,
+				find_next_bit(srcp->bits, nr_cpu_ids, n+1));
+}
+EXPORT_SYMBOL(__next_cpu_nr);
+#endif
+
 int __any_online_cpu(const cpumask_t *mask)
 {
 	int cpu;
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 85b18d79be89..f86196390cfd 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -226,15 +226,13 @@ debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
 
 static void debug_object_is_on_stack(void *addr, int onstack)
 {
-	void *stack = current->stack;
 	int is_on_stack;
 	static int limit;
 
 	if (limit > 4)
 		return;
 
-	is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE));
-
+	is_on_stack = object_is_on_stack(addr);
 	if (is_on_stack == onstack)
 		return;
 
diff --git a/lib/idr.c b/lib/idr.c
index 7a02e173f027..3476f8203e97 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,6 +6,8 @@
  * Modified by George Anzinger to reuse immediately and to use
  * find bit instructions.  Also removed _irq on spinlocks.
  *
+ * Modified by Nadia Derbey to make it RCU safe.
+ *
  * Small id to pointer translation service.
  *
  * It uses a radix tree like structure as a sparse array indexed
@@ -35,7 +37,7 @@
 
 static struct kmem_cache *idr_layer_cache;
 
-static struct idr_layer *alloc_layer(struct idr *idp)
+static struct idr_layer *get_from_free_list(struct idr *idp)
 {
 	struct idr_layer *p;
 	unsigned long flags;
@@ -50,15 +52,28 @@ static struct idr_layer *alloc_layer(struct idr *idp)
 	return(p);
 }
 
+static void idr_layer_rcu_free(struct rcu_head *head)
+{
+	struct idr_layer *layer;
+
+	layer = container_of(head, struct idr_layer, rcu_head);
+	kmem_cache_free(idr_layer_cache, layer);
+}
+
+static inline void free_layer(struct idr_layer *p)
+{
+	call_rcu(&p->rcu_head, idr_layer_rcu_free);
+}
+
 /* only called when idp->lock is held */
-static void __free_layer(struct idr *idp, struct idr_layer *p)
+static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
 {
 	p->ary[0] = idp->id_free;
 	idp->id_free = p;
 	idp->id_free_cnt++;
 }
 
-static void free_layer(struct idr *idp, struct idr_layer *p)
+static void move_to_free_list(struct idr *idp, struct idr_layer *p)
 {
 	unsigned long flags;
 
@@ -66,7 +81,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
 	 * Depends on the return element being zeroed.
 	 */
 	spin_lock_irqsave(&idp->lock, flags);
-	__free_layer(idp, p);
+	__move_to_free_list(idp, p);
 	spin_unlock_irqrestore(&idp->lock, flags);
 }
 
@@ -96,7 +111,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
  * @gfp_mask: memory allocation flags
  *
  * This function should be called prior to locking and calling the
- * following function.  It preallocates enough memory to satisfy
+ * idr_get_new* functions. It preallocates enough memory to satisfy
  * the worst possible allocation.
  *
  * If the system is REALLY out of memory this function returns 0,
@@ -109,7 +124,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
 		if (new == NULL)
 			return (0);
-		free_layer(idp, new);
+		move_to_free_list(idp, new);
 	}
 	return 1;
 }
@@ -143,7 +158,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 		/* if already at the top layer, we need to grow */
 		if (!(p = pa[l])) {
 			*starting_id = id;
-			return -2;
+			return IDR_NEED_TO_GROW;
 		}
 
 		/* If we need to go up one layer, continue the
@@ -160,16 +175,17 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			id = ((id >> sh) ^ n ^ m) << sh;
 		}
 		if ((id >= MAX_ID_BIT) || (id < 0))
-			return -3;
+			return IDR_NOMORE_SPACE;
 		if (l == 0)
 			break;
 		/*
 		 * Create the layer below if it is missing.
 		 */
 		if (!p->ary[m]) {
-			if (!(new = alloc_layer(idp)))
+			new = get_from_free_list(idp);
+			if (!new)
 				return -1;
-			p->ary[m] = new;
+			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
 		}
 		pa[l--] = p;
@@ -192,7 +208,7 @@ build_up:
 	p = idp->top;
 	layers = idp->layers;
 	if (unlikely(!p)) {
-		if (!(p = alloc_layer(idp)))
+		if (!(p = get_from_free_list(idp)))
 			return -1;
 		layers = 1;
 	}
@@ -204,7 +220,7 @@ build_up:
 		layers++;
 		if (!p->count)
 			continue;
-		if (!(new = alloc_layer(idp))) {
+		if (!(new = get_from_free_list(idp))) {
 			/*
 			 * The allocation failed.  If we built part of
 			 * the structure tear it down.
@@ -214,7 +230,7 @@ build_up:
 				p = p->ary[0];
 				new->ary[0] = NULL;
 				new->bitmap = new->count = 0;
-				__free_layer(idp, new);
+				__move_to_free_list(idp, new);
 			}
 			spin_unlock_irqrestore(&idp->lock, flags);
 			return -1;
@@ -225,10 +241,10 @@ build_up:
 		__set_bit(0, &new->bitmap);
 		p = new;
 	}
-	idp->top = p;
+	rcu_assign_pointer(idp->top, p);
 	idp->layers = layers;
 	v = sub_alloc(idp, &id, pa);
-	if (v == -2)
+	if (v == IDR_NEED_TO_GROW)
 		goto build_up;
 	return(v);
 }
@@ -244,7 +260,8 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 	 * Successfully found an empty slot.  Install the user
 	 * pointer and mark the slot full.
 	 */
-	pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
+	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
+			(struct idr_layer *)ptr);
 	pa[0]->count++;
 	idr_mark_full(pa, id);
 	}
@@ -277,12 +294,8 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 	 * This is a cheap hack until the IDR code can be fixed to
 	 * return proper error values.
 	 */
-	if (rv < 0) {
-		if (rv == -1)
-			return -EAGAIN;
-		else /* Will be -3 */
-			return -ENOSPC;
-	}
+	if (rv < 0)
+		return _idr_rc_to_errno(rv);
 	*id = rv;
 	return 0;
 }
@@ -312,12 +325,8 @@ int idr_get_new(struct idr *idp, void *ptr, int *id)
 	 * This is a cheap hack until the IDR code can be fixed to
 	 * return proper error values.
 	 */
-	if (rv < 0) {
-		if (rv == -1)
-			return -EAGAIN;
-		else /* Will be -3 */
-			return -ENOSPC;
-	}
+	if (rv < 0)
+		return _idr_rc_to_errno(rv);
 	*id = rv;
 	return 0;
 }
@@ -325,7 +334,8 @@ EXPORT_SYMBOL(idr_get_new);
 
 static void idr_remove_warning(int id)
 {
-	printk("idr_remove called for id=%d which is not allocated.\n", id);
+	printk(KERN_WARNING
+		"idr_remove called for id=%d which is not allocated.\n", id);
 	dump_stack();
 }
 
@@ -334,6 +344,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 	struct idr_layer *p = idp->top;
 	struct idr_layer **pa[MAX_LEVEL];
 	struct idr_layer ***paa = &pa[0];
+	struct idr_layer *to_free;
 	int n;
 
 	*paa = NULL;
@@ -349,13 +360,18 @@ static void sub_remove(struct idr *idp, int shift, int id)
 	n = id & IDR_MASK;
 	if (likely(p != NULL && test_bit(n, &p->bitmap))){
 		__clear_bit(n, &p->bitmap);
-		p->ary[n] = NULL;
+		rcu_assign_pointer(p->ary[n], NULL);
+		to_free = NULL;
 		while(*paa && ! --((**paa)->count)){
-			free_layer(idp, **paa);
+			if (to_free)
+				free_layer(to_free);
+			to_free = **paa;
 			**paa-- = NULL;
 		}
 		if (!*paa)
 			idp->layers = 0;
+		if (to_free)
+			free_layer(to_free);
 	} else
 		idr_remove_warning(id);
 }
@@ -368,22 +384,34 @@ static void sub_remove(struct idr *idp, int shift, int id)
 void idr_remove(struct idr *idp, int id)
 {
 	struct idr_layer *p;
+	struct idr_layer *to_free;
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
 
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
 	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
-	    idp->top->ary[0]) {  // We can drop a layer
-
+	    idp->top->ary[0]) {
+		/*
+		 * Single child at leftmost slot: we can shrink the tree.
+		 * This level is not needed anymore since when layers are
+		 * inserted, they are inserted at the top of the existing
+		 * tree.
+		 */
+		to_free = idp->top;
 		p = idp->top->ary[0];
-		idp->top->bitmap = idp->top->count = 0;
-		free_layer(idp, idp->top);
-		idp->top = p;
+		rcu_assign_pointer(idp->top, p);
 		--idp->layers;
+		to_free->bitmap = to_free->count = 0;
+		free_layer(to_free);
 	}
 	while (idp->id_free_cnt >= IDR_FREE_MAX) {
-		p = alloc_layer(idp);
+		p = get_from_free_list(idp);
+		/*
+		 * Note: we don't call the rcu callback here, since the only
+		 * layers that fall into the freelist are those that have been
+		 * preallocated.
+		 */
 		kmem_cache_free(idr_layer_cache, p);
 	}
 	return;
@@ -424,15 +452,13 @@ void idr_remove_all(struct idr *idp)
 
 		id += 1 << n;
 		while (n < fls(id)) {
-			if (p) {
-				memset(p, 0, sizeof *p);
-				free_layer(idp, p);
-			}
+			if (p)
+				free_layer(p);
 			n += IDR_BITS;
 			p = *--paa;
 		}
 	}
-	idp->top = NULL;
+	rcu_assign_pointer(idp->top, NULL);
 	idp->layers = 0;
 }
 EXPORT_SYMBOL(idr_remove_all);
@@ -444,7 +470,7 @@ EXPORT_SYMBOL(idr_remove_all);
 void idr_destroy(struct idr *idp)
 {
 	while (idp->id_free_cnt) {
-		struct idr_layer *p = alloc_layer(idp);
+		struct idr_layer *p = get_from_free_list(idp);
 		kmem_cache_free(idr_layer_cache, p);
 	}
 }
@@ -459,7 +485,8 @@ EXPORT_SYMBOL(idr_destroy);
  * return indicates that @id is not valid or you passed %NULL in
  * idr_get_new().
  *
- * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
+ * This function can be called under rcu_read_lock(), given that the leaf
+ * pointers lifetimes are correctly managed.
  */
 void *idr_find(struct idr *idp, int id)
 {
@@ -467,7 +494,7 @@ void *idr_find(struct idr *idp, int id)
 	struct idr_layer *p;
 
 	n = idp->layers * IDR_BITS;
-	p = idp->top;
+	p = rcu_dereference(idp->top);
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
@@ -477,7 +504,7 @@ void *idr_find(struct idr *idp, int id)
 
 	while (n > 0 && p) {
 		n -= IDR_BITS;
-		p = p->ary[(id >> n) & IDR_MASK];
+		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
 }
@@ -510,7 +537,7 @@ int idr_for_each(struct idr *idp,
 	struct idr_layer **paa = &pa[0];
 
 	n = idp->layers * IDR_BITS;
-	p = idp->top;
+	p = rcu_dereference(idp->top);
 	max = 1 << n;
 
 	id = 0;
@@ -518,7 +545,7 @@ int idr_for_each(struct idr *idp,
 	while (n > 0 && p) {
 		n -= IDR_BITS;
 		*paa++ = p;
-		p = p->ary[(id >> n) & IDR_MASK];
+		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 
 	if (p) {
@@ -548,7 +575,7 @@ EXPORT_SYMBOL(idr_for_each);
  * A -ENOENT return indicates that @id was not found.
  * A -EINVAL return indicates that @id was not within valid constraints.
  *
- * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
+ * The caller must serialize with writers.
  */
 void *idr_replace(struct idr *idp, void *ptr, int id)
 {
@@ -574,7 +601,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 		return ERR_PTR(-ENOENT);
 
 	old_p = p->ary[n];
-	p->ary[n] = ptr;
+	rcu_assign_pointer(p->ary[n], ptr);
 
 	return old_p;
 }
@@ -694,12 +721,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
  restart:
 	/* get vacant slot */
 	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
-	if (t < 0) {
-		if (t == -1)
-			return -EAGAIN;
-		else /* will be -3 */
-			return -ENOSPC;
-	}
+	if (t < 0)
+		return _idr_rc_to_errno(t);
 
 	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
 		return -ENOSPC;
@@ -720,7 +743,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 			return -EAGAIN;
 
 		memset(bitmap, 0, sizeof(struct ida_bitmap));
-		pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
+		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
+				(void *)bitmap);
 		pa[0]->count++;
 	}
 
@@ -749,7 +773,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 	 * allocation.
 	 */
 	if (ida->idr.id_free_cnt || ida->free_bitmap) {
-		struct idr_layer *p = alloc_layer(&ida->idr);
+		struct idr_layer *p = get_from_free_list(&ida->idr);
 		if (p)
 			kmem_cache_free(idr_layer_cache, p);
diff --git a/lib/inflate.c b/lib/inflate.c
index 9762294be062..1a8e8a978128 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -230,6 +230,45 @@ STATIC const ush mask_bits[] = {
 #define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
 #define DUMPBITS(n) {b>>=(n);k-=(n);}
 
+#ifndef NO_INFLATE_MALLOC
+/* A trivial malloc implementation, adapted from
+ *  malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ */
+
+static unsigned long malloc_ptr;
+static int malloc_count;
+
+static void *malloc(int size)
+{
+	void *p;
+
+	if (size < 0)
+		error("Malloc error");
+	if (!malloc_ptr)
+		malloc_ptr = free_mem_ptr;
+
+	malloc_ptr = (malloc_ptr + 3) & ~3;	/* Align */
+
+	p = (void *)malloc_ptr;
+	malloc_ptr += size;
+
+	if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
+		error("Out of memory");
+
+	malloc_count++;
+	return p;
+}
+
+static void free(void *where)
+{
+	malloc_count--;
+	if (!malloc_count)
+		malloc_ptr = free_mem_ptr;
+}
+#else
+#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define free(a) kfree(a)
+#endif
 
 /*
   Huffman code decoding is performed using a multi-level table lookup.
@@ -1045,7 +1084,6 @@ STATIC int INIT inflate(void)
 	int e;			/* last block flag */
 	int r;			/* result code */
 	unsigned h;		/* maximum struct huft's malloc'ed */
-	void *ptr;
 
 	/* initialize window, bit buffer */
 	wp = 0;
@@ -1057,12 +1095,12 @@ STATIC int INIT inflate(void)
 	h = 0;
 	do {
 		hufts = 0;
-		gzip_mark(&ptr);
-		if ((r = inflate_block(&e)) != 0) {
-			gzip_release(&ptr);
-			return r;
-		}
-		gzip_release(&ptr);
+#ifdef ARCH_HAS_DECOMP_WDOG
+		arch_decomp_wdog();
+#endif
+		r = inflate_block(&e);
+		if (r)
+			return r;
 		if (hufts > h)
 			h = hufts;
 	} while (!e);
diff --git a/lib/kobject.c b/lib/kobject.c
index dcade0543bd2..bd732ffebc85 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -164,9 +164,8 @@ static int kobject_add_internal(struct kobject *kobj)
 		return -ENOENT;
 
 	if (!kobj->name || !kobj->name[0]) {
-		pr_debug("kobject: (%p): attempted to be registered with empty "
+		WARN(1, "kobject: (%p): attempted to be registered with empty "
 			 "name!\n", kobj);
-		WARN_ON(1);
 		return -EINVAL;
 	}
 
@@ -216,13 +215,19 @@ static int kobject_add_internal(struct kobject *kobj)
 static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
 				  va_list vargs)
 {
-	/* Free the old name, if necessary. */
-	kfree(kobj->name);
+	const char *old_name = kobj->name;
+	char *s;
 
 	kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
 	if (!kobj->name)
 		return -ENOMEM;
 
+	/* ewww... some of these buggers have '/' in the name ... */
+	s = strchr(kobj->name, '/');
+	if (s)
+		s[0] = '!';
+
+	kfree(old_name);
 	return 0;
 }
 
@@ -577,12 +582,10 @@ static void kobject_release(struct kref *kref)
 void kobject_put(struct kobject *kobj)
 {
 	if (kobj) {
-		if (!kobj->state_initialized) {
-			printk(KERN_WARNING "kobject: '%s' (%p): is not "
+		if (!kobj->state_initialized)
+			WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
 			       "initialized, yet kobject_put() is being "
 			       "called.\n", kobject_name(kobj), kobj);
-			WARN_ON(1);
-		}
 		kref_put(&kobj->kref, kobject_release);
 	}
 }
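
The '/'-to-'!' rewrite in kobject_set_name_vargs() above means callers may now pass names containing a slash and still get a legal sysfs name; e.g. (hypothetical device):

	kobject_set_name(&disk->kobj, "cciss/c0d0");	/* shows up in sysfs as "cciss!c0d0" */
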
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2fa545a63160..9f8d599459d1 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -245,7 +245,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 		if (retval)
 			goto exit;
 
-		call_usermodehelper(argv[0], argv, env->envp, UMH_WAIT_EXEC);
+		retval = call_usermodehelper(argv[0], argv,
+					     env->envp, UMH_WAIT_EXEC);
 	}
 
 exit:
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 4350ba9655bd..1a39f4e3ae1f 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -20,18 +20,14 @@ void __list_add(struct list_head *new,
 			      struct list_head *prev,
 			      struct list_head *next)
 {
-	if (unlikely(next->prev != prev)) {
-		printk(KERN_ERR "list_add corruption. next->prev should be "
-			"prev (%p), but was %p. (next=%p).\n",
-			prev, next->prev, next);
-		BUG();
-	}
-	if (unlikely(prev->next != next)) {
-		printk(KERN_ERR "list_add corruption. prev->next should be "
-			"next (%p), but was %p. (prev=%p).\n",
-			next, prev->next, prev);
-		BUG();
-	}
+	WARN(next->prev != prev,
+		"list_add corruption. next->prev should be "
+		"prev (%p), but was %p. (next=%p).\n",
+		prev, next->prev, next);
+	WARN(prev->next != next,
+		"list_add corruption. prev->next should be "
+		"next (%p), but was %p. (prev=%p).\n",
+		next, prev->next, prev);
 	next->prev = new;
 	new->next = next;
 	new->prev = prev;
@@ -40,20 +36,6 @@ void __list_add(struct list_head *new,
 EXPORT_SYMBOL(__list_add);
 
 /**
- * list_add - add a new entry
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- */
-void list_add(struct list_head *new, struct list_head *head)
-{
-	__list_add(new, head, head->next);
-}
-EXPORT_SYMBOL(list_add);
-
-/**
  * list_del - deletes entry from list.
  * @entry: the element to delete from the list.
  * Note: list_empty on entry does not return true after this, the entry is
@@ -61,16 +43,12 @@ EXPORT_SYMBOL(list_add);
  */
 void list_del(struct list_head *entry)
 {
-	if (unlikely(entry->prev->next != entry)) {
-		printk(KERN_ERR "list_del corruption. prev->next should be %p, "
-			"but was %p\n", entry, entry->prev->next);
-		BUG();
-	}
-	if (unlikely(entry->next->prev != entry)) {
-		printk(KERN_ERR "list_del corruption. next->prev should be %p, "
-			"but was %p\n", entry, entry->next->prev);
-		BUG();
-	}
+	WARN(entry->prev->next != entry,
+		"list_del corruption. prev->next should be %p, "
+		"but was %p\n", entry, entry->prev->next);
+	WARN(entry->next->prev != entry,
+		"list_del corruption. next->prev should be %p, "
+		"but was %p\n", entry, entry->next->prev);
 	__list_del(entry->prev, entry->next);
 	entry->next = LIST_POISON1;
 	entry->prev = LIST_POISON2;
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c
index 77f0f9b775a9..5dc6b29c1575 100644
--- a/lib/lzo/lzo1x_decompress.c
+++ b/lib/lzo/lzo1x_decompress.c
@@ -138,8 +138,7 @@ match:
 			t += 31 + *ip++;
 		}
 		m_pos = op - 1;
-		m_pos -= le16_to_cpu(get_unaligned(
-			(const unsigned short *)ip)) >> 2;
+		m_pos -= get_unaligned_le16(ip) >> 2;
 		ip += 2;
 	} else if (t >= 16) {
 		m_pos = op;
@@ -157,8 +156,7 @@ match:
 			}
 			t += 7 + *ip++;
 		}
-		m_pos -= le16_to_cpu(get_unaligned(
-			(const unsigned short *)ip)) >> 2;
+		m_pos -= get_unaligned_le16(ip) >> 2;
 		ip += 2;
 		if (m_pos == op)
 			goto eof_found;
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 485e3040dcd4..35136671b215 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -3,6 +3,9 @@
  *
  * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
  *
+ * 2008-05-01 rewrite the function and use a ratelimit_state data struct as
+ * parameter. Now every user can use their own standalone ratelimit_state.
+ *
  * This file is released under the GPLv2.
  *
  */
@@ -11,41 +14,43 @@
 #include <linux/jiffies.h>
 #include <linux/module.h>
 
+static DEFINE_SPINLOCK(ratelimit_lock);
+static unsigned long flags;
+
 /*
  * __ratelimit - rate limiting
- * @ratelimit_jiffies: minimum time in jiffies between two callbacks
- * @ratelimit_burst: number of callbacks we do before ratelimiting
+ * @rs: ratelimit_state data
  *
- * This enforces a rate limit: not more than @ratelimit_burst callbacks
- * in every ratelimit_jiffies
+ * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks
+ * in every @rs->ratelimit_jiffies
  */
-int __ratelimit(int ratelimit_jiffies, int ratelimit_burst)
+int __ratelimit(struct ratelimit_state *rs)
 {
-	static DEFINE_SPINLOCK(ratelimit_lock);
-	static unsigned toks = 10 * 5 * HZ;
-	static unsigned long last_msg;
-	static int missed;
-	unsigned long flags;
-	unsigned long now = jiffies;
+	if (!rs->interval)
+		return 1;
 
 	spin_lock_irqsave(&ratelimit_lock, flags);
-	toks += now - last_msg;
-	last_msg = now;
-	if (toks > (ratelimit_burst * ratelimit_jiffies))
-		toks = ratelimit_burst * ratelimit_jiffies;
-	if (toks >= ratelimit_jiffies) {
-		int lost = missed;
+	if (!rs->begin)
+		rs->begin = jiffies;
 
-		missed = 0;
-		toks -= ratelimit_jiffies;
-		spin_unlock_irqrestore(&ratelimit_lock, flags);
-		if (lost)
-			printk(KERN_WARNING "%s: %d messages suppressed\n",
-				__func__, lost);
-		return 1;
+	if (time_is_before_jiffies(rs->begin + rs->interval)) {
+		if (rs->missed)
+			printk(KERN_WARNING "%s: %d callbacks suppressed\n",
+				__func__, rs->missed);
+		rs->begin = 0;
+		rs->printed = 0;
+		rs->missed = 0;
 	}
-	missed++;
+	if (rs->burst && rs->burst > rs->printed)
+		goto print;
+
+	rs->missed++;
 	spin_unlock_irqrestore(&ratelimit_lock, flags);
 	return 0;
+
+print:
+	rs->printed++;
+	spin_unlock_irqrestore(&ratelimit_lock, flags);
+	return 1;
 }
 EXPORT_SYMBOL(__ratelimit);
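
A usage sketch of the new per-user state (assuming the DEFINE_RATELIMIT_STATE() helper from the matching <linux/ratelimit.h> change; 5*HZ and 10 mirror the old built-in defaults):

	#include <linux/ratelimit.h>

	static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);	/* interval, burst */

	if (__ratelimit(&my_rs))
		printk(KERN_NOTICE "at most 10 of these per 5 seconds\n");
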
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b80c21100d78..876ba6d5b670 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -295,6 +295,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 EXPORT_SYMBOL(sg_alloc_table);
 
 /**
+ * sg_miter_start - start mapping iteration over a sg list
+ * @miter: sg mapping iter to be started
+ * @sgl: sg list to iterate over
+ * @nents: number of sg entries
+ *
+ * Description:
+ *   Starts mapping iterator @miter.
+ *
+ * Context:
+ *   Don't care.
+ */
+void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
+		    unsigned int nents, unsigned int flags)
+{
+	memset(miter, 0, sizeof(struct sg_mapping_iter));
+
+	miter->__sg = sgl;
+	miter->__nents = nents;
+	miter->__offset = 0;
+	miter->__flags = flags;
+}
+EXPORT_SYMBOL(sg_miter_start);
+
+/**
+ * sg_miter_next - proceed mapping iterator to the next mapping
+ * @miter: sg mapping iter to proceed
+ *
+ * Description:
+ *   Proceeds @miter@ to the next mapping.  @miter@ should have been
+ *   started using sg_miter_start().  On successful return,
+ *   @miter@->page, @miter@->addr and @miter@->length point to the
+ *   current mapping.
+ *
+ * Context:
+ *   IRQ disabled if SG_MITER_ATOMIC.  IRQ must stay disabled till
+ *   @miter@ is stopped.  May sleep if !SG_MITER_ATOMIC.
+ *
+ * Returns:
+ *   true if @miter contains the next mapping.  false if end of sg
+ *   list is reached.
+ */
+bool sg_miter_next(struct sg_mapping_iter *miter)
+{
+	unsigned int off, len;
+
+	/* check for end and drop resources from the last iteration */
+	if (!miter->__nents)
+		return false;
+
+	sg_miter_stop(miter);
+
+	/* get to the next sg if necessary.  __offset is adjusted by stop */
+	if (miter->__offset == miter->__sg->length && --miter->__nents) {
+		miter->__sg = sg_next(miter->__sg);
+		miter->__offset = 0;
+	}
+
+	/* map the next page */
+	off = miter->__sg->offset + miter->__offset;
+	len = miter->__sg->length - miter->__offset;
+
+	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
+	off &= ~PAGE_MASK;
+	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
+	miter->consumed = miter->length;
+
+	if (miter->__flags & SG_MITER_ATOMIC)
+		miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
+	else
+		miter->addr = kmap(miter->page) + off;
+
+	return true;
+}
+EXPORT_SYMBOL(sg_miter_next);
+
+/**
+ * sg_miter_stop - stop mapping iteration
+ * @miter: sg mapping iter to be stopped
+ *
+ * Description:
+ *   Stops mapping iterator @miter.  @miter should have been started
+ *   using sg_miter_start().  A stopped iteration can be resumed by
+ *   calling sg_miter_next() on it.  This is useful when resources
+ *   (kmap) need to be released during iteration.
+ *
+ * Context:
+ *   IRQ disabled if the SG_MITER_ATOMIC is set.  Don't care otherwise.
+ */
+void sg_miter_stop(struct sg_mapping_iter *miter)
+{
+	WARN_ON(miter->consumed > miter->length);
+
+	/* drop resources from the last iteration */
+	if (miter->addr) {
+		miter->__offset += miter->consumed;
+
+		if (miter->__flags & SG_MITER_ATOMIC) {
+			WARN_ON(!irqs_disabled());
+			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
+		} else
+			kunmap(miter->addr);
+
+		miter->page = NULL;
+		miter->addr = NULL;
+		miter->length = 0;
+		miter->consumed = 0;
+	}
+}
+EXPORT_SYMBOL(sg_miter_stop);
+
+/**
  * sg_copy_buffer - Copy data between a linear buffer and an SG list
  * @sgl:		 The SG list
  * @nents:		 Number of SG entries
@@ -309,56 +420,29 @@ EXPORT_SYMBOL(sg_alloc_table);
 static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 			     void *buf, size_t buflen, int to_buffer)
 {
-	struct scatterlist *sg;
-	size_t buf_off = 0;
-	int i;
-
-	WARN_ON(!irqs_disabled());
-
-	for_each_sg(sgl, sg, nents, i) {
-		struct page *page;
-		int n = 0;
-		unsigned int sg_off = sg->offset;
-		unsigned int sg_copy = sg->length;
-
-		if (sg_copy > buflen)
-			sg_copy = buflen;
-		buflen -= sg_copy;
-
-		while (sg_copy > 0) {
-			unsigned int page_copy;
-			void *p;
-
-			page_copy = PAGE_SIZE - sg_off;
-			if (page_copy > sg_copy)
-				page_copy = sg_copy;
-
-			page = nth_page(sg_page(sg), n);
-			p = kmap_atomic(page, KM_BIO_SRC_IRQ);
-
-			if (to_buffer)
-				memcpy(buf + buf_off, p + sg_off, page_copy);
-			else {
-				memcpy(p + sg_off, buf + buf_off, page_copy);
-				flush_kernel_dcache_page(page);
-			}
-
-			kunmap_atomic(p, KM_BIO_SRC_IRQ);
-
-			buf_off += page_copy;
-			sg_off += page_copy;
-			if (sg_off == PAGE_SIZE) {
-				sg_off = 0;
-				n++;
-			}
-			sg_copy -= page_copy;
-		}
-
-		if (!buflen)
-			break;
+	unsigned int offset = 0;
+	struct sg_mapping_iter miter;
+
+	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+
+	while (sg_miter_next(&miter) && offset < buflen) {
+		unsigned int len;
+
+		len = min(miter.length, buflen - offset);
+
+		if (to_buffer)
+			memcpy(buf + offset, miter.addr, len);
+		else {
+			memcpy(miter.addr, buf + offset, len);
+			flush_kernel_dcache_page(miter.page);
+		}
+
+		offset += len;
 	}
 
-	return buf_off;
+	sg_miter_stop(&miter);
+
+	return offset;
 }
 
 /**
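
The rewritten sg_copy_buffer() above is the canonical user of the new iterator; an independent sketch (sgl/nents assumed to describe a valid scatterlist) zeroing every byte it covers:

	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, 0);	/* flags 0: kmap() path, may sleep */
	while (sg_miter_next(&miter)) {
		/* miter.addr is mapped; miter.length bytes are valid */
		memset(miter.addr, 0, miter.length);
	}
	sg_miter_stop(&miter);	/* drops the final mapping */
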
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 3b4dc098181e..c4381d9516f6 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -11,7 +11,7 @@ notrace unsigned int debug_smp_processor_id(void)
 {
 	unsigned long preempt_count = preempt_count();
 	int this_cpu = raw_smp_processor_id();
-	cpumask_t this_mask;
+	cpumask_of_cpu_ptr_declare(this_mask);
 
 	if (likely(preempt_count))
 		goto out;
@@ -23,9 +23,9 @@ notrace unsigned int debug_smp_processor_id(void)
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	this_mask = cpumask_of_cpu(this_cpu);
+	cpumask_of_cpu_ptr_next(this_mask, this_cpu);
 
-	if (cpus_equal(current->cpus_allowed, this_mask))
+	if (cpus_equal(current->cpus_allowed, *this_mask))
 		goto out;
 
 	/*
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 4b7c6075256f..9fbcb44c554f 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -267,7 +267,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
 		return ERR_PTR(-EINVAL);
 
 	ops = lookup_ts_algo(algo);
-#ifdef CONFIG_KMOD
+#ifdef CONFIG_MODULES
 	/*
 	 * Why not always autoload you may ask. Some users are
 	 * in a situation where requesting a module may deadlock,