Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug        31
-rw-r--r--  lib/Kconfig.kgdb         16
-rw-r--r--  lib/Makefile              3
-rw-r--r--  lib/debugobjects.c      890
-rw-r--r--  lib/devres.c              6
-rw-r--r--  lib/div64.c              35
-rw-r--r--  lib/find_next_bit.c      22
-rw-r--r--  lib/idr.c                12
-rw-r--r--  lib/inflate.c             3
-rw-r--r--  lib/iomap.c               2
-rw-r--r--  lib/klist.c             235
-rw-r--r--  lib/kobject.c            44
-rw-r--r--  lib/kobject_uevent.c     10
-rw-r--r--  lib/lmb.c                99
-rw-r--r--  lib/percpu_counter.c      1
-rw-r--r--  lib/proportions.c        38
-rw-r--r--  lib/ratelimit.c          51
-rw-r--r--  lib/string.c             27
-rw-r--r--  lib/swiotlb.c           149
19 files changed, 1405 insertions, 269 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 754cc0027f2..d2099f41aa1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -194,6 +194,37 @@ config TIMER_STATS
 	  (it defaults to deactivated on bootup and will only be activated
 	  if some application like powertop activates it explicitly).
 
+config DEBUG_OBJECTS
+	bool "Debug object operations"
+	depends on DEBUG_KERNEL
+	help
+	  If you say Y here, additional code will be inserted into the
+	  kernel to track the life time of various objects and validate
+	  the operations on those objects.
+
+config DEBUG_OBJECTS_SELFTEST
+	bool "Debug objects selftest"
+	depends on DEBUG_OBJECTS
+	help
+	  This enables the selftest of the object debug code.
+
+config DEBUG_OBJECTS_FREE
+	bool "Debug objects in freed memory"
+	depends on DEBUG_OBJECTS
+	help
+	  This enables checks whether a k/v free operation frees an area
+	  which contains an object which has not been deactivated
+	  properly. This can make kmalloc/kfree-intensive workloads
+	  much slower.
+
+config DEBUG_OBJECTS_TIMERS
+	bool "Debug timer objects"
+	depends on DEBUG_OBJECTS
+	help
+	  If you say Y here, additional code will be inserted into the
+	  timer routines to track the life time of timer objects and
+	  validate the timer operations.
+
 config DEBUG_SLAB
 	bool "Debug slab memory allocations"
 	depends on DEBUG_KERNEL && SLAB
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index f2e01ac5ab0..a5d4b1dac2a 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,4 +1,10 @@
 
+config HAVE_ARCH_KGDB_SHADOW_INFO
+	bool
+
+config HAVE_ARCH_KGDB
+	bool
+
 menuconfig KGDB
 	bool "KGDB: kernel debugging with remote gdb"
 	select FRAME_POINTER
@@ -10,15 +16,10 @@ menuconfig KGDB
 	  at http://kgdb.sourceforge.net as well as in DocBook form
 	  in Documentation/DocBook/. If unsure, say N.
 
-config HAVE_ARCH_KGDB_SHADOW_INFO
-	bool
-
-config HAVE_ARCH_KGDB
-	bool
+if KGDB
 
 config KGDB_SERIAL_CONSOLE
 	tristate "KGDB: use kgdb over the serial console"
-	depends on KGDB
 	select CONSOLE_POLL
 	select MAGIC_SYSRQ
 	default y
@@ -28,7 +29,6 @@ config KGDB_SERIAL_CONSOLE
 
 config KGDB_TESTS
 	bool "KGDB: internal test suite"
-	depends on KGDB
 	default n
 	help
 	  This is a kgdb I/O module specifically designed to test
@@ -56,3 +56,5 @@ config KGDB_TESTS_BOOT_STRING
 	  boot. See the drivers/misc/kgdbts.c for detailed
 	  information about other strings you could use beyond the
 	  default of V1F100.
+
+endif # KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 2d7001b7f5a..74b0cfb1fcc 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -6,7 +6,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
-	 proportions.o prio_heap.o
+	 proportions.o prio_heap.o ratelimit.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -36,6 +36,7 @@ obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_PLIST) += plist.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
+obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
 
 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
   lib-y += dec_and_lock.o
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
new file mode 100644
index 00000000000..a76a5e122ae
--- /dev/null
+++ b/lib/debugobjects.c
@@ -0,0 +1,890 @@
+/*
+ * Generic infrastructure for lifetime debugging of objects.
+ *
+ * Started by Thomas Gleixner
+ *
+ * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#include <linux/debugobjects.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/hash.h>
+
+#define ODEBUG_HASH_BITS	14
+#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)
+
+#define ODEBUG_POOL_SIZE	512
+#define ODEBUG_POOL_MIN_LEVEL	256
+
+#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
+#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
+#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
+
+struct debug_bucket {
+	struct hlist_head	list;
+	spinlock_t		lock;
+};
+
+static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
+
+static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE];
+
+static DEFINE_SPINLOCK(pool_lock);
+
+static HLIST_HEAD(obj_pool);
+
+static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
+static int			obj_pool_free = ODEBUG_POOL_SIZE;
+static int			obj_pool_used;
+static int			obj_pool_max_used;
+static struct kmem_cache	*obj_cache;
+
+static int			debug_objects_maxchain __read_mostly;
+static int			debug_objects_fixups __read_mostly;
+static int			debug_objects_warnings __read_mostly;
+static int			debug_objects_enabled __read_mostly;
+static struct debug_obj_descr	*descr_test __read_mostly;
+
+static int __init enable_object_debug(char *str)
+{
+	debug_objects_enabled = 1;
+	return 0;
+}
+early_param("debug_objects", enable_object_debug);
+
+static const char *obj_states[ODEBUG_STATE_MAX] = {
+	[ODEBUG_STATE_NONE]		= "none",
+	[ODEBUG_STATE_INIT]		= "initialized",
+	[ODEBUG_STATE_INACTIVE]		= "inactive",
+	[ODEBUG_STATE_ACTIVE]		= "active",
+	[ODEBUG_STATE_DESTROYED]	= "destroyed",
+	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
+};
+
+static int fill_pool(void)
+{
+	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
+	struct debug_obj *new;
+
+	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
+		return obj_pool_free;
+
+	if (unlikely(!obj_cache))
+		return obj_pool_free;
+
+	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
+
+		new = kmem_cache_zalloc(obj_cache, gfp);
+		if (!new)
+			return obj_pool_free;
+
+		spin_lock(&pool_lock);
+		hlist_add_head(&new->node, &obj_pool);
+		obj_pool_free++;
+		spin_unlock(&pool_lock);
+	}
+	return obj_pool_free;
+}
+
+/*
+ * Lookup an object in the hash bucket.
+ */
+static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
+{
+	struct hlist_node *node;
+	struct debug_obj *obj;
+	int cnt = 0;
+
+	hlist_for_each_entry(obj, node, &b->list, node) {
+		cnt++;
+		if (obj->object == addr)
+			return obj;
+	}
+	if (cnt > debug_objects_maxchain)
+		debug_objects_maxchain = cnt;
+
+	return NULL;
+}
+
+/*
+ * Allocate a new object. If the pool is empty and no refill possible,
+ * switch off the debugger.
+ */
+static struct debug_obj *
+alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
+{
+	struct debug_obj *obj = NULL;
+	int retry = 0;
+
+repeat:
+	spin_lock(&pool_lock);
+	if (obj_pool.first) {
+		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
+
+		obj->object = addr;
+		obj->descr = descr;
+		obj->state = ODEBUG_STATE_NONE;
+		hlist_del(&obj->node);
+
+		hlist_add_head(&obj->node, &b->list);
+
+		obj_pool_used++;
+		if (obj_pool_used > obj_pool_max_used)
+			obj_pool_max_used = obj_pool_used;
+
+		obj_pool_free--;
+		if (obj_pool_free < obj_pool_min_free)
+			obj_pool_min_free = obj_pool_free;
+	}
+	spin_unlock(&pool_lock);
+
+	if (fill_pool() && !obj && !retry++)
+		goto repeat;
+
+	return obj;
+}
+
+/*
+ * Put the object back into the pool or give it back to kmem_cache:
+ */
+static void free_object(struct debug_obj *obj)
+{
+	unsigned long idx = (unsigned long)(obj - obj_static_pool);
+
+	if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
+		spin_lock(&pool_lock);
+		hlist_add_head(&obj->node, &obj_pool);
+		obj_pool_free++;
+		obj_pool_used--;
+		spin_unlock(&pool_lock);
+	} else {
+		spin_lock(&pool_lock);
+		obj_pool_used--;
+		spin_unlock(&pool_lock);
+		kmem_cache_free(obj_cache, obj);
+	}
+}
+
+/*
+ * We ran out of memory. That means we probably have tons of objects
+ * allocated.
+ */
+static void debug_objects_oom(void)
+{
+	struct debug_bucket *db = obj_hash;
+	struct hlist_node *node, *tmp;
+	struct debug_obj *obj;
+	unsigned long flags;
+	int i;
+
+	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
+
+	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+		spin_lock_irqsave(&db->lock, flags);
+		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+			hlist_del(&obj->node);
+			free_object(obj);
+		}
+		spin_unlock_irqrestore(&db->lock, flags);
+	}
+}
+
+/*
+ * We use the pfn of the address for the hash. That way we can check
+ * for freed objects simply by checking the affected bucket.
+ */
+static struct debug_bucket *get_bucket(unsigned long addr)
+{
+	unsigned long hash;
+
+	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
+	return &obj_hash[hash];
+}
+
+static void debug_print_object(struct debug_obj *obj, char *msg)
+{
+	static int limit;
+
+	if (limit < 5 && obj->descr != descr_test) {
+		limit++;
+		printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
+		       obj_states[obj->state], obj->descr->name);
+		WARN_ON(1);
+	}
+	debug_objects_warnings++;
+}
+
+/*
+ * Try to repair the damage, so we have a better chance to get useful
+ * debug output.
+ */
+static void
+debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
+		   void * addr, enum debug_obj_state state)
+{
+	if (fixup)
+		debug_objects_fixups += fixup(addr, state);
+}
+
+static void debug_object_is_on_stack(void *addr, int onstack)
+{
+	void *stack = current->stack;
+	int is_on_stack;
+	static int limit;
+
+	if (limit > 4)
+		return;
+
+	is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE));
+
+	if (is_on_stack == onstack)
+		return;
+
+	limit++;
+	if (is_on_stack)
+		printk(KERN_WARNING
+		       "ODEBUG: object is on stack, but not annotated\n");
+	else
+		printk(KERN_WARNING
+		       "ODEBUG: object is not on stack, but annotated\n");
+	WARN_ON(1);
+}
+
+static void
+__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
+{
+	enum debug_obj_state state;
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	db = get_bucket((unsigned long) addr);
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (!obj) {
+		obj = alloc_object(addr, db, descr);
+		if (!obj) {
+			debug_objects_enabled = 0;
+			spin_unlock_irqrestore(&db->lock, flags);
+			debug_objects_oom();
+			return;
+		}
+		debug_object_is_on_stack(addr, onstack);
+	}
+
+	switch (obj->state) {
+	case ODEBUG_STATE_NONE:
+	case ODEBUG_STATE_INIT:
+	case ODEBUG_STATE_INACTIVE:
+		obj->state = ODEBUG_STATE_INIT;
+		break;
+
+	case ODEBUG_STATE_ACTIVE:
+		debug_print_object(obj, "init");
+		state = obj->state;
+		spin_unlock_irqrestore(&db->lock, flags);
+		debug_object_fixup(descr->fixup_init, addr, state);
+		return;
+
+	case ODEBUG_STATE_DESTROYED:
+		debug_print_object(obj, "init");
+		break;
+	default:
+		break;
+	}
+
+	spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_init - debug checks when an object is initialized
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ */
+void debug_object_init(void *addr, struct debug_obj_descr *descr)
+{
+	if (!debug_objects_enabled)
+		return;
+
+	__debug_object_init(addr, descr, 0);
+}
+
+/**
+ * debug_object_init_on_stack - debug checks when an object on stack is
+ *				initialized
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ */
+void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
+{
+	if (!debug_objects_enabled)
+		return;
+
+	__debug_object_init(addr, descr, 1);
+}
+
+/**
+ * debug_object_activate - debug checks when an object is activated
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ */
+void debug_object_activate(void *addr, struct debug_obj_descr *descr)
+{
+	enum debug_obj_state state;
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (obj) {
+		switch (obj->state) {
+		case ODEBUG_STATE_INIT:
+		case ODEBUG_STATE_INACTIVE:
+			obj->state = ODEBUG_STATE_ACTIVE;
+			break;
+
+		case ODEBUG_STATE_ACTIVE:
+			debug_print_object(obj, "activate");
+			state = obj->state;
+			spin_unlock_irqrestore(&db->lock, flags);
+			debug_object_fixup(descr->fixup_activate, addr, state);
+			return;
+
+		case ODEBUG_STATE_DESTROYED:
+			debug_print_object(obj, "activate");
+			break;
+		default:
+			break;
+		}
+		spin_unlock_irqrestore(&db->lock, flags);
+		return;
+	}
+
+	spin_unlock_irqrestore(&db->lock, flags);
+	/*
+	 * This happens when a static object is activated. We
+	 * let the type specific code decide whether this is
+	 * true or not.
+	 */
+	debug_object_fixup(descr->fixup_activate, addr,
+			   ODEBUG_STATE_NOTAVAILABLE);
+}
+
+/**
+ * debug_object_deactivate - debug checks when an object is deactivated
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ */
+void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
+{
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (obj) {
+		switch (obj->state) {
+		case ODEBUG_STATE_INIT:
+		case ODEBUG_STATE_INACTIVE:
+		case ODEBUG_STATE_ACTIVE:
+			obj->state = ODEBUG_STATE_INACTIVE;
+			break;
+
+		case ODEBUG_STATE_DESTROYED:
+			debug_print_object(obj, "deactivate");
+			break;
+		default:
+			break;
+		}
+	} else {
+		struct debug_obj o = { .object = addr,
+				       .state = ODEBUG_STATE_NOTAVAILABLE,
+				       .descr = descr };
+
+		debug_print_object(&o, "deactivate");
+	}
+
+	spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_destroy - debug checks when an object is destroyed
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ */
+void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
+{
+	enum debug_obj_state state;
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (!obj)
+		goto out_unlock;
+
+	switch (obj->state) {
+	case ODEBUG_STATE_NONE:
+	case ODEBUG_STATE_INIT:
+	case ODEBUG_STATE_INACTIVE:
+		obj->state = ODEBUG_STATE_DESTROYED;
+		break;
+	case ODEBUG_STATE_ACTIVE:
+		debug_print_object(obj, "destroy");
+		state = obj->state;
+		spin_unlock_irqrestore(&db->lock, flags);
+		debug_object_fixup(descr->fixup_destroy, addr, state);
+		return;
+
+	case ODEBUG_STATE_DESTROYED:
+		debug_print_object(obj, "destroy");
+		break;
+	default:
+		break;
+	}
+out_unlock:
+	spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_free - debug checks when an object is freed
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ */
+void debug_object_free(void *addr, struct debug_obj_descr *descr)
+{
+	enum debug_obj_state state;
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (!obj)
+		goto out_unlock;
+
+	switch (obj->state) {
+	case ODEBUG_STATE_ACTIVE:
+		debug_print_object(obj, "free");
+		state = obj->state;
+		spin_unlock_irqrestore(&db->lock, flags);
+		debug_object_fixup(descr->fixup_free, addr, state);
+		return;
+	default:
+		hlist_del(&obj->node);
+		free_object(obj);
+		break;
+	}
+out_unlock:
+	spin_unlock_irqrestore(&db->lock, flags);
+}
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+static void __debug_check_no_obj_freed(const void *address, unsigned long size)
+{
+	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
+	struct hlist_node *node, *tmp;
+	struct debug_obj_descr *descr;
+	enum debug_obj_state state;
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	int cnt;
+
+	saddr = (unsigned long) address;
+	eaddr = saddr + size;
+	paddr = saddr & ODEBUG_CHUNK_MASK;
+	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
+	chunks >>= ODEBUG_CHUNK_SHIFT;
+
+	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
+		db = get_bucket(paddr);
+
+repeat:
+		cnt = 0;
+		spin_lock_irqsave(&db->lock, flags);
+		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+			cnt++;
+			oaddr = (unsigned long) obj->object;
+			if (oaddr < saddr || oaddr >= eaddr)
+				continue;
+
+			switch (obj->state) {
+			case ODEBUG_STATE_ACTIVE:
+				debug_print_object(obj, "free");
+				descr = obj->descr;
+				state = obj->state;
+				spin_unlock_irqrestore(&db->lock, flags);
+				debug_object_fixup(descr->fixup_free,
+						   (void *) oaddr, state);
+				goto repeat;
+			default:
+				hlist_del(&obj->node);
+				free_object(obj);
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&db->lock, flags);
+		if (cnt > debug_objects_maxchain)
+			debug_objects_maxchain = cnt;
+	}
+}
+
+void debug_check_no_obj_freed(const void *address, unsigned long size)
+{
+	if (debug_objects_enabled)
+		__debug_check_no_obj_freed(address, size);
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+static int debug_stats_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
+	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
+	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
+	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
+	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
+	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
+	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+	return 0;
+}
+
+static int debug_stats_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, debug_stats_show, NULL);
+}
+
+static const struct file_operations debug_stats_fops = {
+	.open		= debug_stats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init debug_objects_init_debugfs(void)
+{
+	struct dentry *dbgdir, *dbgstats;
+
+	if (!debug_objects_enabled)
+		return 0;
+
+	dbgdir = debugfs_create_dir("debug_objects", NULL);
+	if (!dbgdir)
+		return -ENOMEM;
+
+	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
+				       &debug_stats_fops);
+	if (!dbgstats)
+		goto err;
+
+	return 0;
+
+err:
+	debugfs_remove(dbgdir);
+
+	return -ENOMEM;
+}
+__initcall(debug_objects_init_debugfs);
+
+#else
+static inline void debug_objects_init_debugfs(void) { }
+#endif
+
+#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
+
+/* Random data structure for the self test */
+struct self_test {
+	unsigned long	dummy1[6];
+	int		static_init;
+	unsigned long	dummy2[3];
+};
+
+static __initdata struct debug_obj_descr descr_type_test;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int __init fixup_init(void *addr, enum debug_obj_state state)
+{
+	struct self_test *obj = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_ACTIVE:
+		debug_object_deactivate(obj, &descr_type_test);
+		debug_object_init(obj, &descr_type_test);
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int __init fixup_activate(void *addr, enum debug_obj_state state)
+{
+	struct self_test *obj = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_NOTAVAILABLE:
+		if (obj->static_init == 1) {
+			debug_object_init(obj, &descr_type_test);
+			debug_object_activate(obj, &descr_type_test);
+			/*
+			 * Real code should return 0 here ! This is
+			 * not a fixup of some bad behaviour. We
+			 * merely call the debug_init function to keep
+			 * track of the object.
+			 */
+			return 1;
+		} else {
+			/* Real code needs to emit a warning here */
+		}
+		return 0;
+
+	case ODEBUG_STATE_ACTIVE:
+		debug_object_deactivate(obj, &descr_type_test);
+		debug_object_activate(obj, &descr_type_test);
+		return 1;
+
+	default:
+		return 0;
+	}
+}
+
+/*
+ * fixup_destroy is called when:
+ * - an active object is destroyed
+ */
+static int __init fixup_destroy(void *addr, enum debug_obj_state state)
+{
+	struct self_test *obj = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_ACTIVE:
+		debug_object_deactivate(obj, &descr_type_test);
+		debug_object_destroy(obj, &descr_type_test);
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int __init fixup_free(void *addr, enum debug_obj_state state)
+{
+	struct self_test *obj = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_ACTIVE:
+		debug_object_deactivate(obj, &descr_type_test);
+		debug_object_free(obj, &descr_type_test);
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static int
+check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
+{
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+	int res = -EINVAL;
+
+	db = get_bucket((unsigned long) addr);
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (!obj && state != ODEBUG_STATE_NONE) {
+		printk(KERN_ERR "ODEBUG: selftest object not found\n");
+		WARN_ON(1);
+		goto out;
+	}
+	if (obj && obj->state != state) {
+		printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
+		       obj->state, state);
+		WARN_ON(1);
+		goto out;
+	}
+	if (fixups != debug_objects_fixups) {
+		printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
+		       fixups, debug_objects_fixups);
+		WARN_ON(1);
+		goto out;
+	}
+	if (warnings != debug_objects_warnings) {
+		printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
+		       warnings, debug_objects_warnings);
+		WARN_ON(1);
+		goto out;
+	}
+	res = 0;
+out:
+	spin_unlock_irqrestore(&db->lock, flags);
+	if (res)
+		debug_objects_enabled = 0;
+	return res;
+}
+
+static __initdata struct debug_obj_descr descr_type_test = {
+	.name			= "selftest",
+	.fixup_init		= fixup_init,
+	.fixup_activate		= fixup_activate,
+	.fixup_destroy		= fixup_destroy,
+	.fixup_free		= fixup_free,
+};
+
+static __initdata struct self_test obj = { .static_init = 0 };
+
+static void __init debug_objects_selftest(void)
+{
+	int fixups, oldfixups, warnings, oldwarnings;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	fixups = oldfixups = debug_objects_fixups;
+	warnings = oldwarnings = debug_objects_warnings;
+	descr_test = &descr_type_test;
+
+	debug_object_init(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
+		goto out;
+	debug_object_activate(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
+		goto out;
+	debug_object_activate(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
+		goto out;
+	debug_object_deactivate(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
+		goto out;
+	debug_object_destroy(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
+		goto out;
+	debug_object_init(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+		goto out;
+	debug_object_activate(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+		goto out;
+	debug_object_deactivate(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+		goto out;
+	debug_object_free(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
+		goto out;
+
+	obj.static_init = 1;
+	debug_object_activate(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
+		goto out;
+	debug_object_init(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
+		goto out;
+	debug_object_free(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
+		goto out;
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+	debug_object_init(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
+		goto out;
+	debug_object_activate(&obj, &descr_type_test);
+	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
+		goto out;
+	__debug_check_no_obj_freed(&obj, sizeof(obj));
+	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
+		goto out;
+#endif
+	printk(KERN_INFO "ODEBUG: selftest passed\n");
+
+out:
+	debug_objects_fixups = oldfixups;
+	debug_objects_warnings = oldwarnings;
+	descr_test = NULL;
+
+	local_irq_restore(flags);
+}
+#else
+static inline void debug_objects_selftest(void) { }
+#endif
+
+/*
+ * Called during early boot to initialize the hash buckets and link
+ * the static object pool objects into the pool list. After this call
+ * the object tracker is fully operational.
+ */
+void __init debug_objects_early_init(void)
+{
+	int i;
+
+	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
+		spin_lock_init(&obj_hash[i].lock);
+
+	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
+		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
+}
+
+/*
+ * Called after the kmem_caches are functional to set up a dedicated
+ * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
+ * prevents the debug code from being called on kmem_cache_free() for
+ * the debug tracker objects, avoiding recursive calls.
+ */
+void __init debug_objects_mem_init(void)
+{
+	if (!debug_objects_enabled)
+		return;
+
+	obj_cache = kmem_cache_create("debug_objects_cache",
+				      sizeof (struct debug_obj), 0,
+				      SLAB_DEBUG_OBJECTS, NULL);
+
+	if (!obj_cache)
+		debug_objects_enabled = 0;
+	else
+		debug_objects_selftest();
+}
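The API above is meant to be wrapped by a subsystem's own lifetime hooks. A minimal sketch of a hypothetical user follows: everything named foo_* is illustrative and not part of this patch; the descriptor layout and the debug_object_*() calls mirror the selftest code above.

#include <linux/debugobjects.h>
#include <linux/slab.h>

/* Hypothetical tracked object; only debug_object_*() comes from the patch. */
struct foo {
	int data;
};

static struct debug_obj_descr foo_debug_descr = {
	.name = "foo",
	/* optionally: .fixup_init, .fixup_activate, ... as in the selftest */
};

static void foo_init(struct foo *f)
{
	f->data = 0;
	debug_object_init(f, &foo_debug_descr);		/* -> "initialized" */
}

static void foo_start(struct foo *f)
{
	debug_object_activate(f, &foo_debug_descr);	/* -> "active" */
}

static void foo_stop(struct foo *f)
{
	debug_object_deactivate(f, &foo_debug_descr);	/* -> "inactive" */
}

static void foo_release(struct foo *f)
{
	debug_object_free(f, &foo_debug_descr);	/* drop the tracking entry */
	kfree(f);
}

Activating an already active foo, or freeing one while still active (with DEBUG_OBJECTS_FREE wired into the allocators), would then trigger the ODEBUG warnings and fixup callbacks shown above.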
diff --git a/lib/devres.c b/lib/devres.c
index edc27a5d1b7..72c8909006d 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -2,7 +2,7 @@
 #include <linux/io.h>
 #include <linux/module.h>
 
-static void devm_ioremap_release(struct device *dev, void *res)
+void devm_ioremap_release(struct device *dev, void *res)
 {
 	iounmap(*(void __iomem **)res);
 }
@@ -20,7 +20,7 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
  *
  * Managed ioremap(). Map is automatically unmapped on driver detach.
  */
-void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
+void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
 			   unsigned long size)
 {
 	void __iomem **ptr, *addr;
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(devm_ioremap);
  * Managed ioremap_nocache(). Map is automatically unmapped on driver
  * detach.
  */
-void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
+void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
 				   unsigned long size)
 {
 	void __iomem **ptr, *addr;
diff --git a/lib/div64.c b/lib/div64.c
index b71cf93c529..bb5bd0c0f03 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -16,9 +16,8 @@
  * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
  */
 
-#include <linux/types.h>
 #include <linux/module.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
 
 /* Not needed on 64bit architectures */
 #if BITS_PER_LONG == 32
@@ -58,10 +57,31 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
 
 EXPORT_SYMBOL(__div64_32);
 
+#ifndef div_s64_rem
+s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+{
+	u64 quotient;
+
+	if (dividend < 0) {
+		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
+		*remainder = -*remainder;
+		if (divisor > 0)
+			quotient = -quotient;
+	} else {
+		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
+		if (divisor < 0)
+			quotient = -quotient;
+	}
+	return quotient;
+}
+EXPORT_SYMBOL(div_s64_rem);
+#endif
+
 /* 64bit divisor, dividend and result. dynamic precision */
-uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+#ifndef div64_u64
+u64 div64_u64(u64 dividend, u64 divisor)
 {
-	uint32_t high, d;
+	u32 high, d;
 
 	high = divisor >> 32;
 	if (high) {
@@ -72,10 +92,9 @@ uint64_t div64_64(uint64_t dividend, uint64_t divisor)
 	} else
 		d = divisor;
 
-	do_div(dividend, d);
-
-	return dividend;
+	return div_u64(dividend, d);
 }
-EXPORT_SYMBOL(div64_64);
+EXPORT_SYMBOL(div64_u64);
+#endif
 
 #endif /* BITS_PER_LONG == 32 */
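For reference, a short sketch of the semantics of the helpers added or renamed above; the example values follow directly from the code (div_s64_rem() gives the remainder the sign of the dividend), and div64_demo is an illustrative name.

#include <linux/math64.h>

static void div64_demo(void)
{
	s32 rem;
	s64 q;
	u64 r;

	q = div_s64_rem(-7, 2, &rem);	/* q == -3, rem == -1 */
	q = div_s64_rem(7, -2, &rem);	/* q == -3, rem == 1 */

	/* full 64/64 division on 32-bit, r == 3333333333 */
	r = div64_u64(10000000000ULL, 3);
}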
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index d3f5784807b..24c59ded47a 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -20,8 +20,8 @@
 /*
  * Find the next set bit in a memory region.
  */
-unsigned long __find_next_bit(const unsigned long *addr,
-		unsigned long size, unsigned long offset)
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+			    unsigned long offset)
 {
 	const unsigned long *p = addr + BITOP_WORD(offset);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -58,14 +58,14 @@ found_first:
 found_middle:
 	return result + __ffs(tmp);
 }
-EXPORT_SYMBOL(__find_next_bit);
+EXPORT_SYMBOL(find_next_bit);
 
 /*
  * This implementation of find_{first,next}_zero_bit was stolen from
  * Linus' asm-alpha/bitops.h.
  */
-unsigned long __find_next_zero_bit(const unsigned long *addr,
-		unsigned long size, unsigned long offset)
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+				 unsigned long offset)
 {
 	const unsigned long *p = addr + BITOP_WORD(offset);
 	unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -102,15 +102,14 @@ found_first:
 found_middle:
 	return result + ffz(tmp);
 }
-EXPORT_SYMBOL(__find_next_zero_bit);
+EXPORT_SYMBOL(find_next_zero_bit);
 #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
 
 #ifdef CONFIG_GENERIC_FIND_FIRST_BIT
 /*
  * Find the first set bit in a memory region.
  */
-unsigned long __find_first_bit(const unsigned long *addr,
-		unsigned long size)
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
 {
 	const unsigned long *p = addr;
 	unsigned long result = 0;
@@ -131,13 +130,12 @@ unsigned long __find_first_bit(const unsigned long *addr,
 found:
 	return result + __ffs(tmp);
 }
-EXPORT_SYMBOL(__find_first_bit);
+EXPORT_SYMBOL(find_first_bit);
 
 /*
  * Find the first cleared bit in a memory region.
  */
-unsigned long __find_first_zero_bit(const unsigned long *addr,
-		unsigned long size)
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
 {
 	const unsigned long *p = addr;
 	unsigned long result = 0;
@@ -158,7 +156,7 @@ unsigned long __find_first_zero_bit(const unsigned long *addr,
 found:
 	return result + ffz(tmp);
 }
-EXPORT_SYMBOL(__find_first_zero_bit);
+EXPORT_SYMBOL(find_first_zero_bit);
 #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
 
 #ifdef __BIG_ENDIAN
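With the double-underscore wrappers gone, these are the generic entry points themselves. A typical caller walks every set bit like this (a sketch; walk_set_bits and its arguments are illustrative):

#include <linux/bitops.h>
#include <linux/kernel.h>

static void walk_set_bits(const unsigned long *map, unsigned long nbits)
{
	unsigned long bit;

	/* find_first_bit()/find_next_bit() return nbits when exhausted */
	for (bit = find_first_bit(map, nbits);
	     bit < nbits;
	     bit = find_next_bit(map, nbits, bit + 1))
		printk(KERN_DEBUG "bit %lu is set\n", bit);
}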
diff --git a/lib/idr.c b/lib/idr.c
index afbb0b1023d..7a02e173f02 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -385,8 +385,8 @@ void idr_remove(struct idr *idp, int id)
 	while (idp->id_free_cnt >= IDR_FREE_MAX) {
 		p = alloc_layer(idp);
 		kmem_cache_free(idr_layer_cache, p);
-		return;
 	}
+	return;
 }
 EXPORT_SYMBOL(idr_remove);
 
@@ -585,12 +585,11 @@ static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
 	memset(idr_layer, 0, sizeof(struct idr_layer));
 }
 
-static int init_id_cache(void)
+void __init idr_init_cache(void)
 {
-	if (!idr_layer_cache)
-		idr_layer_cache = kmem_cache_create("idr_layer_cache",
-			sizeof(struct idr_layer), 0, 0, idr_cache_ctor);
-	return 0;
+	idr_layer_cache = kmem_cache_create("idr_layer_cache",
+				sizeof(struct idr_layer), 0, SLAB_PANIC,
+				idr_cache_ctor);
 }
 
 /**
@@ -602,7 +601,6 @@ static int init_id_cache(void)
  */
 void idr_init(struct idr *idp)
 {
-	init_id_cache();
 	memset(idp, 0, sizeof(struct idr));
 	spin_lock_init(&idp->lock);
 }
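After this change idr_init() no longer allocates anything; the layer cache is created once at boot via idr_init_cache() (with SLAB_PANIC, so an early failure is fatal rather than silent). A sketch of the classic 2008-era two-step allocation pattern against this API: my_idr and store_obj are illustrative, my_idr must already have been set up with idr_init(), and idr_pre_get()/idr_get_new() are the long-standing helpers of that era, not part of this patch.

#include <linux/idr.h>

static struct idr my_idr;	/* assumed initialized with idr_init() */

static int store_obj(void *obj)
{
	int id, ret;

	do {
		/* pre-allocate layers; may sleep with GFP_KERNEL */
		if (!idr_pre_get(&my_idr, GFP_KERNEL))
			return -ENOMEM;
		ret = idr_get_new(&my_idr, obj, &id);
	} while (ret == -EAGAIN);	/* raced: someone used our layers */

	return ret ? ret : id;
}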
diff --git a/lib/inflate.c b/lib/inflate.c
index 845f91d3ac1..9762294be06 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -811,6 +811,9 @@ DEBG("<dyn");
   ll = malloc(sizeof(*ll) * (286+30));  /* literal/length and distance code lengths */
 #endif
 
+  if (ll == NULL)
+    return 1;
+
   /* make local bit buffer */
   b = bb;
   k = bk;
diff --git a/lib/iomap.c b/lib/iomap.c
index dd6ca48fe6b..37a3ea4cac9 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -257,7 +257,7 @@ EXPORT_SYMBOL(ioport_unmap);
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
 	resource_size_t start = pci_resource_start(dev, bar);
-	unsigned long len = pci_resource_len(dev, bar);
+	resource_size_t len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 
 	if (!len || !start)
diff --git a/lib/klist.c b/lib/klist.c
index 120bd175aa7..cca37f96faa 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -1,38 +1,37 @@
 /*
  * klist.c - Routines for manipulating klists.
  *
+ * Copyright (C) 2005 Patrick Mochel
  *
- * This klist interface provides a couple of structures that wrap around
- * struct list_head to provide explicit list "head" (struct klist) and
- * list "node" (struct klist_node) objects. For struct klist, a spinlock
- * is included that protects access to the actual list itself. struct
- * klist_node provides a pointer to the klist that owns it and a kref
- * reference count that indicates the number of current users of that node
- * in the list.
+ * This file is released under the GPL v2.
  *
- * The entire point is to provide an interface for iterating over a list
- * that is safe and allows for modification of the list during the
- * iteration (e.g. insertion and removal), including modification of the
- * current node on the list.
+ * This klist interface provides a couple of structures that wrap around
+ * struct list_head to provide explicit list "head" (struct klist) and list
+ * "node" (struct klist_node) objects. For struct klist, a spinlock is
+ * included that protects access to the actual list itself. struct
+ * klist_node provides a pointer to the klist that owns it and a kref
+ * reference count that indicates the number of current users of that node
+ * in the list.
  *
- * It works using a 3rd object type - struct klist_iter - that is declared
- * and initialized before an iteration. klist_next() is used to acquire the
- * next element in the list. It returns NULL if there are no more items.
- * Internally, that routine takes the klist's lock, decrements the reference
- * count of the previous klist_node and increments the count of the next
- * klist_node. It then drops the lock and returns.
+ * The entire point is to provide an interface for iterating over a list
+ * that is safe and allows for modification of the list during the
+ * iteration (e.g. insertion and removal), including modification of the
+ * current node on the list.
  *
- * There are primitives for adding and removing nodes to/from a klist.
- * When deleting, klist_del() will simply decrement the reference count.
- * Only when the count goes to 0 is the node removed from the list.
- * klist_remove() will try to delete the node from the list and block
- * until it is actually removed. This is useful for objects (like devices)
- * that have been removed from the system and must be freed (but must wait
- * until all accessors have finished).
+ * It works using a 3rd object type - struct klist_iter - that is declared
+ * and initialized before an iteration. klist_next() is used to acquire the
+ * next element in the list. It returns NULL if there are no more items.
+ * Internally, that routine takes the klist's lock, decrements the
+ * reference count of the previous klist_node and increments the count of
+ * the next klist_node. It then drops the lock and returns.
  *
- * Copyright (C) 2005 Patrick Mochel
- *
- * This file is released under the GPL v2.
+ * There are primitives for adding and removing nodes to/from a klist.
+ * When deleting, klist_del() will simply decrement the reference count.
+ * Only when the count goes to 0 is the node removed from the list.
+ * klist_remove() will try to delete the node from the list and block until
+ * it is actually removed. This is useful for objects (like devices) that
+ * have been removed from the system and must be freed (but must wait until
+ * all accessors have finished).
  */
 
 #include <linux/klist.h>
@@ -40,10 +39,10 @@
 
 
 /**
  * klist_init - Initialize a klist structure.
  * @k: The klist we're initializing.
  * @get: The get function for the embedding object (NULL if none)
  * @put: The put function for the embedding object (NULL if none)
  *
  * Initialises the klist structure. If the klist_node structures are
  * going to be embedded in refcounted objects (necessary for safe
@@ -51,8 +50,7 @@
  * functions that take and release references on the embedding
  * objects.
  */
-
-void klist_init(struct klist * k, void (*get)(struct klist_node *),
+void klist_init(struct klist *k, void (*get)(struct klist_node *),
 		void (*put)(struct klist_node *))
 {
 	INIT_LIST_HEAD(&k->k_list);
@@ -60,26 +58,23 @@ void klist_init(struct klist * k, void (*get)(struct klist_node *),
 	k->get = get;
 	k->put = put;
 }
-
 EXPORT_SYMBOL_GPL(klist_init);
 
-
-static void add_head(struct klist * k, struct klist_node * n)
+static void add_head(struct klist *k, struct klist_node *n)
 {
 	spin_lock(&k->k_lock);
 	list_add(&n->n_node, &k->k_list);
 	spin_unlock(&k->k_lock);
 }
 
-static void add_tail(struct klist * k, struct klist_node * n)
+static void add_tail(struct klist *k, struct klist_node *n)
 {
 	spin_lock(&k->k_lock);
 	list_add_tail(&n->n_node, &k->k_list);
 	spin_unlock(&k->k_lock);
 }
 
-
-static void klist_node_init(struct klist * k, struct klist_node * n)
+static void klist_node_init(struct klist *k, struct klist_node *n)
 {
 	INIT_LIST_HEAD(&n->n_node);
 	init_completion(&n->n_removed);
@@ -89,60 +84,83 @@ static void klist_node_init(struct klist * k, struct klist_node * n)
 		k->get(n);
 }
 
-
 /**
  * klist_add_head - Initialize a klist_node and add it to front.
  * @n: node we're adding.
  * @k: klist it's going on.
  */
-
-void klist_add_head(struct klist_node * n, struct klist * k)
+void klist_add_head(struct klist_node *n, struct klist *k)
 {
 	klist_node_init(k, n);
 	add_head(k, n);
 }
-
 EXPORT_SYMBOL_GPL(klist_add_head);
 
-
 /**
  * klist_add_tail - Initialize a klist_node and add it to back.
  * @n: node we're adding.
  * @k: klist it's going on.
  */
-
-void klist_add_tail(struct klist_node * n, struct klist * k)
+void klist_add_tail(struct klist_node *n, struct klist *k)
 {
 	klist_node_init(k, n);
 	add_tail(k, n);
 }
-
 EXPORT_SYMBOL_GPL(klist_add_tail);
 
+/**
+ * klist_add_after - Init a klist_node and add it after an existing node
+ * @n: node we're adding.
+ * @pos: node to put @n after
+ */
+void klist_add_after(struct klist_node *n, struct klist_node *pos)
+{
+	struct klist *k = pos->n_klist;
+
+	klist_node_init(k, n);
+	spin_lock(&k->k_lock);
+	list_add(&n->n_node, &pos->n_node);
+	spin_unlock(&k->k_lock);
+}
+EXPORT_SYMBOL_GPL(klist_add_after);
+
+/**
+ * klist_add_before - Init a klist_node and add it before an existing node
+ * @n: node we're adding.
+ * @pos: node to put @n before
+ */
+void klist_add_before(struct klist_node *n, struct klist_node *pos)
+{
+	struct klist *k = pos->n_klist;
+
+	klist_node_init(k, n);
+	spin_lock(&k->k_lock);
+	list_add_tail(&n->n_node, &pos->n_node);
+	spin_unlock(&k->k_lock);
+}
+EXPORT_SYMBOL_GPL(klist_add_before);
 
-static void klist_release(struct kref * kref)
+static void klist_release(struct kref *kref)
 {
-	struct klist_node * n = container_of(kref, struct klist_node, n_ref);
+	struct klist_node *n = container_of(kref, struct klist_node, n_ref);
 
 	list_del(&n->n_node);
 	complete(&n->n_removed);
 	n->n_klist = NULL;
 }
 
-static int klist_dec_and_del(struct klist_node * n)
+static int klist_dec_and_del(struct klist_node *n)
 {
 	return kref_put(&n->n_ref, klist_release);
 }
 
-
 /**
  * klist_del - Decrement the reference count of node and try to remove.
  * @n: node we're deleting.
  */
-
-void klist_del(struct klist_node * n)
+void klist_del(struct klist_node *n)
 {
-	struct klist * k = n->n_klist;
+	struct klist *k = n->n_klist;
 	void (*put)(struct klist_node *) = k->put;
 
 	spin_lock(&k->k_lock);
@@ -152,48 +170,40 @@ void klist_del(struct klist_node * n)
 	if (put)
 		put(n);
 }
-
 EXPORT_SYMBOL_GPL(klist_del);
 
-
 /**
  * klist_remove - Decrement the refcount of node and wait for it to go away.
  * @n: node we're removing.
  */
-
-void klist_remove(struct klist_node * n)
+void klist_remove(struct klist_node *n)
 {
 	klist_del(n);
 	wait_for_completion(&n->n_removed);
 }
-
 EXPORT_SYMBOL_GPL(klist_remove);
 
-
 /**
  * klist_node_attached - Say whether a node is bound to a list or not.
  * @n: Node that we're testing.
  */
-
-int klist_node_attached(struct klist_node * n)
+int klist_node_attached(struct klist_node *n)
 {
 	return (n->n_klist != NULL);
 }
-
 EXPORT_SYMBOL_GPL(klist_node_attached);
 
-
 /**
  * klist_iter_init_node - Initialize a klist_iter structure.
  * @k: klist we're iterating.
  * @i: klist_iter we're filling.
  * @n: node to start with.
  *
  * Similar to klist_iter_init(), but starts the action off with @n,
  * instead of with the list head.
  */
-
-void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_node * n)
+void klist_iter_init_node(struct klist *k, struct klist_iter *i,
+			  struct klist_node *n)
 {
 	i->i_klist = k;
 	i->i_head = &k->k_list;
@@ -201,66 +211,56 @@ void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_
 	if (n)
 		kref_get(&n->n_ref);
 }
-
 EXPORT_SYMBOL_GPL(klist_iter_init_node);
 
-
 /**
  * klist_iter_init - Initialize a klist_iter structure.
  * @k: klist we're iterating.
  * @i: klist_iter structure we're filling.
  *
  * Similar to klist_iter_init_node(), but start with the list head.
  */
-
-void klist_iter_init(struct klist * k, struct klist_iter * i)
+void klist_iter_init(struct klist *k, struct klist_iter *i)
 {
 	klist_iter_init_node(k, i, NULL);
 }
-
 EXPORT_SYMBOL_GPL(klist_iter_init);
 
-
 /**
  * klist_iter_exit - Finish a list iteration.
  * @i: Iterator structure.
  *
  * Must be called when done iterating over list, as it decrements the
  * refcount of the current node. Necessary in case iteration exited before
  * the end of the list was reached, and always good form.
  */
-
-void klist_iter_exit(struct klist_iter * i)
+void klist_iter_exit(struct klist_iter *i)
 {
 	if (i->i_cur) {
 		klist_del(i->i_cur);
 		i->i_cur = NULL;
 	}
 }
-
 EXPORT_SYMBOL_GPL(klist_iter_exit);
 
-
-static struct klist_node * to_klist_node(struct list_head * n)
+static struct klist_node *to_klist_node(struct list_head *n)
 {
 	return container_of(n, struct klist_node, n_node);
 }
 
-
 /**
  * klist_next - Ante up next node in list.
  * @i: Iterator structure.
  *
  * First grab list lock. Decrement the reference count of the previous
  * node, if there was one. Grab the next node, increment its reference
  * count, drop the lock, and return that next node.
  */
-
-struct klist_node * klist_next(struct klist_iter * i)
+struct klist_node *klist_next(struct klist_iter *i)
 {
-	struct list_head * next;
-	struct klist_node * lnode = i->i_cur;
-	struct klist_node * knode = NULL;
+	struct list_head *next;
+	struct klist_node *lnode = i->i_cur;
+	struct klist_node *knode = NULL;
 	void (*put)(struct klist_node *) = i->i_klist->put;
 
 	spin_lock(&i->i_klist->k_lock);
@@ -281,7 +281,4 @@ struct klist_node * klist_next(struct klist_iter * i)
 		put(lnode);
 	return knode;
 }
-
 EXPORT_SYMBOL_GPL(klist_next);
-
-
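Beyond the pointer-style cleanup, the two new helpers allow positional insertion. A usage sketch (demo_list, demo and the nodes are illustrative; the refcounting behaviour of klist_next()/klist_iter_exit() is as documented above):

#include <linux/klist.h>

static struct klist demo_list;

static void demo(struct klist_node *a, struct klist_node *b)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_init(&demo_list, NULL, NULL);	/* no get()/put() hooks */
	klist_add_head(a, &demo_list);
	klist_add_after(b, a);			/* b now follows a */

	klist_iter_init(&demo_list, &iter);
	while ((n = klist_next(&iter)))
		;	/* the cursor node is pinned by its kref here */
	klist_iter_exit(&iter);	/* drops the ref if we stopped early */
}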
diff --git a/lib/kobject.c b/lib/kobject.c
index 2c649037092..718e5101c26 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -90,7 +90,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
 	}
 
 	pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
-		 kobj, __FUNCTION__, path);
+		 kobj, __func__, path);
 }
 
 /**
@@ -181,7 +181,7 @@ static int kobject_add_internal(struct kobject *kobj)
 	}
 
 	pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n",
-		 kobject_name(kobj), kobj, __FUNCTION__,
+		 kobject_name(kobj), kobj, __func__,
 		 parent ? kobject_name(parent) : "<NULL>",
 		 kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>");
 
@@ -196,10 +196,10 @@ static int kobject_add_internal(struct kobject *kobj)
 			printk(KERN_ERR "%s failed for %s with "
 			       "-EEXIST, don't try to register things with "
 			       "the same name in the same directory.\n",
-			       __FUNCTION__, kobject_name(kobj));
+			       __func__, kobject_name(kobj));
 		else
 			printk(KERN_ERR "%s failed for %s (%d)\n",
-			       __FUNCTION__, kobject_name(kobj), error);
+			       __func__, kobject_name(kobj), error);
 		dump_stack();
 	} else
 		kobj->state_in_sysfs = 1;
@@ -216,21 +216,12 @@ static int kobject_add_internal(struct kobject *kobj)
 static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
 				  va_list vargs)
 {
-	va_list aq;
-	char *name;
-
-	va_copy(aq, vargs);
-	name = kvasprintf(GFP_KERNEL, fmt, vargs);
-	va_end(aq);
-
-	if (!name)
-		return -ENOMEM;
-
 	/* Free the old name, if necessary. */
 	kfree(kobj->name);
 
-	/* Now, set the new name */
-	kobj->name = name;
+	kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
+	if (!kobj->name)
+		return -ENOMEM;
 
 	return 0;
 }
@@ -246,12 +237,12 @@ static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
  */
 int kobject_set_name(struct kobject *kobj, const char *fmt, ...)
 {
-	va_list args;
+	va_list vargs;
 	int retval;
 
-	va_start(args, fmt);
-	retval = kobject_set_name_vargs(kobj, fmt, args);
-	va_end(args);
+	va_start(vargs, fmt);
+	retval = kobject_set_name_vargs(kobj, fmt, vargs);
+	va_end(vargs);
 
 	return retval;
 }
@@ -301,12 +292,9 @@ EXPORT_SYMBOL(kobject_init);
301static int kobject_add_varg(struct kobject *kobj, struct kobject *parent, 292static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
302 const char *fmt, va_list vargs) 293 const char *fmt, va_list vargs)
303{ 294{
304 va_list aq;
305 int retval; 295 int retval;
306 296
307 va_copy(aq, vargs); 297 retval = kobject_set_name_vargs(kobj, fmt, vargs);
308 retval = kobject_set_name_vargs(kobj, fmt, aq);
309 va_end(aq);
310 if (retval) { 298 if (retval) {
 311 printk(KERN_ERR "kobject: cannot set name properly!\n"); 299 printk(KERN_ERR "kobject: cannot set name properly!\n");
312 return retval; 300 return retval;
@@ -540,7 +528,7 @@ static void kobject_cleanup(struct kobject *kobj)
540 const char *name = kobj->name; 528 const char *name = kobj->name;
541 529
542 pr_debug("kobject: '%s' (%p): %s\n", 530 pr_debug("kobject: '%s' (%p): %s\n",
543 kobject_name(kobj), kobj, __FUNCTION__); 531 kobject_name(kobj), kobj, __func__);
544 532
545 if (t && !t->release) 533 if (t && !t->release)
546 pr_debug("kobject: '%s' (%p): does not have a release() " 534 pr_debug("kobject: '%s' (%p): does not have a release() "
@@ -600,7 +588,7 @@ void kobject_put(struct kobject *kobj)
600 588
601static void dynamic_kobj_release(struct kobject *kobj) 589static void dynamic_kobj_release(struct kobject *kobj)
602{ 590{
603 pr_debug("kobject: (%p): %s\n", kobj, __FUNCTION__); 591 pr_debug("kobject: (%p): %s\n", kobj, __func__);
604 kfree(kobj); 592 kfree(kobj);
605} 593}
606 594
@@ -657,7 +645,7 @@ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
657 retval = kobject_add(kobj, parent, "%s", name); 645 retval = kobject_add(kobj, parent, "%s", name);
658 if (retval) { 646 if (retval) {
659 printk(KERN_WARNING "%s: kobject_add error: %d\n", 647 printk(KERN_WARNING "%s: kobject_add error: %d\n",
660 __FUNCTION__, retval); 648 __func__, retval);
661 kobject_put(kobj); 649 kobject_put(kobj);
662 kobj = NULL; 650 kobj = NULL;
663 } 651 }
@@ -765,7 +753,7 @@ static void kset_release(struct kobject *kobj)
765{ 753{
766 struct kset *kset = container_of(kobj, struct kset, kobj); 754 struct kset *kset = container_of(kobj, struct kset, kobj);
767 pr_debug("kobject: '%s' (%p): %s\n", 755 pr_debug("kobject: '%s' (%p): %s\n",
768 kobject_name(kobj), kobj, __FUNCTION__); 756 kobject_name(kobj), kobj, __func__);
769 kfree(kset); 757 kfree(kset);
770} 758}
771 759
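The va_copy() removal works because kobject_set_name_vargs() now consumes its va_list exactly once, inside kvasprintf(). A sketch of the usual consumer of these paths; the "example" name is made up, and using kernel_kobj as the parent is an assumption about the caller, not part of this patch:

        /* Create /sys/kernel/example and drop it again;
         * kobject_create_and_add() and kobject_put() are the
         * entry points patched above (<linux/kobject.h>). */
        static struct kobject *example_kobj;

        static int __init example_init(void)
        {
                example_kobj = kobject_create_and_add("example", kernel_kobj);
                if (!example_kobj)
                        return -ENOMEM;
                return 0;
        }

        static void __exit example_exit(void)
        {
                kobject_put(example_kobj);
        }
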
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 9fb6b86cf6b..2fa545a6316 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -101,7 +101,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
101 int retval = 0; 101 int retval = 0;
102 102
103 pr_debug("kobject: '%s' (%p): %s\n", 103 pr_debug("kobject: '%s' (%p): %s\n",
104 kobject_name(kobj), kobj, __FUNCTION__); 104 kobject_name(kobj), kobj, __func__);
105 105
106 /* search the kset we belong to */ 106 /* search the kset we belong to */
107 top_kobj = kobj; 107 top_kobj = kobj;
@@ -111,7 +111,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
111 if (!top_kobj->kset) { 111 if (!top_kobj->kset) {
112 pr_debug("kobject: '%s' (%p): %s: attempted to send uevent " 112 pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
113 "without kset!\n", kobject_name(kobj), kobj, 113 "without kset!\n", kobject_name(kobj), kobj,
114 __FUNCTION__); 114 __func__);
115 return -EINVAL; 115 return -EINVAL;
116 } 116 }
117 117
@@ -123,7 +123,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
123 if (!uevent_ops->filter(kset, kobj)) { 123 if (!uevent_ops->filter(kset, kobj)) {
124 pr_debug("kobject: '%s' (%p): %s: filter function " 124 pr_debug("kobject: '%s' (%p): %s: filter function "
125 "caused the event to drop!\n", 125 "caused the event to drop!\n",
126 kobject_name(kobj), kobj, __FUNCTION__); 126 kobject_name(kobj), kobj, __func__);
127 return 0; 127 return 0;
128 } 128 }
129 129
@@ -135,7 +135,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
135 if (!subsystem) { 135 if (!subsystem) {
136 pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the " 136 pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
137 "event to drop!\n", kobject_name(kobj), kobj, 137 "event to drop!\n", kobject_name(kobj), kobj,
138 __FUNCTION__); 138 __func__);
139 return 0; 139 return 0;
140 } 140 }
141 141
@@ -177,7 +177,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
177 if (retval) { 177 if (retval) {
178 pr_debug("kobject: '%s' (%p): %s: uevent() returned " 178 pr_debug("kobject: '%s' (%p): %s: uevent() returned "
179 "%d\n", kobject_name(kobj), kobj, 179 "%d\n", kobject_name(kobj), kobj,
180 __FUNCTION__, retval); 180 __func__, retval);
181 goto exit; 181 goto exit;
182 } 182 }
183 } 183 }
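The filter and subsystem checks above are driven by the kset's uevent operations; a minimal sketch of a filter that exercises the drop path (my_kobj_is_public() is a hypothetical predicate):

        /* Returning 0 from .filter suppresses the uevent, hitting the
         * "filter function caused the event to drop" branch above. */
        static int example_uevent_filter(struct kset *kset, struct kobject *kobj)
        {
                return my_kobj_is_public(kobj);   /* hypothetical */
        }

        static struct kset_uevent_ops example_uevent_ops = {
                .filter = example_uevent_filter,
        };
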
diff --git a/lib/lmb.c b/lib/lmb.c
index 207147ab25e..83287d3869a 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -46,14 +46,13 @@ void lmb_dump_all(void)
46#endif /* DEBUG */ 46#endif /* DEBUG */
47} 47}
48 48
49static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1, 49static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
50 u64 base2, u64 size2) 50 u64 size2)
51{ 51{
52 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); 52 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
53} 53}
54 54
55static long __init lmb_addrs_adjacent(u64 base1, u64 size1, 55static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
56 u64 base2, u64 size2)
57{ 56{
58 if (base2 == base1 + size1) 57 if (base2 == base1 + size1)
59 return 1; 58 return 1;
@@ -63,7 +62,7 @@ static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
63 return 0; 62 return 0;
64} 63}
65 64
66static long __init lmb_regions_adjacent(struct lmb_region *rgn, 65static long lmb_regions_adjacent(struct lmb_region *rgn,
67 unsigned long r1, unsigned long r2) 66 unsigned long r1, unsigned long r2)
68{ 67{
69 u64 base1 = rgn->region[r1].base; 68 u64 base1 = rgn->region[r1].base;
@@ -74,7 +73,7 @@ static long __init lmb_regions_adjacent(struct lmb_region *rgn,
74 return lmb_addrs_adjacent(base1, size1, base2, size2); 73 return lmb_addrs_adjacent(base1, size1, base2, size2);
75} 74}
76 75
77static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r) 76static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
78{ 77{
79 unsigned long i; 78 unsigned long i;
80 79
@@ -86,7 +85,7 @@ static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
86} 85}
87 86
88/* Assumption: base addr of region 1 < base addr of region 2 */ 87/* Assumption: base addr of region 1 < base addr of region 2 */
89static void __init lmb_coalesce_regions(struct lmb_region *rgn, 88static void lmb_coalesce_regions(struct lmb_region *rgn,
90 unsigned long r1, unsigned long r2) 89 unsigned long r1, unsigned long r2)
91{ 90{
92 rgn->region[r1].size += rgn->region[r2].size; 91 rgn->region[r1].size += rgn->region[r2].size;
@@ -118,7 +117,7 @@ void __init lmb_analyze(void)
118 lmb.memory.size += lmb.memory.region[i].size; 117 lmb.memory.size += lmb.memory.region[i].size;
119} 118}
120 119
121static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size) 120static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
122{ 121{
123 unsigned long coalesced = 0; 122 unsigned long coalesced = 0;
124 long adjacent, i; 123 long adjacent, i;
@@ -182,7 +181,7 @@ static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
182 return 0; 181 return 0;
183} 182}
184 183
185long __init lmb_add(u64 base, u64 size) 184long lmb_add(u64 base, u64 size)
186{ 185{
187 struct lmb_region *_rgn = &lmb.memory; 186 struct lmb_region *_rgn = &lmb.memory;
188 187
@@ -194,6 +193,55 @@ long __init lmb_add(u64 base, u64 size)
194 193
195} 194}
196 195
196long lmb_remove(u64 base, u64 size)
197{
198 struct lmb_region *rgn = &(lmb.memory);
199 u64 rgnbegin, rgnend;
200 u64 end = base + size;
201 int i;
202
 203 rgnbegin = rgnend = 0; /* suppress gcc warnings */
204
205 /* Find the region where (base, size) belongs to */
 206 for (i = 0; i < rgn->cnt; i++) {
207 rgnbegin = rgn->region[i].base;
208 rgnend = rgnbegin + rgn->region[i].size;
209
210 if ((rgnbegin <= base) && (end <= rgnend))
211 break;
212 }
213
214 /* Didn't find the region */
215 if (i == rgn->cnt)
216 return -1;
217
 218 /* Check to see if we are removing the entire region */
219 if ((rgnbegin == base) && (rgnend == end)) {
220 lmb_remove_region(rgn, i);
221 return 0;
222 }
223
224 /* Check to see if region is matching at the front */
225 if (rgnbegin == base) {
226 rgn->region[i].base = end;
227 rgn->region[i].size -= size;
228 return 0;
229 }
230
231 /* Check to see if the region is matching at the end */
232 if (rgnend == end) {
233 rgn->region[i].size -= size;
234 return 0;
235 }
236
237 /*
 238 * We need to split the entry: adjust the current one to end at the
 239 * beginning of the hole, then add a new region after the hole.
240 */
241 rgn->region[i].size = base - rgn->region[i].base;
242 return lmb_add_region(rgn, end, rgnend - end);
243}
244
197long __init lmb_reserve(u64 base, u64 size) 245long __init lmb_reserve(u64 base, u64 size)
198{ 246{
199 struct lmb_region *_rgn = &lmb.reserved; 247 struct lmb_region *_rgn = &lmb.reserved;
@@ -426,3 +474,36 @@ int __init lmb_is_reserved(u64 addr)
426 } 474 }
427 return 0; 475 return 0;
428} 476}
477
478/*
 479 * Given a <base, len> request, find the first memory region that
 480 * overlaps it, clamp the request to that region and return the chunk.
481 */
482int lmb_find(struct lmb_property *res)
483{
484 int i;
485 u64 rstart, rend;
486
487 rstart = res->base;
488 rend = rstart + res->size - 1;
489
490 for (i = 0; i < lmb.memory.cnt; i++) {
491 u64 start = lmb.memory.region[i].base;
492 u64 end = start + lmb.memory.region[i].size - 1;
493
494 if (start > rend)
495 return -1;
496
497 if ((end >= rstart) && (start < rend)) {
498 /* adjust the request */
499 if (rstart < start)
500 rstart = start;
501 if (rend > end)
502 rend = end;
503 res->base = rstart;
504 res->size = rend - rstart + 1;
505 return 0;
506 }
507 }
508 return -1;
509}
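A usage sketch of the two additions; the addresses are made up, and both calls operate on the global lmb.memory map:

        static void __init lmb_example(void)
        {
                struct lmb_property res;

                lmb_add(0x0, 0x40000000);       /* register 1GB of memory */
                lmb_remove(0x10000000, 0x1000); /* 4KB hole: splits the region */

                res.base = 0x0fff0000;          /* request straddles the hole */
                res.size = 0x20000;
                if (lmb_find(&res) == 0) {
                        /* res is now clamped to the contiguous chunk that
                         * ends at the hole: base 0x0fff0000, size 0x10000 */
                }
        }
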
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 393a0e915c2..119174494cb 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -102,6 +102,7 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
102 return; 102 return;
103 103
104 free_percpu(fbc->counters); 104 free_percpu(fbc->counters);
105 fbc->counters = NULL;
105#ifdef CONFIG_HOTPLUG_CPU 106#ifdef CONFIG_HOTPLUG_CPU
106 mutex_lock(&percpu_counters_lock); 107 mutex_lock(&percpu_counters_lock);
107 list_del(&fbc->list); 108 list_del(&fbc->list);
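Clearing ->counters makes a second destroy harmless, since the function already bails out for a NULL ->counters (the early return visible in the hunk's context). A sketch of the pattern this protects, e.g. error paths that may reach destroy twice:

        static int example(void)
        {
                struct percpu_counter c;

                if (percpu_counter_init(&c, 0))
                        return -ENOMEM;

                percpu_counter_add(&c, 42);

                percpu_counter_destroy(&c);
                percpu_counter_destroy(&c); /* now a safe no-op */
                return 0;
        }
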
diff --git a/lib/proportions.c b/lib/proportions.c
index 9508d9a7af3..4f387a643d7 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -73,12 +73,6 @@
73#include <linux/proportions.h> 73#include <linux/proportions.h>
74#include <linux/rcupdate.h> 74#include <linux/rcupdate.h>
75 75
76/*
77 * Limit the time part in order to ensure there are some bits left for the
78 * cycle counter.
79 */
80#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
81
82int prop_descriptor_init(struct prop_descriptor *pd, int shift) 76int prop_descriptor_init(struct prop_descriptor *pd, int shift)
83{ 77{
84 int err; 78 int err;
@@ -268,6 +262,38 @@ void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
268} 262}
269 263
270/* 264/*
265 * identical to __prop_inc_percpu, except that it limits this pl's fraction to
266 * @frac/PROP_FRAC_BASE by ignoring events when this limit has been exceeded.
267 */
268void __prop_inc_percpu_max(struct prop_descriptor *pd,
269 struct prop_local_percpu *pl, long frac)
270{
271 struct prop_global *pg = prop_get_global(pd);
272
273 prop_norm_percpu(pg, pl);
274
275 if (unlikely(frac != PROP_FRAC_BASE)) {
276 unsigned long period_2 = 1UL << (pg->shift - 1);
277 unsigned long counter_mask = period_2 - 1;
278 unsigned long global_count;
279 long numerator, denominator;
280
281 numerator = percpu_counter_read_positive(&pl->events);
282 global_count = percpu_counter_read(&pg->events);
283 denominator = period_2 + (global_count & counter_mask);
284
285 if (numerator > ((denominator * frac) >> PROP_FRAC_SHIFT))
286 goto out_put;
287 }
288
289 percpu_counter_add(&pl->events, 1);
290 percpu_counter_add(&pg->events, 1);
291
292out_put:
293 prop_put_global(pd, pg);
294}
295
296/*
271 * Obtain a fraction of this proportion 297 * Obtain a fraction of this proportion
272 * 298 *
273 * p_{j} = x_{j} / (period/2 + t % period/2) 299 * p_{j} = x_{j} / (period/2 + t % period/2)
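A hedged sketch of the new capped increment; PROP_FRAC_BASE and PROP_FRAC_SHIFT are assumed to come from <linux/proportions.h> (the limit definition moved there), and the descriptor/local setup uses the existing percpu proportions API:

        /* Account an event only while this source holds less than half
         * of the total proportion; beyond that, events are ignored. */
        static void example_account(struct prop_descriptor *pd,
                                    struct prop_local_percpu *pl)
        {
                __prop_inc_percpu_max(pd, pl, PROP_FRAC_BASE / 2);
        }
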
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
new file mode 100644
index 00000000000..485e3040dcd
--- /dev/null
+++ b/lib/ratelimit.c
@@ -0,0 +1,51 @@
1/*
 2 * ratelimit.c - generic rate-limiting helper.
3 *
4 * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
5 *
6 * This file is released under the GPLv2.
7 *
8 */
9
10#include <linux/kernel.h>
11#include <linux/jiffies.h>
12#include <linux/module.h>
13
 14/**
 15 * __ratelimit - rate limiting
 16 * @ratelimit_jiffies: minimum time in jiffies between two callbacks
 17 * @ratelimit_burst: number of callbacks allowed before rate limiting kicks in
 18 *
 19 * This enforces a rate limit: no more than @ratelimit_burst callbacks
 20 * in every @ratelimit_jiffies interval.
 21 */
22int __ratelimit(int ratelimit_jiffies, int ratelimit_burst)
23{
24 static DEFINE_SPINLOCK(ratelimit_lock);
25 static unsigned toks = 10 * 5 * HZ;
26 static unsigned long last_msg;
27 static int missed;
28 unsigned long flags;
29 unsigned long now = jiffies;
30
31 spin_lock_irqsave(&ratelimit_lock, flags);
32 toks += now - last_msg;
33 last_msg = now;
34 if (toks > (ratelimit_burst * ratelimit_jiffies))
35 toks = ratelimit_burst * ratelimit_jiffies;
36 if (toks >= ratelimit_jiffies) {
37 int lost = missed;
38
39 missed = 0;
40 toks -= ratelimit_jiffies;
41 spin_unlock_irqrestore(&ratelimit_lock, flags);
42 if (lost)
43 printk(KERN_WARNING "%s: %d messages suppressed\n",
44 __func__, lost);
45 return 1;
46 }
47 missed++;
48 spin_unlock_irqrestore(&ratelimit_lock, flags);
49 return 0;
50}
51EXPORT_SYMBOL(__ratelimit);
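A usage sketch, mirroring the printk() pattern this helper was extracted from; noisy_event() is hypothetical:

        /* Allow a burst of 10 messages, then at most one per 5 seconds. */
        static void noisy_event(void)
        {
                if (__ratelimit(5 * HZ, 10))
                        printk(KERN_WARNING "something noisy happened\n");
        }
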
diff --git a/lib/string.c b/lib/string.c
index 5efafed3d6b..b19b87af65a 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -493,6 +493,33 @@ char *strsep(char **s, const char *ct)
493EXPORT_SYMBOL(strsep); 493EXPORT_SYMBOL(strsep);
494#endif 494#endif
495 495
496/**
497 * sysfs_streq - return true if strings are equal, modulo trailing newline
498 * @s1: one string
499 * @s2: another string
500 *
501 * This routine returns true iff two strings are equal, treating both
502 * NUL and newline-then-NUL as equivalent string terminations. It's
503 * geared for use with sysfs input strings, which generally terminate
504 * with newlines but are compared against values without newlines.
505 */
506bool sysfs_streq(const char *s1, const char *s2)
507{
508 while (*s1 && *s1 == *s2) {
509 s1++;
510 s2++;
511 }
512
513 if (*s1 == *s2)
514 return true;
515 if (!*s1 && *s2 == '\n' && !s2[1])
516 return true;
517 if (*s1 == '\n' && !s1[1] && !*s2)
518 return true;
519 return false;
520}
521EXPORT_SYMBOL(sysfs_streq);
522
496#ifndef __HAVE_ARCH_MEMSET 523#ifndef __HAVE_ARCH_MEMSET
497/** 524/**
498 * memset - Fill a region of memory with the given value 525 * memset - Fill a region of memory with the given value
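A sketch of the intended sysfs use, comparing raw store() input (usually newline-terminated) against fixed tokens; the attribute handler itself is hypothetical:

        static bool mode_on;

        static ssize_t mode_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
        {
                /* "on" and "on\n" both match; no newline stripping needed */
                if (sysfs_streq(buf, "on"))
                        mode_on = true;
                else if (sysfs_streq(buf, "off"))
                        mode_on = false;
                else
                        return -EINVAL;
                return count;
        }
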
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 025922807e6..d568894df8c 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -31,6 +31,7 @@
31 31
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/bootmem.h> 33#include <linux/bootmem.h>
34#include <linux/iommu-helper.h>
34 35
35#define OFFSET(val,align) ((unsigned long) \ 36#define OFFSET(val,align) ((unsigned long) \
36 ( (val) & ( (align) - 1))) 37 ( (val) & ( (align) - 1)))
@@ -282,15 +283,6 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
282 return (addr & ~mask) != 0; 283 return (addr & ~mask) != 0;
283} 284}
284 285
285static inline unsigned int is_span_boundary(unsigned int index,
286 unsigned int nslots,
287 unsigned long offset_slots,
288 unsigned long max_slots)
289{
290 unsigned long offset = (offset_slots + index) & (max_slots - 1);
291 return offset + nslots > max_slots;
292}
293
294/* 286/*
295 * Allocates bounce buffer and returns its kernel virtual address. 287 * Allocates bounce buffer and returns its kernel virtual address.
296 */ 288 */
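The replacement helper lives in lib/iommu-helper.c; for reference, a sketch of the check it performs, equivalent to the local version removed above:

        /* True when [index, index + nslots) would cross a max_slots-aligned
         * boundary, given that slot 0 sits offset_slots into the aperture. */
        static inline int span_boundary_check(unsigned int index,
                                              unsigned int nslots,
                                              unsigned long offset_slots,
                                              unsigned long max_slots)
        {
                unsigned long offset = (offset_slots + index) & (max_slots - 1);

                return offset + nslots > max_slots;
        }
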
@@ -331,56 +323,53 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
331 * request and allocate a buffer from that IO TLB pool. 323 * request and allocate a buffer from that IO TLB pool.
332 */ 324 */
333 spin_lock_irqsave(&io_tlb_lock, flags); 325 spin_lock_irqsave(&io_tlb_lock, flags);
334 { 326 index = ALIGN(io_tlb_index, stride);
335 index = ALIGN(io_tlb_index, stride); 327 if (index >= io_tlb_nslabs)
336 if (index >= io_tlb_nslabs) 328 index = 0;
337 index = 0; 329 wrap = index;
338 wrap = index; 330
339 331 do {
340 do { 332 while (iommu_is_span_boundary(index, nslots, offset_slots,
341 while (is_span_boundary(index, nslots, offset_slots, 333 max_slots)) {
342 max_slots)) {
343 index += stride;
344 if (index >= io_tlb_nslabs)
345 index = 0;
346 if (index == wrap)
347 goto not_found;
348 }
349
350 /*
351 * If we find a slot that indicates we have 'nslots'
352 * number of contiguous buffers, we allocate the
353 * buffers from that slot and mark the entries as '0'
354 * indicating unavailable.
355 */
356 if (io_tlb_list[index] >= nslots) {
357 int count = 0;
358
359 for (i = index; i < (int) (index + nslots); i++)
360 io_tlb_list[i] = 0;
361 for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
362 io_tlb_list[i] = ++count;
363 dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
364
365 /*
366 * Update the indices to avoid searching in
367 * the next round.
368 */
369 io_tlb_index = ((index + nslots) < io_tlb_nslabs
370 ? (index + nslots) : 0);
371
372 goto found;
373 }
374 index += stride; 334 index += stride;
375 if (index >= io_tlb_nslabs) 335 if (index >= io_tlb_nslabs)
376 index = 0; 336 index = 0;
377 } while (index != wrap); 337 if (index == wrap)
338 goto not_found;
339 }
378 340
379 not_found: 341 /*
380 spin_unlock_irqrestore(&io_tlb_lock, flags); 342 * If we find a slot that indicates we have 'nslots' number of
381 return NULL; 343 * contiguous buffers, we allocate the buffers from that slot
382 } 344 * and mark the entries as '0' indicating unavailable.
383 found: 345 */
346 if (io_tlb_list[index] >= nslots) {
347 int count = 0;
348
349 for (i = index; i < (int) (index + nslots); i++)
350 io_tlb_list[i] = 0;
351 for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
352 io_tlb_list[i] = ++count;
353 dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
354
355 /*
356 * Update the indices to avoid searching in the next
357 * round.
358 */
359 io_tlb_index = ((index + nslots) < io_tlb_nslabs
360 ? (index + nslots) : 0);
361
362 goto found;
363 }
364 index += stride;
365 if (index >= io_tlb_nslabs)
366 index = 0;
367 } while (index != wrap);
368
369not_found:
370 spin_unlock_irqrestore(&io_tlb_lock, flags);
371 return NULL;
372found:
384 spin_unlock_irqrestore(&io_tlb_lock, flags); 373 spin_unlock_irqrestore(&io_tlb_lock, flags);
385 374
386 /* 375 /*
@@ -566,7 +555,8 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
566 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. 555 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
567 */ 556 */
568dma_addr_t 557dma_addr_t
569swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) 558swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
559 int dir, struct dma_attrs *attrs)
570{ 560{
571 dma_addr_t dev_addr = virt_to_bus(ptr); 561 dma_addr_t dev_addr = virt_to_bus(ptr);
572 void *map; 562 void *map;
@@ -599,6 +589,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
599 589
600 return dev_addr; 590 return dev_addr;
601} 591}
592EXPORT_SYMBOL(swiotlb_map_single_attrs);
593
594dma_addr_t
595swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
596{
597 return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
598}
602 599
603/* 600/*
604 * Unmap a single streaming mode DMA translation. The dma_addr and size must 601 * Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -609,8 +606,8 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
609 * whatever the device wrote there. 606 * whatever the device wrote there.
610 */ 607 */
611void 608void
612swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, 609swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
613 int dir) 610 size_t size, int dir, struct dma_attrs *attrs)
614{ 611{
615 char *dma_addr = bus_to_virt(dev_addr); 612 char *dma_addr = bus_to_virt(dev_addr);
616 613
@@ -620,7 +617,14 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
620 else if (dir == DMA_FROM_DEVICE) 617 else if (dir == DMA_FROM_DEVICE)
621 dma_mark_clean(dma_addr, size); 618 dma_mark_clean(dma_addr, size);
622} 619}
620EXPORT_SYMBOL(swiotlb_unmap_single_attrs);
623 621
622void
623swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
624 int dir)
625{
626 return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
627}
624/* 628/*
625 * Make physical memory consistent for a single streaming mode DMA translation 629 * Make physical memory consistent for a single streaming mode DMA translation
626 * after a transfer. 630 * after a transfer.
@@ -691,6 +695,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
691 SYNC_FOR_DEVICE); 695 SYNC_FOR_DEVICE);
692} 696}
693 697
698void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
699 struct dma_attrs *);
694/* 700/*
695 * Map a set of buffers described by scatterlist in streaming mode for DMA. 701 * Map a set of buffers described by scatterlist in streaming mode for DMA.
696 * This is the scatter-gather version of the above swiotlb_map_single 702 * This is the scatter-gather version of the above swiotlb_map_single
@@ -708,8 +714,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
708 * same here. 714 * same here.
709 */ 715 */
710int 716int
711swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, 717swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
712 int dir) 718 int dir, struct dma_attrs *attrs)
713{ 719{
714 struct scatterlist *sg; 720 struct scatterlist *sg;
715 void *addr; 721 void *addr;
@@ -727,7 +733,8 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
727 /* Don't panic here, we expect map_sg users 733 /* Don't panic here, we expect map_sg users
728 to do proper error handling. */ 734 to do proper error handling. */
729 swiotlb_full(hwdev, sg->length, dir, 0); 735 swiotlb_full(hwdev, sg->length, dir, 0);
730 swiotlb_unmap_sg(hwdev, sgl, i, dir); 736 swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
737 attrs);
731 sgl[0].dma_length = 0; 738 sgl[0].dma_length = 0;
732 return 0; 739 return 0;
733 } 740 }
@@ -738,14 +745,22 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
738 } 745 }
739 return nelems; 746 return nelems;
740} 747}
748EXPORT_SYMBOL(swiotlb_map_sg_attrs);
749
750int
751swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
752 int dir)
753{
754 return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
755}
741 756
742/* 757/*
743 * Unmap a set of streaming mode DMA translations. Again, cpu read rules 758 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
744 * concerning calls here are the same as for swiotlb_unmap_single() above. 759 * concerning calls here are the same as for swiotlb_unmap_single() above.
745 */ 760 */
746void 761void
747swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, 762swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
748 int dir) 763 int nelems, int dir, struct dma_attrs *attrs)
749{ 764{
750 struct scatterlist *sg; 765 struct scatterlist *sg;
751 int i; 766 int i;
@@ -760,6 +775,14 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
760 dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); 775 dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
761 } 776 }
762} 777}
778EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
779
780void
781swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
782 int dir)
783{
784 return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
785}
763 786
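The caller's side of the *_attrs conversion, as a sketch: attribute-aware users build a struct dma_attrs (DEFINE_DMA_ATTRS() and dma_set_attr() are assumed to come from <linux/dma-attrs.h>, with DMA_ATTR_WRITE_BARRIER as the example attribute), while existing callers keep the old entry points, which now pass NULL; error checking is omitted for brevity:

        static void example_map(struct device *hwdev, void *buf, size_t len)
        {
                DEFINE_DMA_ATTRS(attrs);
                dma_addr_t handle;

                dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
                handle = swiotlb_map_single_attrs(hwdev, buf, len,
                                                  DMA_TO_DEVICE, &attrs);
                /* ... device performs DMA ... */
                swiotlb_unmap_single_attrs(hwdev, handle, len,
                                           DMA_TO_DEVICE, &attrs);
        }
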
764/* 787/*
765 * Make physical memory consistent for a set of streaming mode DMA translations 788 * Make physical memory consistent for a set of streaming mode DMA translations