Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig.debug | 80
-rw-r--r-- | lib/Kconfig.kgdb | 11
-rw-r--r-- | lib/Makefile | 7
-rw-r--r-- | lib/bitmap.c | 11
-rw-r--r-- | lib/cmdline.c | 2
-rw-r--r-- | lib/debug_locks.c | 2
-rw-r--r-- | lib/debugobjects.c | 46
-rw-r--r-- | lib/idr.c | 2
-rw-r--r-- | lib/iomap.c | 3
-rw-r--r-- | lib/iommu-helper.c | 5
-rw-r--r-- | lib/klist.c | 96
-rw-r--r-- | lib/kobject.c | 3
-rw-r--r-- | lib/kobject_uevent.c | 6
-rw-r--r-- | lib/lmb.c | 2
-rw-r--r-- | lib/percpu_counter.c | 8
-rw-r--r-- | lib/plist.c | 13
-rw-r--r-- | lib/radix-tree.c | 180
-rw-r--r-- | lib/random32.c | 48
-rw-r--r-- | lib/ratelimit.c | 3
-rw-r--r-- | lib/scatterlist.c | 4
-rw-r--r-- | lib/show_mem.c | 63
-rw-r--r-- | lib/smp_processor_id.c | 5
-rw-r--r-- | lib/string_helpers.c | 64
-rw-r--r-- | lib/swiotlb.c | 51
-rw-r--r-- | lib/syscall.c | 75
-rw-r--r-- | lib/vsprintf.c | 13
26 files changed, 634 insertions, 169 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index e1d4764435ed..aa81d2848448 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -394,7 +394,7 @@ config LOCKDEP | |||
394 | bool | 394 | bool |
395 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | 395 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT |
396 | select STACKTRACE | 396 | select STACKTRACE |
397 | select FRAME_POINTER if !X86 && !MIPS | 397 | select FRAME_POINTER if !X86 && !MIPS && !PPC |
398 | select KALLSYMS | 398 | select KALLSYMS |
399 | select KALLSYMS_ALL | 399 | select KALLSYMS_ALL |
400 | 400 | ||
@@ -495,6 +495,15 @@ config DEBUG_VM | |||
495 | 495 | ||
496 | If unsure, say N. | 496 | If unsure, say N. |
497 | 497 | ||
498 | config DEBUG_VIRTUAL | ||
499 | bool "Debug VM translations" | ||
500 | depends on DEBUG_KERNEL && X86 | ||
501 | help | ||
502 | Enable some costly sanity checks in virtual to page code. This can | ||
503 | catch mistakes with virt_to_page() and friends. | ||
504 | |||
505 | If unsure, say N. | ||
506 | |||
498 | config DEBUG_WRITECOUNT | 507 | config DEBUG_WRITECOUNT |
499 | bool "Debug filesystem writers count" | 508 | bool "Debug filesystem writers count" |
500 | depends on DEBUG_KERNEL | 509 | depends on DEBUG_KERNEL |
@@ -597,6 +606,19 @@ config RCU_TORTURE_TEST_RUNNABLE | |||
597 | Say N here if you want the RCU torture tests to start only | 606 | Say N here if you want the RCU torture tests to start only |
598 | after being manually enabled via /proc. | 607 | after being manually enabled via /proc. |
599 | 608 | ||
609 | config RCU_CPU_STALL_DETECTOR | ||
610 | bool "Check for stalled CPUs delaying RCU grace periods" | ||
611 | depends on CLASSIC_RCU | ||
612 | default n | ||
613 | help | ||
614 | This option causes RCU to printk information on which | ||
615 | CPUs are delaying the current grace period, but only when | ||
616 | the grace period extends for excessive time periods. | ||
617 | |||
618 | Say Y if you want RCU to perform such checks. | ||
619 | |||
620 | Say N if you are unsure. | ||
621 | |||
600 | config KPROBES_SANITY_TEST | 622 | config KPROBES_SANITY_TEST |
601 | bool "Kprobes sanity tests" | 623 | bool "Kprobes sanity tests" |
602 | depends on DEBUG_KERNEL | 624 | depends on DEBUG_KERNEL |
@@ -624,6 +646,28 @@ config BACKTRACE_SELF_TEST | |||
624 | 646 | ||
625 | Say N if you are unsure. | 647 | Say N if you are unsure. |
626 | 648 | ||
649 | config DEBUG_BLOCK_EXT_DEVT | ||
650 | bool "Force extended block device numbers and spread them" | ||
651 | depends on DEBUG_KERNEL | ||
652 | depends on BLOCK | ||
653 | default n | ||
654 | help | ||
655 | Conventionally, block device numbers are allocated from a | ||
656 | predetermined contiguous area. However, the extended block area | ||
657 | may introduce non-contiguous block device numbers. This | ||
658 | option forces most block device numbers to be allocated from | ||
659 | the extended space and spreads them to discover kernel or | ||
660 | userland code paths which assume predetermined contiguous | ||
661 | device number allocation. | ||
662 | |||
663 | Note that turning on this debug option shuffles all the | ||
664 | device numbers for all IDE and SCSI devices including libata | ||
665 | ones, so root partition specified using device number | ||
666 | directly (via rdev or root=MAJ:MIN) won't work anymore. | ||
667 | Textual device names (root=/dev/sdXn) will continue to work. | ||
668 | |||
669 | Say N if you are unsure. | ||
670 | |||
627 | config LKDTM | 671 | config LKDTM |
628 | tristate "Linux Kernel Dump Test Tool Module" | 672 | tristate "Linux Kernel Dump Test Tool Module" |
629 | depends on DEBUG_KERNEL | 673 | depends on DEBUG_KERNEL |
@@ -661,10 +705,21 @@ config FAIL_PAGE_ALLOC | |||
661 | 705 | ||
662 | config FAIL_MAKE_REQUEST | 706 | config FAIL_MAKE_REQUEST |
663 | bool "Fault-injection capability for disk IO" | 707 | bool "Fault-injection capability for disk IO" |
664 | depends on FAULT_INJECTION | 708 | depends on FAULT_INJECTION && BLOCK |
665 | help | 709 | help |
666 | Provide fault-injection capability for disk IO. | 710 | Provide fault-injection capability for disk IO. |
667 | 711 | ||
712 | config FAIL_IO_TIMEOUT | ||
713 | bool "Faul-injection capability for faking disk interrupts" | ||
714 | depends on FAULT_INJECTION && BLOCK | ||
715 | help | ||
716 | Provide fault-injection capability on end IO handling. This | ||
717 | will make the block layer "forget" an interrupt as configured, | ||
718 | thus exercising the error handling. | ||
719 | |||
720 | Only works with drivers that use the generic timeout handling, | ||
721 | for others it won't do anything. | ||
722 | |||
668 | config FAULT_INJECTION_DEBUG_FS | 723 | config FAULT_INJECTION_DEBUG_FS |
669 | bool "Debugfs entries for fault-injection capabilities" | 724 | bool "Debugfs entries for fault-injection capabilities" |
670 | depends on FAULT_INJECTION && SYSFS && DEBUG_FS | 725 | depends on FAULT_INJECTION && SYSFS && DEBUG_FS |
@@ -676,13 +731,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER | |||
676 | depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT | 731 | depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT |
677 | depends on !X86_64 | 732 | depends on !X86_64 |
678 | select STACKTRACE | 733 | select STACKTRACE |
679 | select FRAME_POINTER | 734 | select FRAME_POINTER if !PPC |
680 | help | 735 | help |
681 | Provide stacktrace filter for fault-injection capabilities | 736 | Provide stacktrace filter for fault-injection capabilities |
682 | 737 | ||
683 | config LATENCYTOP | 738 | config LATENCYTOP |
684 | bool "Latency measuring infrastructure" | 739 | bool "Latency measuring infrastructure" |
685 | select FRAME_POINTER if !MIPS | 740 | select FRAME_POINTER if !MIPS && !PPC |
686 | select KALLSYMS | 741 | select KALLSYMS |
687 | select KALLSYMS_ALL | 742 | select KALLSYMS_ALL |
688 | select STACKTRACE | 743 | select STACKTRACE |
@@ -693,6 +748,14 @@ config LATENCYTOP | |||
693 | Enable this option if you want to use the LatencyTOP tool | 748 | Enable this option if you want to use the LatencyTOP tool |
694 | to find out which userspace is blocking on what kernel operations. | 749 | to find out which userspace is blocking on what kernel operations. |
695 | 750 | ||
751 | config SYSCTL_SYSCALL_CHECK | ||
752 | bool "Sysctl checks" | ||
753 | depends on SYSCTL_SYSCALL | ||
754 | ---help--- | ||
755 | sys_sysctl uses binary paths that have been found challenging | ||
756 | to properly maintain and use. This enables checks that help | ||
757 | you to keep things correct. | ||
758 | |||
696 | source kernel/trace/Kconfig | 759 | source kernel/trace/Kconfig |
697 | 760 | ||
698 | config PROVIDE_OHCI1394_DMA_INIT | 761 | config PROVIDE_OHCI1394_DMA_INIT |
@@ -735,6 +798,15 @@ config FIREWIRE_OHCI_REMOTE_DMA | |||
735 | 798 | ||
736 | If unsure, say N. | 799 | If unsure, say N. |
737 | 800 | ||
801 | menuconfig BUILD_DOCSRC | ||
802 | bool "Build targets in Documentation/ tree" | ||
803 | depends on HEADERS_CHECK | ||
804 | help | ||
805 | This option attempts to build objects from the source files in the | ||
806 | kernel Documentation/ tree. | ||
807 | |||
808 | Say N if you are unsure. | ||
809 | |||
738 | source "samples/Kconfig" | 810 | source "samples/Kconfig" |
739 | 811 | ||
740 | source "lib/Kconfig.kgdb" | 812 | source "lib/Kconfig.kgdb" |
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index 2cfd2721f7ed..9b5d1d7f2ef7 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb | |||
@@ -4,14 +4,17 @@ config HAVE_ARCH_KGDB | |||
4 | 4 | ||
5 | menuconfig KGDB | 5 | menuconfig KGDB |
6 | bool "KGDB: kernel debugging with remote gdb" | 6 | bool "KGDB: kernel debugging with remote gdb" |
7 | select FRAME_POINTER | ||
8 | depends on HAVE_ARCH_KGDB | 7 | depends on HAVE_ARCH_KGDB |
9 | depends on DEBUG_KERNEL && EXPERIMENTAL | 8 | depends on DEBUG_KERNEL && EXPERIMENTAL |
10 | help | 9 | help |
11 | If you say Y here, it will be possible to remotely debug the | 10 | If you say Y here, it will be possible to remotely debug the |
12 | kernel using gdb. Documentation of kernel debugger is available | 11 | kernel using gdb. It is recommended but not required, that |
13 | at http://kgdb.sourceforge.net as well as in DocBook form | 12 | you also turn on the kernel config option |
14 | in Documentation/DocBook/. If unsure, say N. | 13 | CONFIG_FRAME_POINTER to aid in producing more reliable stack |
14 | backtraces in the external debugger. Documentation of | ||
15 | kernel debugger is available at http://kgdb.sourceforge.net | ||
16 | as well as in DocBook form in Documentation/DocBook/. If | ||
17 | unsure, say N. | ||
15 | 18 | ||
16 | if KGDB | 19 | if KGDB |
17 | 20 | ||
diff --git a/lib/Makefile b/lib/Makefile index 9085ad6fa53d..44001af76a7d 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -11,7 +11,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ | |||
11 | rbtree.o radix-tree.o dump_stack.o \ | 11 | rbtree.o radix-tree.o dump_stack.o \ |
12 | idr.o int_sqrt.o extable.o prio_tree.o \ | 12 | idr.o int_sqrt.o extable.o prio_tree.o \ |
13 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ | 13 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ |
14 | proportions.o prio_heap.o ratelimit.o | 14 | proportions.o prio_heap.o ratelimit.o show_mem.o |
15 | 15 | ||
16 | lib-$(CONFIG_MMU) += ioremap.o | 16 | lib-$(CONFIG_MMU) += ioremap.o |
17 | lib-$(CONFIG_SMP) += cpumask.o | 17 | lib-$(CONFIG_SMP) += cpumask.o |
@@ -19,7 +19,8 @@ lib-$(CONFIG_SMP) += cpumask.o | |||
19 | lib-y += kobject.o kref.o klist.o | 19 | lib-y += kobject.o kref.o klist.o |
20 | 20 | ||
21 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 21 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
22 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o | 22 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
23 | string_helpers.o | ||
23 | 24 | ||
24 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | 25 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) |
25 | CFLAGS_kobject.o += -DDEBUG | 26 | CFLAGS_kobject.o += -DDEBUG |
@@ -78,6 +79,8 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o | |||
78 | 79 | ||
79 | obj-$(CONFIG_HAVE_LMB) += lmb.o | 80 | obj-$(CONFIG_HAVE_LMB) += lmb.o |
80 | 81 | ||
82 | obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o | ||
83 | |||
81 | hostprogs-y := gen_crc32table | 84 | hostprogs-y := gen_crc32table |
82 | clean-files := crc32table.h | 85 | clean-files := crc32table.h |
83 | 86 | ||
diff --git a/lib/bitmap.c b/lib/bitmap.c index 482df94ea21e..06fb57c86de0 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c | |||
@@ -316,6 +316,17 @@ int bitmap_scnprintf(char *buf, unsigned int buflen, | |||
316 | EXPORT_SYMBOL(bitmap_scnprintf); | 316 | EXPORT_SYMBOL(bitmap_scnprintf); |
317 | 317 | ||
318 | /** | 318 | /** |
319 | * bitmap_scnprintf_len - return buffer length needed to convert | ||
320 | * bitmap to an ASCII hex string | ||
321 | * @nr_bits: number of bits to be converted | ||
322 | */ | ||
323 | int bitmap_scnprintf_len(unsigned int nr_bits) | ||
324 | { | ||
325 | unsigned int nr_nibbles = ALIGN(nr_bits, 4) / 4; | ||
326 | return nr_nibbles + ALIGN(nr_nibbles, CHUNKSZ / 4) / (CHUNKSZ / 4) - 1; | ||
327 | } | ||
328 | |||
329 | /** | ||
319 | * __bitmap_parse - convert an ASCII hex string into a bitmap. | 330 | * __bitmap_parse - convert an ASCII hex string into a bitmap. |
320 | * @buf: pointer to buffer containing string. | 331 | * @buf: pointer to buffer containing string. |
321 | * @buflen: buffer size in bytes. If string is smaller than this | 332 | * @buflen: buffer size in bytes. If string is smaller than this |
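The arithmetic in the new bitmap_scnprintf_len() is one hex digit per 4 bits plus a comma between each CHUNKSZ-bit group that bitmap_scnprintf() emits (CHUNKSZ is 32 in lib/bitmap.c). A minimal userspace sketch of the same calculation, with a simplified ALIGN() and example bit counts that are purely illustrative:

#include <stdio.h>

#define CHUNKSZ 32                                /* bits per printed chunk, as in lib/bitmap.c */
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a)) /* round up to a multiple of a */

/* Same arithmetic as bitmap_scnprintf_len(): hex digits plus comma separators. */
static int scnprintf_len(unsigned int nr_bits)
{
	unsigned int nr_nibbles = ALIGN(nr_bits, 4) / 4;

	return nr_nibbles + ALIGN(nr_nibbles, CHUNKSZ / 4) / (CHUNKSZ / 4) - 1;
}

int main(void)
{
	printf("%d\n", scnprintf_len(70)); /* 18 hex digits in 3 chunks + 2 commas = 20 */
	printf("%d\n", scnprintf_len(32)); /* 8 hex digits, single chunk, no comma = 8 */
	return 0;
}

For 70 bits this yields 18 hex digits split into three comma-separated 32-bit groups, i.e. 20 characters, matching what bitmap_scnprintf() would produce.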
diff --git a/lib/cmdline.c b/lib/cmdline.c index 5ba8a942a478..f5f3ad8b62ff 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c | |||
@@ -126,7 +126,7 @@ char *get_options(const char *str, int nints, int *ints) | |||
126 | * megabyte, or one gigabyte, respectively. | 126 | * megabyte, or one gigabyte, respectively. |
127 | */ | 127 | */ |
128 | 128 | ||
129 | unsigned long long memparse(char *ptr, char **retptr) | 129 | unsigned long long memparse(const char *ptr, char **retptr) |
130 | { | 130 | { |
131 | char *endptr; /* local pointer to end of parsed string */ | 131 | char *endptr; /* local pointer to end of parsed string */ |
132 | 132 | ||
diff --git a/lib/debug_locks.c b/lib/debug_locks.c index 0ef01d14727c..0218b4693dd8 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * | 8 | * |
9 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | 9 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
10 | */ | 10 | */ |
11 | #include <linux/kernel.h> | ||
11 | #include <linux/rwsem.h> | 12 | #include <linux/rwsem.h> |
12 | #include <linux/mutex.h> | 13 | #include <linux/mutex.h> |
13 | #include <linux/module.h> | 14 | #include <linux/module.h> |
@@ -37,6 +38,7 @@ int debug_locks_off(void) | |||
37 | { | 38 | { |
38 | if (xchg(&debug_locks, 0)) { | 39 | if (xchg(&debug_locks, 0)) { |
39 | if (!debug_locks_silent) { | 40 | if (!debug_locks_silent) { |
41 | oops_in_progress = 1; | ||
40 | console_verbose(); | 42 | console_verbose(); |
41 | return 1; | 43 | return 1; |
42 | } | 44 | } |
diff --git a/lib/debugobjects.c b/lib/debugobjects.c index f86196390cfd..e3ab374e1334 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c | |||
@@ -112,6 +112,7 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) | |||
112 | 112 | ||
113 | /* | 113 | /* |
114 | * Allocate a new object. If the pool is empty, switch off the debugger. | 114 | * Allocate a new object. If the pool is empty, switch off the debugger. |
115 | * Must be called with interrupts disabled. | ||
115 | */ | 116 | */ |
116 | static struct debug_obj * | 117 | static struct debug_obj * |
117 | alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) | 118 | alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) |
@@ -148,17 +149,18 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) | |||
148 | static void free_object(struct debug_obj *obj) | 149 | static void free_object(struct debug_obj *obj) |
149 | { | 150 | { |
150 | unsigned long idx = (unsigned long)(obj - obj_static_pool); | 151 | unsigned long idx = (unsigned long)(obj - obj_static_pool); |
152 | unsigned long flags; | ||
151 | 153 | ||
152 | if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) { | 154 | if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) { |
153 | spin_lock(&pool_lock); | 155 | spin_lock_irqsave(&pool_lock, flags); |
154 | hlist_add_head(&obj->node, &obj_pool); | 156 | hlist_add_head(&obj->node, &obj_pool); |
155 | obj_pool_free++; | 157 | obj_pool_free++; |
156 | obj_pool_used--; | 158 | obj_pool_used--; |
157 | spin_unlock(&pool_lock); | 159 | spin_unlock_irqrestore(&pool_lock, flags); |
158 | } else { | 160 | } else { |
159 | spin_lock(&pool_lock); | 161 | spin_lock_irqsave(&pool_lock, flags); |
160 | obj_pool_used--; | 162 | obj_pool_used--; |
161 | spin_unlock(&pool_lock); | 163 | spin_unlock_irqrestore(&pool_lock, flags); |
162 | kmem_cache_free(obj_cache, obj); | 164 | kmem_cache_free(obj_cache, obj); |
163 | } | 165 | } |
164 | } | 166 | } |
@@ -171,6 +173,7 @@ static void debug_objects_oom(void) | |||
171 | { | 173 | { |
172 | struct debug_bucket *db = obj_hash; | 174 | struct debug_bucket *db = obj_hash; |
173 | struct hlist_node *node, *tmp; | 175 | struct hlist_node *node, *tmp; |
176 | HLIST_HEAD(freelist); | ||
174 | struct debug_obj *obj; | 177 | struct debug_obj *obj; |
175 | unsigned long flags; | 178 | unsigned long flags; |
176 | int i; | 179 | int i; |
@@ -179,11 +182,14 @@ static void debug_objects_oom(void) | |||
179 | 182 | ||
180 | for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { | 183 | for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { |
181 | spin_lock_irqsave(&db->lock, flags); | 184 | spin_lock_irqsave(&db->lock, flags); |
182 | hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { | 185 | hlist_move_list(&db->list, &freelist); |
186 | spin_unlock_irqrestore(&db->lock, flags); | ||
187 | |||
188 | /* Now free them */ | ||
189 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { | ||
183 | hlist_del(&obj->node); | 190 | hlist_del(&obj->node); |
184 | free_object(obj); | 191 | free_object(obj); |
185 | } | 192 | } |
186 | spin_unlock_irqrestore(&db->lock, flags); | ||
187 | } | 193 | } |
188 | } | 194 | } |
189 | 195 | ||
@@ -205,9 +211,8 @@ static void debug_print_object(struct debug_obj *obj, char *msg) | |||
205 | 211 | ||
206 | if (limit < 5 && obj->descr != descr_test) { | 212 | if (limit < 5 && obj->descr != descr_test) { |
207 | limit++; | 213 | limit++; |
208 | printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, | 214 | WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, |
209 | obj_states[obj->state], obj->descr->name); | 215 | obj_states[obj->state], obj->descr->name); |
210 | WARN_ON(1); | ||
211 | } | 216 | } |
212 | debug_objects_warnings++; | 217 | debug_objects_warnings++; |
213 | } | 218 | } |
@@ -499,8 +504,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr) | |||
499 | return; | 504 | return; |
500 | default: | 505 | default: |
501 | hlist_del(&obj->node); | 506 | hlist_del(&obj->node); |
507 | spin_unlock_irqrestore(&db->lock, flags); | ||
502 | free_object(obj); | 508 | free_object(obj); |
503 | break; | 509 | return; |
504 | } | 510 | } |
505 | out_unlock: | 511 | out_unlock: |
506 | spin_unlock_irqrestore(&db->lock, flags); | 512 | spin_unlock_irqrestore(&db->lock, flags); |
@@ -511,6 +517,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size) | |||
511 | { | 517 | { |
512 | unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; | 518 | unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; |
513 | struct hlist_node *node, *tmp; | 519 | struct hlist_node *node, *tmp; |
520 | HLIST_HEAD(freelist); | ||
514 | struct debug_obj_descr *descr; | 521 | struct debug_obj_descr *descr; |
515 | enum debug_obj_state state; | 522 | enum debug_obj_state state; |
516 | struct debug_bucket *db; | 523 | struct debug_bucket *db; |
@@ -546,11 +553,18 @@ repeat: | |||
546 | goto repeat; | 553 | goto repeat; |
547 | default: | 554 | default: |
548 | hlist_del(&obj->node); | 555 | hlist_del(&obj->node); |
549 | free_object(obj); | 556 | hlist_add_head(&obj->node, &freelist); |
550 | break; | 557 | break; |
551 | } | 558 | } |
552 | } | 559 | } |
553 | spin_unlock_irqrestore(&db->lock, flags); | 560 | spin_unlock_irqrestore(&db->lock, flags); |
561 | |||
562 | /* Now free them */ | ||
563 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { | ||
564 | hlist_del(&obj->node); | ||
565 | free_object(obj); | ||
566 | } | ||
567 | |||
554 | if (cnt > debug_objects_maxchain) | 568 | if (cnt > debug_objects_maxchain) |
555 | debug_objects_maxchain = cnt; | 569 | debug_objects_maxchain = cnt; |
556 | } | 570 | } |
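Both hunks above switch to the same shape: detach the objects onto a local freelist while the bucket lock is held, then call free_object() only after the lock (and the irqsave section) has been dropped. A rough userspace analogue of that detach-then-free pattern, using a pthread mutex and a hand-rolled singly linked list purely for illustration:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int val;
};

static struct node *bucket;                       /* shared list, protected by bucket_lock */
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

/* Detach the whole list under the lock, free the nodes afterwards. */
static void drain_bucket(void)
{
	struct node *freelist, *n;

	pthread_mutex_lock(&bucket_lock);
	freelist = bucket;                        /* like hlist_move_list(): steal every node */
	bucket = NULL;
	pthread_mutex_unlock(&bucket_lock);

	while ((n = freelist)) {                  /* "Now free them" - no lock held here */
		freelist = n->next;
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->val = i;
		n->next = bucket;
		bucket = n;
	}
	drain_bucket();
	printf("bucket drained: %s\n", bucket ? "no" : "yes");
	return 0;
}

The split keeps the work done under the bucket lock minimal and avoids calling the freeing path from inside the irq-disabled section.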
@@ -733,26 +747,22 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) | |||
733 | 747 | ||
734 | obj = lookup_object(addr, db); | 748 | obj = lookup_object(addr, db); |
735 | if (!obj && state != ODEBUG_STATE_NONE) { | 749 | if (!obj && state != ODEBUG_STATE_NONE) { |
736 | printk(KERN_ERR "ODEBUG: selftest object not found\n"); | 750 | WARN(1, KERN_ERR "ODEBUG: selftest object not found\n"); |
737 | WARN_ON(1); | ||
738 | goto out; | 751 | goto out; |
739 | } | 752 | } |
740 | if (obj && obj->state != state) { | 753 | if (obj && obj->state != state) { |
741 | printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", | 754 | WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", |
742 | obj->state, state); | 755 | obj->state, state); |
743 | WARN_ON(1); | ||
744 | goto out; | 756 | goto out; |
745 | } | 757 | } |
746 | if (fixups != debug_objects_fixups) { | 758 | if (fixups != debug_objects_fixups) { |
747 | printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", | 759 | WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", |
748 | fixups, debug_objects_fixups); | 760 | fixups, debug_objects_fixups); |
749 | WARN_ON(1); | ||
750 | goto out; | 761 | goto out; |
751 | } | 762 | } |
752 | if (warnings != debug_objects_warnings) { | 763 | if (warnings != debug_objects_warnings) { |
753 | printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", | 764 | WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", |
754 | warnings, debug_objects_warnings); | 765 | warnings, debug_objects_warnings); |
755 | WARN_ON(1); | ||
756 | goto out; | 766 | goto out; |
757 | } | 767 | } |
758 | res = 0; | 768 | res = 0; |
diff --git a/lib/idr.c b/lib/idr.c --- a/lib/idr.c +++ b/lib/idr.c | |||
@@ -607,7 +607,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id) | |||
607 | } | 607 | } |
608 | EXPORT_SYMBOL(idr_replace); | 608 | EXPORT_SYMBOL(idr_replace); |
609 | 609 | ||
610 | static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer) | 610 | static void idr_cache_ctor(void *idr_layer) |
611 | { | 611 | { |
612 | memset(idr_layer, 0, sizeof(struct idr_layer)); | 612 | memset(idr_layer, 0, sizeof(struct idr_layer)); |
613 | } | 613 | } |
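The idr_cache_ctor() change (and the matching radix_tree_node_ctor() change later in this diff) drops the struct kmem_cache * argument, following the slab constructor prototype of this kernel generation, which passes only the object pointer. A hedged kernel-style sketch of registering a cache with such a constructor; the cache name, object type and flags are illustrative, not from the patch:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_obj {			/* illustrative object type */
	int a;
	int b;
};

static struct kmem_cache *example_cache;

/* Single-argument constructor, same shape as the new idr_cache_ctor(). */
static void example_ctor(void *obj)
{
	memset(obj, 0, sizeof(struct example_obj));
}

static int __init example_cache_init(void)
{
	example_cache = kmem_cache_create("example_obj",
					  sizeof(struct example_obj), 0,
					  SLAB_HWCACHE_ALIGN, example_ctor);
	return example_cache ? 0 : -ENOMEM;
}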
diff --git a/lib/iomap.c b/lib/iomap.c index 37a3ea4cac9f..d32229385151 100644 --- a/lib/iomap.c +++ b/lib/iomap.c | |||
@@ -40,8 +40,7 @@ static void bad_io_access(unsigned long port, const char *access) | |||
40 | static int count = 10; | 40 | static int count = 10; |
41 | if (count) { | 41 | if (count) { |
42 | count--; | 42 | count--; |
43 | printk(KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access); | 43 | WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access); |
44 | WARN_ON(1); | ||
45 | } | 44 | } |
46 | } | 45 | } |
47 | 46 | ||
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c index a3b8d4c3f77a..5d90074dca75 100644 --- a/lib/iommu-helper.c +++ b/lib/iommu-helper.c | |||
@@ -30,8 +30,7 @@ again: | |||
30 | return index; | 30 | return index; |
31 | } | 31 | } |
32 | 32 | ||
33 | static inline void set_bit_area(unsigned long *map, unsigned long i, | 33 | void iommu_area_reserve(unsigned long *map, unsigned long i, int len) |
34 | int len) | ||
35 | { | 34 | { |
36 | unsigned long end = i + len; | 35 | unsigned long end = i + len; |
37 | while (i < end) { | 36 | while (i < end) { |
@@ -64,7 +63,7 @@ again: | |||
64 | start = index + 1; | 63 | start = index + 1; |
65 | goto again; | 64 | goto again; |
66 | } | 65 | } |
67 | set_bit_area(map, index, nr); | 66 | iommu_area_reserve(map, index, nr); |
68 | } | 67 | } |
69 | return index; | 68 | return index; |
70 | } | 69 | } |
diff --git a/lib/klist.c b/lib/klist.c index cca37f96faa2..bbdd3015c2c7 100644 --- a/lib/klist.c +++ b/lib/klist.c | |||
@@ -37,6 +37,37 @@ | |||
37 | #include <linux/klist.h> | 37 | #include <linux/klist.h> |
38 | #include <linux/module.h> | 38 | #include <linux/module.h> |
39 | 39 | ||
40 | /* | ||
41 | * Use the lowest bit of n_klist to mark deleted nodes and exclude | ||
42 | * dead ones from iteration. | ||
43 | */ | ||
44 | #define KNODE_DEAD 1LU | ||
45 | #define KNODE_KLIST_MASK ~KNODE_DEAD | ||
46 | |||
47 | static struct klist *knode_klist(struct klist_node *knode) | ||
48 | { | ||
49 | return (struct klist *) | ||
50 | ((unsigned long)knode->n_klist & KNODE_KLIST_MASK); | ||
51 | } | ||
52 | |||
53 | static bool knode_dead(struct klist_node *knode) | ||
54 | { | ||
55 | return (unsigned long)knode->n_klist & KNODE_DEAD; | ||
56 | } | ||
57 | |||
58 | static void knode_set_klist(struct klist_node *knode, struct klist *klist) | ||
59 | { | ||
60 | knode->n_klist = klist; | ||
61 | /* no knode deserves to start its life dead */ | ||
62 | WARN_ON(knode_dead(knode)); | ||
63 | } | ||
64 | |||
65 | static void knode_kill(struct klist_node *knode) | ||
66 | { | ||
67 | /* and no knode should die twice ever either, see we're very humane */ | ||
68 | WARN_ON(knode_dead(knode)); | ||
69 | *(unsigned long *)&knode->n_klist |= KNODE_DEAD; | ||
70 | } | ||
40 | 71 | ||
41 | /** | 72 | /** |
42 | * klist_init - Initialize a klist structure. | 73 | * klist_init - Initialize a klist structure. |
@@ -79,7 +110,7 @@ static void klist_node_init(struct klist *k, struct klist_node *n) | |||
79 | INIT_LIST_HEAD(&n->n_node); | 110 | INIT_LIST_HEAD(&n->n_node); |
80 | init_completion(&n->n_removed); | 111 | init_completion(&n->n_removed); |
81 | kref_init(&n->n_ref); | 112 | kref_init(&n->n_ref); |
82 | n->n_klist = k; | 113 | knode_set_klist(n, k); |
83 | if (k->get) | 114 | if (k->get) |
84 | k->get(n); | 115 | k->get(n); |
85 | } | 116 | } |
@@ -115,7 +146,7 @@ EXPORT_SYMBOL_GPL(klist_add_tail); | |||
115 | */ | 146 | */ |
116 | void klist_add_after(struct klist_node *n, struct klist_node *pos) | 147 | void klist_add_after(struct klist_node *n, struct klist_node *pos) |
117 | { | 148 | { |
118 | struct klist *k = pos->n_klist; | 149 | struct klist *k = knode_klist(pos); |
119 | 150 | ||
120 | klist_node_init(k, n); | 151 | klist_node_init(k, n); |
121 | spin_lock(&k->k_lock); | 152 | spin_lock(&k->k_lock); |
@@ -131,7 +162,7 @@ EXPORT_SYMBOL_GPL(klist_add_after); | |||
131 | */ | 162 | */ |
132 | void klist_add_before(struct klist_node *n, struct klist_node *pos) | 163 | void klist_add_before(struct klist_node *n, struct klist_node *pos) |
133 | { | 164 | { |
134 | struct klist *k = pos->n_klist; | 165 | struct klist *k = knode_klist(pos); |
135 | 166 | ||
136 | klist_node_init(k, n); | 167 | klist_node_init(k, n); |
137 | spin_lock(&k->k_lock); | 168 | spin_lock(&k->k_lock); |
@@ -144,9 +175,10 @@ static void klist_release(struct kref *kref) | |||
144 | { | 175 | { |
145 | struct klist_node *n = container_of(kref, struct klist_node, n_ref); | 176 | struct klist_node *n = container_of(kref, struct klist_node, n_ref); |
146 | 177 | ||
178 | WARN_ON(!knode_dead(n)); | ||
147 | list_del(&n->n_node); | 179 | list_del(&n->n_node); |
148 | complete(&n->n_removed); | 180 | complete(&n->n_removed); |
149 | n->n_klist = NULL; | 181 | knode_set_klist(n, NULL); |
150 | } | 182 | } |
151 | 183 | ||
152 | static int klist_dec_and_del(struct klist_node *n) | 184 | static int klist_dec_and_del(struct klist_node *n) |
@@ -154,22 +186,29 @@ static int klist_dec_and_del(struct klist_node *n) | |||
154 | return kref_put(&n->n_ref, klist_release); | 186 | return kref_put(&n->n_ref, klist_release); |
155 | } | 187 | } |
156 | 188 | ||
157 | /** | 189 | static void klist_put(struct klist_node *n, bool kill) |
158 | * klist_del - Decrement the reference count of node and try to remove. | ||
159 | * @n: node we're deleting. | ||
160 | */ | ||
161 | void klist_del(struct klist_node *n) | ||
162 | { | 190 | { |
163 | struct klist *k = n->n_klist; | 191 | struct klist *k = knode_klist(n); |
164 | void (*put)(struct klist_node *) = k->put; | 192 | void (*put)(struct klist_node *) = k->put; |
165 | 193 | ||
166 | spin_lock(&k->k_lock); | 194 | spin_lock(&k->k_lock); |
195 | if (kill) | ||
196 | knode_kill(n); | ||
167 | if (!klist_dec_and_del(n)) | 197 | if (!klist_dec_and_del(n)) |
168 | put = NULL; | 198 | put = NULL; |
169 | spin_unlock(&k->k_lock); | 199 | spin_unlock(&k->k_lock); |
170 | if (put) | 200 | if (put) |
171 | put(n); | 201 | put(n); |
172 | } | 202 | } |
203 | |||
204 | /** | ||
205 | * klist_del - Decrement the reference count of node and try to remove. | ||
206 | * @n: node we're deleting. | ||
207 | */ | ||
208 | void klist_del(struct klist_node *n) | ||
209 | { | ||
210 | klist_put(n, true); | ||
211 | } | ||
173 | EXPORT_SYMBOL_GPL(klist_del); | 212 | EXPORT_SYMBOL_GPL(klist_del); |
174 | 213 | ||
175 | /** | 214 | /** |
@@ -206,7 +245,6 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i, | |||
206 | struct klist_node *n) | 245 | struct klist_node *n) |
207 | { | 246 | { |
208 | i->i_klist = k; | 247 | i->i_klist = k; |
209 | i->i_head = &k->k_list; | ||
210 | i->i_cur = n; | 248 | i->i_cur = n; |
211 | if (n) | 249 | if (n) |
212 | kref_get(&n->n_ref); | 250 | kref_get(&n->n_ref); |
@@ -237,7 +275,7 @@ EXPORT_SYMBOL_GPL(klist_iter_init); | |||
237 | void klist_iter_exit(struct klist_iter *i) | 275 | void klist_iter_exit(struct klist_iter *i) |
238 | { | 276 | { |
239 | if (i->i_cur) { | 277 | if (i->i_cur) { |
240 | klist_del(i->i_cur); | 278 | klist_put(i->i_cur, false); |
241 | i->i_cur = NULL; | 279 | i->i_cur = NULL; |
242 | } | 280 | } |
243 | } | 281 | } |
@@ -258,27 +296,33 @@ static struct klist_node *to_klist_node(struct list_head *n) | |||
258 | */ | 296 | */ |
259 | struct klist_node *klist_next(struct klist_iter *i) | 297 | struct klist_node *klist_next(struct klist_iter *i) |
260 | { | 298 | { |
261 | struct list_head *next; | ||
262 | struct klist_node *lnode = i->i_cur; | ||
263 | struct klist_node *knode = NULL; | ||
264 | void (*put)(struct klist_node *) = i->i_klist->put; | 299 | void (*put)(struct klist_node *) = i->i_klist->put; |
300 | struct klist_node *last = i->i_cur; | ||
301 | struct klist_node *next; | ||
265 | 302 | ||
266 | spin_lock(&i->i_klist->k_lock); | 303 | spin_lock(&i->i_klist->k_lock); |
267 | if (lnode) { | 304 | |
268 | next = lnode->n_node.next; | 305 | if (last) { |
269 | if (!klist_dec_and_del(lnode)) | 306 | next = to_klist_node(last->n_node.next); |
307 | if (!klist_dec_and_del(last)) | ||
270 | put = NULL; | 308 | put = NULL; |
271 | } else | 309 | } else |
272 | next = i->i_head->next; | 310 | next = to_klist_node(i->i_klist->k_list.next); |
273 | 311 | ||
274 | if (next != i->i_head) { | 312 | i->i_cur = NULL; |
275 | knode = to_klist_node(next); | 313 | while (next != to_klist_node(&i->i_klist->k_list)) { |
276 | kref_get(&knode->n_ref); | 314 | if (likely(!knode_dead(next))) { |
315 | kref_get(&next->n_ref); | ||
316 | i->i_cur = next; | ||
317 | break; | ||
318 | } | ||
319 | next = to_klist_node(next->n_node.next); | ||
277 | } | 320 | } |
278 | i->i_cur = knode; | 321 | |
279 | spin_unlock(&i->i_klist->k_lock); | 322 | spin_unlock(&i->i_klist->k_lock); |
280 | if (put && lnode) | 323 | |
281 | put(lnode); | 324 | if (put && last) |
282 | return knode; | 325 | put(last); |
326 | return i->i_cur; | ||
283 | } | 327 | } |
284 | EXPORT_SYMBOL_GPL(klist_next); | 328 | EXPORT_SYMBOL_GPL(klist_next); |
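The klist rework hides a "dead" flag in bit 0 of the n_klist pointer; this works because struct klist is at least word aligned, so that bit is otherwise always zero, and iteration can skip killed nodes without any extra storage. A small userspace sketch of the same pointer-tagging idea; all names here are illustrative:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define DEAD_BIT 1UL

struct list_owner { int dummy; };   /* stands in for struct klist */

struct tagged_node {                /* stands in for struct klist_node */
	struct list_owner *owner;   /* bit 0 doubles as the "dead" flag */
};

static struct list_owner *node_owner(struct tagged_node *n)
{
	return (struct list_owner *)((unsigned long)n->owner & ~DEAD_BIT);
}

static bool node_dead(struct tagged_node *n)
{
	return (unsigned long)n->owner & DEAD_BIT;
}

static void node_kill(struct tagged_node *n)
{
	/* pointer and unsigned long assumed the same width, as in the kernel code */
	*(unsigned long *)&n->owner |= DEAD_BIT;
}

int main(void)
{
	struct list_owner owner;
	struct tagged_node node = { .owner = &owner };

	assert(((unsigned long)&owner & DEAD_BIT) == 0); /* alignment keeps bit 0 free */
	node_kill(&node);
	printf("dead=%d owner=%p\n", node_dead(&node), (void *)node_owner(&node));
	return 0;
}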
diff --git a/lib/kobject.c b/lib/kobject.c index bd732ffebc85..fbf0ae282376 100644 --- a/lib/kobject.c +++ b/lib/kobject.c | |||
@@ -223,8 +223,7 @@ static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, | |||
223 | return -ENOMEM; | 223 | return -ENOMEM; |
224 | 224 | ||
225 | /* ewww... some of these buggers have '/' in the name ... */ | 225 | /* ewww... some of these buggers have '/' in the name ... */ |
226 | s = strchr(kobj->name, '/'); | 226 | while ((s = strchr(kobj->name, '/'))) |
227 | if (s) | ||
228 | s[0] = '!'; | 227 | s[0] = '!'; |
229 | 228 | ||
230 | kfree(old_name); | 229 | kfree(old_name); |
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 9f8d599459d1..3f914725bda8 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
@@ -285,8 +285,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) | |||
285 | int len; | 285 | int len; |
286 | 286 | ||
287 | if (env->envp_idx >= ARRAY_SIZE(env->envp)) { | 287 | if (env->envp_idx >= ARRAY_SIZE(env->envp)) { |
288 | printk(KERN_ERR "add_uevent_var: too many keys\n"); | 288 | WARN(1, KERN_ERR "add_uevent_var: too many keys\n"); |
289 | WARN_ON(1); | ||
290 | return -ENOMEM; | 289 | return -ENOMEM; |
291 | } | 290 | } |
292 | 291 | ||
@@ -297,8 +296,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) | |||
297 | va_end(args); | 296 | va_end(args); |
298 | 297 | ||
299 | if (len >= (sizeof(env->buf) - env->buflen)) { | 298 | if (len >= (sizeof(env->buf) - env->buflen)) { |
300 | printk(KERN_ERR "add_uevent_var: buffer size too small\n"); | 299 | WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n"); |
301 | WARN_ON(1); | ||
302 | return -ENOMEM; | 300 | return -ENOMEM; |
303 | } | 301 | } |
304 | 302 | ||
diff --git a/lib/lmb.c b/lib/lmb.c --- a/lib/lmb.c +++ b/lib/lmb.c | |||
@@ -462,6 +462,8 @@ void __init lmb_enforce_memory_limit(u64 memory_limit) | |||
462 | if (lmb.memory.region[0].size < lmb.rmo_size) | 462 | if (lmb.memory.region[0].size < lmb.rmo_size) |
463 | lmb.rmo_size = lmb.memory.region[0].size; | 463 | lmb.rmo_size = lmb.memory.region[0].size; |
464 | 464 | ||
465 | memory_limit = lmb_end_of_DRAM(); | ||
466 | |||
465 | /* And truncate any reserves above the limit also. */ | 467 | /* And truncate any reserves above the limit also. */ |
466 | for (i = 0; i < lmb.reserved.cnt; i++) { | 468 | for (i = 0; i < lmb.reserved.cnt; i++) { |
467 | p = &lmb.reserved.region[i]; | 469 | p = &lmb.reserved.region[i]; |
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 4a8ba4bf5f6f..a8663890a88c 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c | |||
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add); | |||
52 | * Add up all the per-cpu counts, return the result. This is a more accurate | 52 | * Add up all the per-cpu counts, return the result. This is a more accurate |
53 | * but much slower version of percpu_counter_read_positive() | 53 | * but much slower version of percpu_counter_read_positive() |
54 | */ | 54 | */ |
55 | s64 __percpu_counter_sum(struct percpu_counter *fbc, int set) | 55 | s64 __percpu_counter_sum(struct percpu_counter *fbc) |
56 | { | 56 | { |
57 | s64 ret; | 57 | s64 ret; |
58 | int cpu; | 58 | int cpu; |
@@ -62,11 +62,9 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc, int set) | |||
62 | for_each_online_cpu(cpu) { | 62 | for_each_online_cpu(cpu) { |
63 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); | 63 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
64 | ret += *pcount; | 64 | ret += *pcount; |
65 | if (set) | 65 | *pcount = 0; |
66 | *pcount = 0; | ||
67 | } | 66 | } |
68 | if (set) | 67 | fbc->count = ret; |
69 | fbc->count = ret; | ||
70 | 68 | ||
71 | spin_unlock(&fbc->lock); | 69 | spin_unlock(&fbc->lock); |
72 | return ret; | 70 | return ret; |
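With the set parameter gone, __percpu_counter_sum() always folds the per-CPU deltas back into fbc->count under fbc->lock. A rough userspace analogue of the underlying split-counter idea (cheap per-CPU deltas, occasional exact fold); this only illustrates the data structure, not the kernel implementation:

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

struct split_counter {
	pthread_mutex_t lock;
	long long count;            /* central value */
	int per_cpu[NR_CPUS];       /* cheap, mostly-local deltas */
};

/* Accurate read: fold every per-CPU delta into the central count, like __percpu_counter_sum(). */
static long long counter_sum(struct split_counter *c)
{
	long long ret;

	pthread_mutex_lock(&c->lock);
	ret = c->count;
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		ret += c->per_cpu[cpu];
		c->per_cpu[cpu] = 0;    /* deltas are consumed unconditionally now */
	}
	c->count = ret;
	pthread_mutex_unlock(&c->lock);
	return ret;
}

int main(void)
{
	struct split_counter c = { .lock = PTHREAD_MUTEX_INITIALIZER };

	c.per_cpu[0] = 5;               /* pretend two CPUs batched some increments */
	c.per_cpu[2] = -2;
	printf("sum=%lld count=%lld\n", counter_sum(&c), c.count);
	return 0;
}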
diff --git a/lib/plist.c b/lib/plist.c index 3074a02272f3..d6c64a824e1d 100644 --- a/lib/plist.c +++ b/lib/plist.c | |||
@@ -31,12 +31,13 @@ | |||
31 | static void plist_check_prev_next(struct list_head *t, struct list_head *p, | 31 | static void plist_check_prev_next(struct list_head *t, struct list_head *p, |
32 | struct list_head *n) | 32 | struct list_head *n) |
33 | { | 33 | { |
34 | if (n->prev != p || p->next != n) { | 34 | WARN(n->prev != p || p->next != n, |
35 | printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev); | 35 | "top: %p, n: %p, p: %p\n" |
36 | printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev); | 36 | "prev: %p, n: %p, p: %p\n" |
37 | printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev); | 37 | "next: %p, n: %p, p: %p\n", |
38 | WARN_ON(1); | 38 | t, t->next, t->prev, |
39 | } | 39 | p, p->next, p->prev, |
40 | n, n->next, n->prev); | ||
40 | } | 41 | } |
41 | 42 | ||
42 | static void plist_check_list(struct list_head *top) | 43 | static void plist_check_list(struct list_head *top) |
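Several hunks in this series (debugobjects, iomap, kobject_uevent, plist) collapse a printk() followed by WARN_ON(1) into a single WARN(1, ...) or WARN(cond, ...), so the message and the backtrace are emitted together. A minimal kernel-style sketch of the pattern; the condition and message are placeholders:

#include <linux/kernel.h>

static void check_limit(int nr_items, int limit)
{
	/* Replaces the old: printk(KERN_ERR "..."); WARN_ON(1); pair */
	WARN(nr_items > limit,
	     "item count %d exceeds limit %d\n", nr_items, limit);
}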
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 56ec21a7f73d..be86b32bc874 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -359,18 +359,17 @@ EXPORT_SYMBOL(radix_tree_insert); | |||
359 | * Returns: the slot corresponding to the position @index in the | 359 | * Returns: the slot corresponding to the position @index in the |
360 | * radix tree @root. This is useful for update-if-exists operations. | 360 | * radix tree @root. This is useful for update-if-exists operations. |
361 | * | 361 | * |
362 | * This function cannot be called under rcu_read_lock, it must be | 362 | * This function can be called under rcu_read_lock iff the slot is not |
363 | * excluded from writers, as must the returned slot for subsequent | 363 | * modified by radix_tree_replace_slot, otherwise it must be called |
364 | * use by radix_tree_deref_slot() and radix_tree_replace slot. | 364 | * exclusive from other writers. Any dereference of the slot must be done |
365 | * Caller must hold tree write locked across slot lookup and | 365 | * using radix_tree_deref_slot. |
366 | * replace. | ||
367 | */ | 366 | */ |
368 | void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) | 367 | void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) |
369 | { | 368 | { |
370 | unsigned int height, shift; | 369 | unsigned int height, shift; |
371 | struct radix_tree_node *node, **slot; | 370 | struct radix_tree_node *node, **slot; |
372 | 371 | ||
373 | node = root->rnode; | 372 | node = rcu_dereference(root->rnode); |
374 | if (node == NULL) | 373 | if (node == NULL) |
375 | return NULL; | 374 | return NULL; |
376 | 375 | ||
@@ -390,7 +389,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) | |||
390 | do { | 389 | do { |
391 | slot = (struct radix_tree_node **) | 390 | slot = (struct radix_tree_node **) |
392 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); | 391 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); |
393 | node = *slot; | 392 | node = rcu_dereference(*slot); |
394 | if (node == NULL) | 393 | if (node == NULL) |
395 | return NULL; | 394 | return NULL; |
396 | 395 | ||
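With root->rnode and the per-level slots read via rcu_dereference(), radix_tree_lookup_slot() may now be called under rcu_read_lock() as long as the returned slot is only read through radix_tree_deref_slot(); replacing the slot contents still requires exclusion from other writers. A hedged kernel-style usage sketch, where the tree, index, item and lock names are illustrative:

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* RCU reader: no tree lock, only dereference through the helper. */
static void *peek_item(struct radix_tree_root *tree, unsigned long index)
{
	void **slot;
	void *item = NULL;

	rcu_read_lock();
	slot = radix_tree_lookup_slot(tree, index);
	if (slot)
		item = radix_tree_deref_slot(slot);
	rcu_read_unlock();
	return item;
}

/* Writer: slot replacement still needs the caller's tree lock. */
static void swap_item(struct radix_tree_root *tree, spinlock_t *tree_lock,
		      unsigned long index, void *new_item)
{
	void **slot;

	spin_lock(tree_lock);
	slot = radix_tree_lookup_slot(tree, index);
	if (slot)
		radix_tree_replace_slot(slot, new_item);
	spin_unlock(tree_lock);
}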
@@ -667,7 +666,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root, | |||
667 | EXPORT_SYMBOL(radix_tree_next_hole); | 666 | EXPORT_SYMBOL(radix_tree_next_hole); |
668 | 667 | ||
669 | static unsigned int | 668 | static unsigned int |
670 | __lookup(struct radix_tree_node *slot, void **results, unsigned long index, | 669 | __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, |
671 | unsigned int max_items, unsigned long *next_index) | 670 | unsigned int max_items, unsigned long *next_index) |
672 | { | 671 | { |
673 | unsigned int nr_found = 0; | 672 | unsigned int nr_found = 0; |
@@ -701,11 +700,9 @@ __lookup(struct radix_tree_node *slot, void **results, unsigned long index, | |||
701 | 700 | ||
702 | /* Bottom level: grab some items */ | 701 | /* Bottom level: grab some items */ |
703 | for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { | 702 | for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { |
704 | struct radix_tree_node *node; | ||
705 | index++; | 703 | index++; |
706 | node = slot->slots[i]; | 704 | if (slot->slots[i]) { |
707 | if (node) { | 705 | results[nr_found++] = &(slot->slots[i]); |
708 | results[nr_found++] = rcu_dereference(node); | ||
709 | if (nr_found == max_items) | 706 | if (nr_found == max_items) |
710 | goto out; | 707 | goto out; |
711 | } | 708 | } |
@@ -759,13 +756,22 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
759 | 756 | ||
760 | ret = 0; | 757 | ret = 0; |
761 | while (ret < max_items) { | 758 | while (ret < max_items) { |
762 | unsigned int nr_found; | 759 | unsigned int nr_found, slots_found, i; |
763 | unsigned long next_index; /* Index of next search */ | 760 | unsigned long next_index; /* Index of next search */ |
764 | 761 | ||
765 | if (cur_index > max_index) | 762 | if (cur_index > max_index) |
766 | break; | 763 | break; |
767 | nr_found = __lookup(node, results + ret, cur_index, | 764 | slots_found = __lookup(node, (void ***)results + ret, cur_index, |
768 | max_items - ret, &next_index); | 765 | max_items - ret, &next_index); |
766 | nr_found = 0; | ||
767 | for (i = 0; i < slots_found; i++) { | ||
768 | struct radix_tree_node *slot; | ||
769 | slot = *(((void ***)results)[ret + i]); | ||
770 | if (!slot) | ||
771 | continue; | ||
772 | results[ret + nr_found] = rcu_dereference(slot); | ||
773 | nr_found++; | ||
774 | } | ||
769 | ret += nr_found; | 775 | ret += nr_found; |
770 | if (next_index == 0) | 776 | if (next_index == 0) |
771 | break; | 777 | break; |
@@ -776,12 +782,71 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
776 | } | 782 | } |
777 | EXPORT_SYMBOL(radix_tree_gang_lookup); | 783 | EXPORT_SYMBOL(radix_tree_gang_lookup); |
778 | 784 | ||
785 | /** | ||
786 | * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree | ||
787 | * @root: radix tree root | ||
788 | * @results: where the results of the lookup are placed | ||
789 | * @first_index: start the lookup from this key | ||
790 | * @max_items: place up to this many items at *results | ||
791 | * | ||
792 | * Performs an index-ascending scan of the tree for present items. Places | ||
793 | * their slots at *@results and returns the number of items which were | ||
794 | * placed at *@results. | ||
795 | * | ||
796 | * The implementation is naive. | ||
797 | * | ||
798 | * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must | ||
799 | * be dereferenced with radix_tree_deref_slot, and if using only RCU | ||
800 | * protection, radix_tree_deref_slot may fail requiring a retry. | ||
801 | */ | ||
802 | unsigned int | ||
803 | radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | ||
804 | unsigned long first_index, unsigned int max_items) | ||
805 | { | ||
806 | unsigned long max_index; | ||
807 | struct radix_tree_node *node; | ||
808 | unsigned long cur_index = first_index; | ||
809 | unsigned int ret; | ||
810 | |||
811 | node = rcu_dereference(root->rnode); | ||
812 | if (!node) | ||
813 | return 0; | ||
814 | |||
815 | if (!radix_tree_is_indirect_ptr(node)) { | ||
816 | if (first_index > 0) | ||
817 | return 0; | ||
818 | results[0] = (void **)&root->rnode; | ||
819 | return 1; | ||
820 | } | ||
821 | node = radix_tree_indirect_to_ptr(node); | ||
822 | |||
823 | max_index = radix_tree_maxindex(node->height); | ||
824 | |||
825 | ret = 0; | ||
826 | while (ret < max_items) { | ||
827 | unsigned int slots_found; | ||
828 | unsigned long next_index; /* Index of next search */ | ||
829 | |||
830 | if (cur_index > max_index) | ||
831 | break; | ||
832 | slots_found = __lookup(node, results + ret, cur_index, | ||
833 | max_items - ret, &next_index); | ||
834 | ret += slots_found; | ||
835 | if (next_index == 0) | ||
836 | break; | ||
837 | cur_index = next_index; | ||
838 | } | ||
839 | |||
840 | return ret; | ||
841 | } | ||
842 | EXPORT_SYMBOL(radix_tree_gang_lookup_slot); | ||
843 | |||
779 | /* | 844 | /* |
780 | * FIXME: the two tag_get()s here should use find_next_bit() instead of | 845 | * FIXME: the two tag_get()s here should use find_next_bit() instead of |
781 | * open-coding the search. | 846 | * open-coding the search. |
782 | */ | 847 | */ |
783 | static unsigned int | 848 | static unsigned int |
784 | __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, | 849 | __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index, |
785 | unsigned int max_items, unsigned long *next_index, unsigned int tag) | 850 | unsigned int max_items, unsigned long *next_index, unsigned int tag) |
786 | { | 851 | { |
787 | unsigned int nr_found = 0; | 852 | unsigned int nr_found = 0; |
@@ -811,11 +876,9 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, | |||
811 | unsigned long j = index & RADIX_TREE_MAP_MASK; | 876 | unsigned long j = index & RADIX_TREE_MAP_MASK; |
812 | 877 | ||
813 | for ( ; j < RADIX_TREE_MAP_SIZE; j++) { | 878 | for ( ; j < RADIX_TREE_MAP_SIZE; j++) { |
814 | struct radix_tree_node *node; | ||
815 | index++; | 879 | index++; |
816 | if (!tag_get(slot, tag, j)) | 880 | if (!tag_get(slot, tag, j)) |
817 | continue; | 881 | continue; |
818 | node = slot->slots[j]; | ||
819 | /* | 882 | /* |
820 | * Even though the tag was found set, we need to | 883 | * Even though the tag was found set, we need to |
821 | * recheck that we have a non-NULL node, because | 884 | * recheck that we have a non-NULL node, because |
@@ -826,9 +889,8 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, | |||
826 | * lookup ->slots[x] without a lock (ie. can't | 889 | * lookup ->slots[x] without a lock (ie. can't |
827 | * rely on its value remaining the same). | 890 | * rely on its value remaining the same). |
828 | */ | 891 | */ |
829 | if (node) { | 892 | if (slot->slots[j]) { |
830 | node = rcu_dereference(node); | 893 | results[nr_found++] = &(slot->slots[j]); |
831 | results[nr_found++] = node; | ||
832 | if (nr_found == max_items) | 894 | if (nr_found == max_items) |
833 | goto out; | 895 | goto out; |
834 | } | 896 | } |
@@ -887,13 +949,22 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
887 | 949 | ||
888 | ret = 0; | 950 | ret = 0; |
889 | while (ret < max_items) { | 951 | while (ret < max_items) { |
890 | unsigned int nr_found; | 952 | unsigned int nr_found, slots_found, i; |
891 | unsigned long next_index; /* Index of next search */ | 953 | unsigned long next_index; /* Index of next search */ |
892 | 954 | ||
893 | if (cur_index > max_index) | 955 | if (cur_index > max_index) |
894 | break; | 956 | break; |
895 | nr_found = __lookup_tag(node, results + ret, cur_index, | 957 | slots_found = __lookup_tag(node, (void ***)results + ret, |
896 | max_items - ret, &next_index, tag); | 958 | cur_index, max_items - ret, &next_index, tag); |
959 | nr_found = 0; | ||
960 | for (i = 0; i < slots_found; i++) { | ||
961 | struct radix_tree_node *slot; | ||
962 | slot = *(((void ***)results)[ret + i]); | ||
963 | if (!slot) | ||
964 | continue; | ||
965 | results[ret + nr_found] = rcu_dereference(slot); | ||
966 | nr_found++; | ||
967 | } | ||
897 | ret += nr_found; | 968 | ret += nr_found; |
898 | if (next_index == 0) | 969 | if (next_index == 0) |
899 | break; | 970 | break; |
@@ -905,6 +976,67 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
905 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag); | 976 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag); |
906 | 977 | ||
907 | /** | 978 | /** |
979 | * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a | ||
980 | * radix tree based on a tag | ||
981 | * @root: radix tree root | ||
982 | * @results: where the results of the lookup are placed | ||
983 | * @first_index: start the lookup from this key | ||
984 | * @max_items: place up to this many items at *results | ||
985 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) | ||
986 | * | ||
987 | * Performs an index-ascending scan of the tree for present items which | ||
988 | * have the tag indexed by @tag set. Places the slots at *@results and | ||
989 | * returns the number of slots which were placed at *@results. | ||
990 | */ | ||
991 | unsigned int | ||
992 | radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | ||
993 | unsigned long first_index, unsigned int max_items, | ||
994 | unsigned int tag) | ||
995 | { | ||
996 | struct radix_tree_node *node; | ||
997 | unsigned long max_index; | ||
998 | unsigned long cur_index = first_index; | ||
999 | unsigned int ret; | ||
1000 | |||
1001 | /* check the root's tag bit */ | ||
1002 | if (!root_tag_get(root, tag)) | ||
1003 | return 0; | ||
1004 | |||
1005 | node = rcu_dereference(root->rnode); | ||
1006 | if (!node) | ||
1007 | return 0; | ||
1008 | |||
1009 | if (!radix_tree_is_indirect_ptr(node)) { | ||
1010 | if (first_index > 0) | ||
1011 | return 0; | ||
1012 | results[0] = (void **)&root->rnode; | ||
1013 | return 1; | ||
1014 | } | ||
1015 | node = radix_tree_indirect_to_ptr(node); | ||
1016 | |||
1017 | max_index = radix_tree_maxindex(node->height); | ||
1018 | |||
1019 | ret = 0; | ||
1020 | while (ret < max_items) { | ||
1021 | unsigned int slots_found; | ||
1022 | unsigned long next_index; /* Index of next search */ | ||
1023 | |||
1024 | if (cur_index > max_index) | ||
1025 | break; | ||
1026 | slots_found = __lookup_tag(node, results + ret, | ||
1027 | cur_index, max_items - ret, &next_index, tag); | ||
1028 | ret += slots_found; | ||
1029 | if (next_index == 0) | ||
1030 | break; | ||
1031 | cur_index = next_index; | ||
1032 | } | ||
1033 | |||
1034 | return ret; | ||
1035 | } | ||
1036 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); | ||
1037 | |||
1038 | |||
1039 | /** | ||
908 | * radix_tree_shrink - shrink height of a radix tree to minimal | 1040 | * radix_tree_shrink - shrink height of a radix tree to minimal |
909 | * @root radix tree root | 1041 | * @root radix tree root |
910 | */ | 1042 | */ |
@@ -1051,7 +1183,7 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) | |||
1051 | EXPORT_SYMBOL(radix_tree_tagged); | 1183 | EXPORT_SYMBOL(radix_tree_tagged); |
1052 | 1184 | ||
1053 | static void | 1185 | static void |
1054 | radix_tree_node_ctor(struct kmem_cache *cachep, void *node) | 1186 | radix_tree_node_ctor(void *node) |
1055 | { | 1187 | { |
1056 | memset(node, 0, sizeof(struct radix_tree_node)); | 1188 | memset(node, 0, sizeof(struct radix_tree_node)); |
1057 | } | 1189 | } |
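The new radix_tree_gang_lookup_slot() and radix_tree_gang_lookup_tag_slot() return slot pointers instead of items, leaving the rcu_dereference() (and the NULL-slot handling when running under RCU only) to the caller. A hedged kernel-style sketch of consuming one batch of slots, assuming the matching declarations from this series are in <linux/radix-tree.h>; the batch size and names are illustrative:

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

#define BATCH	16	/* illustrative batch size */

static unsigned int count_present(struct radix_tree_root *tree)
{
	void **slots[BATCH];
	unsigned int nr, i, found = 0;

	rcu_read_lock();
	nr = radix_tree_gang_lookup_slot(tree, slots, 0, BATCH);
	for (i = 0; i < nr; i++) {
		/* A NULL result means a concurrent delete emptied the slot
		 * after the lookup saw it; under RCU-only protection the
		 * caller simply skips (or retries) such entries. */
		if (radix_tree_deref_slot(slots[i]))
			found++;
	}
	rcu_read_unlock();
	return found;
}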
diff --git a/lib/random32.c b/lib/random32.c index ca87d86992bd..217d5c4b666d 100644 --- a/lib/random32.c +++ b/lib/random32.c | |||
@@ -56,23 +56,12 @@ static u32 __random32(struct rnd_state *state) | |||
56 | return (state->s1 ^ state->s2 ^ state->s3); | 56 | return (state->s1 ^ state->s2 ^ state->s3); |
57 | } | 57 | } |
58 | 58 | ||
59 | static void __set_random32(struct rnd_state *state, unsigned long s) | 59 | /* |
60 | * Handle minimum values for seeds | ||
61 | */ | ||
62 | static inline u32 __seed(u32 x, u32 m) | ||
60 | { | 63 | { |
61 | if (s == 0) | 64 | return (x < m) ? x + m : x; |
62 | s = 1; /* default seed is 1 */ | ||
63 | |||
64 | #define LCG(n) (69069 * n) | ||
65 | state->s1 = LCG(s); | ||
66 | state->s2 = LCG(state->s1); | ||
67 | state->s3 = LCG(state->s2); | ||
68 | |||
69 | /* "warm it up" */ | ||
70 | __random32(state); | ||
71 | __random32(state); | ||
72 | __random32(state); | ||
73 | __random32(state); | ||
74 | __random32(state); | ||
75 | __random32(state); | ||
76 | } | 65 | } |
77 | 66 | ||
78 | /** | 67 | /** |
@@ -107,7 +96,7 @@ void srandom32(u32 entropy) | |||
107 | */ | 96 | */ |
108 | for_each_possible_cpu (i) { | 97 | for_each_possible_cpu (i) { |
109 | struct rnd_state *state = &per_cpu(net_rand_state, i); | 98 | struct rnd_state *state = &per_cpu(net_rand_state, i); |
110 | __set_random32(state, state->s1 ^ entropy); | 99 | state->s1 = __seed(state->s1 ^ entropy, 1); |
111 | } | 100 | } |
112 | } | 101 | } |
113 | EXPORT_SYMBOL(srandom32); | 102 | EXPORT_SYMBOL(srandom32); |
@@ -122,7 +111,19 @@ static int __init random32_init(void) | |||
122 | 111 | ||
123 | for_each_possible_cpu(i) { | 112 | for_each_possible_cpu(i) { |
124 | struct rnd_state *state = &per_cpu(net_rand_state,i); | 113 | struct rnd_state *state = &per_cpu(net_rand_state,i); |
125 | __set_random32(state, i + jiffies); | 114 | |
115 | #define LCG(x) ((x) * 69069) /* super-duper LCG */ | ||
116 | state->s1 = __seed(LCG(i + jiffies), 1); | ||
117 | state->s2 = __seed(LCG(state->s1), 7); | ||
118 | state->s3 = __seed(LCG(state->s2), 15); | ||
119 | |||
120 | /* "warm it up" */ | ||
121 | __random32(state); | ||
122 | __random32(state); | ||
123 | __random32(state); | ||
124 | __random32(state); | ||
125 | __random32(state); | ||
126 | __random32(state); | ||
126 | } | 127 | } |
127 | return 0; | 128 | return 0; |
128 | } | 129 | } |
@@ -135,13 +136,18 @@ core_initcall(random32_init); | |||
135 | static int __init random32_reseed(void) | 136 | static int __init random32_reseed(void) |
136 | { | 137 | { |
137 | int i; | 138 | int i; |
138 | unsigned long seed; | ||
139 | 139 | ||
140 | for_each_possible_cpu(i) { | 140 | for_each_possible_cpu(i) { |
141 | struct rnd_state *state = &per_cpu(net_rand_state,i); | 141 | struct rnd_state *state = &per_cpu(net_rand_state,i); |
142 | u32 seeds[3]; | ||
143 | |||
144 | get_random_bytes(&seeds, sizeof(seeds)); | ||
145 | state->s1 = __seed(seeds[0], 1); | ||
146 | state->s2 = __seed(seeds[1], 7); | ||
147 | state->s3 = __seed(seeds[2], 15); | ||
142 | 148 | ||
143 | get_random_bytes(&seed, sizeof(seed)); | 149 | /* mix it in */ |
144 | __set_random32(state, seed); | 150 | __random32(state); |
145 | } | 151 | } |
146 | return 0; | 152 | return 0; |
147 | } | 153 | } |
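The per-CPU state is now seeded word by word: an LCG spreads the initial value across s1, s2 and s3, and __seed() bumps anything below a per-word minimum (1, 7 and 15 here) so no word starts at zero. A userspace sketch of just that seeding arithmetic; the generator step itself is omitted and the names are illustrative:

#include <stdint.h>
#include <stdio.h>

struct rnd_state { uint32_t s1, s2, s3; };

/* Bump values below the minimum, as in the new __seed() helper. */
static uint32_t seed_min(uint32_t x, uint32_t m)
{
	return (x < m) ? x + m : x;
}

#define LCG(x) ((x) * 69069u)   /* the "super-duper" LCG used to spread the initial seed */

static void seed_state(struct rnd_state *state, uint32_t entropy)
{
	state->s1 = seed_min(LCG(entropy), 1);
	state->s2 = seed_min(LCG(state->s1), 7);
	state->s3 = seed_min(LCG(state->s2), 15);
}

int main(void)
{
	struct rnd_state state;

	seed_state(&state, 0);      /* even an all-zero seed produces non-zero state words */
	printf("%u %u %u\n", state.s1, state.s2, state.s3);
	return 0;
}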
diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 35136671b215..26187edcc7ea 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | 16 | ||
17 | static DEFINE_SPINLOCK(ratelimit_lock); | 17 | static DEFINE_SPINLOCK(ratelimit_lock); |
18 | static unsigned long flags; | ||
19 | 18 | ||
20 | /* | 19 | /* |
21 | * __ratelimit - rate limiting | 20 | * __ratelimit - rate limiting |
@@ -26,6 +25,8 @@ static unsigned long flags; | |||
26 | */ | 25 | */ |
27 | int __ratelimit(struct ratelimit_state *rs) | 26 | int __ratelimit(struct ratelimit_state *rs) |
28 | { | 27 | { |
28 | unsigned long flags; | ||
29 | |||
29 | if (!rs->interval) | 30 | if (!rs->interval) |
30 | return 1; | 31 | return 1; |
31 | 32 | ||
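The ratelimit change moves flags off a shared static and onto the stack: spin_lock_irqsave() saves the caller's interrupt state into flags, so a single file-scope variable shared by concurrent callers can restore the wrong state. A kernel-style sketch of the corrected shape, with an illustrative counter standing in for the ratelimit bookkeeping:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(counter_lock);
static int counter;

static void bump_counter(void)
{
	unsigned long flags;    /* per-call, on the stack: holds this caller's IRQ state */

	spin_lock_irqsave(&counter_lock, flags);
	counter++;
	spin_unlock_irqrestore(&counter_lock, flags);
}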
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 876ba6d5b670..8d2688ff1352 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -422,9 +422,12 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, | |||
422 | { | 422 | { |
423 | unsigned int offset = 0; | 423 | unsigned int offset = 0; |
424 | struct sg_mapping_iter miter; | 424 | struct sg_mapping_iter miter; |
425 | unsigned long flags; | ||
425 | 426 | ||
426 | sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC); | 427 | sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC); |
427 | 428 | ||
429 | local_irq_save(flags); | ||
430 | |||
428 | while (sg_miter_next(&miter) && offset < buflen) { | 431 | while (sg_miter_next(&miter) && offset < buflen) { |
429 | unsigned int len; | 432 | unsigned int len; |
430 | 433 | ||
@@ -442,6 +445,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, | |||
442 | 445 | ||
443 | sg_miter_stop(&miter); | 446 | sg_miter_stop(&miter); |
444 | 447 | ||
448 | local_irq_restore(flags); | ||
445 | return offset; | 449 | return offset; |
446 | } | 450 | } |
447 | 451 | ||
diff --git a/lib/show_mem.c b/lib/show_mem.c new file mode 100644 index 000000000000..238e72a18ce1 --- /dev/null +++ b/lib/show_mem.c | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * Generic show_mem() implementation | ||
3 | * | ||
4 | * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de> | ||
5 | * All code subject to the GPL version 2. | ||
6 | */ | ||
7 | |||
8 | #include <linux/mm.h> | ||
9 | #include <linux/nmi.h> | ||
10 | #include <linux/quicklist.h> | ||
11 | |||
12 | void show_mem(void) | ||
13 | { | ||
14 | pg_data_t *pgdat; | ||
15 | unsigned long total = 0, reserved = 0, shared = 0, | ||
16 | nonshared = 0, highmem = 0; | ||
17 | |||
18 | printk(KERN_INFO "Mem-Info:\n"); | ||
19 | show_free_areas(); | ||
20 | |||
21 | for_each_online_pgdat(pgdat) { | ||
22 | unsigned long i, flags; | ||
23 | |||
24 | pgdat_resize_lock(pgdat, &flags); | ||
25 | for (i = 0; i < pgdat->node_spanned_pages; i++) { | ||
26 | struct page *page; | ||
27 | unsigned long pfn = pgdat->node_start_pfn + i; | ||
28 | |||
29 | if (unlikely(!(i % MAX_ORDER_NR_PAGES))) | ||
30 | touch_nmi_watchdog(); | ||
31 | |||
32 | if (!pfn_valid(pfn)) | ||
33 | continue; | ||
34 | |||
35 | page = pfn_to_page(pfn); | ||
36 | |||
37 | if (PageHighMem(page)) | ||
38 | highmem++; | ||
39 | |||
40 | if (PageReserved(page)) | ||
41 | reserved++; | ||
42 | else if (page_count(page) == 1) | ||
43 | nonshared++; | ||
44 | else if (page_count(page) > 1) | ||
45 | shared += page_count(page) - 1; | ||
46 | |||
47 | total++; | ||
48 | } | ||
49 | pgdat_resize_unlock(pgdat, &flags); | ||
50 | } | ||
51 | |||
52 | printk(KERN_INFO "%lu pages RAM\n", total); | ||
53 | #ifdef CONFIG_HIGHMEM | ||
54 | printk(KERN_INFO "%lu pages HighMem\n", highmem); | ||
55 | #endif | ||
56 | printk(KERN_INFO "%lu pages reserved\n", reserved); | ||
57 | printk(KERN_INFO "%lu pages shared\n", shared); | ||
58 | printk(KERN_INFO "%lu pages non-shared\n", nonshared); | ||
59 | #ifdef CONFIG_QUICKLIST | ||
60 | printk(KERN_INFO "%lu pages in pagetable cache\n", | ||
61 | quicklist_total_size()); | ||
62 | #endif | ||
63 | } | ||
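The new generic show_mem() walks every online node under pgdat_resize_lock(), classifies each valid pfn as reserved, shared or non-shared, and prints the totals with printk(). A minimal usage sketch, assuming only the usual declaration in linux/mm.h; my_alloc_debug() is a hypothetical caller, not part of this patch.

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Hypothetical caller: dump the per-node page summary when an
     * allocation unexpectedly fails.
     */
    static void *my_alloc_debug(size_t size)
    {
            void *p = kmalloc(size, GFP_KERNEL);

            if (!p)
                    show_mem();
            return p;
    }
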
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index c4381d9516f6..0f8fc22ed103 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c | |||
@@ -11,7 +11,6 @@ notrace unsigned int debug_smp_processor_id(void) | |||
11 | { | 11 | { |
12 | unsigned long preempt_count = preempt_count(); | 12 | unsigned long preempt_count = preempt_count(); |
13 | int this_cpu = raw_smp_processor_id(); | 13 | int this_cpu = raw_smp_processor_id(); |
14 | cpumask_of_cpu_ptr_declare(this_mask); | ||
15 | 14 | ||
16 | if (likely(preempt_count)) | 15 | if (likely(preempt_count)) |
17 | goto out; | 16 | goto out; |
@@ -23,9 +22,7 @@ notrace unsigned int debug_smp_processor_id(void) | |||
23 | * Kernel threads bound to a single CPU can safely use | 22 | * Kernel threads bound to a single CPU can safely use |
24 | * smp_processor_id(): | 23 | * smp_processor_id(): |
25 | */ | 24 | */ |
26 | cpumask_of_cpu_ptr_next(this_mask, this_cpu); | 25 | if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu))) |
27 | |||
28 | if (cpus_equal(current->cpus_allowed, *this_mask)) | ||
29 | goto out; | 26 | goto out; |
30 | 27 | ||
31 | /* | 28 | /* |
diff --git a/lib/string_helpers.c b/lib/string_helpers.c new file mode 100644 index 000000000000..8347925030ff --- /dev/null +++ b/lib/string_helpers.c | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * Helpers for formatting and printing strings | ||
3 | * | ||
4 | * Copyright 31 August 2008 James Bottomley | ||
5 | */ | ||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/math64.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/string_helpers.h> | ||
10 | |||
11 | /** | ||
12 | * string_get_size - get the size in the specified units | ||
13 | * @size: The size to be converted | ||
14 | * @units: units to use (powers of 1000 or 1024) | ||
15 | * @buf: buffer to format to | ||
16 | * @len: length of buffer | ||
17 | * | ||
18 | * This function returns a string formatted to 3 significant figures | ||
19 | * giving the size in the required units. Returns 0 on success or | ||
20 | * error on failure. @buf is always zero terminated. | ||
21 | * | ||
22 | */ | ||
23 | int string_get_size(u64 size, const enum string_size_units units, | ||
24 | char *buf, int len) | ||
25 | { | ||
26 | const char *units_10[] = { "B", "KB", "MB", "GB", "TB", "PB", | ||
27 | "EB", "ZB", "YB", NULL}; | ||
28 | const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB", | ||
29 | "EiB", "ZiB", "YiB", NULL }; | ||
30 | const char **units_str[] = { | ||
31 | [STRING_UNITS_10] = units_10, | ||
32 | [STRING_UNITS_2] = units_2, | ||
33 | }; | ||
34 | const int divisor[] = { | ||
35 | [STRING_UNITS_10] = 1000, | ||
36 | [STRING_UNITS_2] = 1024, | ||
37 | }; | ||
38 | int i, j; | ||
39 | u64 remainder = 0, sf_cap; | ||
40 | char tmp[8]; | ||
41 | |||
42 | tmp[0] = '\0'; | ||
43 | |||
44 | for (i = 0; size > divisor[units] && units_str[units][i]; i++) | ||
45 | remainder = do_div(size, divisor[units]); | ||
46 | |||
47 | sf_cap = size; | ||
48 | for (j = 0; sf_cap*10 < 1000; j++) | ||
49 | sf_cap *= 10; | ||
50 | |||
51 | if (j) { | ||
52 | remainder *= 1000; | ||
53 | do_div(remainder, divisor[units]); | ||
54 | snprintf(tmp, sizeof(tmp), ".%03lld", | ||
55 | (unsigned long long)remainder); | ||
56 | tmp[j+1] = '\0'; | ||
57 | } | ||
58 | |||
59 | snprintf(buf, len, "%lld%s%s", (unsigned long long)size, | ||
60 | tmp, units_str[units][i]); | ||
61 | |||
62 | return 0; | ||
63 | } | ||
64 | EXPORT_SYMBOL(string_get_size); | ||
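A short usage sketch of string_get_size(). Tracing the loop above, 3280000000 bytes divides down to 3 with remainder 280 after three 1000-steps, so the decimal form should come out as roughly "3.28GB"; STRING_UNITS_2 would step by 1024 and print "GiB" suffixes instead. The helper name below is hypothetical.

    #include <linux/kernel.h>
    #include <linux/string_helpers.h>

    static void my_print_capacity(u64 bytes)        /* hypothetical helper */
    {
            char buf[16];

            /* Powers-of-1000 units; for bytes == 3280000000ULL this is
             * expected to yield "3.28GB" per the 3-significant-figure logic.
             */
            string_get_size(bytes, STRING_UNITS_10, buf, sizeof(buf));
            printk(KERN_INFO "capacity: %s\n", buf);
    }
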
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index d568894df8cc..f8eebd489149 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -274,13 +274,14 @@ cleanup1: | |||
274 | } | 274 | } |
275 | 275 | ||
276 | static int | 276 | static int |
277 | address_needs_mapping(struct device *hwdev, dma_addr_t addr) | 277 | address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size) |
278 | { | 278 | { |
279 | dma_addr_t mask = 0xffffffff; | 279 | return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); |
280 | /* If the device has a mask, use it, otherwise default to 32 bits */ | 280 | } |
281 | if (hwdev && hwdev->dma_mask) | 281 | |
282 | mask = *hwdev->dma_mask; | 282 | static int is_swiotlb_buffer(char *addr) |
283 | return (addr & ~mask) != 0; | 283 | { |
284 | return addr >= io_tlb_start && addr < io_tlb_end; | ||
284 | } | 285 | } |
285 | 286 | ||
286 | /* | 287 | /* |
@@ -467,15 +468,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
467 | void *ret; | 468 | void *ret; |
468 | int order = get_order(size); | 469 | int order = get_order(size); |
469 | 470 | ||
470 | /* | ||
471 | * XXX fix me: the DMA API should pass us an explicit DMA mask | ||
472 | * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32 | ||
473 | * bit range instead of a 16MB one). | ||
474 | */ | ||
475 | flags |= GFP_DMA; | ||
476 | |||
477 | ret = (void *)__get_free_pages(flags, order); | 471 | ret = (void *)__get_free_pages(flags, order); |
478 | if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) { | 472 | if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) { |
479 | /* | 473 | /* |
480 | * The allocated memory isn't reachable by the device. | 474 | * The allocated memory isn't reachable by the device. |
481 | * Fall back on swiotlb_map_single(). | 475 | * Fall back on swiotlb_map_single(). |
@@ -490,19 +484,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
490 | * swiotlb_map_single(), which will grab memory from | 484 | * swiotlb_map_single(), which will grab memory from |
491 | * the lowest available address range. | 485 | * the lowest available address range. |
492 | */ | 486 | */ |
493 | dma_addr_t handle; | 487 | ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE); |
494 | handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE); | 488 | if (!ret) |
495 | if (swiotlb_dma_mapping_error(handle)) | ||
496 | return NULL; | 489 | return NULL; |
497 | |||
498 | ret = bus_to_virt(handle); | ||
499 | } | 490 | } |
500 | 491 | ||
501 | memset(ret, 0, size); | 492 | memset(ret, 0, size); |
502 | dev_addr = virt_to_bus(ret); | 493 | dev_addr = virt_to_bus(ret); |
503 | 494 | ||
504 | /* Confirm address can be DMA'd by device */ | 495 | /* Confirm address can be DMA'd by device */ |
505 | if (address_needs_mapping(hwdev, dev_addr)) { | 496 | if (address_needs_mapping(hwdev, dev_addr, size)) { |
506 | printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", | 497 | printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", |
507 | (unsigned long long)*hwdev->dma_mask, | 498 | (unsigned long long)*hwdev->dma_mask, |
508 | (unsigned long long)dev_addr); | 499 | (unsigned long long)dev_addr); |
@@ -518,12 +509,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, | |||
518 | dma_addr_t dma_handle) | 509 | dma_addr_t dma_handle) |
519 | { | 510 | { |
520 | WARN_ON(irqs_disabled()); | 511 | WARN_ON(irqs_disabled()); |
521 | if (!(vaddr >= (void *)io_tlb_start | 512 | if (!is_swiotlb_buffer(vaddr)) |
522 | && vaddr < (void *)io_tlb_end)) | ||
523 | free_pages((unsigned long) vaddr, get_order(size)); | 513 | free_pages((unsigned long) vaddr, get_order(size)); |
524 | else | 514 | else |
525 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ | 515 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ |
526 | swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE); | 516 | unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); |
527 | } | 517 | } |
528 | 518 | ||
529 | static void | 519 | static void |
@@ -567,7 +557,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, | |||
567 | * we can safely return the device addr and not worry about bounce | 557 | * we can safely return the device addr and not worry about bounce |
568 | * buffering it. | 558 | * buffering it. |
569 | */ | 559 | */ |
570 | if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force) | 560 | if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force) |
571 | return dev_addr; | 561 | return dev_addr; |
572 | 562 | ||
573 | /* | 563 | /* |
@@ -584,7 +574,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, | |||
584 | /* | 574 | /* |
585 | * Ensure that the address returned is DMA'ble | 575 | * Ensure that the address returned is DMA'ble |
586 | */ | 576 | */ |
587 | if (address_needs_mapping(hwdev, dev_addr)) | 577 | if (address_needs_mapping(hwdev, dev_addr, size)) |
588 | panic("map_single: bounce buffer is not DMA'ble"); | 578 | panic("map_single: bounce buffer is not DMA'ble"); |
589 | 579 | ||
590 | return dev_addr; | 580 | return dev_addr; |
@@ -612,7 +602,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, | |||
612 | char *dma_addr = bus_to_virt(dev_addr); | 602 | char *dma_addr = bus_to_virt(dev_addr); |
613 | 603 | ||
614 | BUG_ON(dir == DMA_NONE); | 604 | BUG_ON(dir == DMA_NONE); |
615 | if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) | 605 | if (is_swiotlb_buffer(dma_addr)) |
616 | unmap_single(hwdev, dma_addr, size, dir); | 606 | unmap_single(hwdev, dma_addr, size, dir); |
617 | else if (dir == DMA_FROM_DEVICE) | 607 | else if (dir == DMA_FROM_DEVICE) |
618 | dma_mark_clean(dma_addr, size); | 608 | dma_mark_clean(dma_addr, size); |
@@ -642,7 +632,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, | |||
642 | char *dma_addr = bus_to_virt(dev_addr); | 632 | char *dma_addr = bus_to_virt(dev_addr); |
643 | 633 | ||
644 | BUG_ON(dir == DMA_NONE); | 634 | BUG_ON(dir == DMA_NONE); |
645 | if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) | 635 | if (is_swiotlb_buffer(dma_addr)) |
646 | sync_single(hwdev, dma_addr, size, dir, target); | 636 | sync_single(hwdev, dma_addr, size, dir, target); |
647 | else if (dir == DMA_FROM_DEVICE) | 637 | else if (dir == DMA_FROM_DEVICE) |
648 | dma_mark_clean(dma_addr, size); | 638 | dma_mark_clean(dma_addr, size); |
@@ -673,7 +663,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, | |||
673 | char *dma_addr = bus_to_virt(dev_addr) + offset; | 663 | char *dma_addr = bus_to_virt(dev_addr) + offset; |
674 | 664 | ||
675 | BUG_ON(dir == DMA_NONE); | 665 | BUG_ON(dir == DMA_NONE); |
676 | if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) | 666 | if (is_swiotlb_buffer(dma_addr)) |
677 | sync_single(hwdev, dma_addr, size, dir, target); | 667 | sync_single(hwdev, dma_addr, size, dir, target); |
678 | else if (dir == DMA_FROM_DEVICE) | 668 | else if (dir == DMA_FROM_DEVICE) |
679 | dma_mark_clean(dma_addr, size); | 669 | dma_mark_clean(dma_addr, size); |
@@ -727,7 +717,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, | |||
727 | for_each_sg(sgl, sg, nelems, i) { | 717 | for_each_sg(sgl, sg, nelems, i) { |
728 | addr = SG_ENT_VIRT_ADDRESS(sg); | 718 | addr = SG_ENT_VIRT_ADDRESS(sg); |
729 | dev_addr = virt_to_bus(addr); | 719 | dev_addr = virt_to_bus(addr); |
730 | if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) { | 720 | if (swiotlb_force || |
721 | address_needs_mapping(hwdev, dev_addr, sg->length)) { | ||
731 | void *map = map_single(hwdev, addr, sg->length, dir); | 722 | void *map = map_single(hwdev, addr, sg->length, dir); |
732 | if (!map) { | 723 | if (!map) { |
733 | /* Don't panic here, we expect map_sg users | 724 | /* Don't panic here, we expect map_sg users |
@@ -824,7 +815,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, | |||
824 | } | 815 | } |
825 | 816 | ||
826 | int | 817 | int |
827 | swiotlb_dma_mapping_error(dma_addr_t dma_addr) | 818 | swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) |
828 | { | 819 | { |
829 | return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); | 820 | return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); |
830 | } | 821 | } |
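The common thread in these swiotlb hunks: the open-coded "default to a 32-bit mask" test is replaced by address_needs_mapping(hwdev, addr, size), which defers to is_buffer_dma_capable() and now considers the whole buffer rather than just its start address, and the repeated io_tlb_start/io_tlb_end comparisons are factored into is_swiotlb_buffer(). As a rough sketch of what that capability test amounts to (an assumption about a helper defined outside this diff, written here under a hypothetical name):

    #include <linux/types.h>

    /* Approximation of the mask check assumed by address_needs_mapping():
     * the buffer is DMA-capable only if it ends at or below the device mask.
     */
    static inline int my_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
    {
            return addr + size <= mask;
    }
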
diff --git a/lib/syscall.c b/lib/syscall.c new file mode 100644 index 000000000000..a4f7067f72fa --- /dev/null +++ b/lib/syscall.c | |||
@@ -0,0 +1,75 @@ | |||
1 | #include <linux/ptrace.h> | ||
2 | #include <linux/sched.h> | ||
3 | #include <linux/module.h> | ||
4 | #include <asm/syscall.h> | ||
5 | |||
6 | static int collect_syscall(struct task_struct *target, long *callno, | ||
7 | unsigned long args[6], unsigned int maxargs, | ||
8 | unsigned long *sp, unsigned long *pc) | ||
9 | { | ||
10 | struct pt_regs *regs = task_pt_regs(target); | ||
11 | if (unlikely(!regs)) | ||
12 | return -EAGAIN; | ||
13 | |||
14 | *sp = user_stack_pointer(regs); | ||
15 | *pc = instruction_pointer(regs); | ||
16 | |||
17 | *callno = syscall_get_nr(target, regs); | ||
18 | if (*callno != -1L && maxargs > 0) | ||
19 | syscall_get_arguments(target, regs, 0, maxargs, args); | ||
20 | |||
21 | return 0; | ||
22 | } | ||
23 | |||
24 | /** | ||
25 | * task_current_syscall - Discover what a blocked task is doing. | ||
26 | * @target: thread to examine | ||
27 | * @callno: filled with system call number or -1 | ||
28 | * @args: filled with @maxargs system call arguments | ||
29 | * @maxargs: number of elements in @args to fill | ||
30 | * @sp: filled with user stack pointer | ||
31 | * @pc: filled with user PC | ||
32 | * | ||
33 | * If @target is blocked in a system call, returns zero with *@callno | ||
34 | * set to the call's number and @args filled in with its arguments. | ||
35 | * Registers not used for system call arguments may not be available and | ||
36 | * it is not kosher to use &struct user_regset calls while the system | ||
37 | * call is still in progress. Note we may get this result if @target | ||
38 | * has finished its system call but not yet returned to user mode, such | ||
39 | * as when it's stopped for signal handling or syscall exit tracing. | ||
40 | * | ||
41 | * If @target is blocked in the kernel during a fault or exception, | ||
42 | * returns zero with *@callno set to -1 and does not fill in @args. | ||
43 | * If so, it's now safe to examine @target using &struct user_regset | ||
44 | * get() calls as long as we're sure @target won't return to user mode. | ||
45 | * | ||
46 | * Returns -%EAGAIN if @target does not remain blocked. | ||
47 | * | ||
48 | * Returns -%EINVAL if @maxargs is too large (maximum is six). | ||
49 | */ | ||
50 | int task_current_syscall(struct task_struct *target, long *callno, | ||
51 | unsigned long args[6], unsigned int maxargs, | ||
52 | unsigned long *sp, unsigned long *pc) | ||
53 | { | ||
54 | long state; | ||
55 | unsigned long ncsw; | ||
56 | |||
57 | if (unlikely(maxargs > 6)) | ||
58 | return -EINVAL; | ||
59 | |||
60 | if (target == current) | ||
61 | return collect_syscall(target, callno, args, maxargs, sp, pc); | ||
62 | |||
63 | state = target->state; | ||
64 | if (unlikely(!state)) | ||
65 | return -EAGAIN; | ||
66 | |||
67 | ncsw = wait_task_inactive(target, state); | ||
68 | if (unlikely(!ncsw) || | ||
69 | unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) || | ||
70 | unlikely(wait_task_inactive(target, state) != ncsw)) | ||
71 | return -EAGAIN; | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | EXPORT_SYMBOL_GPL(task_current_syscall); | ||
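A hedged usage sketch for task_current_syscall(), which backs, among other things, the /proc/PID/syscall file added around the same time. report_blocked_syscall() is hypothetical, and the function's declaration is assumed to be visible to the caller through an appropriate header.

    #include <linux/kernel.h>
    #include <linux/sched.h>

    /* Hypothetical reporter: print what a (presumably blocked) task is doing. */
    static void report_blocked_syscall(struct task_struct *task)
    {
            long nr;
            unsigned long args[6], sp, pc;

            if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
                    return;         /* -EAGAIN or -EINVAL: nothing stable to report */

            if (nr == -1L)
                    printk(KERN_DEBUG "%s[%d] blocked in a fault or exception\n",
                           task->comm, task->pid);
            else
                    printk(KERN_DEBUG "%s[%d] in syscall %ld (sp=%lx, pc=%lx)\n",
                           task->comm, task->pid, nr, sp, pc);
    }
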
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 1dc2d1d18fa8..c399bc1093cb 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -27,6 +27,7 @@ | |||
27 | 27 | ||
28 | #include <asm/page.h> /* for PAGE_SIZE */ | 28 | #include <asm/page.h> /* for PAGE_SIZE */ |
29 | #include <asm/div64.h> | 29 | #include <asm/div64.h> |
30 | #include <asm/sections.h> /* for dereference_function_descriptor() */ | ||
30 | 31 | ||
31 | /* Works only for digits and letters, but small and fast */ | 32 | /* Works only for digits and letters, but small and fast */ |
32 | #define TOLOWER(x) ((x) | 0x20) | 33 | #define TOLOWER(x) ((x) | 0x20) |
@@ -220,7 +221,7 @@ int strict_strtou##type(const char *cp, unsigned int base, valtype *res)\ | |||
220 | if (len == 0) \ | 221 | if (len == 0) \ |
221 | return -EINVAL; \ | 222 | return -EINVAL; \ |
222 | \ | 223 | \ |
223 | val = simple_strtoul(cp, &tail, base); \ | 224 | val = simple_strtou##type(cp, &tail, base); \ |
224 | if ((*tail == '\0') || \ | 225 | if ((*tail == '\0') || \ |
225 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {\ | 226 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {\ |
226 | *res = val; \ | 227 | *res = val; \ |
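This one-character change matters most for the wide types: before it, strict_strtoull() generated from this macro parsed with simple_strtoul() and silently truncated 64-bit values on 32-bit architectures. A paraphrased expansion of the macro for the unsigned long long case, after the fix (not literal preprocessor output; the string and errno helpers used here are already available inside lib/vsprintf.c):

    int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res)
    {
            char *tail;
            unsigned long long val;
            size_t len;

            *res = 0;
            len = strlen(cp);
            if (len == 0)
                    return -EINVAL;

            /* previously simple_strtoul(), which returns only an unsigned long */
            val = simple_strtoull(cp, &tail, base);
            if ((*tail == '\0') ||
                ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
                    *res = val;
                    return 0;
            }

            return -EINVAL;
    }
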
@@ -513,16 +514,6 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio | |||
513 | return buf; | 514 | return buf; |
514 | } | 515 | } |
515 | 516 | ||
516 | static inline void *dereference_function_descriptor(void *ptr) | ||
517 | { | ||
518 | #if defined(CONFIG_IA64) || defined(CONFIG_PPC64) | ||
519 | void *p; | ||
520 | if (!probe_kernel_address(ptr, p)) | ||
521 | ptr = p; | ||
522 | #endif | ||
523 | return ptr; | ||
524 | } | ||
525 | |||
526 | static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags) | 517 | static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags) |
527 | { | 518 | { |
528 | unsigned long value = (unsigned long) ptr; | 519 | unsigned long value = (unsigned long) ptr; |