author    | Ingo Molnar <mingo@elte.hu> | 2008-10-28 11:26:12 -0400
committer | Ingo Molnar <mingo@elte.hu> | 2008-10-28 11:26:12 -0400
commit    | 7a9787e1eba95a166265e6a260cf30af04ef0a99 (patch)
tree      | e730a4565e0318140d2fbd2f0415d18a339d7336 /lib
parent    | 41b9eb264c8407655db57b60b4457fe1b2ec9977 (diff)
parent    | 0173a3265b228da319ceb9c1ec6a5682fd1b2d92 (diff)
Merge commit 'v2.6.28-rc2' into x86/pci-ioapic-boot-irq-quirks
Diffstat (limited to 'lib')
38 files changed, 1777 insertions, 512 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index c7ad7a5b3535..85cf7ea978aa 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -8,10 +8,10 @@ config BITREVERSE | |||
8 | tristate | 8 | tristate |
9 | 9 | ||
10 | config GENERIC_FIND_FIRST_BIT | 10 | config GENERIC_FIND_FIRST_BIT |
11 | def_bool n | 11 | bool |
12 | 12 | ||
13 | config GENERIC_FIND_NEXT_BIT | 13 | config GENERIC_FIND_NEXT_BIT |
14 | def_bool n | 14 | bool |
15 | 15 | ||
16 | config CRC_CCITT | 16 | config CRC_CCITT |
17 | tristate "CRC-CCITT functions" | 17 | tristate "CRC-CCITT functions" |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index df27132a56f4..b0f239e443bc 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -74,6 +74,9 @@ config DEBUG_FS | |||
74 | debugging files into. Enable this option to be able to read and | 74 | debugging files into. Enable this option to be able to read and |
75 | write to these files. | 75 | write to these files. |
76 | 76 | ||
77 | For detailed documentation on the debugfs API, see | ||
78 | Documentation/DocBook/filesystems. | ||
79 | |||
77 | If unsure, say N. | 80 | If unsure, say N. |
78 | 81 | ||
79 | config HEADERS_CHECK | 82 | config HEADERS_CHECK |
@@ -147,7 +150,7 @@ config DETECT_SOFTLOCKUP | |||
147 | help | 150 | help |
148 | Say Y here to enable the kernel to detect "soft lockups", | 151 | Say Y here to enable the kernel to detect "soft lockups", |
149 | which are bugs that cause the kernel to loop in kernel | 152 | which are bugs that cause the kernel to loop in kernel |
150 | mode for more than 10 seconds, without giving other tasks a | 153 | mode for more than 60 seconds, without giving other tasks a |
151 | chance to run. | 154 | chance to run. |
152 | 155 | ||
153 | When a soft-lockup is detected, the kernel will print the | 156 | When a soft-lockup is detected, the kernel will print the |
@@ -159,6 +162,30 @@ config DETECT_SOFTLOCKUP | |||
159 | can be detected via the NMI-watchdog, on platforms that | 162 | can be detected via the NMI-watchdog, on platforms that |
160 | support it.) | 163 | support it.) |
161 | 164 | ||
165 | config BOOTPARAM_SOFTLOCKUP_PANIC | ||
166 | bool "Panic (Reboot) On Soft Lockups" | ||
167 | depends on DETECT_SOFTLOCKUP | ||
168 | help | ||
169 | Say Y here to enable the kernel to panic on "soft lockups", | ||
170 | which are bugs that cause the kernel to loop in kernel | ||
171 | mode for more than 60 seconds, without giving other tasks a | ||
172 | chance to run. | ||
173 | |||
174 | The panic can be used in combination with panic_timeout, | ||
175 | to cause the system to reboot automatically after a | ||
176 | lockup has been detected. This feature is useful for | ||
177 | high-availability systems that have uptime guarantees and | ||
178 | where a lockup must be resolved ASAP. | ||
179 | |||
180 | Say N if unsure. | ||
181 | |||
182 | config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE | ||
183 | int | ||
184 | depends on DETECT_SOFTLOCKUP | ||
185 | range 0 1 | ||
186 | default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC | ||
187 | default 1 if BOOTPARAM_SOFTLOCKUP_PANIC | ||
188 | |||
162 | config SCHED_DEBUG | 189 | config SCHED_DEBUG |
163 | bool "Collect scheduler debugging info" | 190 | bool "Collect scheduler debugging info" |
164 | depends on DEBUG_KERNEL && PROC_FS | 191 | depends on DEBUG_KERNEL && PROC_FS |
@@ -367,7 +394,7 @@ config LOCKDEP | |||
367 | bool | 394 | bool |
368 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | 395 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT |
369 | select STACKTRACE | 396 | select STACKTRACE |
370 | select FRAME_POINTER if !X86 && !MIPS | 397 | select FRAME_POINTER if !X86 && !MIPS && !PPC |
371 | select KALLSYMS | 398 | select KALLSYMS |
372 | select KALLSYMS_ALL | 399 | select KALLSYMS_ALL |
373 | 400 | ||
@@ -468,6 +495,15 @@ config DEBUG_VM | |||
468 | 495 | ||
469 | If unsure, say N. | 496 | If unsure, say N. |
470 | 497 | ||
498 | config DEBUG_VIRTUAL | ||
499 | bool "Debug VM translations" | ||
500 | depends on DEBUG_KERNEL && X86 | ||
501 | help | ||
502 | Enable some costly sanity checks in the virtual-to-page code. This | ||
503 | can catch mistakes with virt_to_page() and friends. | ||
504 | |||
505 | If unsure, say N. | ||
506 | |||
471 | config DEBUG_WRITECOUNT | 507 | config DEBUG_WRITECOUNT |
472 | bool "Debug filesystem writers count" | 508 | bool "Debug filesystem writers count" |
473 | depends on DEBUG_KERNEL | 509 | depends on DEBUG_KERNEL |
@@ -478,6 +514,18 @@ config DEBUG_WRITECOUNT | |||
478 | 514 | ||
479 | If unsure, say N. | 515 | If unsure, say N. |
480 | 516 | ||
517 | config DEBUG_MEMORY_INIT | ||
518 | bool "Debug memory initialisation" if EMBEDDED | ||
519 | default !EMBEDDED | ||
520 | help | ||
521 | Enable this for additional checks during memory initialisation. | ||
522 | The sanity checks verify aspects of the VM such as the memory model | ||
523 | and other information provided by the architecture. Verbose | ||
524 | information will be printed at KERN_DEBUG loglevel depending | ||
525 | on the mminit_loglevel= command-line option. | ||
526 | |||
527 | If unsure, say Y. | ||
528 | |||
481 | config DEBUG_LIST | 529 | config DEBUG_LIST |
482 | bool "Debug linked list manipulation" | 530 | bool "Debug linked list manipulation" |
483 | depends on DEBUG_KERNEL | 531 | depends on DEBUG_KERNEL |
@@ -558,6 +606,19 @@ config RCU_TORTURE_TEST_RUNNABLE | |||
558 | Say N here if you want the RCU torture tests to start only | 606 | Say N here if you want the RCU torture tests to start only |
559 | after being manually enabled via /proc. | 607 | after being manually enabled via /proc. |
560 | 608 | ||
609 | config RCU_CPU_STALL_DETECTOR | ||
610 | bool "Check for stalled CPUs delaying RCU grace periods" | ||
611 | depends on CLASSIC_RCU | ||
612 | default n | ||
613 | help | ||
614 | This option causes RCU to printk information on which | ||
615 | CPUs are delaying the current grace period, but only when | ||
616 | the grace period extends for excessive time periods. | ||
617 | |||
618 | Say Y if you want RCU to perform such checks. | ||
619 | |||
620 | Say N if you are unsure. | ||
621 | |||
561 | config KPROBES_SANITY_TEST | 622 | config KPROBES_SANITY_TEST |
562 | bool "Kprobes sanity tests" | 623 | bool "Kprobes sanity tests" |
563 | depends on DEBUG_KERNEL | 624 | depends on DEBUG_KERNEL |
@@ -585,6 +646,33 @@ config BACKTRACE_SELF_TEST | |||
585 | 646 | ||
586 | Say N if you are unsure. | 647 | Say N if you are unsure. |
587 | 648 | ||
649 | config DEBUG_BLOCK_EXT_DEVT | ||
650 | bool "Force extended block device numbers and spread them" | ||
651 | depends on DEBUG_KERNEL | ||
652 | depends on BLOCK | ||
653 | default n | ||
654 | help | ||
655 | BIG FAT WARNING: ENABLING THIS OPTION MIGHT BREAK BOOTING ON | ||
656 | SOME DISTRIBUTIONS. DO NOT ENABLE THIS UNLESS YOU KNOW WHAT | ||
657 | YOU ARE DOING. Distros, please enable this and fix whatever | ||
658 | is broken. | ||
659 | |||
660 | Conventionally, block device numbers are allocated from a | ||
661 | predetermined contiguous area. However, the extended block | ||
662 | area may introduce non-contiguous block device numbers. This | ||
663 | option forces most block device numbers to be allocated from | ||
664 | the extended space and spreads them to discover kernel or | ||
665 | userland code paths which assume predetermined contiguous | ||
666 | device number allocation. | ||
667 | |||
668 | Note that turning on this debug option shuffles all the | ||
669 | device numbers for all IDE and SCSI devices including libata | ||
670 | ones, so a root partition specified using a device number | ||
671 | directly (via rdev or root=MAJ:MIN) won't work anymore. | ||
672 | Textual device names (root=/dev/sdXn) will continue to work. | ||
673 | |||
674 | Say N if you are unsure. | ||
675 | |||
588 | config LKDTM | 676 | config LKDTM |
589 | tristate "Linux Kernel Dump Test Tool Module" | 677 | tristate "Linux Kernel Dump Test Tool Module" |
590 | depends on DEBUG_KERNEL | 678 | depends on DEBUG_KERNEL |
@@ -622,10 +710,21 @@ config FAIL_PAGE_ALLOC | |||
622 | 710 | ||
623 | config FAIL_MAKE_REQUEST | 711 | config FAIL_MAKE_REQUEST |
624 | bool "Fault-injection capability for disk IO" | 712 | bool "Fault-injection capability for disk IO" |
625 | depends on FAULT_INJECTION | 713 | depends on FAULT_INJECTION && BLOCK |
626 | help | 714 | help |
627 | Provide fault-injection capability for disk IO. | 715 | Provide fault-injection capability for disk IO. |
628 | 716 | ||
717 | config FAIL_IO_TIMEOUT | ||
718 | bool "Faul-injection capability for faking disk interrupts" | ||
719 | depends on FAULT_INJECTION && BLOCK | ||
720 | help | ||
721 | Provide fault-injection capability on end IO handling. This | ||
722 | will make the block layer "forget" an interrupt as configured, | ||
723 | thus exercising the error handling. | ||
724 | |||
725 | Only works with drivers that use the generic timeout handling, | ||
726 | for others it won't do anything. | ||
727 | |||
629 | config FAULT_INJECTION_DEBUG_FS | 728 | config FAULT_INJECTION_DEBUG_FS |
630 | bool "Debugfs entries for fault-injection capabilities" | 729 | bool "Debugfs entries for fault-injection capabilities" |
631 | depends on FAULT_INJECTION && SYSFS && DEBUG_FS | 730 | depends on FAULT_INJECTION && SYSFS && DEBUG_FS |
@@ -637,13 +736,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER | |||
637 | depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT | 736 | depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT |
638 | depends on !X86_64 | 737 | depends on !X86_64 |
639 | select STACKTRACE | 738 | select STACKTRACE |
640 | select FRAME_POINTER | 739 | select FRAME_POINTER if !PPC |
641 | help | 740 | help |
642 | Provide stacktrace filter for fault-injection capabilities | 741 | Provide stacktrace filter for fault-injection capabilities |
643 | 742 | ||
644 | config LATENCYTOP | 743 | config LATENCYTOP |
645 | bool "Latency measuring infrastructure" | 744 | bool "Latency measuring infrastructure" |
646 | select FRAME_POINTER if !MIPS | 745 | select FRAME_POINTER if !MIPS && !PPC |
647 | select KALLSYMS | 746 | select KALLSYMS |
648 | select KALLSYMS_ALL | 747 | select KALLSYMS_ALL |
649 | select STACKTRACE | 748 | select STACKTRACE |
@@ -654,6 +753,14 @@ config LATENCYTOP | |||
654 | Enable this option if you want to use the LatencyTOP tool | 753 | Enable this option if you want to use the LatencyTOP tool |
655 | to find out which userspace is blocking on what kernel operations. | 754 | to find out which userspace is blocking on what kernel operations. |
656 | 755 | ||
756 | config SYSCTL_SYSCALL_CHECK | ||
757 | bool "Sysctl checks" | ||
758 | depends on SYSCTL_SYSCALL | ||
759 | ---help--- | ||
760 | sys_sysctl uses binary paths that have been found challenging | ||
761 | to properly maintain and use. This enables checks that help | ||
762 | you to keep things correct. | ||
763 | |||
657 | source kernel/trace/Kconfig | 764 | source kernel/trace/Kconfig |
658 | 765 | ||
659 | config PROVIDE_OHCI1394_DMA_INIT | 766 | config PROVIDE_OHCI1394_DMA_INIT |
@@ -696,6 +803,70 @@ config FIREWIRE_OHCI_REMOTE_DMA | |||
696 | 803 | ||
697 | If unsure, say N. | 804 | If unsure, say N. |
698 | 805 | ||
806 | menuconfig BUILD_DOCSRC | ||
807 | bool "Build targets in Documentation/ tree" | ||
808 | depends on HEADERS_CHECK | ||
809 | help | ||
810 | This option attempts to build objects from the source files in the | ||
811 | kernel Documentation/ tree. | ||
812 | |||
813 | Say N if you are unsure. | ||
814 | |||
815 | config DYNAMIC_PRINTK_DEBUG | ||
816 | bool "Enable dynamic printk() call support" | ||
817 | default n | ||
818 | depends on PRINTK | ||
819 | select PRINTK_DEBUG | ||
820 | help | ||
821 | |||
822 | Compiles debug level messages into the kernel, which would not | ||
823 | otherwise be available at runtime. These messages can then be | ||
824 | enabled/disabled on a per module basis. This mechanism implicitly | ||
825 | enables all pr_debug() and dev_dbg() calls. The impact of this | ||
826 | compile option is a larger kernel text size of about 2%. | ||
827 | |||
828 | Usage: | ||
829 | |||
830 | Dynamic debugging is controlled by the debugfs file, | ||
831 | dynamic_printk/modules. This file contains a list of the modules that | ||
832 | can be enabled. The format of the file is the module name, followed | ||
833 | by a set of flags that can be enabled. The first flag is always the | ||
834 | 'enabled' flag. For example: | ||
835 | |||
836 | <module_name> <enabled=0/1> | ||
837 | . | ||
838 | . | ||
839 | . | ||
840 | |||
841 | <module_name> : Name of the module in which the debug call resides | ||
842 | <enabled=0/1> : whether the messages are enabled or not | ||
843 | |||
844 | From a live system: | ||
845 | |||
846 | snd_hda_intel enabled=0 | ||
847 | fixup enabled=0 | ||
848 | driver enabled=0 | ||
849 | |||
850 | Enable a module: | ||
851 | |||
852 | $ echo "set enabled=1 <module_name>" > dynamic_printk/modules | ||
853 | |||
854 | Disable a module: | ||
855 | |||
856 | $ echo "set enabled=0 <module_name>" > dynamic_printk/modules | ||
857 | |||
858 | Enable all modules: | ||
859 | |||
860 | $ echo "set enabled=1 all" > dynamic_printk/modules | ||
861 | |||
862 | Disable all modules: | ||
863 | |||
864 | $ echo "set enabled=0 all" > dynamic_printk/modules | ||
865 | |||
866 | Finally, passing "dynamic_printk" at the command line enables | ||
867 | debugging for all modules. This mode can be turned off via the above | ||
868 | disable command. | ||
869 | |||
699 | source "samples/Kconfig" | 870 | source "samples/Kconfig" |
700 | 871 | ||
701 | source "lib/Kconfig.kgdb" | 872 | source "lib/Kconfig.kgdb" |
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index a5d4b1dac2a5..9b5d1d7f2ef7 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb | |||
@@ -1,20 +1,20 @@ | |||
1 | 1 | ||
2 | config HAVE_ARCH_KGDB_SHADOW_INFO | ||
3 | bool | ||
4 | |||
5 | config HAVE_ARCH_KGDB | 2 | config HAVE_ARCH_KGDB |
6 | bool | 3 | bool |
7 | 4 | ||
8 | menuconfig KGDB | 5 | menuconfig KGDB |
9 | bool "KGDB: kernel debugging with remote gdb" | 6 | bool "KGDB: kernel debugging with remote gdb" |
10 | select FRAME_POINTER | ||
11 | depends on HAVE_ARCH_KGDB | 7 | depends on HAVE_ARCH_KGDB |
12 | depends on DEBUG_KERNEL && EXPERIMENTAL | 8 | depends on DEBUG_KERNEL && EXPERIMENTAL |
13 | help | 9 | help |
14 | If you say Y here, it will be possible to remotely debug the | 10 | If you say Y here, it will be possible to remotely debug the |
15 | kernel using gdb. Documentation of kernel debugger is available | 11 | kernel using gdb. It is recommended, but not required, that |
16 | at http://kgdb.sourceforge.net as well as in DocBook form | 12 | you also turn on the kernel config option |
17 | in Documentation/DocBook/. If unsure, say N. | 13 | CONFIG_FRAME_POINTER to aid in producing more reliable stack |
14 | backtraces in the external debugger. Documentation of the | ||
15 | kernel debugger is available at http://kgdb.sourceforge.net | ||
16 | as well as in DocBook form in Documentation/DocBook/. If | ||
17 | unsure, say N. | ||
18 | 18 | ||
19 | if KGDB | 19 | if KGDB |
20 | 20 | ||
diff --git a/lib/Makefile b/lib/Makefile index 818c4d455518..16feaab057b2 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -11,15 +11,16 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ | |||
11 | rbtree.o radix-tree.o dump_stack.o \ | 11 | rbtree.o radix-tree.o dump_stack.o \ |
12 | idr.o int_sqrt.o extable.o prio_tree.o \ | 12 | idr.o int_sqrt.o extable.o prio_tree.o \ |
13 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ | 13 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ |
14 | proportions.o prio_heap.o ratelimit.o | 14 | proportions.o prio_heap.o ratelimit.o show_mem.o |
15 | 15 | ||
16 | lib-$(CONFIG_MMU) += ioremap.o | 16 | lib-$(CONFIG_MMU) += ioremap.o |
17 | lib-$(CONFIG_SMP) += cpumask.o | 17 | lib-$(CONFIG_SMP) += cpumask.o |
18 | 18 | ||
19 | lib-y += kobject.o kref.o klist.o | 19 | lib-y += kobject.o kref.o klist.o |
20 | 20 | ||
21 | obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 21 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
22 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o | 22 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
23 | string_helpers.o | ||
23 | 24 | ||
24 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | 25 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) |
25 | CFLAGS_kobject.o += -DDEBUG | 26 | CFLAGS_kobject.o += -DDEBUG |
@@ -78,6 +79,10 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o | |||
78 | 79 | ||
79 | obj-$(CONFIG_HAVE_LMB) += lmb.o | 80 | obj-$(CONFIG_HAVE_LMB) += lmb.o |
80 | 81 | ||
82 | obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o | ||
83 | |||
84 | obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o | ||
85 | |||
81 | hostprogs-y := gen_crc32table | 86 | hostprogs-y := gen_crc32table |
82 | clean-files := crc32table.h | 87 | clean-files := crc32table.h |
83 | 88 | ||
diff --git a/lib/bcd.c b/lib/bcd.c new file mode 100644 index 000000000000..d74257fd0fe7 --- /dev/null +++ b/lib/bcd.c | |||
@@ -0,0 +1,14 @@ | |||
1 | #include <linux/bcd.h> | ||
2 | #include <linux/module.h> | ||
3 | |||
4 | unsigned bcd2bin(unsigned char val) | ||
5 | { | ||
6 | return (val & 0x0f) + (val >> 4) * 10; | ||
7 | } | ||
8 | EXPORT_SYMBOL(bcd2bin); | ||
9 | |||
10 | unsigned char bin2bcd(unsigned val) | ||
11 | { | ||
12 | return ((val / 10) << 4) + val % 10; | ||
13 | } | ||
14 | EXPORT_SYMBOL(bin2bcd); | ||
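The two helpers above centralise the binary-coded-decimal conversions that RTC and similar drivers used to open-code: each nibble of a BCD byte holds one decimal digit. A minimal usage sketch (the raw value stands in for a hypothetical RTC register read):

    unsigned char raw = 0x59;           /* BCD: high nibble 5, low nibble 9 */
    unsigned sec  = bcd2bin(raw);       /* 0x59 -> 59 */
    unsigned char out = bin2bcd(sec);   /* 59 -> 0x59 */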
diff --git a/lib/bitmap.c b/lib/bitmap.c index 482df94ea21e..1338469ac849 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c | |||
@@ -996,3 +996,25 @@ int bitmap_allocate_region(unsigned long *bitmap, int pos, int order) | |||
996 | return 0; | 996 | return 0; |
997 | } | 997 | } |
998 | EXPORT_SYMBOL(bitmap_allocate_region); | 998 | EXPORT_SYMBOL(bitmap_allocate_region); |
999 | |||
1000 | /** | ||
1001 | * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order. | ||
1002 | * @dst: destination buffer | ||
1003 | * @src: bitmap to copy | ||
1004 | * @nbits: number of bits in the bitmap | ||
1005 | * | ||
1006 | * Require nbits % BITS_PER_LONG == 0. | ||
1007 | */ | ||
1008 | void bitmap_copy_le(void *dst, const unsigned long *src, int nbits) | ||
1009 | { | ||
1010 | unsigned long *d = dst; | ||
1011 | int i; | ||
1012 | |||
1013 | for (i = 0; i < nbits/BITS_PER_LONG; i++) { | ||
1014 | if (BITS_PER_LONG == 64) | ||
1015 | d[i] = cpu_to_le64(src[i]); | ||
1016 | else | ||
1017 | d[i] = cpu_to_le32(src[i]); | ||
1018 | } | ||
1019 | } | ||
1020 | EXPORT_SYMBOL(bitmap_copy_le); | ||
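bitmap_copy_le() above serialises a native-endian bitmap into explicit little-endian order, e.g. for a buffer that is handed to hardware. A hedged sketch, assuming a 64-bit kernel so that one unsigned long covers all 64 bits:

    DECLARE_BITMAP(mask, 64);          /* native-endian bitmap */
    u64 wire;                          /* little-endian copy for the device */

    bitmap_zero(mask, 64);
    set_bit(3, mask);
    bitmap_copy_le(&wire, mask, 64);   /* nbits must be a multiple of BITS_PER_LONG */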
diff --git a/lib/cmdline.c b/lib/cmdline.c index f596c08d213a..f5f3ad8b62ff 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c | |||
@@ -116,7 +116,7 @@ char *get_options(const char *str, int nints, int *ints) | |||
116 | /** | 116 | /** |
117 | * memparse - parse a string with mem suffixes into a number | 117 | * memparse - parse a string with mem suffixes into a number |
118 | * @ptr: Where parse begins | 118 | * @ptr: Where parse begins |
119 | * @retptr: (output) Pointer to next char after parse completes | 119 | * @retptr: (output) Optional pointer to next char after parse completes |
120 | * | 120 | * |
121 | * Parses a string into a number. The number stored at @ptr is | 121 | * Parses a string into a number. The number stored at @ptr is |
122 | * potentially suffixed with %K (for kilobytes, or 1024 bytes), | 122 | * potentially suffixed with %K (for kilobytes, or 1024 bytes), |
@@ -126,11 +126,13 @@ char *get_options(const char *str, int nints, int *ints) | |||
126 | * megabyte, or one gigabyte, respectively. | 126 | * megabyte, or one gigabyte, respectively. |
127 | */ | 127 | */ |
128 | 128 | ||
129 | unsigned long long memparse (char *ptr, char **retptr) | 129 | unsigned long long memparse(const char *ptr, char **retptr) |
130 | { | 130 | { |
131 | unsigned long long ret = simple_strtoull (ptr, retptr, 0); | 131 | char *endptr; /* local pointer to end of parsed string */ |
132 | 132 | ||
133 | switch (**retptr) { | 133 | unsigned long long ret = simple_strtoull(ptr, &endptr, 0); |
134 | |||
135 | switch (*endptr) { | ||
134 | case 'G': | 136 | case 'G': |
135 | case 'g': | 137 | case 'g': |
136 | ret <<= 10; | 138 | ret <<= 10; |
@@ -140,10 +142,14 @@ unsigned long long memparse (char *ptr, char **retptr) | |||
140 | case 'K': | 142 | case 'K': |
141 | case 'k': | 143 | case 'k': |
142 | ret <<= 10; | 144 | ret <<= 10; |
143 | (*retptr)++; | 145 | endptr++; |
144 | default: | 146 | default: |
145 | break; | 147 | break; |
146 | } | 148 | } |
149 | |||
150 | if (retptr) | ||
151 | *retptr = endptr; | ||
152 | |||
147 | return ret; | 153 | return ret; |
148 | } | 154 | } |
149 | 155 | ||
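The reworked memparse() above makes @retptr optional, so callers that only want the numeric value no longer need a dummy pointer. A short sketch of both call styles:

    char *after;
    unsigned long long a = memparse("64K", &after);  /* a == 65536, after points past 'K' */
    unsigned long long b = memparse("2G", NULL);     /* new: retptr may be NULL */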
diff --git a/lib/cpumask.c b/lib/cpumask.c index bb4f76d3c3e7..5f97dc25ef9c 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c | |||
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp) | |||
15 | } | 15 | } |
16 | EXPORT_SYMBOL(__next_cpu); | 16 | EXPORT_SYMBOL(__next_cpu); |
17 | 17 | ||
18 | #if NR_CPUS > 64 | ||
19 | int __next_cpu_nr(int n, const cpumask_t *srcp) | ||
20 | { | ||
21 | return min_t(int, nr_cpu_ids, | ||
22 | find_next_bit(srcp->bits, nr_cpu_ids, n+1)); | ||
23 | } | ||
24 | EXPORT_SYMBOL(__next_cpu_nr); | ||
25 | #endif | ||
26 | |||
18 | int __any_online_cpu(const cpumask_t *mask) | 27 | int __any_online_cpu(const cpumask_t *mask) |
19 | { | 28 | { |
20 | int cpu; | 29 | int cpu; |
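__next_cpu_nr() above is only built when NR_CPUS > 64 and bounds the bit search by nr_cpu_ids rather than NR_CPUS. A sketch of the kind of loop it backs (assuming the _nr iterator variants of this kernel era):

    int cpu;

    /* stops at nr_cpu_ids instead of scanning all NR_CPUS bits */
    for_each_cpu_mask_nr(cpu, cpu_online_map)
            printk(KERN_DEBUG "cpu %d is online\n", cpu);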
diff --git a/lib/debug_locks.c b/lib/debug_locks.c index 0ef01d14727c..0218b4693dd8 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * | 8 | * |
9 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | 9 | * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
10 | */ | 10 | */ |
11 | #include <linux/kernel.h> | ||
11 | #include <linux/rwsem.h> | 12 | #include <linux/rwsem.h> |
12 | #include <linux/mutex.h> | 13 | #include <linux/mutex.h> |
13 | #include <linux/module.h> | 14 | #include <linux/module.h> |
@@ -37,6 +38,7 @@ int debug_locks_off(void) | |||
37 | { | 38 | { |
38 | if (xchg(&debug_locks, 0)) { | 39 | if (xchg(&debug_locks, 0)) { |
39 | if (!debug_locks_silent) { | 40 | if (!debug_locks_silent) { |
41 | oops_in_progress = 1; | ||
40 | console_verbose(); | 42 | console_verbose(); |
41 | return 1; | 43 | return 1; |
42 | } | 44 | } |
diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 85b18d79be89..e3ab374e1334 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c | |||
@@ -112,6 +112,7 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) | |||
112 | 112 | ||
113 | /* | 113 | /* |
114 | * Allocate a new object. If the pool is empty, switch off the debugger. | 114 | * Allocate a new object. If the pool is empty, switch off the debugger. |
115 | * Must be called with interrupts disabled. | ||
115 | */ | 116 | */ |
116 | static struct debug_obj * | 117 | static struct debug_obj * |
117 | alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) | 118 | alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) |
@@ -148,17 +149,18 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) | |||
148 | static void free_object(struct debug_obj *obj) | 149 | static void free_object(struct debug_obj *obj) |
149 | { | 150 | { |
150 | unsigned long idx = (unsigned long)(obj - obj_static_pool); | 151 | unsigned long idx = (unsigned long)(obj - obj_static_pool); |
152 | unsigned long flags; | ||
151 | 153 | ||
152 | if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) { | 154 | if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) { |
153 | spin_lock(&pool_lock); | 155 | spin_lock_irqsave(&pool_lock, flags); |
154 | hlist_add_head(&obj->node, &obj_pool); | 156 | hlist_add_head(&obj->node, &obj_pool); |
155 | obj_pool_free++; | 157 | obj_pool_free++; |
156 | obj_pool_used--; | 158 | obj_pool_used--; |
157 | spin_unlock(&pool_lock); | 159 | spin_unlock_irqrestore(&pool_lock, flags); |
158 | } else { | 160 | } else { |
159 | spin_lock(&pool_lock); | 161 | spin_lock_irqsave(&pool_lock, flags); |
160 | obj_pool_used--; | 162 | obj_pool_used--; |
161 | spin_unlock(&pool_lock); | 163 | spin_unlock_irqrestore(&pool_lock, flags); |
162 | kmem_cache_free(obj_cache, obj); | 164 | kmem_cache_free(obj_cache, obj); |
163 | } | 165 | } |
164 | } | 166 | } |
@@ -171,6 +173,7 @@ static void debug_objects_oom(void) | |||
171 | { | 173 | { |
172 | struct debug_bucket *db = obj_hash; | 174 | struct debug_bucket *db = obj_hash; |
173 | struct hlist_node *node, *tmp; | 175 | struct hlist_node *node, *tmp; |
176 | HLIST_HEAD(freelist); | ||
174 | struct debug_obj *obj; | 177 | struct debug_obj *obj; |
175 | unsigned long flags; | 178 | unsigned long flags; |
176 | int i; | 179 | int i; |
@@ -179,11 +182,14 @@ static void debug_objects_oom(void) | |||
179 | 182 | ||
180 | for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { | 183 | for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { |
181 | spin_lock_irqsave(&db->lock, flags); | 184 | spin_lock_irqsave(&db->lock, flags); |
182 | hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { | 185 | hlist_move_list(&db->list, &freelist); |
186 | spin_unlock_irqrestore(&db->lock, flags); | ||
187 | |||
188 | /* Now free them */ | ||
189 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { | ||
183 | hlist_del(&obj->node); | 190 | hlist_del(&obj->node); |
184 | free_object(obj); | 191 | free_object(obj); |
185 | } | 192 | } |
186 | spin_unlock_irqrestore(&db->lock, flags); | ||
187 | } | 193 | } |
188 | } | 194 | } |
189 | 195 | ||
@@ -205,9 +211,8 @@ static void debug_print_object(struct debug_obj *obj, char *msg) | |||
205 | 211 | ||
206 | if (limit < 5 && obj->descr != descr_test) { | 212 | if (limit < 5 && obj->descr != descr_test) { |
207 | limit++; | 213 | limit++; |
208 | printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, | 214 | WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, |
209 | obj_states[obj->state], obj->descr->name); | 215 | obj_states[obj->state], obj->descr->name); |
210 | WARN_ON(1); | ||
211 | } | 216 | } |
212 | debug_objects_warnings++; | 217 | debug_objects_warnings++; |
213 | } | 218 | } |
@@ -226,15 +231,13 @@ debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state), | |||
226 | 231 | ||
227 | static void debug_object_is_on_stack(void *addr, int onstack) | 232 | static void debug_object_is_on_stack(void *addr, int onstack) |
228 | { | 233 | { |
229 | void *stack = current->stack; | ||
230 | int is_on_stack; | 234 | int is_on_stack; |
231 | static int limit; | 235 | static int limit; |
232 | 236 | ||
233 | if (limit > 4) | 237 | if (limit > 4) |
234 | return; | 238 | return; |
235 | 239 | ||
236 | is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE)); | 240 | is_on_stack = object_is_on_stack(addr); |
237 | |||
238 | if (is_on_stack == onstack) | 241 | if (is_on_stack == onstack) |
239 | return; | 242 | return; |
240 | 243 | ||
@@ -501,8 +504,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr) | |||
501 | return; | 504 | return; |
502 | default: | 505 | default: |
503 | hlist_del(&obj->node); | 506 | hlist_del(&obj->node); |
507 | spin_unlock_irqrestore(&db->lock, flags); | ||
504 | free_object(obj); | 508 | free_object(obj); |
505 | break; | 509 | return; |
506 | } | 510 | } |
507 | out_unlock: | 511 | out_unlock: |
508 | spin_unlock_irqrestore(&db->lock, flags); | 512 | spin_unlock_irqrestore(&db->lock, flags); |
@@ -513,6 +517,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size) | |||
513 | { | 517 | { |
514 | unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; | 518 | unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; |
515 | struct hlist_node *node, *tmp; | 519 | struct hlist_node *node, *tmp; |
520 | HLIST_HEAD(freelist); | ||
516 | struct debug_obj_descr *descr; | 521 | struct debug_obj_descr *descr; |
517 | enum debug_obj_state state; | 522 | enum debug_obj_state state; |
518 | struct debug_bucket *db; | 523 | struct debug_bucket *db; |
@@ -548,11 +553,18 @@ repeat: | |||
548 | goto repeat; | 553 | goto repeat; |
549 | default: | 554 | default: |
550 | hlist_del(&obj->node); | 555 | hlist_del(&obj->node); |
551 | free_object(obj); | 556 | hlist_add_head(&obj->node, &freelist); |
552 | break; | 557 | break; |
553 | } | 558 | } |
554 | } | 559 | } |
555 | spin_unlock_irqrestore(&db->lock, flags); | 560 | spin_unlock_irqrestore(&db->lock, flags); |
561 | |||
562 | /* Now free them */ | ||
563 | hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { | ||
564 | hlist_del(&obj->node); | ||
565 | free_object(obj); | ||
566 | } | ||
567 | |||
556 | if (cnt > debug_objects_maxchain) | 568 | if (cnt > debug_objects_maxchain) |
557 | debug_objects_maxchain = cnt; | 569 | debug_objects_maxchain = cnt; |
558 | } | 570 | } |
@@ -735,26 +747,22 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) | |||
735 | 747 | ||
736 | obj = lookup_object(addr, db); | 748 | obj = lookup_object(addr, db); |
737 | if (!obj && state != ODEBUG_STATE_NONE) { | 749 | if (!obj && state != ODEBUG_STATE_NONE) { |
738 | printk(KERN_ERR "ODEBUG: selftest object not found\n"); | 750 | WARN(1, KERN_ERR "ODEBUG: selftest object not found\n"); |
739 | WARN_ON(1); | ||
740 | goto out; | 751 | goto out; |
741 | } | 752 | } |
742 | if (obj && obj->state != state) { | 753 | if (obj && obj->state != state) { |
743 | printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", | 754 | WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", |
744 | obj->state, state); | 755 | obj->state, state); |
745 | WARN_ON(1); | ||
746 | goto out; | 756 | goto out; |
747 | } | 757 | } |
748 | if (fixups != debug_objects_fixups) { | 758 | if (fixups != debug_objects_fixups) { |
749 | printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", | 759 | WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", |
750 | fixups, debug_objects_fixups); | 760 | fixups, debug_objects_fixups); |
751 | WARN_ON(1); | ||
752 | goto out; | 761 | goto out; |
753 | } | 762 | } |
754 | if (warnings != debug_objects_warnings) { | 763 | if (warnings != debug_objects_warnings) { |
755 | printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", | 764 | WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", |
756 | warnings, debug_objects_warnings); | 765 | warnings, debug_objects_warnings); |
757 | WARN_ON(1); | ||
758 | goto out; | 766 | goto out; |
759 | } | 767 | } |
760 | res = 0; | 768 | res = 0; |
diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c new file mode 100644 index 000000000000..d640f87bdc9e --- /dev/null +++ b/lib/dynamic_printk.c | |||
@@ -0,0 +1,418 @@ | |||
1 | /* | ||
2 | * lib/dynamic_printk.c | ||
3 | * | ||
4 | * make pr_debug()/dev_dbg() calls runtime configurable based upon their | ||
5 | * source module. | ||
6 | * | ||
7 | * Copyright (C) 2008 Red Hat, Inc., Jason Baron <jbaron@redhat.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/uaccess.h> | ||
13 | #include <linux/seq_file.h> | ||
14 | #include <linux/debugfs.h> | ||
15 | #include <linux/fs.h> | ||
16 | |||
17 | extern struct mod_debug __start___verbose[]; | ||
18 | extern struct mod_debug __stop___verbose[]; | ||
19 | |||
20 | struct debug_name { | ||
21 | struct hlist_node hlist; | ||
22 | struct hlist_node hlist2; | ||
23 | int hash1; | ||
24 | int hash2; | ||
25 | char *name; | ||
26 | int enable; | ||
27 | int type; | ||
28 | }; | ||
29 | |||
30 | static int nr_entries; | ||
31 | static int num_enabled; | ||
32 | int dynamic_enabled = DYNAMIC_ENABLED_NONE; | ||
33 | static struct hlist_head module_table[DEBUG_HASH_TABLE_SIZE] = | ||
34 | { [0 ... DEBUG_HASH_TABLE_SIZE-1] = HLIST_HEAD_INIT }; | ||
35 | static struct hlist_head module_table2[DEBUG_HASH_TABLE_SIZE] = | ||
36 | { [0 ... DEBUG_HASH_TABLE_SIZE-1] = HLIST_HEAD_INIT }; | ||
37 | static DECLARE_MUTEX(debug_list_mutex); | ||
38 | |||
39 | /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which | ||
40 | * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They | ||
41 | * use independent hash functions, to reduce the chance of false positives. | ||
42 | */ | ||
43 | long long dynamic_printk_enabled; | ||
44 | EXPORT_SYMBOL_GPL(dynamic_printk_enabled); | ||
45 | long long dynamic_printk_enabled2; | ||
46 | EXPORT_SYMBOL_GPL(dynamic_printk_enabled2); | ||
47 | |||
48 | /* returns the debug module pointer. */ | ||
49 | static struct debug_name *find_debug_module(char *module_name) | ||
50 | { | ||
51 | int i; | ||
52 | struct hlist_head *head; | ||
53 | struct hlist_node *node; | ||
54 | struct debug_name *element; | ||
55 | |||
56 | element = NULL; | ||
57 | for (i = 0; i < DEBUG_HASH_TABLE_SIZE; i++) { | ||
58 | head = &module_table[i]; | ||
59 | hlist_for_each_entry_rcu(element, node, head, hlist) | ||
60 | if (!strcmp(element->name, module_name)) | ||
61 | return element; | ||
62 | } | ||
63 | return NULL; | ||
64 | } | ||
65 | |||
66 | /* returns the debug module pointer. */ | ||
67 | static struct debug_name *find_debug_module_hash(char *module_name, int hash) | ||
68 | { | ||
69 | struct hlist_head *head; | ||
70 | struct hlist_node *node; | ||
71 | struct debug_name *element; | ||
72 | |||
73 | element = NULL; | ||
74 | head = &module_table[hash]; | ||
75 | hlist_for_each_entry_rcu(element, node, head, hlist) | ||
76 | if (!strcmp(element->name, module_name)) | ||
77 | return element; | ||
78 | return NULL; | ||
79 | } | ||
80 | |||
81 | /* caller must hold mutex */ | ||
82 | static int __add_debug_module(char *mod_name, int hash, int hash2) | ||
83 | { | ||
84 | struct debug_name *new; | ||
85 | char *module_name; | ||
86 | int ret = 0; | ||
87 | |||
88 | if (find_debug_module(mod_name)) { | ||
89 | ret = -EINVAL; | ||
90 | goto out; | ||
91 | } | ||
92 | module_name = kmalloc(strlen(mod_name) + 1, GFP_KERNEL); | ||
93 | if (!module_name) { | ||
94 | ret = -ENOMEM; | ||
95 | goto out; | ||
96 | } | ||
97 | module_name = strcpy(module_name, mod_name); | ||
98 | module_name[strlen(mod_name)] = '\0'; | ||
99 | new = kzalloc(sizeof(struct debug_name), GFP_KERNEL); | ||
100 | if (!new) { | ||
101 | kfree(module_name); | ||
102 | ret = -ENOMEM; | ||
103 | goto out; | ||
104 | } | ||
105 | INIT_HLIST_NODE(&new->hlist); | ||
106 | INIT_HLIST_NODE(&new->hlist2); | ||
107 | new->name = module_name; | ||
108 | new->hash1 = hash; | ||
109 | new->hash2 = hash2; | ||
110 | hlist_add_head_rcu(&new->hlist, &module_table[hash]); | ||
111 | hlist_add_head_rcu(&new->hlist2, &module_table2[hash2]); | ||
112 | nr_entries++; | ||
113 | out: | ||
114 | return ret; | ||
115 | } | ||
116 | |||
117 | int unregister_dynamic_debug_module(char *mod_name) | ||
118 | { | ||
119 | struct debug_name *element; | ||
120 | int ret = 0; | ||
121 | |||
122 | down(&debug_list_mutex); | ||
123 | element = find_debug_module(mod_name); | ||
124 | if (!element) { | ||
125 | ret = -EINVAL; | ||
126 | goto out; | ||
127 | } | ||
128 | hlist_del_rcu(&element->hlist); | ||
129 | hlist_del_rcu(&element->hlist2); | ||
130 | synchronize_rcu(); | ||
131 | kfree(element->name); | ||
132 | if (element->enable) | ||
133 | num_enabled--; | ||
134 | kfree(element); | ||
135 | nr_entries--; | ||
136 | out: | ||
137 | up(&debug_list_mutex); | ||
138 | return ret; | ||
139 | } | ||
140 | EXPORT_SYMBOL_GPL(unregister_dynamic_debug_module); | ||
141 | |||
142 | int register_dynamic_debug_module(char *mod_name, int type, char *share_name, | ||
143 | char *flags, int hash, int hash2) | ||
144 | { | ||
145 | struct debug_name *elem; | ||
146 | int ret = 0; | ||
147 | |||
148 | down(&debug_list_mutex); | ||
149 | elem = find_debug_module(mod_name); | ||
150 | if (!elem) { | ||
151 | if (__add_debug_module(mod_name, hash, hash2)) | ||
152 | goto out; | ||
153 | elem = find_debug_module(mod_name); | ||
154 | if (dynamic_enabled == DYNAMIC_ENABLED_ALL && | ||
155 | !strcmp(mod_name, share_name)) { | ||
156 | elem->enable = true; | ||
157 | num_enabled++; | ||
158 | } | ||
159 | } | ||
160 | elem->type |= type; | ||
161 | out: | ||
162 | up(&debug_list_mutex); | ||
163 | return ret; | ||
164 | } | ||
165 | EXPORT_SYMBOL_GPL(register_dynamic_debug_module); | ||
166 | |||
167 | int __dynamic_dbg_enabled_helper(char *mod_name, int type, int value, int hash) | ||
168 | { | ||
169 | struct debug_name *elem; | ||
170 | int ret = 0; | ||
171 | |||
172 | if (dynamic_enabled == DYNAMIC_ENABLED_ALL) | ||
173 | return 1; | ||
174 | rcu_read_lock(); | ||
175 | elem = find_debug_module_hash(mod_name, hash); | ||
176 | if (elem && elem->enable) | ||
177 | ret = 1; | ||
178 | rcu_read_unlock(); | ||
179 | return ret; | ||
180 | } | ||
181 | EXPORT_SYMBOL_GPL(__dynamic_dbg_enabled_helper); | ||
182 | |||
183 | static void set_all(bool enable) | ||
184 | { | ||
185 | struct debug_name *e; | ||
186 | struct hlist_node *node; | ||
187 | int i; | ||
188 | long long enable_mask; | ||
189 | |||
190 | for (i = 0; i < DEBUG_HASH_TABLE_SIZE; i++) { | ||
191 | if (module_table[i].first != NULL) { | ||
192 | hlist_for_each_entry(e, node, &module_table[i], hlist) { | ||
193 | e->enable = enable; | ||
194 | } | ||
195 | } | ||
196 | } | ||
197 | if (enable) | ||
198 | enable_mask = ULLONG_MAX; | ||
199 | else | ||
200 | enable_mask = 0; | ||
201 | dynamic_printk_enabled = enable_mask; | ||
202 | dynamic_printk_enabled2 = enable_mask; | ||
203 | } | ||
204 | |||
205 | static int disabled_hash(int i, bool first_table) | ||
206 | { | ||
207 | struct debug_name *e; | ||
208 | struct hlist_node *node; | ||
209 | |||
210 | if (first_table) { | ||
211 | hlist_for_each_entry(e, node, &module_table[i], hlist) { | ||
212 | if (e->enable) | ||
213 | return 0; | ||
214 | } | ||
215 | } else { | ||
216 | hlist_for_each_entry(e, node, &module_table2[i], hlist2) { | ||
217 | if (e->enable) | ||
218 | return 0; | ||
219 | } | ||
220 | } | ||
221 | return 1; | ||
222 | } | ||
223 | |||
224 | static ssize_t pr_debug_write(struct file *file, const char __user *buf, | ||
225 | size_t length, loff_t *ppos) | ||
226 | { | ||
227 | char *buffer, *s, *value_str, *setting_str; | ||
228 | int err, value; | ||
229 | struct debug_name *elem = NULL; | ||
230 | int all = 0; | ||
231 | |||
232 | if (length > PAGE_SIZE || length < 0) | ||
233 | return -EINVAL; | ||
234 | |||
235 | buffer = (char *)__get_free_page(GFP_KERNEL); | ||
236 | if (!buffer) | ||
237 | return -ENOMEM; | ||
238 | |||
239 | err = -EFAULT; | ||
240 | if (copy_from_user(buffer, buf, length)) | ||
241 | goto out; | ||
242 | |||
243 | err = -EINVAL; | ||
244 | if (length < PAGE_SIZE) | ||
245 | buffer[length] = '\0'; | ||
246 | else if (buffer[PAGE_SIZE-1]) | ||
247 | goto out; | ||
248 | |||
249 | err = -EINVAL; | ||
250 | down(&debug_list_mutex); | ||
251 | |||
252 | if (strncmp("set", buffer, 3)) | ||
253 | goto out_up; | ||
254 | s = buffer + 3; | ||
255 | setting_str = strsep(&s, "="); | ||
256 | if (s == NULL) | ||
257 | goto out_up; | ||
258 | setting_str = strstrip(setting_str); | ||
259 | value_str = strsep(&s, " "); | ||
260 | if (s == NULL) | ||
261 | goto out_up; | ||
262 | s = strstrip(s); | ||
263 | if (!strncmp(s, "all", 3)) | ||
264 | all = 1; | ||
265 | else | ||
266 | elem = find_debug_module(s); | ||
267 | if (!strncmp(setting_str, "enable", 6)) { | ||
268 | value = !!simple_strtol(value_str, NULL, 10); | ||
269 | if (all) { | ||
270 | if (value) { | ||
271 | set_all(true); | ||
272 | num_enabled = nr_entries; | ||
273 | dynamic_enabled = DYNAMIC_ENABLED_ALL; | ||
274 | } else { | ||
275 | set_all(false); | ||
276 | num_enabled = 0; | ||
277 | dynamic_enabled = DYNAMIC_ENABLED_NONE; | ||
278 | } | ||
279 | err = 0; | ||
280 | } else { | ||
281 | if (elem) { | ||
282 | if (value && (elem->enable == 0)) { | ||
283 | dynamic_printk_enabled |= | ||
284 | (1LL << elem->hash1); | ||
285 | dynamic_printk_enabled2 |= | ||
286 | (1LL << elem->hash2); | ||
287 | elem->enable = 1; | ||
288 | num_enabled++; | ||
289 | dynamic_enabled = DYNAMIC_ENABLED_SOME; | ||
290 | err = 0; | ||
291 | printk(KERN_DEBUG | ||
292 | "debugging enabled for module %s", | ||
293 | elem->name); | ||
294 | } else if (!value && (elem->enable == 1)) { | ||
295 | elem->enable = 0; | ||
296 | num_enabled--; | ||
297 | if (disabled_hash(elem->hash1, true)) | ||
298 | dynamic_printk_enabled &= | ||
299 | ~(1LL << elem->hash1); | ||
300 | if (disabled_hash(elem->hash2, false)) | ||
301 | dynamic_printk_enabled2 &= | ||
302 | ~(1LL << elem->hash2); | ||
303 | if (num_enabled) | ||
304 | dynamic_enabled = | ||
305 | DYNAMIC_ENABLED_SOME; | ||
306 | else | ||
307 | dynamic_enabled = | ||
308 | DYNAMIC_ENABLED_NONE; | ||
309 | err = 0; | ||
310 | printk(KERN_DEBUG | ||
311 | "debugging disabled for module " | ||
312 | "%s", elem->name); | ||
313 | } | ||
314 | } | ||
315 | } | ||
316 | } | ||
317 | if (!err) | ||
318 | err = length; | ||
319 | out_up: | ||
320 | up(&debug_list_mutex); | ||
321 | out: | ||
322 | free_page((unsigned long)buffer); | ||
323 | return err; | ||
324 | } | ||
325 | |||
326 | static void *pr_debug_seq_start(struct seq_file *f, loff_t *pos) | ||
327 | { | ||
328 | return (*pos < DEBUG_HASH_TABLE_SIZE) ? pos : NULL; | ||
329 | } | ||
330 | |||
331 | static void *pr_debug_seq_next(struct seq_file *s, void *v, loff_t *pos) | ||
332 | { | ||
333 | (*pos)++; | ||
334 | if (*pos >= DEBUG_HASH_TABLE_SIZE) | ||
335 | return NULL; | ||
336 | return pos; | ||
337 | } | ||
338 | |||
339 | static void pr_debug_seq_stop(struct seq_file *s, void *v) | ||
340 | { | ||
341 | /* Nothing to do */ | ||
342 | } | ||
343 | |||
344 | static int pr_debug_seq_show(struct seq_file *s, void *v) | ||
345 | { | ||
346 | struct hlist_head *head; | ||
347 | struct hlist_node *node; | ||
348 | struct debug_name *elem; | ||
349 | unsigned int i = *(loff_t *) v; | ||
350 | |||
351 | rcu_read_lock(); | ||
352 | head = &module_table[i]; | ||
353 | hlist_for_each_entry_rcu(elem, node, head, hlist) { | ||
354 | seq_printf(s, "%s enabled=%d", elem->name, elem->enable); | ||
355 | seq_printf(s, "\n"); | ||
356 | } | ||
357 | rcu_read_unlock(); | ||
358 | return 0; | ||
359 | } | ||
360 | |||
361 | static struct seq_operations pr_debug_seq_ops = { | ||
362 | .start = pr_debug_seq_start, | ||
363 | .next = pr_debug_seq_next, | ||
364 | .stop = pr_debug_seq_stop, | ||
365 | .show = pr_debug_seq_show | ||
366 | }; | ||
367 | |||
368 | static int pr_debug_open(struct inode *inode, struct file *filp) | ||
369 | { | ||
370 | return seq_open(filp, &pr_debug_seq_ops); | ||
371 | } | ||
372 | |||
373 | static const struct file_operations pr_debug_operations = { | ||
374 | .open = pr_debug_open, | ||
375 | .read = seq_read, | ||
376 | .write = pr_debug_write, | ||
377 | .llseek = seq_lseek, | ||
378 | .release = seq_release, | ||
379 | }; | ||
380 | |||
381 | static int __init dynamic_printk_init(void) | ||
382 | { | ||
383 | struct dentry *dir, *file; | ||
384 | struct mod_debug *iter; | ||
385 | unsigned long value; | ||
386 | |||
387 | dir = debugfs_create_dir("dynamic_printk", NULL); | ||
388 | if (!dir) | ||
389 | return -ENOMEM; | ||
390 | file = debugfs_create_file("modules", 0644, dir, NULL, | ||
391 | &pr_debug_operations); | ||
392 | if (!file) { | ||
393 | debugfs_remove(dir); | ||
394 | return -ENOMEM; | ||
395 | } | ||
396 | for (value = (unsigned long)__start___verbose; | ||
397 | value < (unsigned long)__stop___verbose; | ||
398 | value += sizeof(struct mod_debug)) { | ||
399 | iter = (struct mod_debug *)value; | ||
400 | register_dynamic_debug_module(iter->modname, | ||
401 | iter->type, | ||
402 | iter->logical_modname, | ||
403 | iter->flag_names, iter->hash, iter->hash2); | ||
404 | } | ||
405 | return 0; | ||
406 | } | ||
407 | module_init(dynamic_printk_init); | ||
408 | /* may want to move this earlier so we can get traces as early as possible */ | ||
409 | |||
410 | static int __init dynamic_printk_setup(char *str) | ||
411 | { | ||
412 | if (str) | ||
413 | return -ENOENT; | ||
414 | set_all(true); | ||
415 | return 0; | ||
416 | } | ||
417 | /* Use early_param(), so we can get debug output as early as possible */ | ||
418 | early_param("dynamic_printk", dynamic_printk_setup); | ||
@@ -6,6 +6,8 @@ | |||
6 | * Modified by George Anzinger to reuse immediately and to use | 6 | * Modified by George Anzinger to reuse immediately and to use |
7 | * find bit instructions. Also removed _irq on spinlocks. | 7 | * find bit instructions. Also removed _irq on spinlocks. |
8 | * | 8 | * |
9 | * Modified by Nadia Derbey to make it RCU safe. | ||
10 | * | ||
9 | * Small id to pointer translation service. | 11 | * Small id to pointer translation service. |
10 | * | 12 | * |
11 | * It uses a radix tree like structure as a sparse array indexed | 13 | * It uses a radix tree like structure as a sparse array indexed |
@@ -35,7 +37,7 @@ | |||
35 | 37 | ||
36 | static struct kmem_cache *idr_layer_cache; | 38 | static struct kmem_cache *idr_layer_cache; |
37 | 39 | ||
38 | static struct idr_layer *alloc_layer(struct idr *idp) | 40 | static struct idr_layer *get_from_free_list(struct idr *idp) |
39 | { | 41 | { |
40 | struct idr_layer *p; | 42 | struct idr_layer *p; |
41 | unsigned long flags; | 43 | unsigned long flags; |
@@ -50,15 +52,28 @@ static struct idr_layer *alloc_layer(struct idr *idp) | |||
50 | return(p); | 52 | return(p); |
51 | } | 53 | } |
52 | 54 | ||
55 | static void idr_layer_rcu_free(struct rcu_head *head) | ||
56 | { | ||
57 | struct idr_layer *layer; | ||
58 | |||
59 | layer = container_of(head, struct idr_layer, rcu_head); | ||
60 | kmem_cache_free(idr_layer_cache, layer); | ||
61 | } | ||
62 | |||
63 | static inline void free_layer(struct idr_layer *p) | ||
64 | { | ||
65 | call_rcu(&p->rcu_head, idr_layer_rcu_free); | ||
66 | } | ||
67 | |||
53 | /* only called when idp->lock is held */ | 68 | /* only called when idp->lock is held */ |
54 | static void __free_layer(struct idr *idp, struct idr_layer *p) | 69 | static void __move_to_free_list(struct idr *idp, struct idr_layer *p) |
55 | { | 70 | { |
56 | p->ary[0] = idp->id_free; | 71 | p->ary[0] = idp->id_free; |
57 | idp->id_free = p; | 72 | idp->id_free = p; |
58 | idp->id_free_cnt++; | 73 | idp->id_free_cnt++; |
59 | } | 74 | } |
60 | 75 | ||
61 | static void free_layer(struct idr *idp, struct idr_layer *p) | 76 | static void move_to_free_list(struct idr *idp, struct idr_layer *p) |
62 | { | 77 | { |
63 | unsigned long flags; | 78 | unsigned long flags; |
64 | 79 | ||
@@ -66,7 +81,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p) | |||
66 | * Depends on the return element being zeroed. | 81 | * Depends on the return element being zeroed. |
67 | */ | 82 | */ |
68 | spin_lock_irqsave(&idp->lock, flags); | 83 | spin_lock_irqsave(&idp->lock, flags); |
69 | __free_layer(idp, p); | 84 | __move_to_free_list(idp, p); |
70 | spin_unlock_irqrestore(&idp->lock, flags); | 85 | spin_unlock_irqrestore(&idp->lock, flags); |
71 | } | 86 | } |
72 | 87 | ||
@@ -96,7 +111,7 @@ static void idr_mark_full(struct idr_layer **pa, int id) | |||
96 | * @gfp_mask: memory allocation flags | 111 | * @gfp_mask: memory allocation flags |
97 | * | 112 | * |
98 | * This function should be called prior to locking and calling the | 113 | * This function should be called prior to locking and calling the |
99 | * following function. It preallocates enough memory to satisfy | 114 | * idr_get_new* functions. It preallocates enough memory to satisfy |
100 | * the worst possible allocation. | 115 | * the worst possible allocation. |
101 | * | 116 | * |
102 | * If the system is REALLY out of memory this function returns 0, | 117 | * If the system is REALLY out of memory this function returns 0, |
@@ -109,7 +124,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask) | |||
109 | new = kmem_cache_alloc(idr_layer_cache, gfp_mask); | 124 | new = kmem_cache_alloc(idr_layer_cache, gfp_mask); |
110 | if (new == NULL) | 125 | if (new == NULL) |
111 | return (0); | 126 | return (0); |
112 | free_layer(idp, new); | 127 | move_to_free_list(idp, new); |
113 | } | 128 | } |
114 | return 1; | 129 | return 1; |
115 | } | 130 | } |
@@ -143,7 +158,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
143 | /* if already at the top layer, we need to grow */ | 158 | /* if already at the top layer, we need to grow */ |
144 | if (!(p = pa[l])) { | 159 | if (!(p = pa[l])) { |
145 | *starting_id = id; | 160 | *starting_id = id; |
146 | return -2; | 161 | return IDR_NEED_TO_GROW; |
147 | } | 162 | } |
148 | 163 | ||
149 | /* If we need to go up one layer, continue the | 164 | /* If we need to go up one layer, continue the |
@@ -160,16 +175,17 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
160 | id = ((id >> sh) ^ n ^ m) << sh; | 175 | id = ((id >> sh) ^ n ^ m) << sh; |
161 | } | 176 | } |
162 | if ((id >= MAX_ID_BIT) || (id < 0)) | 177 | if ((id >= MAX_ID_BIT) || (id < 0)) |
163 | return -3; | 178 | return IDR_NOMORE_SPACE; |
164 | if (l == 0) | 179 | if (l == 0) |
165 | break; | 180 | break; |
166 | /* | 181 | /* |
167 | * Create the layer below if it is missing. | 182 | * Create the layer below if it is missing. |
168 | */ | 183 | */ |
169 | if (!p->ary[m]) { | 184 | if (!p->ary[m]) { |
170 | if (!(new = alloc_layer(idp))) | 185 | new = get_from_free_list(idp); |
186 | if (!new) | ||
171 | return -1; | 187 | return -1; |
172 | p->ary[m] = new; | 188 | rcu_assign_pointer(p->ary[m], new); |
173 | p->count++; | 189 | p->count++; |
174 | } | 190 | } |
175 | pa[l--] = p; | 191 | pa[l--] = p; |
@@ -192,7 +208,7 @@ build_up: | |||
192 | p = idp->top; | 208 | p = idp->top; |
193 | layers = idp->layers; | 209 | layers = idp->layers; |
194 | if (unlikely(!p)) { | 210 | if (unlikely(!p)) { |
195 | if (!(p = alloc_layer(idp))) | 211 | if (!(p = get_from_free_list(idp))) |
196 | return -1; | 212 | return -1; |
197 | layers = 1; | 213 | layers = 1; |
198 | } | 214 | } |
@@ -204,7 +220,7 @@ build_up: | |||
204 | layers++; | 220 | layers++; |
205 | if (!p->count) | 221 | if (!p->count) |
206 | continue; | 222 | continue; |
207 | if (!(new = alloc_layer(idp))) { | 223 | if (!(new = get_from_free_list(idp))) { |
208 | /* | 224 | /* |
209 | * The allocation failed. If we built part of | 225 | * The allocation failed. If we built part of |
210 | * the structure tear it down. | 226 | * the structure tear it down. |
@@ -214,7 +230,7 @@ build_up: | |||
214 | p = p->ary[0]; | 230 | p = p->ary[0]; |
215 | new->ary[0] = NULL; | 231 | new->ary[0] = NULL; |
216 | new->bitmap = new->count = 0; | 232 | new->bitmap = new->count = 0; |
217 | __free_layer(idp, new); | 233 | __move_to_free_list(idp, new); |
218 | } | 234 | } |
219 | spin_unlock_irqrestore(&idp->lock, flags); | 235 | spin_unlock_irqrestore(&idp->lock, flags); |
220 | return -1; | 236 | return -1; |
@@ -225,10 +241,10 @@ build_up: | |||
225 | __set_bit(0, &new->bitmap); | 241 | __set_bit(0, &new->bitmap); |
226 | p = new; | 242 | p = new; |
227 | } | 243 | } |
228 | idp->top = p; | 244 | rcu_assign_pointer(idp->top, p); |
229 | idp->layers = layers; | 245 | idp->layers = layers; |
230 | v = sub_alloc(idp, &id, pa); | 246 | v = sub_alloc(idp, &id, pa); |
231 | if (v == -2) | 247 | if (v == IDR_NEED_TO_GROW) |
232 | goto build_up; | 248 | goto build_up; |
233 | return(v); | 249 | return(v); |
234 | } | 250 | } |
@@ -244,7 +260,8 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | |||
244 | * Successfully found an empty slot. Install the user | 260 | * Successfully found an empty slot. Install the user |
245 | * pointer and mark the slot full. | 261 | * pointer and mark the slot full. |
246 | */ | 262 | */ |
247 | pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr; | 263 | rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], |
264 | (struct idr_layer *)ptr); | ||
248 | pa[0]->count++; | 265 | pa[0]->count++; |
249 | idr_mark_full(pa, id); | 266 | idr_mark_full(pa, id); |
250 | } | 267 | } |
@@ -277,12 +294,8 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) | |||
277 | * This is a cheap hack until the IDR code can be fixed to | 294 | * This is a cheap hack until the IDR code can be fixed to |
278 | * return proper error values. | 295 | * return proper error values. |
279 | */ | 296 | */ |
280 | if (rv < 0) { | 297 | if (rv < 0) |
281 | if (rv == -1) | 298 | return _idr_rc_to_errno(rv); |
282 | return -EAGAIN; | ||
283 | else /* Will be -3 */ | ||
284 | return -ENOSPC; | ||
285 | } | ||
286 | *id = rv; | 299 | *id = rv; |
287 | return 0; | 300 | return 0; |
288 | } | 301 | } |
@@ -312,12 +325,8 @@ int idr_get_new(struct idr *idp, void *ptr, int *id) | |||
312 | * This is a cheap hack until the IDR code can be fixed to | 325 | * This is a cheap hack until the IDR code can be fixed to |
313 | * return proper error values. | 326 | * return proper error values. |
314 | */ | 327 | */ |
315 | if (rv < 0) { | 328 | if (rv < 0) |
316 | if (rv == -1) | 329 | return _idr_rc_to_errno(rv); |
317 | return -EAGAIN; | ||
318 | else /* Will be -3 */ | ||
319 | return -ENOSPC; | ||
320 | } | ||
321 | *id = rv; | 330 | *id = rv; |
322 | return 0; | 331 | return 0; |
323 | } | 332 | } |
@@ -325,7 +334,8 @@ EXPORT_SYMBOL(idr_get_new); | |||
325 | 334 | ||
326 | static void idr_remove_warning(int id) | 335 | static void idr_remove_warning(int id) |
327 | { | 336 | { |
328 | printk("idr_remove called for id=%d which is not allocated.\n", id); | 337 | printk(KERN_WARNING |
338 | "idr_remove called for id=%d which is not allocated.\n", id); | ||
329 | dump_stack(); | 339 | dump_stack(); |
330 | } | 340 | } |
331 | 341 | ||
@@ -334,6 +344,7 @@ static void sub_remove(struct idr *idp, int shift, int id) | |||
334 | struct idr_layer *p = idp->top; | 344 | struct idr_layer *p = idp->top; |
335 | struct idr_layer **pa[MAX_LEVEL]; | 345 | struct idr_layer **pa[MAX_LEVEL]; |
336 | struct idr_layer ***paa = &pa[0]; | 346 | struct idr_layer ***paa = &pa[0]; |
347 | struct idr_layer *to_free; | ||
337 | int n; | 348 | int n; |
338 | 349 | ||
339 | *paa = NULL; | 350 | *paa = NULL; |
@@ -349,13 +360,18 @@ static void sub_remove(struct idr *idp, int shift, int id) | |||
349 | n = id & IDR_MASK; | 360 | n = id & IDR_MASK; |
350 | if (likely(p != NULL && test_bit(n, &p->bitmap))){ | 361 | if (likely(p != NULL && test_bit(n, &p->bitmap))){ |
351 | __clear_bit(n, &p->bitmap); | 362 | __clear_bit(n, &p->bitmap); |
352 | p->ary[n] = NULL; | 363 | rcu_assign_pointer(p->ary[n], NULL); |
364 | to_free = NULL; | ||
353 | while(*paa && ! --((**paa)->count)){ | 365 | while(*paa && ! --((**paa)->count)){ |
354 | free_layer(idp, **paa); | 366 | if (to_free) |
367 | free_layer(to_free); | ||
368 | to_free = **paa; | ||
355 | **paa-- = NULL; | 369 | **paa-- = NULL; |
356 | } | 370 | } |
357 | if (!*paa) | 371 | if (!*paa) |
358 | idp->layers = 0; | 372 | idp->layers = 0; |
373 | if (to_free) | ||
374 | free_layer(to_free); | ||
359 | } else | 375 | } else |
360 | idr_remove_warning(id); | 376 | idr_remove_warning(id); |
361 | } | 377 | } |
@@ -368,22 +384,34 @@ static void sub_remove(struct idr *idp, int shift, int id) | |||
368 | void idr_remove(struct idr *idp, int id) | 384 | void idr_remove(struct idr *idp, int id) |
369 | { | 385 | { |
370 | struct idr_layer *p; | 386 | struct idr_layer *p; |
387 | struct idr_layer *to_free; | ||
371 | 388 | ||
372 | /* Mask off upper bits we don't use for the search. */ | 389 | /* Mask off upper bits we don't use for the search. */ |
373 | id &= MAX_ID_MASK; | 390 | id &= MAX_ID_MASK; |
374 | 391 | ||
375 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); | 392 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); |
376 | if (idp->top && idp->top->count == 1 && (idp->layers > 1) && | 393 | if (idp->top && idp->top->count == 1 && (idp->layers > 1) && |
377 | idp->top->ary[0]) { // We can drop a layer | 394 | idp->top->ary[0]) { |
378 | 395 | /* | |
396 | * Single child at leftmost slot: we can shrink the tree. | ||
397 | * This level is not needed anymore since when layers are | ||
398 | * inserted, they are inserted at the top of the existing | ||
399 | * tree. | ||
400 | */ | ||
401 | to_free = idp->top; | ||
379 | p = idp->top->ary[0]; | 402 | p = idp->top->ary[0]; |
380 | idp->top->bitmap = idp->top->count = 0; | 403 | rcu_assign_pointer(idp->top, p); |
381 | free_layer(idp, idp->top); | ||
382 | idp->top = p; | ||
383 | --idp->layers; | 404 | --idp->layers; |
405 | to_free->bitmap = to_free->count = 0; | ||
406 | free_layer(to_free); | ||
384 | } | 407 | } |
385 | while (idp->id_free_cnt >= IDR_FREE_MAX) { | 408 | while (idp->id_free_cnt >= IDR_FREE_MAX) { |
386 | p = alloc_layer(idp); | 409 | p = get_from_free_list(idp); |
410 | /* | ||
411 | * Note: we don't call the rcu callback here, since the only | ||
412 | * layers that fall into the freelist are those that have been | ||
413 | * preallocated. | ||
414 | */ | ||
387 | kmem_cache_free(idr_layer_cache, p); | 415 | kmem_cache_free(idr_layer_cache, p); |
388 | } | 416 | } |
389 | return; | 417 | return; |
@@ -424,15 +452,13 @@ void idr_remove_all(struct idr *idp) | |||
424 | 452 | ||
425 | id += 1 << n; | 453 | id += 1 << n; |
426 | while (n < fls(id)) { | 454 | while (n < fls(id)) { |
427 | if (p) { | 455 | if (p) |
428 | memset(p, 0, sizeof *p); | 456 | free_layer(p); |
429 | free_layer(idp, p); | ||
430 | } | ||
431 | n += IDR_BITS; | 457 | n += IDR_BITS; |
432 | p = *--paa; | 458 | p = *--paa; |
433 | } | 459 | } |
434 | } | 460 | } |
435 | idp->top = NULL; | 461 | rcu_assign_pointer(idp->top, NULL); |
436 | idp->layers = 0; | 462 | idp->layers = 0; |
437 | } | 463 | } |
438 | EXPORT_SYMBOL(idr_remove_all); | 464 | EXPORT_SYMBOL(idr_remove_all); |
@@ -444,7 +470,7 @@ EXPORT_SYMBOL(idr_remove_all); | |||
444 | void idr_destroy(struct idr *idp) | 470 | void idr_destroy(struct idr *idp) |
445 | { | 471 | { |
446 | while (idp->id_free_cnt) { | 472 | while (idp->id_free_cnt) { |
447 | struct idr_layer *p = alloc_layer(idp); | 473 | struct idr_layer *p = get_from_free_list(idp); |
448 | kmem_cache_free(idr_layer_cache, p); | 474 | kmem_cache_free(idr_layer_cache, p); |
449 | } | 475 | } |
450 | } | 476 | } |
@@ -459,7 +485,8 @@ EXPORT_SYMBOL(idr_destroy); | |||
459 | * return indicates that @id is not valid or you passed %NULL in | 485 | * return indicates that @id is not valid or you passed %NULL in |
460 | * idr_get_new(). | 486 | * idr_get_new(). |
461 | * | 487 | * |
462 | * The caller must serialize idr_find() vs idr_get_new() and idr_remove(). | 488 | * This function can be called under rcu_read_lock(), given that the leaf |
489 | * pointers' lifetimes are correctly managed. | ||
463 | */ | 490 | */ |
464 | void *idr_find(struct idr *idp, int id) | 491 | void *idr_find(struct idr *idp, int id) |
465 | { | 492 | { |
@@ -467,7 +494,7 @@ void *idr_find(struct idr *idp, int id) | |||
467 | struct idr_layer *p; | 494 | struct idr_layer *p; |
468 | 495 | ||
469 | n = idp->layers * IDR_BITS; | 496 | n = idp->layers * IDR_BITS; |
470 | p = idp->top; | 497 | p = rcu_dereference(idp->top); |
471 | 498 | ||
472 | /* Mask off upper bits we don't use for the search. */ | 499 | /* Mask off upper bits we don't use for the search. */ |
473 | id &= MAX_ID_MASK; | 500 | id &= MAX_ID_MASK; |
@@ -477,7 +504,7 @@ void *idr_find(struct idr *idp, int id) | |||
477 | 504 | ||
478 | while (n > 0 && p) { | 505 | while (n > 0 && p) { |
479 | n -= IDR_BITS; | 506 | n -= IDR_BITS; |
480 | p = p->ary[(id >> n) & IDR_MASK]; | 507 | p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); |
481 | } | 508 | } |
482 | return((void *)p); | 509 | return((void *)p); |
483 | } | 510 | } |
@@ -510,7 +537,7 @@ int idr_for_each(struct idr *idp, | |||
510 | struct idr_layer **paa = &pa[0]; | 537 | struct idr_layer **paa = &pa[0]; |
511 | 538 | ||
512 | n = idp->layers * IDR_BITS; | 539 | n = idp->layers * IDR_BITS; |
513 | p = idp->top; | 540 | p = rcu_dereference(idp->top); |
514 | max = 1 << n; | 541 | max = 1 << n; |
515 | 542 | ||
516 | id = 0; | 543 | id = 0; |
@@ -518,7 +545,7 @@ int idr_for_each(struct idr *idp, | |||
518 | while (n > 0 && p) { | 545 | while (n > 0 && p) { |
519 | n -= IDR_BITS; | 546 | n -= IDR_BITS; |
520 | *paa++ = p; | 547 | *paa++ = p; |
521 | p = p->ary[(id >> n) & IDR_MASK]; | 548 | p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); |
522 | } | 549 | } |
523 | 550 | ||
524 | if (p) { | 551 | if (p) { |
@@ -548,7 +575,7 @@ EXPORT_SYMBOL(idr_for_each); | |||
548 | * A -ENOENT return indicates that @id was not found. | 575 | * A -ENOENT return indicates that @id was not found. |
549 | * A -EINVAL return indicates that @id was not within valid constraints. | 576 | * A -EINVAL return indicates that @id was not within valid constraints. |
550 | * | 577 | * |
551 | * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove(). | 578 | * The caller must serialize with writers. |
552 | */ | 579 | */ |
553 | void *idr_replace(struct idr *idp, void *ptr, int id) | 580 | void *idr_replace(struct idr *idp, void *ptr, int id) |
554 | { | 581 | { |
@@ -574,13 +601,13 @@ void *idr_replace(struct idr *idp, void *ptr, int id) | |||
574 | return ERR_PTR(-ENOENT); | 601 | return ERR_PTR(-ENOENT); |
575 | 602 | ||
576 | old_p = p->ary[n]; | 603 | old_p = p->ary[n]; |
577 | p->ary[n] = ptr; | 604 | rcu_assign_pointer(p->ary[n], ptr); |
578 | 605 | ||
579 | return old_p; | 606 | return old_p; |
580 | } | 607 | } |
581 | EXPORT_SYMBOL(idr_replace); | 608 | EXPORT_SYMBOL(idr_replace); |
582 | 609 | ||
583 | static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer) | 610 | static void idr_cache_ctor(void *idr_layer) |
584 | { | 611 | { |
585 | memset(idr_layer, 0, sizeof(struct idr_layer)); | 612 | memset(idr_layer, 0, sizeof(struct idr_layer)); |
586 | } | 613 | } |
@@ -694,12 +721,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |||
694 | restart: | 721 | restart: |
695 | /* get vacant slot */ | 722 | /* get vacant slot */ |
696 | t = idr_get_empty_slot(&ida->idr, idr_id, pa); | 723 | t = idr_get_empty_slot(&ida->idr, idr_id, pa); |
697 | if (t < 0) { | 724 | if (t < 0) |
698 | if (t == -1) | 725 | return _idr_rc_to_errno(t); |
699 | return -EAGAIN; | ||
700 | else /* will be -3 */ | ||
701 | return -ENOSPC; | ||
702 | } | ||
703 | 726 | ||
704 | if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) | 727 | if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) |
705 | return -ENOSPC; | 728 | return -ENOSPC; |
@@ -720,7 +743,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |||
720 | return -EAGAIN; | 743 | return -EAGAIN; |
721 | 744 | ||
722 | memset(bitmap, 0, sizeof(struct ida_bitmap)); | 745 | memset(bitmap, 0, sizeof(struct ida_bitmap)); |
723 | pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap; | 746 | rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK], |
747 | (void *)bitmap); | ||
724 | pa[0]->count++; | 748 | pa[0]->count++; |
725 | } | 749 | } |
726 | 750 | ||
@@ -749,7 +773,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |||
749 | * allocation. | 773 | * allocation. |
750 | */ | 774 | */ |
751 | if (ida->idr.id_free_cnt || ida->free_bitmap) { | 775 | if (ida->idr.id_free_cnt || ida->free_bitmap) { |
752 | struct idr_layer *p = alloc_layer(&ida->idr); | 776 | struct idr_layer *p = get_from_free_list(&ida->idr); |
753 | if (p) | 777 | if (p) |
754 | kmem_cache_free(idr_layer_cache, p); | 778 | kmem_cache_free(idr_layer_cache, p); |
755 | } | 779 | } |
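
The idr changes above replace plain stores with rcu_assign_pointer() and defer layer freeing so lookups can run locklessly. A minimal caller-side sketch of the pattern this enables (my_objects, struct my_obj and my_obj_get() are hypothetical; only the locking shape matters):

	/* Lockless lookup: safe because idr_find() now follows only
	 * rcu_dereference()d pointers and freed layers are RCU-deferred. */
	struct my_obj *my_obj_lookup(struct idr *my_objects, int id)
	{
		struct my_obj *obj;

		rcu_read_lock();
		obj = idr_find(my_objects, id);
		if (obj && !my_obj_get(obj))	/* hypothetical refcount helper */
			obj = NULL;		/* raced with removal */
		rcu_read_unlock();

		return obj;
	}
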
diff --git a/lib/inflate.c b/lib/inflate.c index 9762294be062..1a8e8a978128 100644 --- a/lib/inflate.c +++ b/lib/inflate.c | |||
@@ -230,6 +230,45 @@ STATIC const ush mask_bits[] = { | |||
230 | #define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}} | 230 | #define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}} |
231 | #define DUMPBITS(n) {b>>=(n);k-=(n);} | 231 | #define DUMPBITS(n) {b>>=(n);k-=(n);} |
232 | 232 | ||
233 | #ifndef NO_INFLATE_MALLOC | ||
234 | /* A trivial malloc implementation, adapted from | ||
235 | * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 | ||
236 | */ | ||
237 | |||
238 | static unsigned long malloc_ptr; | ||
239 | static int malloc_count; | ||
240 | |||
241 | static void *malloc(int size) | ||
242 | { | ||
243 | void *p; | ||
244 | |||
245 | if (size < 0) | ||
246 | error("Malloc error"); | ||
247 | if (!malloc_ptr) | ||
248 | malloc_ptr = free_mem_ptr; | ||
249 | |||
250 | malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */ | ||
251 | |||
252 | p = (void *)malloc_ptr; | ||
253 | malloc_ptr += size; | ||
254 | |||
255 | if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) | ||
256 | error("Out of memory"); | ||
257 | |||
258 | malloc_count++; | ||
259 | return p; | ||
260 | } | ||
261 | |||
262 | static void free(void *where) | ||
263 | { | ||
264 | malloc_count--; | ||
265 | if (!malloc_count) | ||
266 | malloc_ptr = free_mem_ptr; | ||
267 | } | ||
268 | #else | ||
269 | #define malloc(a) kmalloc(a, GFP_KERNEL) | ||
270 | #define free(a) kfree(a) | ||
271 | #endif | ||
233 | 272 | ||
234 | /* | 273 | /* |
235 | Huffman code decoding is performed using a multi-level table lookup. | 274 | Huffman code decoding is performed using a multi-level table lookup. |
@@ -1045,7 +1084,6 @@ STATIC int INIT inflate(void) | |||
1045 | int e; /* last block flag */ | 1084 | int e; /* last block flag */ |
1046 | int r; /* result code */ | 1085 | int r; /* result code */ |
1047 | unsigned h; /* maximum struct huft's malloc'ed */ | 1086 | unsigned h; /* maximum struct huft's malloc'ed */ |
1048 | void *ptr; | ||
1049 | 1087 | ||
1050 | /* initialize window, bit buffer */ | 1088 | /* initialize window, bit buffer */ |
1051 | wp = 0; | 1089 | wp = 0; |
@@ -1057,12 +1095,12 @@ STATIC int INIT inflate(void) | |||
1057 | h = 0; | 1095 | h = 0; |
1058 | do { | 1096 | do { |
1059 | hufts = 0; | 1097 | hufts = 0; |
1060 | gzip_mark(&ptr); | 1098 | #ifdef ARCH_HAS_DECOMP_WDOG |
1061 | if ((r = inflate_block(&e)) != 0) { | 1099 | arch_decomp_wdog(); |
1062 | gzip_release(&ptr); | 1100 | #endif |
1063 | return r; | 1101 | r = inflate_block(&e); |
1064 | } | 1102 | if (r) |
1065 | gzip_release(&ptr); | 1103 | return r; |
1066 | if (hufts > h) | 1104 | if (hufts > h) |
1067 | h = hufts; | 1105 | h = hufts; |
1068 | } while (!e); | 1106 | } while (!e); |
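
The bump allocator added above only ever moves malloc_ptr forward and resets it once the last allocation is freed, which is all the inflate code needs. A standalone userspace sketch of the same arithmetic (heap_base stands in for free_mem_ptr; the bounds check is elided):

	#include <assert.h>
	#include <stdio.h>

	static unsigned long heap_base = 0x1000;	/* plays free_mem_ptr */
	static unsigned long heap_ptr;

	static void *bump_alloc(int size)
	{
		void *p;

		if (!heap_ptr)
			heap_ptr = heap_base;
		heap_ptr = (heap_ptr + 3) & ~3UL;	/* 4-byte align, as above */
		p = (void *)heap_ptr;
		heap_ptr += size;
		return p;
	}

	int main(void)
	{
		void *a = bump_alloc(5);	/* 0x1000, ptr moves to 0x1005 */
		void *b = bump_alloc(8);	/* aligned up to 0x1008 */

		assert(((unsigned long)b & 3) == 0);
		printf("a=%p b=%p\n", a, b);
		return 0;
	}
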
diff --git a/lib/iomap.c b/lib/iomap.c index 37a3ea4cac9f..d32229385151 100644 --- a/lib/iomap.c +++ b/lib/iomap.c | |||
@@ -40,8 +40,7 @@ static void bad_io_access(unsigned long port, const char *access) | |||
40 | static int count = 10; | 40 | static int count = 10; |
41 | if (count) { | 41 | if (count) { |
42 | count--; | 42 | count--; |
43 | printk(KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access); | 43 | WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access); |
44 | WARN_ON(1); | ||
45 | } | 44 | } |
46 | } | 45 | } |
47 | 46 | ||
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c index a3b8d4c3f77a..75dbda03f4fb 100644 --- a/lib/iommu-helper.c +++ b/lib/iommu-helper.c | |||
@@ -30,8 +30,7 @@ again: | |||
30 | return index; | 30 | return index; |
31 | } | 31 | } |
32 | 32 | ||
33 | static inline void set_bit_area(unsigned long *map, unsigned long i, | 33 | void iommu_area_reserve(unsigned long *map, unsigned long i, int len) |
34 | int len) | ||
35 | { | 34 | { |
36 | unsigned long end = i + len; | 35 | unsigned long end = i + len; |
37 | while (i < end) { | 36 | while (i < end) { |
@@ -64,7 +63,7 @@ again: | |||
64 | start = index + 1; | 63 | start = index + 1; |
65 | goto again; | 64 | goto again; |
66 | } | 65 | } |
67 | set_bit_area(map, index, nr); | 66 | iommu_area_reserve(map, index, nr); |
68 | } | 67 | } |
69 | return index; | 68 | return index; |
70 | } | 69 | } |
@@ -80,3 +79,12 @@ void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr) | |||
80 | } | 79 | } |
81 | } | 80 | } |
82 | EXPORT_SYMBOL(iommu_area_free); | 81 | EXPORT_SYMBOL(iommu_area_free); |
82 | |||
83 | unsigned long iommu_num_pages(unsigned long addr, unsigned long len, | ||
84 | unsigned long io_page_size) | ||
85 | { | ||
86 | unsigned long size = (addr & (io_page_size - 1)) + len; | ||
87 | |||
88 | return DIV_ROUND_UP(size, io_page_size); | ||
89 | } | ||
90 | EXPORT_SYMBOL(iommu_num_pages); | ||
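
iommu_num_pages() counts how many IO pages a buffer touches: the offset of @addr within its first page is added to @len before rounding up, so a short buffer straddling a page boundary still counts both pages. A userspace check of the arithmetic:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	static unsigned long num_pages(unsigned long addr, unsigned long len,
				       unsigned long io_page_size)
	{
		unsigned long size = (addr & (io_page_size - 1)) + len;

		return DIV_ROUND_UP(size, io_page_size);
	}

	int main(void)
	{
		/* 0x100 bytes at 0xff0 straddle one 4K boundary: 2 pages */
		printf("%lu\n", num_pages(0xff0, 0x100, 0x1000));
		/* one aligned 4K buffer: 1 page */
		printf("%lu\n", num_pages(0x2000, 0x1000, 0x1000));
		return 0;
	}
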
diff --git a/lib/klist.c b/lib/klist.c index cca37f96faa2..bbdd3015c2c7 100644 --- a/lib/klist.c +++ b/lib/klist.c | |||
@@ -37,6 +37,37 @@ | |||
37 | #include <linux/klist.h> | 37 | #include <linux/klist.h> |
38 | #include <linux/module.h> | 38 | #include <linux/module.h> |
39 | 39 | ||
40 | /* | ||
41 | * Use the lowest bit of n_klist to mark deleted nodes and exclude | ||
42 | * dead ones from iteration. | ||
43 | */ | ||
44 | #define KNODE_DEAD 1LU | ||
45 | #define KNODE_KLIST_MASK ~KNODE_DEAD | ||
46 | |||
47 | static struct klist *knode_klist(struct klist_node *knode) | ||
48 | { | ||
49 | return (struct klist *) | ||
50 | ((unsigned long)knode->n_klist & KNODE_KLIST_MASK); | ||
51 | } | ||
52 | |||
53 | static bool knode_dead(struct klist_node *knode) | ||
54 | { | ||
55 | return (unsigned long)knode->n_klist & KNODE_DEAD; | ||
56 | } | ||
57 | |||
58 | static void knode_set_klist(struct klist_node *knode, struct klist *klist) | ||
59 | { | ||
60 | knode->n_klist = klist; | ||
61 | /* no knode deserves to start its life dead */ | ||
62 | WARN_ON(knode_dead(knode)); | ||
63 | } | ||
64 | |||
65 | static void knode_kill(struct klist_node *knode) | ||
66 | { | ||
67 | /* and no knode should die twice ever either, see we're very humane */ | ||
68 | WARN_ON(knode_dead(knode)); | ||
69 | *(unsigned long *)&knode->n_klist |= KNODE_DEAD; | ||
70 | } | ||
40 | 71 | ||
41 | /** | 72 | /** |
42 | * klist_init - Initialize a klist structure. | 73 | * klist_init - Initialize a klist structure. |
@@ -79,7 +110,7 @@ static void klist_node_init(struct klist *k, struct klist_node *n) | |||
79 | INIT_LIST_HEAD(&n->n_node); | 110 | INIT_LIST_HEAD(&n->n_node); |
80 | init_completion(&n->n_removed); | 111 | init_completion(&n->n_removed); |
81 | kref_init(&n->n_ref); | 112 | kref_init(&n->n_ref); |
82 | n->n_klist = k; | 113 | knode_set_klist(n, k); |
83 | if (k->get) | 114 | if (k->get) |
84 | k->get(n); | 115 | k->get(n); |
85 | } | 116 | } |
@@ -115,7 +146,7 @@ EXPORT_SYMBOL_GPL(klist_add_tail); | |||
115 | */ | 146 | */ |
116 | void klist_add_after(struct klist_node *n, struct klist_node *pos) | 147 | void klist_add_after(struct klist_node *n, struct klist_node *pos) |
117 | { | 148 | { |
118 | struct klist *k = pos->n_klist; | 149 | struct klist *k = knode_klist(pos); |
119 | 150 | ||
120 | klist_node_init(k, n); | 151 | klist_node_init(k, n); |
121 | spin_lock(&k->k_lock); | 152 | spin_lock(&k->k_lock); |
@@ -131,7 +162,7 @@ EXPORT_SYMBOL_GPL(klist_add_after); | |||
131 | */ | 162 | */ |
132 | void klist_add_before(struct klist_node *n, struct klist_node *pos) | 163 | void klist_add_before(struct klist_node *n, struct klist_node *pos) |
133 | { | 164 | { |
134 | struct klist *k = pos->n_klist; | 165 | struct klist *k = knode_klist(pos); |
135 | 166 | ||
136 | klist_node_init(k, n); | 167 | klist_node_init(k, n); |
137 | spin_lock(&k->k_lock); | 168 | spin_lock(&k->k_lock); |
@@ -144,9 +175,10 @@ static void klist_release(struct kref *kref) | |||
144 | { | 175 | { |
145 | struct klist_node *n = container_of(kref, struct klist_node, n_ref); | 176 | struct klist_node *n = container_of(kref, struct klist_node, n_ref); |
146 | 177 | ||
178 | WARN_ON(!knode_dead(n)); | ||
147 | list_del(&n->n_node); | 179 | list_del(&n->n_node); |
148 | complete(&n->n_removed); | 180 | complete(&n->n_removed); |
149 | n->n_klist = NULL; | 181 | knode_set_klist(n, NULL); |
150 | } | 182 | } |
151 | 183 | ||
152 | static int klist_dec_and_del(struct klist_node *n) | 184 | static int klist_dec_and_del(struct klist_node *n) |
@@ -154,22 +186,29 @@ static int klist_dec_and_del(struct klist_node *n) | |||
154 | return kref_put(&n->n_ref, klist_release); | 186 | return kref_put(&n->n_ref, klist_release); |
155 | } | 187 | } |
156 | 188 | ||
157 | /** | 189 | static void klist_put(struct klist_node *n, bool kill) |
158 | * klist_del - Decrement the reference count of node and try to remove. | ||
159 | * @n: node we're deleting. | ||
160 | */ | ||
161 | void klist_del(struct klist_node *n) | ||
162 | { | 190 | { |
163 | struct klist *k = n->n_klist; | 191 | struct klist *k = knode_klist(n); |
164 | void (*put)(struct klist_node *) = k->put; | 192 | void (*put)(struct klist_node *) = k->put; |
165 | 193 | ||
166 | spin_lock(&k->k_lock); | 194 | spin_lock(&k->k_lock); |
195 | if (kill) | ||
196 | knode_kill(n); | ||
167 | if (!klist_dec_and_del(n)) | 197 | if (!klist_dec_and_del(n)) |
168 | put = NULL; | 198 | put = NULL; |
169 | spin_unlock(&k->k_lock); | 199 | spin_unlock(&k->k_lock); |
170 | if (put) | 200 | if (put) |
171 | put(n); | 201 | put(n); |
172 | } | 202 | } |
203 | |||
204 | /** | ||
205 | * klist_del - Decrement the reference count of node and try to remove. | ||
206 | * @n: node we're deleting. | ||
207 | */ | ||
208 | void klist_del(struct klist_node *n) | ||
209 | { | ||
210 | klist_put(n, true); | ||
211 | } | ||
173 | EXPORT_SYMBOL_GPL(klist_del); | 212 | EXPORT_SYMBOL_GPL(klist_del); |
174 | 213 | ||
175 | /** | 214 | /** |
@@ -206,7 +245,6 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i, | |||
206 | struct klist_node *n) | 245 | struct klist_node *n) |
207 | { | 246 | { |
208 | i->i_klist = k; | 247 | i->i_klist = k; |
209 | i->i_head = &k->k_list; | ||
210 | i->i_cur = n; | 248 | i->i_cur = n; |
211 | if (n) | 249 | if (n) |
212 | kref_get(&n->n_ref); | 250 | kref_get(&n->n_ref); |
@@ -237,7 +275,7 @@ EXPORT_SYMBOL_GPL(klist_iter_init); | |||
237 | void klist_iter_exit(struct klist_iter *i) | 275 | void klist_iter_exit(struct klist_iter *i) |
238 | { | 276 | { |
239 | if (i->i_cur) { | 277 | if (i->i_cur) { |
240 | klist_del(i->i_cur); | 278 | klist_put(i->i_cur, false); |
241 | i->i_cur = NULL; | 279 | i->i_cur = NULL; |
242 | } | 280 | } |
243 | } | 281 | } |
@@ -258,27 +296,33 @@ static struct klist_node *to_klist_node(struct list_head *n) | |||
258 | */ | 296 | */ |
259 | struct klist_node *klist_next(struct klist_iter *i) | 297 | struct klist_node *klist_next(struct klist_iter *i) |
260 | { | 298 | { |
261 | struct list_head *next; | ||
262 | struct klist_node *lnode = i->i_cur; | ||
263 | struct klist_node *knode = NULL; | ||
264 | void (*put)(struct klist_node *) = i->i_klist->put; | 299 | void (*put)(struct klist_node *) = i->i_klist->put; |
300 | struct klist_node *last = i->i_cur; | ||
301 | struct klist_node *next; | ||
265 | 302 | ||
266 | spin_lock(&i->i_klist->k_lock); | 303 | spin_lock(&i->i_klist->k_lock); |
267 | if (lnode) { | 304 | |
268 | next = lnode->n_node.next; | 305 | if (last) { |
269 | if (!klist_dec_and_del(lnode)) | 306 | next = to_klist_node(last->n_node.next); |
307 | if (!klist_dec_and_del(last)) | ||
270 | put = NULL; | 308 | put = NULL; |
271 | } else | 309 | } else |
272 | next = i->i_head->next; | 310 | next = to_klist_node(i->i_klist->k_list.next); |
273 | 311 | ||
274 | if (next != i->i_head) { | 312 | i->i_cur = NULL; |
275 | knode = to_klist_node(next); | 313 | while (next != to_klist_node(&i->i_klist->k_list)) { |
276 | kref_get(&knode->n_ref); | 314 | if (likely(!knode_dead(next))) { |
315 | kref_get(&next->n_ref); | ||
316 | i->i_cur = next; | ||
317 | break; | ||
318 | } | ||
319 | next = to_klist_node(next->n_node.next); | ||
277 | } | 320 | } |
278 | i->i_cur = knode; | 321 | |
279 | spin_unlock(&i->i_klist->k_lock); | 322 | spin_unlock(&i->i_klist->k_lock); |
280 | if (put && lnode) | 323 | |
281 | put(lnode); | 324 | if (put && last) |
282 | return knode; | 325 | put(last); |
326 | return i->i_cur; | ||
283 | } | 327 | } |
284 | EXPORT_SYMBOL_GPL(klist_next); | 328 | EXPORT_SYMBOL_GPL(klist_next); |
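
The KNODE_DEAD trick above relies on struct klist being at least word-aligned, so bit 0 of any valid n_klist pointer is free to serve as a flag. A standalone userspace sketch of the tagging and untagging:

	#include <assert.h>
	#include <stdio.h>

	#define KNODE_DEAD	1UL

	struct klist_stub { unsigned long pad; };	/* word-aligned, like klist */

	int main(void)
	{
		struct klist_stub k;
		unsigned long n_klist = (unsigned long)&k;

		assert((n_klist & KNODE_DEAD) == 0);	/* alignment guarantees this */

		n_klist |= KNODE_DEAD;			/* what knode_kill() does */
		printf("dead=%lu\n", n_klist & KNODE_DEAD);

		/* knode_klist(): mask the flag off to recover the pointer */
		assert((struct klist_stub *)(n_klist & ~KNODE_DEAD) == &k);
		return 0;
	}
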
diff --git a/lib/kobject.c b/lib/kobject.c index dcade0543bd2..0487d1f64806 100644 --- a/lib/kobject.c +++ b/lib/kobject.c | |||
@@ -164,9 +164,8 @@ static int kobject_add_internal(struct kobject *kobj) | |||
164 | return -ENOENT; | 164 | return -ENOENT; |
165 | 165 | ||
166 | if (!kobj->name || !kobj->name[0]) { | 166 | if (!kobj->name || !kobj->name[0]) { |
167 | pr_debug("kobject: (%p): attempted to be registered with empty " | 167 | WARN(1, "kobject: (%p): attempted to be registered with empty " |
168 | "name!\n", kobj); | 168 | "name!\n", kobj); |
169 | WARN_ON(1); | ||
170 | return -EINVAL; | 169 | return -EINVAL; |
171 | } | 170 | } |
172 | 171 | ||
@@ -216,13 +215,18 @@ static int kobject_add_internal(struct kobject *kobj) | |||
216 | static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, | 215 | static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, |
217 | va_list vargs) | 216 | va_list vargs) |
218 | { | 217 | { |
219 | /* Free the old name, if necessary. */ | 218 | const char *old_name = kobj->name; |
220 | kfree(kobj->name); | 219 | char *s; |
221 | 220 | ||
222 | kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); | 221 | kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); |
223 | if (!kobj->name) | 222 | if (!kobj->name) |
224 | return -ENOMEM; | 223 | return -ENOMEM; |
225 | 224 | ||
225 | /* ewww... some of these buggers have '/' in the name ... */ | ||
226 | while ((s = strchr(kobj->name, '/'))) | ||
227 | s[0] = '!'; | ||
228 | |||
229 | kfree(old_name); | ||
226 | return 0; | 230 | return 0; |
227 | } | 231 | } |
228 | 232 | ||
@@ -383,11 +387,17 @@ EXPORT_SYMBOL_GPL(kobject_init_and_add); | |||
383 | * kobject_rename - change the name of an object | 387 | * kobject_rename - change the name of an object |
384 | * @kobj: object in question. | 388 | * @kobj: object in question. |
385 | * @new_name: object's new name | 389 | * @new_name: object's new name |
390 | * | ||
391 | * It is the responsibility of the caller to provide mutual | ||
392 | * exclusion between two different calls of kobject_rename | ||
393 | * on the same kobject and to ensure that new_name is valid and | ||
394 | * won't conflict with other kobjects. | ||
386 | */ | 395 | */ |
387 | int kobject_rename(struct kobject *kobj, const char *new_name) | 396 | int kobject_rename(struct kobject *kobj, const char *new_name) |
388 | { | 397 | { |
389 | int error = 0; | 398 | int error = 0; |
390 | const char *devpath = NULL; | 399 | const char *devpath = NULL; |
400 | const char *dup_name = NULL, *name; | ||
391 | char *devpath_string = NULL; | 401 | char *devpath_string = NULL; |
392 | char *envp[2]; | 402 | char *envp[2]; |
393 | 403 | ||
@@ -397,19 +407,6 @@ int kobject_rename(struct kobject *kobj, const char *new_name) | |||
397 | if (!kobj->parent) | 407 | if (!kobj->parent) |
398 | return -EINVAL; | 408 | return -EINVAL; |
399 | 409 | ||
400 | /* see if this name is already in use */ | ||
401 | if (kobj->kset) { | ||
402 | struct kobject *temp_kobj; | ||
403 | temp_kobj = kset_find_obj(kobj->kset, new_name); | ||
404 | if (temp_kobj) { | ||
405 | printk(KERN_WARNING "kobject '%s' cannot be renamed " | ||
406 | "to '%s' as '%s' is already in existence.\n", | ||
407 | kobject_name(kobj), new_name, new_name); | ||
408 | kobject_put(temp_kobj); | ||
409 | return -EINVAL; | ||
410 | } | ||
411 | } | ||
412 | |||
413 | devpath = kobject_get_path(kobj, GFP_KERNEL); | 410 | devpath = kobject_get_path(kobj, GFP_KERNEL); |
414 | if (!devpath) { | 411 | if (!devpath) { |
415 | error = -ENOMEM; | 412 | error = -ENOMEM; |
@@ -424,15 +421,27 @@ int kobject_rename(struct kobject *kobj, const char *new_name) | |||
424 | envp[0] = devpath_string; | 421 | envp[0] = devpath_string; |
425 | envp[1] = NULL; | 422 | envp[1] = NULL; |
426 | 423 | ||
424 | name = dup_name = kstrdup(new_name, GFP_KERNEL); | ||
425 | if (!name) { | ||
426 | error = -ENOMEM; | ||
427 | goto out; | ||
428 | } | ||
429 | |||
427 | error = sysfs_rename_dir(kobj, new_name); | 430 | error = sysfs_rename_dir(kobj, new_name); |
431 | if (error) | ||
432 | goto out; | ||
433 | |||
434 | /* Install the new kobject name */ | ||
435 | dup_name = kobj->name; | ||
436 | kobj->name = name; | ||
428 | 437 | ||
429 | /* This function is mostly/only used for network interfaces. | 438 | /* This function is mostly/only used for network interfaces. |
430 | * Some hotplug packages track interfaces by their name and | 439 | * Some hotplug packages track interfaces by their name and |
431 | * therefore want to know when the name is changed by the user. */ | 440 | * therefore want to know when the name is changed by the user. */ |
432 | if (!error) | 441 | kobject_uevent_env(kobj, KOBJ_MOVE, envp); |
433 | kobject_uevent_env(kobj, KOBJ_MOVE, envp); | ||
434 | 442 | ||
435 | out: | 443 | out: |
444 | kfree(dup_name); | ||
436 | kfree(devpath_string); | 445 | kfree(devpath_string); |
437 | kfree(devpath); | 446 | kfree(devpath); |
438 | kobject_put(kobj); | 447 | kobject_put(kobj); |
@@ -577,12 +586,10 @@ static void kobject_release(struct kref *kref) | |||
577 | void kobject_put(struct kobject *kobj) | 586 | void kobject_put(struct kobject *kobj) |
578 | { | 587 | { |
579 | if (kobj) { | 588 | if (kobj) { |
580 | if (!kobj->state_initialized) { | 589 | if (!kobj->state_initialized) |
581 | printk(KERN_WARNING "kobject: '%s' (%p): is not " | 590 | WARN(1, KERN_WARNING "kobject: '%s' (%p): is not " |
582 | "initialized, yet kobject_put() is being " | 591 | "initialized, yet kobject_put() is being " |
583 | "called.\n", kobject_name(kobj), kobj); | 592 | "called.\n", kobject_name(kobj), kobj); |
584 | WARN_ON(1); | ||
585 | } | ||
586 | kref_put(&kobj->kref, kobject_release); | 593 | kref_put(&kobj->kref, kobject_release); |
587 | } | 594 | } |
588 | } | 595 | } |
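
The '/' sanitization added to kobject_set_name_vargs() matters because the name becomes a sysfs directory entry, where '/' is illegal. A standalone illustration of the replacement loop (the input name is made up):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char name[] = "aux/mouse0";	/* a name a driver might pass */
		char *s;

		while ((s = strchr(name, '/')))
			s[0] = '!';

		printf("%s\n", name);		/* prints "aux!mouse0" */
		return 0;
	}
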
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 2fa545a63160..3f914725bda8 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
@@ -245,7 +245,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
245 | if (retval) | 245 | if (retval) |
246 | goto exit; | 246 | goto exit; |
247 | 247 | ||
248 | call_usermodehelper(argv[0], argv, env->envp, UMH_WAIT_EXEC); | 248 | retval = call_usermodehelper(argv[0], argv, |
249 | env->envp, UMH_WAIT_EXEC); | ||
249 | } | 250 | } |
250 | 251 | ||
251 | exit: | 252 | exit: |
@@ -284,8 +285,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) | |||
284 | int len; | 285 | int len; |
285 | 286 | ||
286 | if (env->envp_idx >= ARRAY_SIZE(env->envp)) { | 287 | if (env->envp_idx >= ARRAY_SIZE(env->envp)) { |
287 | printk(KERN_ERR "add_uevent_var: too many keys\n"); | 288 | WARN(1, KERN_ERR "add_uevent_var: too many keys\n"); |
288 | WARN_ON(1); | ||
289 | return -ENOMEM; | 289 | return -ENOMEM; |
290 | } | 290 | } |
291 | 291 | ||
@@ -296,8 +296,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) | |||
296 | va_end(args); | 296 | va_end(args); |
297 | 297 | ||
298 | if (len >= (sizeof(env->buf) - env->buflen)) { | 298 | if (len >= (sizeof(env->buf) - env->buflen)) { |
299 | printk(KERN_ERR "add_uevent_var: buffer size too small\n"); | 299 | WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n"); |
300 | WARN_ON(1); | ||
301 | return -ENOMEM; | 300 | return -ENOMEM; |
302 | } | 301 | } |
303 | 302 | ||
diff --git a/lib/list_debug.c b/lib/list_debug.c index 4350ba9655bd..1a39f4e3ae1f 100644 --- a/lib/list_debug.c +++ b/lib/list_debug.c | |||
@@ -20,18 +20,14 @@ void __list_add(struct list_head *new, | |||
20 | struct list_head *prev, | 20 | struct list_head *prev, |
21 | struct list_head *next) | 21 | struct list_head *next) |
22 | { | 22 | { |
23 | if (unlikely(next->prev != prev)) { | 23 | WARN(next->prev != prev, |
24 | printk(KERN_ERR "list_add corruption. next->prev should be " | 24 | "list_add corruption. next->prev should be " |
25 | "prev (%p), but was %p. (next=%p).\n", | 25 | "prev (%p), but was %p. (next=%p).\n", |
26 | prev, next->prev, next); | 26 | prev, next->prev, next); |
27 | BUG(); | 27 | WARN(prev->next != next, |
28 | } | 28 | "list_add corruption. prev->next should be " |
29 | if (unlikely(prev->next != next)) { | 29 | "next (%p), but was %p. (prev=%p).\n", |
30 | printk(KERN_ERR "list_add corruption. prev->next should be " | 30 | next, prev->next, prev); |
31 | "next (%p), but was %p. (prev=%p).\n", | ||
32 | next, prev->next, prev); | ||
33 | BUG(); | ||
34 | } | ||
35 | next->prev = new; | 31 | next->prev = new; |
36 | new->next = next; | 32 | new->next = next; |
37 | new->prev = prev; | 33 | new->prev = prev; |
@@ -40,20 +36,6 @@ void __list_add(struct list_head *new, | |||
40 | EXPORT_SYMBOL(__list_add); | 36 | EXPORT_SYMBOL(__list_add); |
41 | 37 | ||
42 | /** | 38 | /** |
43 | * list_add - add a new entry | ||
44 | * @new: new entry to be added | ||
45 | * @head: list head to add it after | ||
46 | * | ||
47 | * Insert a new entry after the specified head. | ||
48 | * This is good for implementing stacks. | ||
49 | */ | ||
50 | void list_add(struct list_head *new, struct list_head *head) | ||
51 | { | ||
52 | __list_add(new, head, head->next); | ||
53 | } | ||
54 | EXPORT_SYMBOL(list_add); | ||
55 | |||
56 | /** | ||
57 | * list_del - deletes entry from list. | 39 | * list_del - deletes entry from list. |
58 | * @entry: the element to delete from the list. | 40 | * @entry: the element to delete from the list. |
59 | * Note: list_empty on entry does not return true after this, the entry is | 41 | * Note: list_empty on entry does not return true after this, the entry is |
@@ -61,16 +43,12 @@ EXPORT_SYMBOL(list_add); | |||
61 | */ | 43 | */ |
62 | void list_del(struct list_head *entry) | 44 | void list_del(struct list_head *entry) |
63 | { | 45 | { |
64 | if (unlikely(entry->prev->next != entry)) { | 46 | WARN(entry->prev->next != entry, |
65 | printk(KERN_ERR "list_del corruption. prev->next should be %p, " | 47 | "list_del corruption. prev->next should be %p, " |
66 | "but was %p\n", entry, entry->prev->next); | 48 | "but was %p\n", entry, entry->prev->next); |
67 | BUG(); | 49 | WARN(entry->next->prev != entry, |
68 | } | 50 | "list_del corruption. next->prev should be %p, " |
69 | if (unlikely(entry->next->prev != entry)) { | 51 | "but was %p\n", entry, entry->next->prev); |
70 | printk(KERN_ERR "list_del corruption. next->prev should be %p, " | ||
71 | "but was %p\n", entry, entry->next->prev); | ||
72 | BUG(); | ||
73 | } | ||
74 | __list_del(entry->prev, entry->next); | 52 | __list_del(entry->prev, entry->next); |
75 | entry->next = LIST_POISON1; | 53 | entry->next = LIST_POISON1; |
76 | entry->prev = LIST_POISON2; | 54 | entry->prev = LIST_POISON2; |
diff --git a/lib/lmb.c b/lib/lmb.c --- a/lib/lmb.c +++ b/lib/lmb.c | |||
@@ -462,6 +462,8 @@ void __init lmb_enforce_memory_limit(u64 memory_limit) | |||
462 | if (lmb.memory.region[0].size < lmb.rmo_size) | 462 | if (lmb.memory.region[0].size < lmb.rmo_size) |
463 | lmb.rmo_size = lmb.memory.region[0].size; | 463 | lmb.rmo_size = lmb.memory.region[0].size; |
464 | 464 | ||
465 | memory_limit = lmb_end_of_DRAM(); | ||
466 | |||
465 | /* And truncate any reserves above the limit also. */ | 467 | /* And truncate any reserves above the limit also. */ |
466 | for (i = 0; i < lmb.reserved.cnt; i++) { | 468 | for (i = 0; i < lmb.reserved.cnt; i++) { |
467 | p = &lmb.reserved.region[i]; | 469 | p = &lmb.reserved.region[i]; |
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c index 77f0f9b775a9..5dc6b29c1575 100644 --- a/lib/lzo/lzo1x_decompress.c +++ b/lib/lzo/lzo1x_decompress.c | |||
@@ -138,8 +138,7 @@ match: | |||
138 | t += 31 + *ip++; | 138 | t += 31 + *ip++; |
139 | } | 139 | } |
140 | m_pos = op - 1; | 140 | m_pos = op - 1; |
141 | m_pos -= le16_to_cpu(get_unaligned( | 141 | m_pos -= get_unaligned_le16(ip) >> 2; |
142 | (const unsigned short *)ip)) >> 2; | ||
143 | ip += 2; | 142 | ip += 2; |
144 | } else if (t >= 16) { | 143 | } else if (t >= 16) { |
145 | m_pos = op; | 144 | m_pos = op; |
@@ -157,8 +156,7 @@ match: | |||
157 | } | 156 | } |
158 | t += 7 + *ip++; | 157 | t += 7 + *ip++; |
159 | } | 158 | } |
160 | m_pos -= le16_to_cpu(get_unaligned( | 159 | m_pos -= get_unaligned_le16(ip) >> 2; |
161 | (const unsigned short *)ip)) >> 2; | ||
162 | ip += 2; | 160 | ip += 2; |
163 | if (m_pos == op) | 161 | if (m_pos == op) |
164 | goto eof_found; | 162 | goto eof_found; |
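
The lzo change swaps the open-coded le16_to_cpu(get_unaligned(...)) pair for the get_unaligned_le16() helper; both read two bytes as a little-endian 16-bit value regardless of host endianness or alignment. A userspace sketch of what the helper computes:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t get_unaligned_le16_sketch(const uint8_t *p)
	{
		return (uint16_t)(p[0] | (p[1] << 8));	/* byte 0 is least significant */
	}

	int main(void)
	{
		const uint8_t ip[2] = { 0x34, 0x12 };
		uint16_t v = get_unaligned_le16_sketch(ip);

		/* the decompressor then shifts out the two low flag bits */
		printf("v=0x%04x distance=%u\n", v, v >> 2);
		return 0;
	}
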
diff --git a/lib/parser.c b/lib/parser.c index 4f0cbc03e0e8..b00d02059a5f 100644 --- a/lib/parser.c +++ b/lib/parser.c | |||
@@ -100,7 +100,7 @@ static int match_one(char *s, const char *p, substring_t args[]) | |||
100 | * format identifiers which will be taken into account when matching the | 100 | * format identifiers which will be taken into account when matching the |
101 | * tokens, and whose locations will be returned in the @args array. | 101 | * tokens, and whose locations will be returned in the @args array. |
102 | */ | 102 | */ |
103 | int match_token(char *s, match_table_t table, substring_t args[]) | 103 | int match_token(char *s, const match_table_t table, substring_t args[]) |
104 | { | 104 | { |
105 | const struct match_token *p; | 105 | const struct match_token *p; |
106 | 106 | ||
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 4a8ba4bf5f6f..a8663890a88c 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c | |||
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add); | |||
52 | * Add up all the per-cpu counts, return the result. This is a more accurate | 52 | * Add up all the per-cpu counts, return the result. This is a more accurate |
53 | * but much slower version of percpu_counter_read_positive() | 53 | * but much slower version of percpu_counter_read_positive() |
54 | */ | 54 | */ |
55 | s64 __percpu_counter_sum(struct percpu_counter *fbc, int set) | 55 | s64 __percpu_counter_sum(struct percpu_counter *fbc) |
56 | { | 56 | { |
57 | s64 ret; | 57 | s64 ret; |
58 | int cpu; | 58 | int cpu; |
@@ -62,11 +62,9 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc, int set) | |||
62 | for_each_online_cpu(cpu) { | 62 | for_each_online_cpu(cpu) { |
63 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); | 63 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
64 | ret += *pcount; | 64 | ret += *pcount; |
65 | if (set) | 65 | *pcount = 0; |
66 | *pcount = 0; | ||
67 | } | 66 | } |
68 | if (set) | 67 | fbc->count = ret; |
69 | fbc->count = ret; | ||
70 | 68 | ||
71 | spin_unlock(&fbc->lock); | 69 | spin_unlock(&fbc->lock); |
72 | return ret; | 70 | return ret; |
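
With the @set argument gone, __percpu_counter_sum() now always folds the per-cpu deltas into fbc->count and zeroes them, so repeated sums are idempotent until new increments arrive. A userspace model of the fold (locking elided):

	#include <stdio.h>

	#define NR_CPUS 4

	struct pc {
		long long count;		/* global count */
		int counters[NR_CPUS];		/* per-cpu deltas */
	};

	static long long pc_sum(struct pc *fbc)
	{
		long long ret = fbc->count;
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			ret += fbc->counters[cpu];
			fbc->counters[cpu] = 0;	/* fold the delta in */
		}
		fbc->count = ret;
		return ret;
	}

	int main(void)
	{
		struct pc fbc = { 100, { 3, -1, 0, 5 } };

		printf("%lld\n", pc_sum(&fbc));	/* 107 */
		printf("%lld\n", pc_sum(&fbc));	/* still 107: deltas were folded */
		return 0;
	}
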
diff --git a/lib/plist.c b/lib/plist.c index 3074a02272f3..d6c64a824e1d 100644 --- a/lib/plist.c +++ b/lib/plist.c | |||
@@ -31,12 +31,13 @@ | |||
31 | static void plist_check_prev_next(struct list_head *t, struct list_head *p, | 31 | static void plist_check_prev_next(struct list_head *t, struct list_head *p, |
32 | struct list_head *n) | 32 | struct list_head *n) |
33 | { | 33 | { |
34 | if (n->prev != p || p->next != n) { | 34 | WARN(n->prev != p || p->next != n, |
35 | printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev); | 35 | "top: %p, n: %p, p: %p\n" |
36 | printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev); | 36 | "prev: %p, n: %p, p: %p\n" |
37 | printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev); | 37 | "next: %p, n: %p, p: %p\n", |
38 | WARN_ON(1); | 38 | t, t->next, t->prev, |
39 | } | 39 | p, p->next, p->prev, |
40 | n, n->next, n->prev); | ||
40 | } | 41 | } |
41 | 42 | ||
42 | static void plist_check_list(struct list_head *top) | 43 | static void plist_check_list(struct list_head *top) |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 56ec21a7f73d..be86b32bc874 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -359,18 +359,17 @@ EXPORT_SYMBOL(radix_tree_insert); | |||
359 | * Returns: the slot corresponding to the position @index in the | 359 | * Returns: the slot corresponding to the position @index in the |
360 | * radix tree @root. This is useful for update-if-exists operations. | 360 | * radix tree @root. This is useful for update-if-exists operations. |
361 | * | 361 | * |
362 | * This function cannot be called under rcu_read_lock, it must be | 362 | * This function can be called under rcu_read_lock iff the slot is not |
363 | * excluded from writers, as must the returned slot for subsequent | 363 | * modified by radix_tree_replace_slot, otherwise it must be called |
364 | * use by radix_tree_deref_slot() and radix_tree_replace_slot(). | 364 | * exclusive of other writers. Any dereference of the slot must be done |
365 | * Caller must hold tree write locked across slot lookup and | 365 | * using radix_tree_deref_slot. |
366 | * replace. | ||
367 | */ | 366 | */ |
368 | void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) | 367 | void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) |
369 | { | 368 | { |
370 | unsigned int height, shift; | 369 | unsigned int height, shift; |
371 | struct radix_tree_node *node, **slot; | 370 | struct radix_tree_node *node, **slot; |
372 | 371 | ||
373 | node = root->rnode; | 372 | node = rcu_dereference(root->rnode); |
374 | if (node == NULL) | 373 | if (node == NULL) |
375 | return NULL; | 374 | return NULL; |
376 | 375 | ||
@@ -390,7 +389,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) | |||
390 | do { | 389 | do { |
391 | slot = (struct radix_tree_node **) | 390 | slot = (struct radix_tree_node **) |
392 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); | 391 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); |
393 | node = *slot; | 392 | node = rcu_dereference(*slot); |
394 | if (node == NULL) | 393 | if (node == NULL) |
395 | return NULL; | 394 | return NULL; |
396 | 395 | ||
@@ -667,7 +666,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root, | |||
667 | EXPORT_SYMBOL(radix_tree_next_hole); | 666 | EXPORT_SYMBOL(radix_tree_next_hole); |
668 | 667 | ||
669 | static unsigned int | 668 | static unsigned int |
670 | __lookup(struct radix_tree_node *slot, void **results, unsigned long index, | 669 | __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, |
671 | unsigned int max_items, unsigned long *next_index) | 670 | unsigned int max_items, unsigned long *next_index) |
672 | { | 671 | { |
673 | unsigned int nr_found = 0; | 672 | unsigned int nr_found = 0; |
@@ -701,11 +700,9 @@ __lookup(struct radix_tree_node *slot, void **results, unsigned long index, | |||
701 | 700 | ||
702 | /* Bottom level: grab some items */ | 701 | /* Bottom level: grab some items */ |
703 | for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { | 702 | for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { |
704 | struct radix_tree_node *node; | ||
705 | index++; | 703 | index++; |
706 | node = slot->slots[i]; | 704 | if (slot->slots[i]) { |
707 | if (node) { | 705 | results[nr_found++] = &(slot->slots[i]); |
708 | results[nr_found++] = rcu_dereference(node); | ||
709 | if (nr_found == max_items) | 706 | if (nr_found == max_items) |
710 | goto out; | 707 | goto out; |
711 | } | 708 | } |
@@ -759,13 +756,22 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
759 | 756 | ||
760 | ret = 0; | 757 | ret = 0; |
761 | while (ret < max_items) { | 758 | while (ret < max_items) { |
762 | unsigned int nr_found; | 759 | unsigned int nr_found, slots_found, i; |
763 | unsigned long next_index; /* Index of next search */ | 760 | unsigned long next_index; /* Index of next search */ |
764 | 761 | ||
765 | if (cur_index > max_index) | 762 | if (cur_index > max_index) |
766 | break; | 763 | break; |
767 | nr_found = __lookup(node, results + ret, cur_index, | 764 | slots_found = __lookup(node, (void ***)results + ret, cur_index, |
768 | max_items - ret, &next_index); | 765 | max_items - ret, &next_index); |
766 | nr_found = 0; | ||
767 | for (i = 0; i < slots_found; i++) { | ||
768 | struct radix_tree_node *slot; | ||
769 | slot = *(((void ***)results)[ret + i]); | ||
770 | if (!slot) | ||
771 | continue; | ||
772 | results[ret + nr_found] = rcu_dereference(slot); | ||
773 | nr_found++; | ||
774 | } | ||
769 | ret += nr_found; | 775 | ret += nr_found; |
770 | if (next_index == 0) | 776 | if (next_index == 0) |
771 | break; | 777 | break; |
@@ -776,12 +782,71 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
776 | } | 782 | } |
777 | EXPORT_SYMBOL(radix_tree_gang_lookup); | 783 | EXPORT_SYMBOL(radix_tree_gang_lookup); |
778 | 784 | ||
785 | /** | ||
786 | * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree | ||
787 | * @root: radix tree root | ||
788 | * @results: where the results of the lookup are placed | ||
789 | * @first_index: start the lookup from this key | ||
790 | * @max_items: place up to this many items at *results | ||
791 | * | ||
792 | * Performs an index-ascending scan of the tree for present items. Places | ||
793 | * their slots at *@results and returns the number of items which were | ||
794 | * placed at *@results. | ||
795 | * | ||
796 | * The implementation is naive. | ||
797 | * | ||
798 | * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must | ||
799 | * be dereferenced with radix_tree_deref_slot, and if using only RCU | ||
800 | * protection, radix_tree_deref_slot may fail requiring a retry. | ||
801 | */ | ||
802 | unsigned int | ||
803 | radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | ||
804 | unsigned long first_index, unsigned int max_items) | ||
805 | { | ||
806 | unsigned long max_index; | ||
807 | struct radix_tree_node *node; | ||
808 | unsigned long cur_index = first_index; | ||
809 | unsigned int ret; | ||
810 | |||
811 | node = rcu_dereference(root->rnode); | ||
812 | if (!node) | ||
813 | return 0; | ||
814 | |||
815 | if (!radix_tree_is_indirect_ptr(node)) { | ||
816 | if (first_index > 0) | ||
817 | return 0; | ||
818 | results[0] = (void **)&root->rnode; | ||
819 | return 1; | ||
820 | } | ||
821 | node = radix_tree_indirect_to_ptr(node); | ||
822 | |||
823 | max_index = radix_tree_maxindex(node->height); | ||
824 | |||
825 | ret = 0; | ||
826 | while (ret < max_items) { | ||
827 | unsigned int slots_found; | ||
828 | unsigned long next_index; /* Index of next search */ | ||
829 | |||
830 | if (cur_index > max_index) | ||
831 | break; | ||
832 | slots_found = __lookup(node, results + ret, cur_index, | ||
833 | max_items - ret, &next_index); | ||
834 | ret += slots_found; | ||
835 | if (next_index == 0) | ||
836 | break; | ||
837 | cur_index = next_index; | ||
838 | } | ||
839 | |||
840 | return ret; | ||
841 | } | ||
842 | EXPORT_SYMBOL(radix_tree_gang_lookup_slot); | ||
843 | |||
779 | /* | 844 | /* |
780 | * FIXME: the two tag_get()s here should use find_next_bit() instead of | 845 | * FIXME: the two tag_get()s here should use find_next_bit() instead of |
781 | * open-coding the search. | 846 | * open-coding the search. |
782 | */ | 847 | */ |
783 | static unsigned int | 848 | static unsigned int |
784 | __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, | 849 | __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index, |
785 | unsigned int max_items, unsigned long *next_index, unsigned int tag) | 850 | unsigned int max_items, unsigned long *next_index, unsigned int tag) |
786 | { | 851 | { |
787 | unsigned int nr_found = 0; | 852 | unsigned int nr_found = 0; |
@@ -811,11 +876,9 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, | |||
811 | unsigned long j = index & RADIX_TREE_MAP_MASK; | 876 | unsigned long j = index & RADIX_TREE_MAP_MASK; |
812 | 877 | ||
813 | for ( ; j < RADIX_TREE_MAP_SIZE; j++) { | 878 | for ( ; j < RADIX_TREE_MAP_SIZE; j++) { |
814 | struct radix_tree_node *node; | ||
815 | index++; | 879 | index++; |
816 | if (!tag_get(slot, tag, j)) | 880 | if (!tag_get(slot, tag, j)) |
817 | continue; | 881 | continue; |
818 | node = slot->slots[j]; | ||
819 | /* | 882 | /* |
820 | * Even though the tag was found set, we need to | 883 | * Even though the tag was found set, we need to |
821 | * recheck that we have a non-NULL node, because | 884 | * recheck that we have a non-NULL node, because |
@@ -826,9 +889,8 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, | |||
826 | * lookup ->slots[x] without a lock (ie. can't | 889 | * lookup ->slots[x] without a lock (ie. can't |
827 | * rely on its value remaining the same). | 890 | * rely on its value remaining the same). |
828 | */ | 891 | */ |
829 | if (node) { | 892 | if (slot->slots[j]) { |
830 | node = rcu_dereference(node); | 893 | results[nr_found++] = &(slot->slots[j]); |
831 | results[nr_found++] = node; | ||
832 | if (nr_found == max_items) | 894 | if (nr_found == max_items) |
833 | goto out; | 895 | goto out; |
834 | } | 896 | } |
@@ -887,13 +949,22 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
887 | 949 | ||
888 | ret = 0; | 950 | ret = 0; |
889 | while (ret < max_items) { | 951 | while (ret < max_items) { |
890 | unsigned int nr_found; | 952 | unsigned int nr_found, slots_found, i; |
891 | unsigned long next_index; /* Index of next search */ | 953 | unsigned long next_index; /* Index of next search */ |
892 | 954 | ||
893 | if (cur_index > max_index) | 955 | if (cur_index > max_index) |
894 | break; | 956 | break; |
895 | nr_found = __lookup_tag(node, results + ret, cur_index, | 957 | slots_found = __lookup_tag(node, (void ***)results + ret, |
896 | max_items - ret, &next_index, tag); | 958 | cur_index, max_items - ret, &next_index, tag); |
959 | nr_found = 0; | ||
960 | for (i = 0; i < slots_found; i++) { | ||
961 | struct radix_tree_node *slot; | ||
962 | slot = *(((void ***)results)[ret + i]); | ||
963 | if (!slot) | ||
964 | continue; | ||
965 | results[ret + nr_found] = rcu_dereference(slot); | ||
966 | nr_found++; | ||
967 | } | ||
897 | ret += nr_found; | 968 | ret += nr_found; |
898 | if (next_index == 0) | 969 | if (next_index == 0) |
899 | break; | 970 | break; |
@@ -905,6 +976,67 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
905 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag); | 976 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag); |
906 | 977 | ||
907 | /** | 978 | /** |
979 | * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a | ||
980 | * radix tree based on a tag | ||
981 | * @root: radix tree root | ||
982 | * @results: where the results of the lookup are placed | ||
983 | * @first_index: start the lookup from this key | ||
984 | * @max_items: place up to this many items at *results | ||
985 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) | ||
986 | * | ||
987 | * Performs an index-ascending scan of the tree for present items which | ||
988 | * have the tag indexed by @tag set. Places the slots at *@results and | ||
989 | * returns the number of slots which were placed at *@results. | ||
990 | */ | ||
991 | unsigned int | ||
992 | radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | ||
993 | unsigned long first_index, unsigned int max_items, | ||
994 | unsigned int tag) | ||
995 | { | ||
996 | struct radix_tree_node *node; | ||
997 | unsigned long max_index; | ||
998 | unsigned long cur_index = first_index; | ||
999 | unsigned int ret; | ||
1000 | |||
1001 | /* check the root's tag bit */ | ||
1002 | if (!root_tag_get(root, tag)) | ||
1003 | return 0; | ||
1004 | |||
1005 | node = rcu_dereference(root->rnode); | ||
1006 | if (!node) | ||
1007 | return 0; | ||
1008 | |||
1009 | if (!radix_tree_is_indirect_ptr(node)) { | ||
1010 | if (first_index > 0) | ||
1011 | return 0; | ||
1012 | results[0] = (void **)&root->rnode; | ||
1013 | return 1; | ||
1014 | } | ||
1015 | node = radix_tree_indirect_to_ptr(node); | ||
1016 | |||
1017 | max_index = radix_tree_maxindex(node->height); | ||
1018 | |||
1019 | ret = 0; | ||
1020 | while (ret < max_items) { | ||
1021 | unsigned int slots_found; | ||
1022 | unsigned long next_index; /* Index of next search */ | ||
1023 | |||
1024 | if (cur_index > max_index) | ||
1025 | break; | ||
1026 | slots_found = __lookup_tag(node, results + ret, | ||
1027 | cur_index, max_items - ret, &next_index, tag); | ||
1028 | ret += slots_found; | ||
1029 | if (next_index == 0) | ||
1030 | break; | ||
1031 | cur_index = next_index; | ||
1032 | } | ||
1033 | |||
1034 | return ret; | ||
1035 | } | ||
1036 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); | ||
1037 | |||
1038 | |||
1039 | /** | ||
908 | * radix_tree_shrink - shrink height of a radix tree to minimal | 1040 | * radix_tree_shrink - shrink height of a radix tree to minimal |
909 | * @root: radix tree root | 1041 | * @root: radix tree root |
910 | */ | 1042 | */ |
@@ -1051,7 +1183,7 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) | |||
1051 | EXPORT_SYMBOL(radix_tree_tagged); | 1183 | EXPORT_SYMBOL(radix_tree_tagged); |
1052 | 1184 | ||
1053 | static void | 1185 | static void |
1054 | radix_tree_node_ctor(struct kmem_cache *cachep, void *node) | 1186 | radix_tree_node_ctor(void *node) |
1055 | { | 1187 | { |
1056 | memset(node, 0, sizeof(struct radix_tree_node)); | 1188 | memset(node, 0, sizeof(struct radix_tree_node)); |
1057 | } | 1189 | } |
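
A sketch of how a caller might combine the new radix_tree_gang_lookup_slot() with radix_tree_deref_slot() under RCU (struct my_item and use_item() are hypothetical; a NULL dereference means the slot was emptied after the scan and must be skipped or retried):

	static void scan_items(struct radix_tree_root *tree)
	{
		void **slots[16];
		unsigned int i, n;

		rcu_read_lock();
		n = radix_tree_gang_lookup_slot(tree, slots, 0, 16);
		for (i = 0; i < n; i++) {
			struct my_item *item = radix_tree_deref_slot(slots[i]);

			if (!item)
				continue;	/* slot emptied since the scan */
			use_item(item);		/* hypothetical consumer */
		}
		rcu_read_unlock();
	}
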
diff --git a/lib/random32.c b/lib/random32.c index ca87d86992bd..217d5c4b666d 100644 --- a/lib/random32.c +++ b/lib/random32.c | |||
@@ -56,23 +56,12 @@ static u32 __random32(struct rnd_state *state) | |||
56 | return (state->s1 ^ state->s2 ^ state->s3); | 56 | return (state->s1 ^ state->s2 ^ state->s3); |
57 | } | 57 | } |
58 | 58 | ||
59 | static void __set_random32(struct rnd_state *state, unsigned long s) | 59 | /* |
60 | * Handle minimum values for seeds | ||
61 | */ | ||
62 | static inline u32 __seed(u32 x, u32 m) | ||
60 | { | 63 | { |
61 | if (s == 0) | 64 | return (x < m) ? x + m : x; |
62 | s = 1; /* default seed is 1 */ | ||
63 | |||
64 | #define LCG(n) (69069 * n) | ||
65 | state->s1 = LCG(s); | ||
66 | state->s2 = LCG(state->s1); | ||
67 | state->s3 = LCG(state->s2); | ||
68 | |||
69 | /* "warm it up" */ | ||
70 | __random32(state); | ||
71 | __random32(state); | ||
72 | __random32(state); | ||
73 | __random32(state); | ||
74 | __random32(state); | ||
75 | __random32(state); | ||
76 | } | 65 | } |
77 | 66 | ||
78 | /** | 67 | /** |
@@ -107,7 +96,7 @@ void srandom32(u32 entropy) | |||
107 | */ | 96 | */ |
108 | for_each_possible_cpu (i) { | 97 | for_each_possible_cpu (i) { |
109 | struct rnd_state *state = &per_cpu(net_rand_state, i); | 98 | struct rnd_state *state = &per_cpu(net_rand_state, i); |
110 | __set_random32(state, state->s1 ^ entropy); | 99 | state->s1 = __seed(state->s1 ^ entropy, 1); |
111 | } | 100 | } |
112 | } | 101 | } |
113 | EXPORT_SYMBOL(srandom32); | 102 | EXPORT_SYMBOL(srandom32); |
@@ -122,7 +111,19 @@ static int __init random32_init(void) | |||
122 | 111 | ||
123 | for_each_possible_cpu(i) { | 112 | for_each_possible_cpu(i) { |
124 | struct rnd_state *state = &per_cpu(net_rand_state,i); | 113 | struct rnd_state *state = &per_cpu(net_rand_state,i); |
125 | __set_random32(state, i + jiffies); | 114 | |
115 | #define LCG(x) ((x) * 69069) /* super-duper LCG */ | ||
116 | state->s1 = __seed(LCG(i + jiffies), 1); | ||
117 | state->s2 = __seed(LCG(state->s1), 7); | ||
118 | state->s3 = __seed(LCG(state->s2), 15); | ||
119 | |||
120 | /* "warm it up" */ | ||
121 | __random32(state); | ||
122 | __random32(state); | ||
123 | __random32(state); | ||
124 | __random32(state); | ||
125 | __random32(state); | ||
126 | __random32(state); | ||
126 | } | 127 | } |
127 | return 0; | 128 | return 0; |
128 | } | 129 | } |
@@ -135,13 +136,18 @@ core_initcall(random32_init); | |||
135 | static int __init random32_reseed(void) | 136 | static int __init random32_reseed(void) |
136 | { | 137 | { |
137 | int i; | 138 | int i; |
138 | unsigned long seed; | ||
139 | 139 | ||
140 | for_each_possible_cpu(i) { | 140 | for_each_possible_cpu(i) { |
141 | struct rnd_state *state = &per_cpu(net_rand_state,i); | 141 | struct rnd_state *state = &per_cpu(net_rand_state,i); |
142 | u32 seeds[3]; | ||
143 | |||
144 | get_random_bytes(&seeds, sizeof(seeds)); | ||
145 | state->s1 = __seed(seeds[0], 1); | ||
146 | state->s2 = __seed(seeds[1], 7); | ||
147 | state->s3 = __seed(seeds[2], 15); | ||
142 | 148 | ||
143 | get_random_bytes(&seed, sizeof(seed)); | 149 | /* mix it in */ |
144 | __set_random32(state, seed); | 150 | __random32(state); |
145 | } | 151 | } |
146 | return 0; | 152 | return 0; |
147 | } | 153 | } |
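
The per-stage minimums (1, 7, 15) exist because the Tausworthe stages in __random32() shift away low-order bits, and a too-small seed can degenerate a stage into a stuck state; __seed() lifts such values above the stage's floor. A userspace check of its behaviour:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	static inline uint32_t __seed(uint32_t x, uint32_t m)
	{
		return (x < m) ? x + m : x;
	}

	int main(void)
	{
		assert(__seed(0, 1) == 1);	/* an all-zero seed would wedge s1 */
		assert(__seed(3, 7) == 10);	/* small seed lifted above the s2 floor */
		assert(__seed(100, 15) == 100);	/* large seeds pass through untouched */
		printf("ok\n");
		return 0;
	}
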
diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 485e3040dcd4..26187edcc7ea 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c | |||
@@ -3,6 +3,9 @@ | |||
3 | * | 3 | * |
4 | * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com> | 4 | * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com> |
5 | * | 5 | * |
6 | * 2008-05-01 rewrote the function to take a ratelimit_state struct as its | ||
7 | * parameter. Now every user can have a standalone ratelimit_state. | ||
8 | * | ||
6 | * This file is released under the GPLv2. | 9 | * This file is released under the GPLv2. |
7 | * | 10 | * |
8 | */ | 11 | */ |
@@ -11,41 +14,44 @@ | |||
11 | #include <linux/jiffies.h> | 14 | #include <linux/jiffies.h> |
12 | #include <linux/module.h> | 15 | #include <linux/module.h> |
13 | 16 | ||
17 | static DEFINE_SPINLOCK(ratelimit_lock); | ||
18 | |||
14 | /* | 19 | /* |
15 | * __ratelimit - rate limiting | 20 | * __ratelimit - rate limiting |
16 | * @ratelimit_jiffies: minimum time in jiffies between two callbacks | 21 | * @rs: ratelimit_state data |
17 | * @ratelimit_burst: number of callbacks we do before ratelimiting | ||
18 | * | 22 | * |
19 | * This enforces a rate limit: not more than @ratelimit_burst callbacks | 23 | * This enforces a rate limit: not more than @rs->burst callbacks |
20 | * in every ratelimit_jiffies | 24 | * in every @rs->interval jiffies |
21 | */ | 25 | */ |
22 | int __ratelimit(int ratelimit_jiffies, int ratelimit_burst) | 26 | int __ratelimit(struct ratelimit_state *rs) |
23 | { | 27 | { |
24 | static DEFINE_SPINLOCK(ratelimit_lock); | ||
25 | static unsigned toks = 10 * 5 * HZ; | ||
26 | static unsigned long last_msg; | ||
27 | static int missed; | ||
28 | unsigned long flags; | 28 | unsigned long flags; |
29 | unsigned long now = jiffies; | ||
30 | 29 | ||
31 | spin_lock_irqsave(&ratelimit_lock, flags); | 30 | if (!rs->interval) |
32 | toks += now - last_msg; | ||
33 | last_msg = now; | ||
34 | if (toks > (ratelimit_burst * ratelimit_jiffies)) | ||
35 | toks = ratelimit_burst * ratelimit_jiffies; | ||
36 | if (toks >= ratelimit_jiffies) { | ||
37 | int lost = missed; | ||
38 | |||
39 | missed = 0; | ||
40 | toks -= ratelimit_jiffies; | ||
41 | spin_unlock_irqrestore(&ratelimit_lock, flags); | ||
42 | if (lost) | ||
43 | printk(KERN_WARNING "%s: %d messages suppressed\n", | ||
44 | __func__, lost); | ||
45 | return 1; | 31 | return 1; |
32 | |||
33 | spin_lock_irqsave(&ratelimit_lock, flags); | ||
34 | if (!rs->begin) | ||
35 | rs->begin = jiffies; | ||
36 | |||
37 | if (time_is_before_jiffies(rs->begin + rs->interval)) { | ||
38 | if (rs->missed) | ||
39 | printk(KERN_WARNING "%s: %d callbacks suppressed\n", | ||
40 | __func__, rs->missed); | ||
41 | rs->begin = 0; | ||
42 | rs->printed = 0; | ||
43 | rs->missed = 0; | ||
46 | } | 44 | } |
47 | missed++; | 45 | if (rs->burst && rs->burst > rs->printed) |
46 | goto print; | ||
47 | |||
48 | rs->missed++; | ||
48 | spin_unlock_irqrestore(&ratelimit_lock, flags); | 49 | spin_unlock_irqrestore(&ratelimit_lock, flags); |
49 | return 0; | 50 | return 0; |
51 | |||
52 | print: | ||
53 | rs->printed++; | ||
54 | spin_unlock_irqrestore(&ratelimit_lock, flags); | ||
55 | return 1; | ||
50 | } | 56 | } |
51 | EXPORT_SYMBOL(__ratelimit); | 57 | EXPORT_SYMBOL(__ratelimit); |
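
A sketch of a caller under the reworked interface; the initializer just fills the ratelimit_state fields the code above consumes (interval, burst), and the names are illustrative:

	static struct ratelimit_state my_rs = {
		.interval	= 5 * HZ,	/* one 5-second window */
		.burst		= 10,		/* at most 10 passes per window */
	};

	void my_event_handler(void)
	{
		if (__ratelimit(&my_rs))
			printk(KERN_WARNING "my_event_handler: event flood\n");
		/* suppressed calls are counted and reported in the next window */
	}
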
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index b80c21100d78..8d2688ff1352 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -295,6 +295,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) | |||
295 | EXPORT_SYMBOL(sg_alloc_table); | 295 | EXPORT_SYMBOL(sg_alloc_table); |
296 | 296 | ||
297 | /** | 297 | /** |
298 | * sg_miter_start - start mapping iteration over a sg list | ||
299 | * @miter: sg mapping iter to be started | ||
300 | * @sgl: sg list to iterate over | ||
301 | * @nents: number of sg entries | ||
 | * @flags: sg iteration flags (e.g. SG_MITER_ATOMIC) | ||
302 | * | ||
303 | * Description: | ||
304 | * Starts mapping iterator @miter. | ||
305 | * | ||
306 | * Context: | ||
307 | * Don't care. | ||
308 | */ | ||
309 | void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, | ||
310 | unsigned int nents, unsigned int flags) | ||
311 | { | ||
312 | memset(miter, 0, sizeof(struct sg_mapping_iter)); | ||
313 | |||
314 | miter->__sg = sgl; | ||
315 | miter->__nents = nents; | ||
316 | miter->__offset = 0; | ||
317 | miter->__flags = flags; | ||
318 | } | ||
319 | EXPORT_SYMBOL(sg_miter_start); | ||
320 | |||
321 | /** | ||
322 | * sg_miter_next - proceed mapping iterator to the next mapping | ||
323 | * @miter: sg mapping iter to proceed | ||
324 | * | ||
325 | * Description: | ||
326 | * Proceeds @miter to the next mapping. @miter should have been | ||
327 | * started using sg_miter_start(). On successful return, | ||
328 | * @miter->page, @miter->addr and @miter->length point to the | ||
329 | * current mapping. | ||
330 | * | ||
331 | * Context: | ||
332 | * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled until | ||
333 | * @miter is stopped. May sleep if !SG_MITER_ATOMIC. | ||
334 | * | ||
335 | * Returns: | ||
336 | * true if @miter contains the next mapping. false if end of sg | ||
337 | * list is reached. | ||
338 | */ | ||
339 | bool sg_miter_next(struct sg_mapping_iter *miter) | ||
340 | { | ||
341 | unsigned int off, len; | ||
342 | |||
343 | /* check for end and drop resources from the last iteration */ | ||
344 | if (!miter->__nents) | ||
345 | return false; | ||
346 | |||
347 | sg_miter_stop(miter); | ||
348 | |||
349 | /* get to the next sg if necessary. __offset is adjusted by stop */ | ||
350 | if (miter->__offset == miter->__sg->length && --miter->__nents) { | ||
351 | miter->__sg = sg_next(miter->__sg); | ||
352 | miter->__offset = 0; | ||
353 | } | ||
354 | |||
355 | /* map the next page */ | ||
356 | off = miter->__sg->offset + miter->__offset; | ||
357 | len = miter->__sg->length - miter->__offset; | ||
358 | |||
359 | miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT); | ||
360 | off &= ~PAGE_MASK; | ||
361 | miter->length = min_t(unsigned int, len, PAGE_SIZE - off); | ||
362 | miter->consumed = miter->length; | ||
363 | |||
364 | if (miter->__flags & SG_MITER_ATOMIC) | ||
365 | miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off; | ||
366 | else | ||
367 | miter->addr = kmap(miter->page) + off; | ||
368 | |||
369 | return true; | ||
370 | } | ||
371 | EXPORT_SYMBOL(sg_miter_next); | ||
372 | |||
373 | /** | ||
374 | * sg_miter_stop - stop mapping iteration | ||
375 | * @miter: sg mapping iter to be stopped | ||
376 | * | ||
377 | * Description: | ||
378 | * Stops mapping iterator @miter. @miter should have been started | ||
379 | * using sg_miter_start(). A stopped iteration can be | ||
380 | * resumed by calling sg_miter_next() on it. This is useful when | ||
381 | * resources (kmap) need to be released during iteration. | ||
382 | * | ||
383 | * Context: | ||
384 | * IRQ disabled if SG_MITER_ATOMIC is set. Don't care otherwise. | ||
385 | */ | ||
386 | void sg_miter_stop(struct sg_mapping_iter *miter) | ||
387 | { | ||
388 | WARN_ON(miter->consumed > miter->length); | ||
389 | |||
390 | /* drop resources from the last iteration */ | ||
391 | if (miter->addr) { | ||
392 | miter->__offset += miter->consumed; | ||
393 | |||
394 | if (miter->__flags & SG_MITER_ATOMIC) { | ||
395 | WARN_ON(!irqs_disabled()); | ||
396 | kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ); | ||
397 | } else | ||
398 | kunmap(miter->addr); | ||
399 | |||
400 | miter->page = NULL; | ||
401 | miter->addr = NULL; | ||
402 | miter->length = 0; | ||
403 | miter->consumed = 0; | ||
404 | } | ||
405 | } | ||
406 | EXPORT_SYMBOL(sg_miter_stop); | ||
407 | |||
408 | /** | ||
298 | * sg_copy_buffer - Copy data between a linear buffer and an SG list | 409 | * sg_copy_buffer - Copy data between a linear buffer and an SG list |
299 | * @sgl: The SG list | 410 | * @sgl: The SG list |
300 | * @nents: Number of SG entries | 411 | * @nents: Number of SG entries |
@@ -309,56 +420,33 @@ EXPORT_SYMBOL(sg_alloc_table); | |||
309 | static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, | 420 | static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, |
310 | void *buf, size_t buflen, int to_buffer) | 421 | void *buf, size_t buflen, int to_buffer) |
311 | { | 422 | { |
312 | struct scatterlist *sg; | 423 | unsigned int offset = 0; |
313 | size_t buf_off = 0; | 424 | struct sg_mapping_iter miter; |
314 | int i; | 425 | unsigned long flags; |
315 | 426 | ||
316 | WARN_ON(!irqs_disabled()); | 427 | sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC); |
317 | 428 | ||
318 | for_each_sg(sgl, sg, nents, i) { | 429 | local_irq_save(flags); |
319 | struct page *page; | 430 | |
320 | int n = 0; | 431 | while (sg_miter_next(&miter) && offset < buflen) { |
321 | unsigned int sg_off = sg->offset; | 432 | unsigned int len; |
322 | unsigned int sg_copy = sg->length; | 433 | |
323 | 434 | len = min(miter.length, buflen - offset); | |
324 | if (sg_copy > buflen) | 435 | |
325 | sg_copy = buflen; | 436 | if (to_buffer) |
326 | buflen -= sg_copy; | 437 | memcpy(buf + offset, miter.addr, len); |
327 | 438 | else { | |
328 | while (sg_copy > 0) { | 439 | memcpy(miter.addr, buf + offset, len); |
329 | unsigned int page_copy; | 440 | flush_kernel_dcache_page(miter.page); |
330 | void *p; | ||
331 | |||
332 | page_copy = PAGE_SIZE - sg_off; | ||
333 | if (page_copy > sg_copy) | ||
334 | page_copy = sg_copy; | ||
335 | |||
336 | page = nth_page(sg_page(sg), n); | ||
337 | p = kmap_atomic(page, KM_BIO_SRC_IRQ); | ||
338 | |||
339 | if (to_buffer) | ||
340 | memcpy(buf + buf_off, p + sg_off, page_copy); | ||
341 | else { | ||
342 | memcpy(p + sg_off, buf + buf_off, page_copy); | ||
343 | flush_kernel_dcache_page(page); | ||
344 | } | ||
345 | |||
346 | kunmap_atomic(p, KM_BIO_SRC_IRQ); | ||
347 | |||
348 | buf_off += page_copy; | ||
349 | sg_off += page_copy; | ||
350 | if (sg_off == PAGE_SIZE) { | ||
351 | sg_off = 0; | ||
352 | n++; | ||
353 | } | ||
354 | sg_copy -= page_copy; | ||
355 | } | 441 | } |
356 | 442 | ||
357 | if (!buflen) | 443 | offset += len; |
358 | break; | ||
359 | } | 444 | } |
360 | 445 | ||
361 | return buf_off; | 446 | sg_miter_stop(&miter); |
447 | |||
448 | local_irq_restore(flags); | ||
449 | return offset; | ||
362 | } | 450 | } |
363 | 451 | ||
364 | /** | 452 | /** |
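
Editorial note: the rewritten sg_copy_buffer() above doubles as a reference user of the new iterator. As a second illustration, a hedged sketch that sums the bytes of an sg list; sg_checksum() is a hypothetical helper, and the local_irq_save() bracket follows the SG_MITER_ATOMIC context rule documented at sg_miter_next():

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>

	static unsigned long sg_checksum(struct scatterlist *sgl,
					 unsigned int nents)
	{
		struct sg_mapping_iter miter;
		unsigned long sum = 0, flags;
		unsigned int i;

		sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
		local_irq_save(flags);	/* IRQs stay off until sg_miter_stop() */
		while (sg_miter_next(&miter)) {
			u8 *p = miter.addr;

			for (i = 0; i < miter.length; i++)
				sum += p[i];
		}
		sg_miter_stop(&miter);
		local_irq_restore(flags);
		return sum;
	}
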
diff --git a/lib/show_mem.c b/lib/show_mem.c new file mode 100644 index 000000000000..238e72a18ce1 --- /dev/null +++ b/lib/show_mem.c | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * Generic show_mem() implementation | ||
3 | * | ||
4 | * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de> | ||
5 | * All code subject to the GPL version 2. | ||
6 | */ | ||
7 | |||
8 | #include <linux/mm.h> | ||
9 | #include <linux/nmi.h> | ||
10 | #include <linux/quicklist.h> | ||
11 | |||
12 | void show_mem(void) | ||
13 | { | ||
14 | pg_data_t *pgdat; | ||
15 | unsigned long total = 0, reserved = 0, shared = 0, | ||
16 | nonshared = 0, highmem = 0; | ||
17 | |||
18 | printk(KERN_INFO "Mem-Info:\n"); | ||
19 | show_free_areas(); | ||
20 | |||
21 | for_each_online_pgdat(pgdat) { | ||
22 | unsigned long i, flags; | ||
23 | |||
24 | pgdat_resize_lock(pgdat, &flags); | ||
25 | for (i = 0; i < pgdat->node_spanned_pages; i++) { | ||
26 | struct page *page; | ||
27 | unsigned long pfn = pgdat->node_start_pfn + i; | ||
28 | |||
29 | if (unlikely(!(i % MAX_ORDER_NR_PAGES))) | ||
30 | touch_nmi_watchdog(); | ||
31 | |||
32 | if (!pfn_valid(pfn)) | ||
33 | continue; | ||
34 | |||
35 | page = pfn_to_page(pfn); | ||
36 | |||
37 | if (PageHighMem(page)) | ||
38 | highmem++; | ||
39 | |||
40 | if (PageReserved(page)) | ||
41 | reserved++; | ||
42 | else if (page_count(page) == 1) | ||
43 | nonshared++; | ||
44 | else if (page_count(page) > 1) | ||
45 | shared += page_count(page) - 1; | ||
46 | |||
47 | total++; | ||
48 | } | ||
49 | pgdat_resize_unlock(pgdat, &flags); | ||
50 | } | ||
51 | |||
52 | printk(KERN_INFO "%lu pages RAM\n", total); | ||
53 | #ifdef CONFIG_HIGHMEM | ||
54 | printk(KERN_INFO "%lu pages HighMem\n", highmem); | ||
55 | #endif | ||
56 | printk(KERN_INFO "%lu pages reserved\n", reserved); | ||
57 | printk(KERN_INFO "%lu pages shared\n", shared); | ||
58 | printk(KERN_INFO "%lu pages non-shared\n", nonshared); | ||
59 | #ifdef CONFIG_QUICKLIST | ||
60 | printk(KERN_INFO "%lu pages in pagetable cache\n", | ||
61 | quicklist_total_size()); | ||
62 | #endif | ||
63 | } | ||
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 3b4dc098181e..0f8fc22ed103 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c | |||
@@ -11,7 +11,6 @@ notrace unsigned int debug_smp_processor_id(void) | |||
11 | { | 11 | { |
12 | unsigned long preempt_count = preempt_count(); | 12 | unsigned long preempt_count = preempt_count(); |
13 | int this_cpu = raw_smp_processor_id(); | 13 | int this_cpu = raw_smp_processor_id(); |
14 | cpumask_t this_mask; | ||
15 | 14 | ||
16 | if (likely(preempt_count)) | 15 | if (likely(preempt_count)) |
17 | goto out; | 16 | goto out; |
@@ -23,9 +22,7 @@ notrace unsigned int debug_smp_processor_id(void) | |||
23 | * Kernel threads bound to a single CPU can safely use | 22 | * Kernel threads bound to a single CPU can safely use |
24 | * smp_processor_id(): | 23 | * smp_processor_id(): |
25 | */ | 24 | */ |
26 | this_mask = cpumask_of_cpu(this_cpu); | 25 | if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu))) |
27 | |||
28 | if (cpus_equal(current->cpus_allowed, this_mask)) | ||
29 | goto out; | 26 | goto out; |
30 | 27 | ||
31 | /* | 28 | /* |
diff --git a/lib/string_helpers.c b/lib/string_helpers.c new file mode 100644 index 000000000000..ab431d4cc970 --- /dev/null +++ b/lib/string_helpers.c | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Helpers for formatting and printing strings | ||
3 | * | ||
4 | * Copyright 31 August 2008 James Bottomley | ||
5 | */ | ||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/math64.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/string_helpers.h> | ||
10 | |||
11 | /** | ||
12 | * string_get_size - get the size in the specified units | ||
13 | * @size: The size to be converted | ||
14 | * @units: units to use (powers of 1000 or 1024) | ||
15 | * @buf: buffer to format to | ||
16 | * @len: length of buffer | ||
17 | * | ||
18 | * This function returns a string formatted to 3 significant figures | ||
19 | * giving the size in the required units. Returns 0 on success or | ||
20 | * error on failure. @buf is always zero terminated. | ||
21 | * | ||
22 | */ | ||
23 | int string_get_size(u64 size, const enum string_size_units units, | ||
24 | char *buf, int len) | ||
25 | { | ||
26 | const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB", | ||
27 | "EB", "ZB", "YB", NULL}; | ||
28 | const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB", | ||
29 | "EiB", "ZiB", "YiB", NULL }; | ||
30 | const char **units_str[] = { | ||
31 | [STRING_UNITS_10] = units_10, | ||
32 | [STRING_UNITS_2] = units_2, | ||
33 | }; | ||
34 | const unsigned int divisor[] = { | ||
35 | [STRING_UNITS_10] = 1000, | ||
36 | [STRING_UNITS_2] = 1024, | ||
37 | }; | ||
38 | int i, j; | ||
39 | u64 remainder = 0, sf_cap; | ||
40 | char tmp[8]; | ||
41 | |||
42 | tmp[0] = '\0'; | ||
43 | i = 0; | ||
44 | if (size >= divisor[units]) { | ||
45 | while (size >= divisor[units] && units_str[units][i]) { | ||
46 | remainder = do_div(size, divisor[units]); | ||
47 | i++; | ||
48 | } | ||
49 | |||
50 | sf_cap = size; | ||
51 | for (j = 0; sf_cap*10 < 1000; j++) | ||
52 | sf_cap *= 10; | ||
53 | |||
54 | if (j) { | ||
55 | remainder *= 1000; | ||
56 | do_div(remainder, divisor[units]); | ||
57 | snprintf(tmp, sizeof(tmp), ".%03lld", | ||
58 | (unsigned long long)remainder); | ||
59 | tmp[j+1] = '\0'; | ||
60 | } | ||
61 | } | ||
62 | |||
63 | snprintf(buf, len, "%lld%s %s", (unsigned long long)size, | ||
64 | tmp, units_str[units][i]); | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | EXPORT_SYMBOL(string_get_size); | ||
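
Editorial note: a short usage sketch; report_capacity() is a hypothetical caller, and the outputs in the comments follow from the three-significant-figure logic above:

	#include <linux/kernel.h>
	#include <linux/string_helpers.h>

	static void report_capacity(u64 bytes)
	{
		char buf[16];

		/* 2048 -> "2.00 KiB": binary units, 3 significant figures */
		string_get_size(bytes, STRING_UNITS_2, buf, sizeof(buf));
		printk(KERN_INFO "capacity: %s\n", buf);

		/* 2048 -> "2.04 kB": the fraction is truncated, not rounded */
		string_get_size(bytes, STRING_UNITS_10, buf, sizeof(buf));
		printk(KERN_INFO "capacity: %s\n", buf);
	}
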
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index d568894df8cc..f8eebd489149 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -274,13 +274,14 @@ cleanup1: | |||
274 | } | 274 | } |
275 | 275 | ||
276 | static int | 276 | static int |
277 | address_needs_mapping(struct device *hwdev, dma_addr_t addr) | 277 | address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size) |
278 | { | 278 | { |
279 | dma_addr_t mask = 0xffffffff; | 279 | return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); |
280 | /* If the device has a mask, use it, otherwise default to 32 bits */ | 280 | } |
281 | if (hwdev && hwdev->dma_mask) | 281 | |
282 | mask = *hwdev->dma_mask; | 282 | static int is_swiotlb_buffer(char *addr) |
283 | return (addr & ~mask) != 0; | 283 | { |
284 | return addr >= io_tlb_start && addr < io_tlb_end; | ||
284 | } | 285 | } |
285 | 286 | ||
286 | /* | 287 | /* |
@@ -467,15 +468,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
467 | void *ret; | 468 | void *ret; |
468 | int order = get_order(size); | 469 | int order = get_order(size); |
469 | 470 | ||
470 | /* | ||
471 | * XXX fix me: the DMA API should pass us an explicit DMA mask | ||
472 | * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32 | ||
473 | * bit range instead of a 16MB one). | ||
474 | */ | ||
475 | flags |= GFP_DMA; | ||
476 | |||
477 | ret = (void *)__get_free_pages(flags, order); | 471 | ret = (void *)__get_free_pages(flags, order); |
478 | if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) { | 472 | if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) { |
479 | /* | 473 | /* |
480 | * The allocated memory isn't reachable by the device. | 474 | * The allocated memory isn't reachable by the device. |
481 | * Fall back on swiotlb_map_single(). | 475 | * Fall back on swiotlb_map_single(). |
@@ -490,19 +484,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
490 | * swiotlb_map_single(), which will grab memory from | 484 | * swiotlb_map_single(), which will grab memory from |
491 | * the lowest available address range. | 485 | * the lowest available address range. |
492 | */ | 486 | */ |
493 | dma_addr_t handle; | 487 | ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE); |
494 | handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE); | 488 | if (!ret) |
495 | if (swiotlb_dma_mapping_error(handle)) | ||
496 | return NULL; | 489 | return NULL; |
497 | |||
498 | ret = bus_to_virt(handle); | ||
499 | } | 490 | } |
500 | 491 | ||
501 | memset(ret, 0, size); | 492 | memset(ret, 0, size); |
502 | dev_addr = virt_to_bus(ret); | 493 | dev_addr = virt_to_bus(ret); |
503 | 494 | ||
504 | /* Confirm address can be DMA'd by device */ | 495 | /* Confirm address can be DMA'd by device */ |
505 | if (address_needs_mapping(hwdev, dev_addr)) { | 496 | if (address_needs_mapping(hwdev, dev_addr, size)) { |
506 | printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", | 497 | printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", |
507 | (unsigned long long)*hwdev->dma_mask, | 498 | (unsigned long long)*hwdev->dma_mask, |
508 | (unsigned long long)dev_addr); | 499 | (unsigned long long)dev_addr); |
@@ -518,12 +509,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, | |||
518 | dma_addr_t dma_handle) | 509 | dma_addr_t dma_handle) |
519 | { | 510 | { |
520 | WARN_ON(irqs_disabled()); | 511 | WARN_ON(irqs_disabled()); |
521 | if (!(vaddr >= (void *)io_tlb_start | 512 | if (!is_swiotlb_buffer(vaddr)) |
522 | && vaddr < (void *)io_tlb_end)) | ||
523 | free_pages((unsigned long) vaddr, get_order(size)); | 513 | free_pages((unsigned long) vaddr, get_order(size)); |
524 | else | 514 | else |
525 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ | 515 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ |
526 | swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE); | 516 | unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); |
527 | } | 517 | } |
528 | 518 | ||
529 | static void | 519 | static void |
@@ -567,7 +557,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, | |||
567 | * we can safely return the device addr and not worry about bounce | 557 | * we can safely return the device addr and not worry about bounce |
568 | * buffering it. | 558 | * buffering it. |
569 | */ | 559 | */ |
570 | if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force) | 560 | if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force) |
571 | return dev_addr; | 561 | return dev_addr; |
572 | 562 | ||
573 | /* | 563 | /* |
@@ -584,7 +574,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, | |||
584 | /* | 574 | /* |
585 | * Ensure that the address returned is DMA'ble | 575 | * Ensure that the address returned is DMA'ble |
586 | */ | 576 | */ |
587 | if (address_needs_mapping(hwdev, dev_addr)) | 577 | if (address_needs_mapping(hwdev, dev_addr, size)) |
588 | panic("map_single: bounce buffer is not DMA'ble"); | 578 | panic("map_single: bounce buffer is not DMA'ble"); |
589 | 579 | ||
590 | return dev_addr; | 580 | return dev_addr; |
@@ -612,7 +602,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, | |||
612 | char *dma_addr = bus_to_virt(dev_addr); | 602 | char *dma_addr = bus_to_virt(dev_addr); |
613 | 603 | ||
614 | BUG_ON(dir == DMA_NONE); | 604 | BUG_ON(dir == DMA_NONE); |
615 | if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) | 605 | if (is_swiotlb_buffer(dma_addr)) |
616 | unmap_single(hwdev, dma_addr, size, dir); | 606 | unmap_single(hwdev, dma_addr, size, dir); |
617 | else if (dir == DMA_FROM_DEVICE) | 607 | else if (dir == DMA_FROM_DEVICE) |
618 | dma_mark_clean(dma_addr, size); | 608 | dma_mark_clean(dma_addr, size); |
@@ -642,7 +632,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, | |||
642 | char *dma_addr = bus_to_virt(dev_addr); | 632 | char *dma_addr = bus_to_virt(dev_addr); |
643 | 633 | ||
644 | BUG_ON(dir == DMA_NONE); | 634 | BUG_ON(dir == DMA_NONE); |
645 | if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) | 635 | if (is_swiotlb_buffer(dma_addr)) |
646 | sync_single(hwdev, dma_addr, size, dir, target); | 636 | sync_single(hwdev, dma_addr, size, dir, target); |
647 | else if (dir == DMA_FROM_DEVICE) | 637 | else if (dir == DMA_FROM_DEVICE) |
648 | dma_mark_clean(dma_addr, size); | 638 | dma_mark_clean(dma_addr, size); |
@@ -673,7 +663,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, | |||
673 | char *dma_addr = bus_to_virt(dev_addr) + offset; | 663 | char *dma_addr = bus_to_virt(dev_addr) + offset; |
674 | 664 | ||
675 | BUG_ON(dir == DMA_NONE); | 665 | BUG_ON(dir == DMA_NONE); |
676 | if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) | 666 | if (is_swiotlb_buffer(dma_addr)) |
677 | sync_single(hwdev, dma_addr, size, dir, target); | 667 | sync_single(hwdev, dma_addr, size, dir, target); |
678 | else if (dir == DMA_FROM_DEVICE) | 668 | else if (dir == DMA_FROM_DEVICE) |
679 | dma_mark_clean(dma_addr, size); | 669 | dma_mark_clean(dma_addr, size); |
@@ -727,7 +717,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, | |||
727 | for_each_sg(sgl, sg, nelems, i) { | 717 | for_each_sg(sgl, sg, nelems, i) { |
728 | addr = SG_ENT_VIRT_ADDRESS(sg); | 718 | addr = SG_ENT_VIRT_ADDRESS(sg); |
729 | dev_addr = virt_to_bus(addr); | 719 | dev_addr = virt_to_bus(addr); |
730 | if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) { | 720 | if (swiotlb_force || |
721 | address_needs_mapping(hwdev, dev_addr, sg->length)) { | ||
731 | void *map = map_single(hwdev, addr, sg->length, dir); | 722 | void *map = map_single(hwdev, addr, sg->length, dir); |
732 | if (!map) { | 723 | if (!map) { |
733 | /* Don't panic here, we expect map_sg users | 724 | /* Don't panic here, we expect map_sg users |
@@ -824,7 +815,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, | |||
824 | } | 815 | } |
825 | 816 | ||
826 | int | 817 | int |
827 | swiotlb_dma_mapping_error(dma_addr_t dma_addr) | 818 | swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) |
828 | { | 819 | { |
829 | return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); | 820 | return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); |
830 | } | 821 | } |
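
Editorial note: the rewritten check is range-based rather than address-only, so a buffer that starts below the device's DMA mask but ends above it now correctly gets bounced. For reference, a sketch of the is_buffer_dma_capable() helper this relies on, paraphrased from include/linux/dma-mapping.h of this era; treat the exact form as an approximation:

	static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr,
						size_t size)
	{
		/* DMA-capable only if the whole [addr, addr + size) range
		 * lies within the device's addressable window. */
		return addr + size <= mask;
	}
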
diff --git a/lib/syscall.c b/lib/syscall.c new file mode 100644 index 000000000000..a4f7067f72fa --- /dev/null +++ b/lib/syscall.c | |||
@@ -0,0 +1,75 @@ | |||
1 | #include <linux/ptrace.h> | ||
2 | #include <linux/sched.h> | ||
3 | #include <linux/module.h> | ||
4 | #include <asm/syscall.h> | ||
5 | |||
6 | static int collect_syscall(struct task_struct *target, long *callno, | ||
7 | unsigned long args[6], unsigned int maxargs, | ||
8 | unsigned long *sp, unsigned long *pc) | ||
9 | { | ||
10 | struct pt_regs *regs = task_pt_regs(target); | ||
11 | if (unlikely(!regs)) | ||
12 | return -EAGAIN; | ||
13 | |||
14 | *sp = user_stack_pointer(regs); | ||
15 | *pc = instruction_pointer(regs); | ||
16 | |||
17 | *callno = syscall_get_nr(target, regs); | ||
18 | if (*callno != -1L && maxargs > 0) | ||
19 | syscall_get_arguments(target, regs, 0, maxargs, args); | ||
20 | |||
21 | return 0; | ||
22 | } | ||
23 | |||
24 | /** | ||
25 | * task_current_syscall - Discover what a blocked task is doing. | ||
26 | * @target: thread to examine | ||
27 | * @callno: filled with system call number or -1 | ||
28 | * @args: filled with @maxargs system call arguments | ||
29 | * @maxargs: number of elements in @args to fill | ||
30 | * @sp: filled with user stack pointer | ||
31 | * @pc: filled with user PC | ||
32 | * | ||
33 | * If @target is blocked in a system call, returns zero with *@callno | ||
34 | * set to the call's number and @args filled in with its arguments. | ||
35 | * Registers not used for system call arguments may not be available and | ||
36 | * it is not kosher to use &struct user_regset calls while the system | ||
37 | * call is still in progress. Note we may get this result if @target | ||
38 | * has finished its system call but not yet returned to user mode, such | ||
39 | * as when it's stopped for signal handling or syscall exit tracing. | ||
40 | * | ||
41 | * If @target is blocked in the kernel during a fault or exception, | ||
42 | * returns zero with *@callno set to -1 and does not fill in @args. | ||
43 | * If so, it's now safe to examine @target using &struct user_regset | ||
44 | * get() calls as long as we're sure @target won't return to user mode. | ||
45 | * | ||
46 | * Returns -%EAGAIN if @target does not remain blocked. | ||
47 | * | ||
48 | * Returns -%EINVAL if @maxargs is too large (maximum is six). | ||
49 | */ | ||
50 | int task_current_syscall(struct task_struct *target, long *callno, | ||
51 | unsigned long args[6], unsigned int maxargs, | ||
52 | unsigned long *sp, unsigned long *pc) | ||
53 | { | ||
54 | long state; | ||
55 | unsigned long ncsw; | ||
56 | |||
57 | if (unlikely(maxargs > 6)) | ||
58 | return -EINVAL; | ||
59 | |||
60 | if (target == current) | ||
61 | return collect_syscall(target, callno, args, maxargs, sp, pc); | ||
62 | |||
63 | state = target->state; | ||
64 | if (unlikely(!state)) | ||
65 | return -EAGAIN; | ||
66 | |||
67 | ncsw = wait_task_inactive(target, state); | ||
68 | if (unlikely(!ncsw) || | ||
69 | unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) || | ||
70 | unlikely(wait_task_inactive(target, state) != ncsw)) | ||
71 | return -EAGAIN; | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | EXPORT_SYMBOL_GPL(task_current_syscall); | ||
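
Editorial note: a hedged usage sketch, e.g. for a /proc-style reporter; report_syscall() is a hypothetical caller and assumes the declaration is visible (lib/syscall.c itself pulls in <linux/ptrace.h> and <asm/syscall.h>):

	#include <linux/kernel.h>
	#include <linux/sched.h>

	static void report_syscall(struct task_struct *task)
	{
		long nr;
		unsigned long args[6], sp, pc;

		if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
			return;	/* -EAGAIN: target did not stay blocked */

		if (nr == -1L)	/* blocked in a fault/exception, not a syscall */
			printk(KERN_INFO "%s: pc=%lx sp=%lx\n",
			       task->comm, pc, sp);
		else
			printk(KERN_INFO "%s: syscall %ld, arg0=%lx\n",
			       task->comm, nr, args[0]);
	}
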
diff --git a/lib/textsearch.c b/lib/textsearch.c index a3e500ad51d7..9fbcb44c554f 100644 --- a/lib/textsearch.c +++ b/lib/textsearch.c | |||
@@ -54,10 +54,13 @@ | |||
54 | * USAGE | 54 | * USAGE |
55 | * | 55 | * |
56 | * Before a search can be performed, a configuration must be created | 56 | * Before a search can be performed, a configuration must be created |
57 | * by calling textsearch_prepare() specyfing the searching algorithm and | 57 | * by calling textsearch_prepare() specifying the searching algorithm, |
58 | * the pattern to look for. The returned configuration may then be used | 58 | * the pattern to look for and flags. Setting the TS_IGNORECASE flag |
59 | * for an arbitary amount of times and even in parallel as long as a | 59 | * requests case insensitive matching, but it may slow the algorithm |
60 | * separate struct ts_state variable is provided to every instance. | 60 | * down, so use it at your own risk. |
61 | * The returned configuration may then be used an arbitrary | ||
62 | * number of times and even in parallel as long as a separate struct | ||
63 | * ts_state variable is provided to every instance. | ||
61 | * | 64 | * |
62 | * The actual search is performed by either calling textsearch_find_- | 65 | * The actual search is performed by either calling textsearch_find_- |
63 | * continuous() for linear data or by providing your own get_next_block() | 66 | * continuous() for linear data or by providing your own get_next_block() |
@@ -89,7 +92,6 @@ | |||
89 | * panic("Oh my god, dancing chickens at %d\n", pos); | 92 | * panic("Oh my god, dancing chickens at %d\n", pos); |
90 | * | 93 | * |
91 | * textsearch_destroy(conf); | 94 | * textsearch_destroy(conf); |
92 | * | ||
93 | * ========================================================================== | 95 | * ========================================================================== |
94 | */ | 96 | */ |
95 | 97 | ||
@@ -265,7 +267,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern, | |||
265 | return ERR_PTR(-EINVAL); | 267 | return ERR_PTR(-EINVAL); |
266 | 268 | ||
267 | ops = lookup_ts_algo(algo); | 269 | ops = lookup_ts_algo(algo); |
268 | #ifdef CONFIG_KMOD | 270 | #ifdef CONFIG_MODULES |
269 | /* | 271 | /* |
270 | * Why not always autoload you may ask. Some users are | 272 | * Why not always autoload you may ask. Some users are |
271 | * in a situation where requesting a module may deadlock, | 273 | * in a situation where requesting a module may deadlock, |
@@ -280,7 +282,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern, | |||
280 | if (ops == NULL) | 282 | if (ops == NULL) |
281 | goto errout; | 283 | goto errout; |
282 | 284 | ||
283 | conf = ops->init(pattern, len, gfp_mask); | 285 | conf = ops->init(pattern, len, gfp_mask, flags); |
284 | if (IS_ERR(conf)) { | 286 | if (IS_ERR(conf)) { |
285 | err = PTR_ERR(conf); | 287 | err = PTR_ERR(conf); |
286 | goto errout; | 288 | goto errout; |
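
Editorial note: putting the new flags argument together, a minimal sketch of a case-insensitive search over linear data, following the USAGE section of this file; find_needle() and the "bm" algorithm choice are illustrative:

	#include <linux/err.h>
	#include <linux/textsearch.h>

	static int find_needle(const void *data, unsigned int len)
	{
		struct ts_config *conf;
		struct ts_state state;
		unsigned int pos;

		conf = textsearch_prepare("bm", "needle", 6, GFP_KERNEL,
					  TS_AUTOLOAD | TS_IGNORECASE);
		if (IS_ERR(conf))
			return PTR_ERR(conf);

		/* matches "needle", "NEEDLE", "NeEdLe", ... */
		pos = textsearch_find_continuous(conf, &state, data, len);
		textsearch_destroy(conf);

		return pos != UINT_MAX ? (int)pos : -1;
	}
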
diff --git a/lib/ts_bm.c b/lib/ts_bm.c index 4a7fce72898e..9e66ee4020e9 100644 --- a/lib/ts_bm.c +++ b/lib/ts_bm.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <linux/types.h> | 40 | #include <linux/types.h> |
41 | #include <linux/string.h> | 41 | #include <linux/string.h> |
42 | #include <linux/ctype.h> | ||
42 | #include <linux/textsearch.h> | 43 | #include <linux/textsearch.h> |
43 | 44 | ||
44 | /* Alphabet size, use ASCII */ | 45 | /* Alphabet size, use ASCII */ |
@@ -64,6 +65,7 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state) | |||
64 | unsigned int i, text_len, consumed = state->offset; | 65 | unsigned int i, text_len, consumed = state->offset; |
65 | const u8 *text; | 66 | const u8 *text; |
66 | int shift = bm->patlen - 1, bs; | 67 | int shift = bm->patlen - 1, bs; |
68 | const u8 icase = conf->flags & TS_IGNORECASE; | ||
67 | 69 | ||
68 | for (;;) { | 70 | for (;;) { |
69 | text_len = conf->get_next_block(consumed, &text, conf, state); | 71 | text_len = conf->get_next_block(consumed, &text, conf, state); |
@@ -75,7 +77,9 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state) | |||
75 | DEBUGP("Searching in position %d (%c)\n", | 77 | DEBUGP("Searching in position %d (%c)\n", |
76 | shift, text[shift]); | 78 | shift, text[shift]); |
77 | for (i = 0; i < bm->patlen; i++) | 79 | for (i = 0; i < bm->patlen; i++) |
78 | if (text[shift-i] != bm->pattern[bm->patlen-1-i]) | 80 | if ((icase ? toupper(text[shift-i]) |
81 | : text[shift-i]) | ||
82 | != bm->pattern[bm->patlen-1-i]) | ||
79 | goto next; | 83 | goto next; |
80 | 84 | ||
81 | /* London calling... */ | 85 | /* London calling... */ |
@@ -111,14 +115,18 @@ static int subpattern(u8 *pattern, int i, int j, int g) | |||
111 | return ret; | 115 | return ret; |
112 | } | 116 | } |
113 | 117 | ||
114 | static void compute_prefix_tbl(struct ts_bm *bm) | 118 | static void compute_prefix_tbl(struct ts_bm *bm, int flags) |
115 | { | 119 | { |
116 | int i, j, g; | 120 | int i, j, g; |
117 | 121 | ||
118 | for (i = 0; i < ASIZE; i++) | 122 | for (i = 0; i < ASIZE; i++) |
119 | bm->bad_shift[i] = bm->patlen; | 123 | bm->bad_shift[i] = bm->patlen; |
120 | for (i = 0; i < bm->patlen - 1; i++) | 124 | for (i = 0; i < bm->patlen - 1; i++) { |
121 | bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i; | 125 | bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i; |
126 | if (flags & TS_IGNORECASE) | ||
127 | bm->bad_shift[tolower(bm->pattern[i])] | ||
128 | = bm->patlen - 1 - i; | ||
129 | } | ||
122 | 130 | ||
123 | /* Compute the good shift array, used to match reoccurrences | 131 |
124 | * of a subpattern */ | 132 | * of a subpattern */ |
@@ -135,10 +143,11 @@ static void compute_prefix_tbl(struct ts_bm *bm) | |||
135 | } | 143 | } |
136 | 144 | ||
137 | static struct ts_config *bm_init(const void *pattern, unsigned int len, | 145 | static struct ts_config *bm_init(const void *pattern, unsigned int len, |
138 | gfp_t gfp_mask) | 146 | gfp_t gfp_mask, int flags) |
139 | { | 147 | { |
140 | struct ts_config *conf; | 148 | struct ts_config *conf; |
141 | struct ts_bm *bm; | 149 | struct ts_bm *bm; |
150 | int i; | ||
142 | unsigned int prefix_tbl_len = len * sizeof(unsigned int); | 151 | unsigned int prefix_tbl_len = len * sizeof(unsigned int); |
143 | size_t priv_size = sizeof(*bm) + len + prefix_tbl_len; | 152 | size_t priv_size = sizeof(*bm) + len + prefix_tbl_len; |
144 | 153 | ||
@@ -146,11 +155,16 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len, | |||
146 | if (IS_ERR(conf)) | 155 | if (IS_ERR(conf)) |
147 | return conf; | 156 | return conf; |
148 | 157 | ||
158 | conf->flags = flags; | ||
149 | bm = ts_config_priv(conf); | 159 | bm = ts_config_priv(conf); |
150 | bm->patlen = len; | 160 | bm->patlen = len; |
151 | bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len; | 161 | bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len; |
152 | memcpy(bm->pattern, pattern, len); | 162 | if (flags & TS_IGNORECASE) |
153 | compute_prefix_tbl(bm); | 163 | for (i = 0; i < len; i++) |
164 | bm->pattern[i] = toupper(((u8 *)pattern)[i]); | ||
165 | else | ||
166 | memcpy(bm->pattern, pattern, len); | ||
167 | compute_prefix_tbl(bm, flags); | ||
154 | 168 | ||
155 | return conf; | 169 | return conf; |
156 | } | 170 | } |
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c index af575b61526b..5696a35184e4 100644 --- a/lib/ts_fsm.c +++ b/lib/ts_fsm.c | |||
@@ -257,7 +257,7 @@ found_match: | |||
257 | } | 257 | } |
258 | 258 | ||
259 | static struct ts_config *fsm_init(const void *pattern, unsigned int len, | 259 | static struct ts_config *fsm_init(const void *pattern, unsigned int len, |
260 | gfp_t gfp_mask) | 260 | gfp_t gfp_mask, int flags) |
261 | { | 261 | { |
262 | int i, err = -EINVAL; | 262 | int i, err = -EINVAL; |
263 | struct ts_config *conf; | 263 | struct ts_config *conf; |
@@ -269,6 +269,9 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len, | |||
269 | if (len % sizeof(struct ts_fsm_token) || ntokens < 1) | 269 | if (len % sizeof(struct ts_fsm_token) || ntokens < 1) |
270 | goto errout; | 270 | goto errout; |
271 | 271 | ||
272 | if (flags & TS_IGNORECASE) | ||
273 | goto errout; | ||
274 | |||
272 | for (i = 0; i < ntokens; i++) { | 275 | for (i = 0; i < ntokens; i++) { |
273 | struct ts_fsm_token *t = &tokens[i]; | 276 | struct ts_fsm_token *t = &tokens[i]; |
274 | 277 | ||
@@ -284,6 +287,7 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len, | |||
284 | if (IS_ERR(conf)) | 287 | if (IS_ERR(conf)) |
285 | return conf; | 288 | return conf; |
286 | 289 | ||
290 | conf->flags = flags; | ||
287 | fsm = ts_config_priv(conf); | 291 | fsm = ts_config_priv(conf); |
288 | fsm->ntokens = ntokens; | 292 | fsm->ntokens = ntokens; |
289 | memcpy(fsm->tokens, pattern, len); | 293 | memcpy(fsm->tokens, pattern, len); |
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c index 3ced628cab4b..632f783e65f1 100644 --- a/lib/ts_kmp.c +++ b/lib/ts_kmp.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | #include <linux/types.h> | 34 | #include <linux/types.h> |
35 | #include <linux/string.h> | 35 | #include <linux/string.h> |
36 | #include <linux/ctype.h> | ||
36 | #include <linux/textsearch.h> | 37 | #include <linux/textsearch.h> |
37 | 38 | ||
38 | struct ts_kmp | 39 | struct ts_kmp |
@@ -47,6 +48,7 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state) | |||
47 | struct ts_kmp *kmp = ts_config_priv(conf); | 48 | struct ts_kmp *kmp = ts_config_priv(conf); |
48 | unsigned int i, q = 0, text_len, consumed = state->offset; | 49 | unsigned int i, q = 0, text_len, consumed = state->offset; |
49 | const u8 *text; | 50 | const u8 *text; |
51 | const int icase = conf->flags & TS_IGNORECASE; | ||
50 | 52 | ||
51 | for (;;) { | 53 | for (;;) { |
52 | text_len = conf->get_next_block(consumed, &text, conf, state); | 54 | text_len = conf->get_next_block(consumed, &text, conf, state); |
@@ -55,9 +57,11 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state) | |||
55 | break; | 57 | break; |
56 | 58 | ||
57 | for (i = 0; i < text_len; i++) { | 59 | for (i = 0; i < text_len; i++) { |
58 | while (q > 0 && kmp->pattern[q] != text[i]) | 60 | while (q > 0 && kmp->pattern[q] |
61 | != (icase ? toupper(text[i]) : text[i])) | ||
59 | q = kmp->prefix_tbl[q - 1]; | 62 | q = kmp->prefix_tbl[q - 1]; |
60 | if (kmp->pattern[q] == text[i]) | 63 | if (kmp->pattern[q] |
64 | == (icase ? toupper(text[i]) : text[i])) | ||
61 | q++; | 65 | q++; |
62 | if (unlikely(q == kmp->pattern_len)) { | 66 | if (unlikely(q == kmp->pattern_len)) { |
63 | state->offset = consumed + i + 1; | 67 | state->offset = consumed + i + 1; |
@@ -72,24 +76,28 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state) | |||
72 | } | 76 | } |
73 | 77 | ||
74 | static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len, | 78 | static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len, |
75 | unsigned int *prefix_tbl) | 79 | unsigned int *prefix_tbl, int flags) |
76 | { | 80 | { |
77 | unsigned int k, q; | 81 | unsigned int k, q; |
82 | const u8 icase = flags & TS_IGNORECASE; | ||
78 | 83 | ||
79 | for (k = 0, q = 1; q < len; q++) { | 84 | for (k = 0, q = 1; q < len; q++) { |
80 | while (k > 0 && pattern[k] != pattern[q]) | 85 | while (k > 0 && (icase ? toupper(pattern[k]) : pattern[k]) |
86 | != (icase ? toupper(pattern[q]) : pattern[q])) | ||
81 | k = prefix_tbl[k-1]; | 87 | k = prefix_tbl[k-1]; |
82 | if (pattern[k] == pattern[q]) | 88 | if ((icase ? toupper(pattern[k]) : pattern[k]) |
89 | == (icase ? toupper(pattern[q]) : pattern[q])) | ||
83 | k++; | 90 | k++; |
84 | prefix_tbl[q] = k; | 91 | prefix_tbl[q] = k; |
85 | } | 92 | } |
86 | } | 93 | } |
87 | 94 | ||
88 | static struct ts_config *kmp_init(const void *pattern, unsigned int len, | 95 | static struct ts_config *kmp_init(const void *pattern, unsigned int len, |
89 | gfp_t gfp_mask) | 96 | gfp_t gfp_mask, int flags) |
90 | { | 97 | { |
91 | struct ts_config *conf; | 98 | struct ts_config *conf; |
92 | struct ts_kmp *kmp; | 99 | struct ts_kmp *kmp; |
100 | int i; | ||
93 | unsigned int prefix_tbl_len = len * sizeof(unsigned int); | 101 | unsigned int prefix_tbl_len = len * sizeof(unsigned int); |
94 | size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len; | 102 | size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len; |
95 | 103 | ||
@@ -97,11 +105,16 @@ static struct ts_config *kmp_init(const void *pattern, unsigned int len, | |||
97 | if (IS_ERR(conf)) | 105 | if (IS_ERR(conf)) |
98 | return conf; | 106 | return conf; |
99 | 107 | ||
108 | conf->flags = flags; | ||
100 | kmp = ts_config_priv(conf); | 109 | kmp = ts_config_priv(conf); |
101 | kmp->pattern_len = len; | 110 | kmp->pattern_len = len; |
102 | compute_prefix_tbl(pattern, len, kmp->prefix_tbl); | 111 | compute_prefix_tbl(pattern, len, kmp->prefix_tbl, flags); |
103 | kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len; | 112 | kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len; |
104 | memcpy(kmp->pattern, pattern, len); | 113 | if (flags & TS_IGNORECASE) |
114 | for (i = 0; i < len; i++) | ||
115 | kmp->pattern[i] = toupper(((u8 *)pattern)[i]); | ||
116 | else | ||
117 | memcpy(kmp->pattern, pattern, len); | ||
105 | 118 | ||
106 | return conf; | 119 | return conf; |
107 | } | 120 | } |
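
Editorial note: a standalone, userspace re-rendering of compute_prefix_tbl() above to show what the case folding changes; prefix_tbl[q] is the length of the longest proper prefix of the pattern that is also a suffix of pattern[0..q]:

	#include <ctype.h>
	#include <stdio.h>

	static void prefix_tbl(const unsigned char *pat, unsigned int len,
			       unsigned int *tbl, int icase)
	{
		unsigned int k, q;

		for (k = 0, q = 1; q < len; q++) {
			while (k > 0 && (icase ? toupper(pat[k]) : pat[k]) !=
					(icase ? toupper(pat[q]) : pat[q]))
				k = tbl[k - 1];
			if ((icase ? toupper(pat[k]) : pat[k]) ==
			    (icase ? toupper(pat[q]) : pat[q]))
				k++;
			tbl[q] = k;
		}
	}

	int main(void)
	{
		unsigned int tbl[6] = { 0 };
		unsigned int i;

		/* Case-folded to "ABABAB", the table is 0 0 1 2 3 4. */
		prefix_tbl((const unsigned char *)"AbaBab", 6, tbl, 1);
		for (i = 0; i < 6; i++)
			printf("%u ", tbl[i]);
		printf("\n");
		return 0;
	}
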
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 1dc2d1d18fa8..a013bbc23717 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -24,47 +24,57 @@ | |||
24 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
25 | #include <linux/kallsyms.h> | 25 | #include <linux/kallsyms.h> |
26 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
27 | #include <linux/ioport.h> | ||
27 | 28 | ||
28 | #include <asm/page.h> /* for PAGE_SIZE */ | 29 | #include <asm/page.h> /* for PAGE_SIZE */ |
29 | #include <asm/div64.h> | 30 | #include <asm/div64.h> |
31 | #include <asm/sections.h> /* for dereference_function_descriptor() */ | ||
30 | 32 | ||
31 | /* Works only for digits and letters, but small and fast */ | 33 | /* Works only for digits and letters, but small and fast */ |
32 | #define TOLOWER(x) ((x) | 0x20) | 34 | #define TOLOWER(x) ((x) | 0x20) |
33 | 35 | ||
36 | static unsigned int simple_guess_base(const char *cp) | ||
37 | { | ||
38 | if (cp[0] == '0') { | ||
39 | if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2])) | ||
40 | return 16; | ||
41 | else | ||
42 | return 8; | ||
43 | } else { | ||
44 | return 10; | ||
45 | } | ||
46 | } | ||
47 | |||
34 | /** | 48 | /** |
35 | * simple_strtoul - convert a string to an unsigned long | 49 | * simple_strtoul - convert a string to an unsigned long |
36 | * @cp: The start of the string | 50 | * @cp: The start of the string |
37 | * @endp: A pointer to the end of the parsed string will be placed here | 51 | * @endp: A pointer to the end of the parsed string will be placed here |
38 | * @base: The number base to use | 52 | * @base: The number base to use |
39 | */ | 53 | */ |
40 | unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base) | 54 | unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) |
41 | { | 55 | { |
42 | unsigned long result = 0,value; | 56 | unsigned long result = 0; |
43 | 57 | ||
44 | if (!base) { | 58 | if (!base) |
45 | base = 10; | 59 | base = simple_guess_base(cp); |
46 | if (*cp == '0') { | 60 | |
47 | base = 8; | 61 | if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x') |
48 | cp++; | 62 | cp += 2; |
49 | if ((TOLOWER(*cp) == 'x') && isxdigit(cp[1])) { | 63 | |
50 | cp++; | 64 | while (isxdigit(*cp)) { |
51 | base = 16; | 65 | unsigned int value; |
52 | } | 66 | |
53 | } | 67 | value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10; |
54 | } else if (base == 16) { | 68 | if (value >= base) |
55 | if (cp[0] == '0' && TOLOWER(cp[1]) == 'x') | 69 | break; |
56 | cp += 2; | 70 | result = result * base + value; |
57 | } | ||
58 | while (isxdigit(*cp) && | ||
59 | (value = isdigit(*cp) ? *cp-'0' : TOLOWER(*cp)-'a'+10) < base) { | ||
60 | result = result*base + value; | ||
61 | cp++; | 71 | cp++; |
62 | } | 72 | } |
73 | |||
63 | if (endp) | 74 | if (endp) |
64 | *endp = (char *)cp; | 75 | *endp = (char *)cp; |
65 | return result; | 76 | return result; |
66 | } | 77 | } |
67 | |||
68 | EXPORT_SYMBOL(simple_strtoul); | 78 | EXPORT_SYMBOL(simple_strtoul); |
69 | 79 | ||
70 | /** | 80 | /** |
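
Editorial note: the parsing loop is now a single pass, with the radix detection factored into simple_guess_base(); base-0 behavior is unchanged. A hypothetical caller, with the expected results in comments:

	#include <linux/kernel.h>

	static void strtoul_examples(void)
	{
		char *end;
		unsigned long v;

		v = simple_strtoul("0x1a", &end, 0);  /* 26: "0x" => base 16 */
		v = simple_strtoul("017", &end, 0);   /* 15: leading '0' => base 8 */
		v = simple_strtoul("42yz", &end, 10); /* 42; end points at "yz" */
		(void)v;
	}
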
@@ -73,13 +83,12 @@ EXPORT_SYMBOL(simple_strtoul); | |||
73 | * @endp: A pointer to the end of the parsed string will be placed here | 83 | * @endp: A pointer to the end of the parsed string will be placed here |
74 | * @base: The number base to use | 84 | * @base: The number base to use |
75 | */ | 85 | */ |
76 | long simple_strtol(const char *cp,char **endp,unsigned int base) | 86 | long simple_strtol(const char *cp, char **endp, unsigned int base) |
77 | { | 87 | { |
78 | if(*cp=='-') | 88 | if(*cp == '-') |
79 | return -simple_strtoul(cp+1,endp,base); | 89 | return -simple_strtoul(cp + 1, endp, base); |
80 | return simple_strtoul(cp,endp,base); | 90 | return simple_strtoul(cp, endp, base); |
81 | } | 91 | } |
82 | |||
83 | EXPORT_SYMBOL(simple_strtol); | 92 | EXPORT_SYMBOL(simple_strtol); |
84 | 93 | ||
85 | /** | 94 | /** |
@@ -88,34 +97,30 @@ EXPORT_SYMBOL(simple_strtol); | |||
88 | * @endp: A pointer to the end of the parsed string will be placed here | 97 | * @endp: A pointer to the end of the parsed string will be placed here |
89 | * @base: The number base to use | 98 | * @base: The number base to use |
90 | */ | 99 | */ |
91 | unsigned long long simple_strtoull(const char *cp,char **endp,unsigned int base) | 100 | unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) |
92 | { | 101 | { |
93 | unsigned long long result = 0,value; | 102 | unsigned long long result = 0; |
94 | 103 | ||
95 | if (!base) { | 104 | if (!base) |
96 | base = 10; | 105 | base = simple_guess_base(cp); |
97 | if (*cp == '0') { | 106 | |
98 | base = 8; | 107 | if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x') |
99 | cp++; | 108 | cp += 2; |
100 | if ((TOLOWER(*cp) == 'x') && isxdigit(cp[1])) { | 109 | |
101 | cp++; | 110 | while (isxdigit(*cp)) { |
102 | base = 16; | 111 | unsigned int value; |
103 | } | 112 | |
104 | } | 113 | value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10; |
105 | } else if (base == 16) { | 114 | if (value >= base) |
106 | if (cp[0] == '0' && TOLOWER(cp[1]) == 'x') | 115 | break; |
107 | cp += 2; | 116 | result = result * base + value; |
108 | } | ||
109 | while (isxdigit(*cp) | ||
110 | && (value = isdigit(*cp) ? *cp-'0' : TOLOWER(*cp)-'a'+10) < base) { | ||
111 | result = result*base + value; | ||
112 | cp++; | 117 | cp++; |
113 | } | 118 | } |
119 | |||
114 | if (endp) | 120 | if (endp) |
115 | *endp = (char *)cp; | 121 | *endp = (char *)cp; |
116 | return result; | 122 | return result; |
117 | } | 123 | } |
118 | |||
119 | EXPORT_SYMBOL(simple_strtoull); | 124 | EXPORT_SYMBOL(simple_strtoull); |
120 | 125 | ||
121 | /** | 126 | /** |
@@ -124,14 +129,13 @@ EXPORT_SYMBOL(simple_strtoull); | |||
124 | * @endp: A pointer to the end of the parsed string will be placed here | 129 | * @endp: A pointer to the end of the parsed string will be placed here |
125 | * @base: The number base to use | 130 | * @base: The number base to use |
126 | */ | 131 | */ |
127 | long long simple_strtoll(const char *cp,char **endp,unsigned int base) | 132 | long long simple_strtoll(const char *cp, char **endp, unsigned int base) |
128 | { | 133 | { |
129 | if(*cp=='-') | 134 | if(*cp=='-') |
130 | return -simple_strtoull(cp+1,endp,base); | 135 | return -simple_strtoull(cp + 1, endp, base); |
131 | return simple_strtoull(cp,endp,base); | 136 | return simple_strtoull(cp, endp, base); |
132 | } | 137 | } |
133 | 138 | ||
134 | |||
135 | /** | 139 | /** |
136 | * strict_strtoul - convert a string to an unsigned long strictly | 140 | * strict_strtoul - convert a string to an unsigned long strictly |
137 | * @cp: The string to be converted | 141 | * @cp: The string to be converted |
@@ -154,7 +158,27 @@ long long simple_strtoll(const char *cp,char **endp,unsigned int base) | |||
154 | * simple_strtoul just ignores the successive invalid characters and | 158 | * simple_strtoul just ignores the successive invalid characters and |
155 | * return the converted value of prefix part of the string. | 159 | * return the converted value of prefix part of the string. |
156 | */ | 160 | */ |
157 | int strict_strtoul(const char *cp, unsigned int base, unsigned long *res); | 161 | int strict_strtoul(const char *cp, unsigned int base, unsigned long *res) |
162 | { | ||
163 | char *tail; | ||
164 | unsigned long val; | ||
165 | size_t len; | ||
166 | |||
167 | *res = 0; | ||
168 | len = strlen(cp); | ||
169 | if (len == 0) | ||
170 | return -EINVAL; | ||
171 | |||
172 | val = simple_strtoul(cp, &tail, base); | ||
173 | if ((*tail == '\0') || | ||
174 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { | ||
175 | *res = val; | ||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | return -EINVAL; | ||
180 | } | ||
181 | EXPORT_SYMBOL(strict_strtoul); | ||
158 | 182 | ||
159 | /** | 183 | /** |
160 | * strict_strtol - convert a string to a long strictly | 184 | * strict_strtol - convert a string to a long strictly |
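
Editorial note: now that strict_strtoul() is a real out-of-line function, a short usage sketch; set_threshold() and the threshold variable are hypothetical. Unlike simple_strtoul(), the strict variant rejects trailing garbage, accepting only a terminating '\0' or a single trailing newline (which suits sysfs store handlers):

	#include <linux/errno.h>
	#include <linux/kernel.h>

	static unsigned long threshold;

	/* "128" and "128\n" parse; "128 MB" returns -EINVAL. */
	static int set_threshold(const char *buf)
	{
		unsigned long val;
		int err;

		err = strict_strtoul(buf, 10, &val);
		if (err)
			return err;

		threshold = val;
		return 0;
	}
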
@@ -168,7 +192,20 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res); | |||
168 | * It returns 0 if conversion is successful and *res is set to the converted | 192 | * It returns 0 if conversion is successful and *res is set to the converted |
169 | * value, otherwise it returns -EINVAL and *res is set to 0. | 193 | * value, otherwise it returns -EINVAL and *res is set to 0. |
170 | */ | 194 | */ |
171 | int strict_strtol(const char *cp, unsigned int base, long *res); | 195 | int strict_strtol(const char *cp, unsigned int base, long *res) |
196 | { | ||
197 | int ret; | ||
198 | if (*cp == '-') { | ||
199 | ret = strict_strtoul(cp + 1, base, (unsigned long *)res); | ||
200 | if (!ret) | ||
201 | *res = -(*res); | ||
202 | } else { | ||
203 | ret = strict_strtoul(cp, base, (unsigned long *)res); | ||
204 | } | ||
205 | |||
206 | return ret; | ||
207 | } | ||
208 | EXPORT_SYMBOL(strict_strtol); | ||
172 | 209 | ||
173 | /** | 210 | /** |
174 | * strict_strtoull - convert a string to an unsigned long long strictly | 211 | * strict_strtoull - convert a string to an unsigned long long strictly |
@@ -192,7 +229,27 @@ int strict_strtol(const char *cp, unsigned int base, long *res); | |||
192 | * simple_strtoull just ignores the successive invalid characters and | 229 | * simple_strtoull just ignores the successive invalid characters and |
193 | * return the converted value of prefix part of the string. | 230 | * return the converted value of prefix part of the string. |
194 | */ | 231 | */ |
195 | int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res); | 232 | int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res) |
233 | { | ||
234 | char *tail; | ||
235 | unsigned long long val; | ||
236 | size_t len; | ||
237 | |||
238 | *res = 0; | ||
239 | len = strlen(cp); | ||
240 | if (len == 0) | ||
241 | return -EINVAL; | ||
242 | |||
243 | val = simple_strtoull(cp, &tail, base); | ||
244 | if ((*tail == '\0') || | ||
245 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { | ||
246 | *res = val; | ||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | return -EINVAL; | ||
251 | } | ||
252 | EXPORT_SYMBOL(strict_strtoull); | ||
196 | 253 | ||
197 | /** | 254 | /** |
198 | * strict_strtoll - convert a string to a long long strictly | 255 | * strict_strtoll - convert a string to a long long strictly |
@@ -206,53 +263,20 @@ int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res); | |||
206 | * It returns 0 if conversion is successful and *res is set to the converted | 263 | * It returns 0 if conversion is successful and *res is set to the converted |
207 | * value, otherwise it returns -EINVAL and *res is set to 0. | 264 | * value, otherwise it returns -EINVAL and *res is set to 0. |
208 | */ | 265 | */ |
209 | int strict_strtoll(const char *cp, unsigned int base, long long *res); | 266 | int strict_strtoll(const char *cp, unsigned int base, long long *res) |
210 | 267 | { | |
211 | #define define_strict_strtoux(type, valtype) \ | 268 | int ret; |
212 | int strict_strtou##type(const char *cp, unsigned int base, valtype *res)\ | 269 | if (*cp == '-') { |
213 | { \ | 270 | ret = strict_strtoull(cp + 1, base, (unsigned long long *)res); |
214 | char *tail; \ | 271 | if (!ret) |
215 | valtype val; \ | 272 | *res = -(*res); |
216 | size_t len; \ | 273 | } else { |
217 | \ | 274 | ret = strict_strtoull(cp, base, (unsigned long long *)res); |
218 | *res = 0; \ | 275 | } |
219 | len = strlen(cp); \ | ||
220 | if (len == 0) \ | ||
221 | return -EINVAL; \ | ||
222 | \ | ||
223 | val = simple_strtoul(cp, &tail, base); \ | ||
224 | if ((*tail == '\0') || \ | ||
225 | ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {\ | ||
226 | *res = val; \ | ||
227 | return 0; \ | ||
228 | } \ | ||
229 | \ | ||
230 | return -EINVAL; \ | ||
231 | } \ | ||
232 | |||
233 | #define define_strict_strtox(type, valtype) \ | ||
234 | int strict_strto##type(const char *cp, unsigned int base, valtype *res) \ | ||
235 | { \ | ||
236 | int ret; \ | ||
237 | if (*cp == '-') { \ | ||
238 | ret = strict_strtou##type(cp+1, base, res); \ | ||
239 | if (!ret) \ | ||
240 | *res = -(*res); \ | ||
241 | } else \ | ||
242 | ret = strict_strtou##type(cp, base, res); \ | ||
243 | \ | ||
244 | return ret; \ | ||
245 | } \ | ||
246 | |||
247 | define_strict_strtoux(l, unsigned long) | ||
248 | define_strict_strtox(l, long) | ||
249 | define_strict_strtoux(ll, unsigned long long) | ||
250 | define_strict_strtox(ll, long long) | ||
251 | 276 | ||
252 | EXPORT_SYMBOL(strict_strtoul); | 277 | return ret; |
253 | EXPORT_SYMBOL(strict_strtol); | 278 | } |
254 | EXPORT_SYMBOL(strict_strtoll); | 279 | EXPORT_SYMBOL(strict_strtoll); |
255 | EXPORT_SYMBOL(strict_strtoull); | ||
256 | 280 | ||
257 | static int skip_atoi(const char **s) | 281 | static int skip_atoi(const char **s) |
258 | { | 282 | { |
@@ -513,16 +537,6 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio | |||
513 | return buf; | 537 | return buf; |
514 | } | 538 | } |
515 | 539 | ||
516 | static inline void *dereference_function_descriptor(void *ptr) | ||
517 | { | ||
518 | #if defined(CONFIG_IA64) || defined(CONFIG_PPC64) | ||
519 | void *p; | ||
520 | if (!probe_kernel_address(ptr, p)) | ||
521 | ptr = p; | ||
522 | #endif | ||
523 | return ptr; | ||
524 | } | ||
525 | |||
526 | static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags) | 540 | static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags) |
527 | { | 541 | { |
528 | unsigned long value = (unsigned long) ptr; | 542 | unsigned long value = (unsigned long) ptr; |
@@ -537,18 +551,51 @@ static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int | |||
537 | #endif | 551 | #endif |
538 | } | 552 | } |
539 | 553 | ||
554 | static char *resource_string(char *buf, char *end, struct resource *res, int field_width, int precision, int flags) | ||
555 | { | ||
556 | #ifndef IO_RSRC_PRINTK_SIZE | ||
557 | #define IO_RSRC_PRINTK_SIZE 4 | ||
558 | #endif | ||
559 | |||
560 | #ifndef MEM_RSRC_PRINTK_SIZE | ||
561 | #define MEM_RSRC_PRINTK_SIZE 8 | ||
562 | #endif | ||
563 | |||
564 | /* room for the actual numbers, the two "0x", -, [, ] and the final zero */ | ||
565 | char sym[4*sizeof(resource_size_t) + 8]; | ||
566 | char *p = sym, *pend = sym + sizeof(sym); | ||
567 | int size = -1; | ||
568 | |||
569 | if (res->flags & IORESOURCE_IO) | ||
570 | size = IO_RSRC_PRINTK_SIZE; | ||
571 | else if (res->flags & IORESOURCE_MEM) | ||
572 | size = MEM_RSRC_PRINTK_SIZE; | ||
573 | |||
574 | *p++ = '['; | ||
575 | p = number(p, pend, res->start, 16, size, -1, SPECIAL | SMALL | ZEROPAD); | ||
576 | *p++ = '-'; | ||
577 | p = number(p, pend, res->end, 16, size, -1, SPECIAL | SMALL | ZEROPAD); | ||
578 | *p++ = ']'; | ||
579 | *p = 0; | ||
580 | |||
581 | return string(buf, end, sym, field_width, precision, flags); | ||
582 | } | ||
583 | |||
540 | /* | 584 | /* |
541 | * Show a '%p' thing. A kernel extension is that the '%p' is followed | 585 | * Show a '%p' thing. A kernel extension is that the '%p' is followed |
542 | * by an extra set of alphanumeric characters that are extended format | 586 | * by an extra set of alphanumeric characters that are extended format |
543 | * specifiers. | 587 | * specifiers. |
544 | * | 588 | * |
545 | * Right now we just handle 'F' (for symbolic Function descriptor pointers) | 589 | * Right now we handle: |
546 | * and 'S' (for Symbolic direct pointers), but this can easily be | 590 | * |
547 | * extended in the future (network address types etc). | 591 | * - 'F' For symbolic function descriptor pointers |
592 | * - 'S' For symbolic direct pointers | ||
593 | * - 'R' For a struct resource pointer, it prints the range of | ||
594 | * addresses (not the name nor the flags) | ||
548 | * | 595 | * |
549 | * The difference between 'S' and 'F' is that on ia64 and ppc64 function | 596 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 |
550 | * pointers are really function descriptors, which contain a pointer the | 597 | * function pointers are really function descriptors, which contain a |
551 | * real address. | 598 | * pointer to the real address. |
552 | */ | 599 | */ |
553 | static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags) | 600 | static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags) |
554 | { | 601 | { |
@@ -558,6 +605,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field | |||
558 | /* Fallthrough */ | 605 | /* Fallthrough */ |
559 | case 'S': | 606 | case 'S': |
560 | return symbol_string(buf, end, ptr, field_width, precision, flags); | 607 | return symbol_string(buf, end, ptr, field_width, precision, flags); |
608 | case 'R': | ||
609 | return resource_string(buf, end, ptr, field_width, precision, flags); | ||
561 | } | 610 | } |
562 | flags |= SMALL; | 611 | flags |= SMALL; |
563 | if (field_width == -1) { | 612 | if (field_width == -1) { |
@@ -574,6 +623,11 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field | |||
574 | * @fmt: The format string to use | 623 | * @fmt: The format string to use |
575 | * @args: Arguments for the format string | 624 | * @args: Arguments for the format string |
576 | * | 625 | * |
626 | * This function follows C99 vsnprintf, but has some extensions: | ||
627 | * %pS output the name of a text symbol | ||
628 | * %pF output the name of a function pointer | ||
629 | * %pR output the address range in a struct resource | ||
630 | * | ||
577 | * The return value is the number of characters which would | 631 | * The return value is the number of characters which would |
578 | * be generated for the given input, excluding the trailing | 632 | * be generated for the given input, excluding the trailing |
579 | * '\0', as per ISO C99. If you want to have the exact | 633 | * '\0', as per ISO C99. If you want to have the exact |
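
Editorial note: a brief sketch of the extended %p specifiers described above; announce() is a hypothetical caller:

	#include <linux/ioport.h>
	#include <linux/kernel.h>

	static void announce(struct resource *res)
	{
		/* %pR prints only the range, e.g. "[0xd0000000-0xd3ffffff]" */
		printk(KERN_INFO "claimed %pR\n", res);

		/* %pS resolves a text address to a symbol name */
		printk(KERN_INFO "called from %pS\n",
		       __builtin_return_address(0));
	}
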
@@ -799,7 +853,6 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
799 | /* the trailing null byte doesn't count towards the total */ | 853 | /* the trailing null byte doesn't count towards the total */ |
800 | return str-buf; | 854 | return str-buf; |
801 | } | 855 | } |
802 | |||
803 | EXPORT_SYMBOL(vsnprintf); | 856 | EXPORT_SYMBOL(vsnprintf); |
804 | 857 | ||
805 | /** | 858 | /** |
@@ -815,6 +868,8 @@ EXPORT_SYMBOL(vsnprintf); | |||
815 | * | 868 | * |
816 | * Call this function if you are already dealing with a va_list. | 869 | * Call this function if you are already dealing with a va_list. |
817 | * You probably want scnprintf() instead. | 870 | * You probably want scnprintf() instead. |
871 | * | ||
872 | * See the vsnprintf() documentation for format string extensions over C99. | ||
818 | */ | 873 | */ |
819 | int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) | 874 | int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) |
820 | { | 875 | { |
@@ -823,7 +878,6 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
823 | i=vsnprintf(buf,size,fmt,args); | 878 | i=vsnprintf(buf,size,fmt,args); |
824 | return (i >= size) ? (size - 1) : i; | 879 | return (i >= size) ? (size - 1) : i; |
825 | } | 880 | } |
826 | |||
827 | EXPORT_SYMBOL(vscnprintf); | 881 | EXPORT_SYMBOL(vscnprintf); |
828 | 882 | ||
829 | /** | 883 | /** |
@@ -837,6 +891,8 @@ EXPORT_SYMBOL(vscnprintf); | |||
837 | * generated for the given input, excluding the trailing null, | 891 | * generated for the given input, excluding the trailing null, |
838 | * as per ISO C99. If the return is greater than or equal to | 892 | * as per ISO C99. If the return is greater than or equal to |
839 | * @size, the resulting string is truncated. | 893 | * @size, the resulting string is truncated. |
894 | * | ||
895 | * See the vsnprintf() documentation for format string extensions over C99. | ||
840 | */ | 896 | */ |
841 | int snprintf(char * buf, size_t size, const char *fmt, ...) | 897 | int snprintf(char * buf, size_t size, const char *fmt, ...) |
842 | { | 898 | { |
@@ -848,7 +904,6 @@ int snprintf(char * buf, size_t size, const char *fmt, ...) | |||
848 | va_end(args); | 904 | va_end(args); |
849 | return i; | 905 | return i; |
850 | } | 906 | } |
851 | |||
852 | EXPORT_SYMBOL(snprintf); | 907 | EXPORT_SYMBOL(snprintf); |
853 | 908 | ||
854 | /** | 909 | /** |
@@ -886,12 +941,13 @@ EXPORT_SYMBOL(scnprintf); | |||
886 | * | 941 | * |
887 | * Call this function if you are already dealing with a va_list. | 942 | * Call this function if you are already dealing with a va_list. |
888 | * You probably want sprintf() instead. | 943 | * You probably want sprintf() instead. |
944 | * | ||
945 | * See the vsnprintf() documentation for format string extensions over C99. | ||
889 | */ | 946 | */ |
890 | int vsprintf(char *buf, const char *fmt, va_list args) | 947 | int vsprintf(char *buf, const char *fmt, va_list args) |
891 | { | 948 | { |
892 | return vsnprintf(buf, INT_MAX, fmt, args); | 949 | return vsnprintf(buf, INT_MAX, fmt, args); |
893 | } | 950 | } |
894 | |||
895 | EXPORT_SYMBOL(vsprintf); | 951 | EXPORT_SYMBOL(vsprintf); |
896 | 952 | ||
897 | /** | 953 | /** |
@@ -903,6 +959,8 @@ EXPORT_SYMBOL(vsprintf); | |||
903 | * The function returns the number of characters written | 959 | * The function returns the number of characters written |
904 | * into @buf. Use snprintf() or scnprintf() in order to avoid | 960 | * into @buf. Use snprintf() or scnprintf() in order to avoid |
905 | * buffer overflows. | 961 | * buffer overflows. |
962 | * | ||
963 | * See the vsnprintf() documentation for format string extensions over C99. | ||
906 | */ | 964 | */ |
907 | int sprintf(char * buf, const char *fmt, ...) | 965 | int sprintf(char * buf, const char *fmt, ...) |
908 | { | 966 | { |
@@ -914,7 +972,6 @@ int sprintf(char * buf, const char *fmt, ...) | |||
914 | va_end(args); | 972 | va_end(args); |
915 | return i; | 973 | return i; |
916 | } | 974 | } |
917 | |||
918 | EXPORT_SYMBOL(sprintf); | 975 | EXPORT_SYMBOL(sprintf); |
919 | 976 | ||
920 | /** | 977 | /** |
@@ -1143,7 +1200,6 @@ int vsscanf(const char * buf, const char * fmt, va_list args) | |||
1143 | 1200 | ||
1144 | return num; | 1201 | return num; |
1145 | } | 1202 | } |
1146 | |||
1147 | EXPORT_SYMBOL(vsscanf); | 1203 | EXPORT_SYMBOL(vsscanf); |
1148 | 1204 | ||
1149 | /** | 1205 | /** |
@@ -1162,5 +1218,4 @@ int sscanf(const char * buf, const char * fmt, ...) | |||
1162 | va_end(args); | 1218 | va_end(args); |
1163 | return i; | 1219 | return i; |
1164 | } | 1220 | } |
1165 | |||
1166 | EXPORT_SYMBOL(sscanf); | 1221 | EXPORT_SYMBOL(sscanf); |