Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                 |   7
-rw-r--r--  lib/Kconfig.debug           | 138
-rw-r--r--  lib/Kconfig.kgdb            |  14
-rw-r--r--  lib/Makefile                |  15
-rw-r--r--  lib/bcd.c                   |  14
-rw-r--r--  lib/bitmap.c                |  11
-rw-r--r--  lib/bug.c                   |   2
-rw-r--r--  lib/cmdline.c               |  16
-rw-r--r--  lib/cpumask.c               |   9
-rw-r--r--  lib/crc-t10dif.c            |  67
-rw-r--r--  lib/debug_locks.c           |   2
-rw-r--r--  lib/debugobjects.c          |  65
-rw-r--r--  lib/idr.c                   | 142
-rw-r--r--  lib/inflate.c               |  52
-rw-r--r--  lib/iomap.c                 |   3
-rw-r--r--  lib/iommu-helper.c          |   5
-rw-r--r--  lib/klist.c                 |  96
-rw-r--r--  lib/kobject.c               |  19
-rw-r--r--  lib/kobject_uevent.c        |   9
-rw-r--r--  lib/list_debug.c            |  50
-rw-r--r--  lib/lmb.c                   |   2
-rw-r--r--  lib/lzo/lzo1x_decompress.c  |   6
-rw-r--r--  lib/percpu_counter.c        |   3
-rw-r--r--  lib/plist.c                 |  13
-rw-r--r--  lib/radix-tree.c            | 182
-rw-r--r--  lib/random32.c              |  48
-rw-r--r--  lib/ratelimit.c             |  56
-rw-r--r--  lib/scatterlist.c           | 180
-rw-r--r--  lib/show_mem.c              |  63
-rw-r--r--  lib/smp_processor_id.c      |  11
-rw-r--r--  lib/string_helpers.c        |  64
-rw-r--r--  lib/swiotlb.c               |  51
-rw-r--r--  lib/syscall.c               |  75
-rw-r--r--  lib/textsearch.c            |  17
-rw-r--r--  lib/ts_bm.c                 |  28
-rw-r--r--  lib/ts_fsm.c                |   6
-rw-r--r--  lib/ts_kmp.c                |  29
-rw-r--r--  lib/vsprintf.c              | 121
38 files changed, 1270 insertions(+), 421 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 8cc8e8722a3f..c7ad7a5b3535 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -29,6 +29,13 @@ config CRC16
29 the kernel tree does. Such modules that use library CRC16 29 the kernel tree does. Such modules that use library CRC16
30 functions require M here. 30 functions require M here.
31 31
32config CRC_T10DIF
33 tristate "CRC calculation for the T10 Data Integrity Field"
34 help
35 This option is only needed if a module that's not in the
36 kernel tree needs to calculate CRC checks for use with the
37 SCSI data integrity subsystem.
38
32config CRC_ITU_T 39config CRC_ITU_T
33 tristate "CRC ITU-T V.41 functions" 40 tristate "CRC ITU-T V.41 functions"
34 help 41 help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9d9dc0ddf13a..aa81d2848448 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -74,6 +74,9 @@ config DEBUG_FS
74 debugging files into. Enable this option to be able to read and 74 debugging files into. Enable this option to be able to read and
75 write to these files. 75 write to these files.
76 76
77 For detailed documentation on the debugfs API, see
78 Documentation/DocBook/filesystems.
79
77 If unsure, say N. 80 If unsure, say N.
78 81
79config HEADERS_CHECK 82config HEADERS_CHECK
@@ -147,7 +150,7 @@ config DETECT_SOFTLOCKUP
147 help 150 help
148 Say Y here to enable the kernel to detect "soft lockups", 151 Say Y here to enable the kernel to detect "soft lockups",
149 which are bugs that cause the kernel to loop in kernel 152 which are bugs that cause the kernel to loop in kernel
150 mode for more than 10 seconds, without giving other tasks a 153 mode for more than 60 seconds, without giving other tasks a
151 chance to run. 154 chance to run.
152 155
153 When a soft-lockup is detected, the kernel will print the 156 When a soft-lockup is detected, the kernel will print the
@@ -159,6 +162,30 @@ config DETECT_SOFTLOCKUP
159 can be detected via the NMI-watchdog, on platforms that 162 can be detected via the NMI-watchdog, on platforms that
160 support it.) 163 support it.)
161 164
165config BOOTPARAM_SOFTLOCKUP_PANIC
166 bool "Panic (Reboot) On Soft Lockups"
167 depends on DETECT_SOFTLOCKUP
168 help
169 Say Y here to enable the kernel to panic on "soft lockups",
170 which are bugs that cause the kernel to loop in kernel
171 mode for more than 60 seconds, without giving other tasks a
172 chance to run.
173
174 The panic can be used in combination with panic_timeout,
175 to cause the system to reboot automatically after a
176 lockup has been detected. This feature is useful for
177 high-availability systems that have uptime guarantees and
178 where a lockup must be resolved ASAP.
179
180 Say N if unsure.
181
182config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
183 int
184 depends on DETECT_SOFTLOCKUP
185 range 0 1
186 default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
187 default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
188
162config SCHED_DEBUG 189config SCHED_DEBUG
163 bool "Collect scheduler debugging info" 190 bool "Collect scheduler debugging info"
164 depends on DEBUG_KERNEL && PROC_FS 191 depends on DEBUG_KERNEL && PROC_FS
@@ -367,7 +394,7 @@ config LOCKDEP
367 bool 394 bool
368 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 395 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
369 select STACKTRACE 396 select STACKTRACE
370 select FRAME_POINTER if !X86 && !MIPS 397 select FRAME_POINTER if !X86 && !MIPS && !PPC
371 select KALLSYMS 398 select KALLSYMS
372 select KALLSYMS_ALL 399 select KALLSYMS_ALL
373 400
@@ -419,7 +446,6 @@ config DEBUG_LOCKING_API_SELFTESTS
419 446
420config STACKTRACE 447config STACKTRACE
421 bool 448 bool
422 depends on DEBUG_KERNEL
423 depends on STACKTRACE_SUPPORT 449 depends on STACKTRACE_SUPPORT
424 450
425config DEBUG_KOBJECT 451config DEBUG_KOBJECT
@@ -488,6 +514,18 @@ config DEBUG_WRITECOUNT
488 514
489 If unsure, say N. 515 If unsure, say N.
490 516
517config DEBUG_MEMORY_INIT
518 bool "Debug memory initialisation" if EMBEDDED
519 default !EMBEDDED
520 help
521 Enable this for additional checks during memory initialisation.
522 The sanity checks verify aspects of the VM such as the memory model
523 and other information provided by the architecture. Verbose
524 information will be printed at KERN_DEBUG loglevel depending
525 on the mminit_loglevel= command-line option.
526
527 If unsure, say Y
528
491config DEBUG_LIST 529config DEBUG_LIST
492 bool "Debug linked list manipulation" 530 bool "Debug linked list manipulation"
493 depends on DEBUG_KERNEL 531 depends on DEBUG_KERNEL
@@ -540,16 +578,47 @@ config BOOT_PRINTK_DELAY
540config RCU_TORTURE_TEST 578config RCU_TORTURE_TEST
541 tristate "torture tests for RCU" 579 tristate "torture tests for RCU"
542 depends on DEBUG_KERNEL 580 depends on DEBUG_KERNEL
543 depends on m
544 default n 581 default n
545 help 582 help
546 This option provides a kernel module that runs torture tests 583 This option provides a kernel module that runs torture tests
547 on the RCU infrastructure. The kernel module may be built 584 on the RCU infrastructure. The kernel module may be built
548 after the fact on the running kernel to be tested, if desired. 585 after the fact on the running kernel to be tested, if desired.
549 586
587 Say Y here if you want RCU torture tests to be built into
588 the kernel.
550 Say M if you want the RCU torture tests to build as a module. 589 Say M if you want the RCU torture tests to build as a module.
551 Say N if you are unsure. 590 Say N if you are unsure.
552 591
592config RCU_TORTURE_TEST_RUNNABLE
593 bool "torture tests for RCU runnable by default"
594 depends on RCU_TORTURE_TEST = y
595 default n
596 help
597 This option provides a way to build the RCU torture tests
598 directly into the kernel without them starting up at boot
599 time. You can use /proc/sys/kernel/rcutorture_runnable
600 to manually override this setting. This /proc file is
601 available only when the RCU torture tests have been built
602 into the kernel.
603
604 Say Y here if you want the RCU torture tests to start during
605 boot (you probably don't).
606 Say N here if you want the RCU torture tests to start only
607 after being manually enabled via /proc.
608
609config RCU_CPU_STALL_DETECTOR
610 bool "Check for stalled CPUs delaying RCU grace periods"
611 depends on CLASSIC_RCU
612 default n
613 help
614 This option causes RCU to printk information on which
615 CPUs are delaying the current grace period, but only when
616 the grace period extends for excessive time periods.
617
618 Say Y if you want RCU to perform such checks.
619
620 Say N if you are unsure.
621
553config KPROBES_SANITY_TEST 622config KPROBES_SANITY_TEST
554 bool "Kprobes sanity tests" 623 bool "Kprobes sanity tests"
555 depends on DEBUG_KERNEL 624 depends on DEBUG_KERNEL
@@ -572,6 +641,31 @@ config BACKTRACE_SELF_TEST
572 for distributions or general kernels, but only for kernel 641 for distributions or general kernels, but only for kernel
573 developers working on architecture code. 642 developers working on architecture code.
574 643
644 Note that if you want to also test saved backtraces, you will
645 have to enable STACKTRACE as well.
646
647 Say N if you are unsure.
648
649config DEBUG_BLOCK_EXT_DEVT
650 bool "Force extended block device numbers and spread them"
651 depends on DEBUG_KERNEL
652 depends on BLOCK
653 default n
654 help
655 Conventionally, block device numbers are allocated from
656 predetermined contiguous area. However, extended block area
657 may introduce non-contiguous block device numbers. This
658 option forces most block device numbers to be allocated from
659 the extended space and spreads them to discover kernel or
660 userland code paths which assume predetermined contiguous
661 device number allocation.
662
663 Note that turning on this debug option shuffles all the
664 device numbers for all IDE and SCSI devices including libata
665 ones, so root partition specified using device number
666 directly (via rdev or root=MAJ:MIN) won't work anymore.
667 Textual device names (root=/dev/sdXn) will continue to work.
668
575 Say N if you are unsure. 669 Say N if you are unsure.
576 670
577config LKDTM 671config LKDTM
@@ -611,10 +705,21 @@ config FAIL_PAGE_ALLOC
611 705
612config FAIL_MAKE_REQUEST 706config FAIL_MAKE_REQUEST
613 bool "Fault-injection capability for disk IO" 707 bool "Fault-injection capability for disk IO"
614 depends on FAULT_INJECTION 708 depends on FAULT_INJECTION && BLOCK
615 help 709 help
616 Provide fault-injection capability for disk IO. 710 Provide fault-injection capability for disk IO.
617 711
712config FAIL_IO_TIMEOUT
713 bool "Faul-injection capability for faking disk interrupts"
714 depends on FAULT_INJECTION && BLOCK
715 help
716 Provide fault-injection capability on end IO handling. This
717 will make the block layer "forget" an interrupt as configured,
718 thus exercising the error handling.
719
720 Only works with drivers that use the generic timeout handling,
721 for others it wont do anything.
722
618config FAULT_INJECTION_DEBUG_FS 723config FAULT_INJECTION_DEBUG_FS
619 bool "Debugfs entries for fault-injection capabilities" 724 bool "Debugfs entries for fault-injection capabilities"
620 depends on FAULT_INJECTION && SYSFS && DEBUG_FS 725 depends on FAULT_INJECTION && SYSFS && DEBUG_FS
@@ -626,13 +731,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
626 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT 731 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
627 depends on !X86_64 732 depends on !X86_64
628 select STACKTRACE 733 select STACKTRACE
629 select FRAME_POINTER 734 select FRAME_POINTER if !PPC
630 help 735 help
631 Provide stacktrace filter for fault-injection capabilities 736 Provide stacktrace filter for fault-injection capabilities
632 737
633config LATENCYTOP 738config LATENCYTOP
634 bool "Latency measuring infrastructure" 739 bool "Latency measuring infrastructure"
635 select FRAME_POINTER if !MIPS 740 select FRAME_POINTER if !MIPS && !PPC
636 select KALLSYMS 741 select KALLSYMS
637 select KALLSYMS_ALL 742 select KALLSYMS_ALL
638 select STACKTRACE 743 select STACKTRACE
@@ -643,6 +748,16 @@ config LATENCYTOP
643 Enable this option if you want to use the LatencyTOP tool 748 Enable this option if you want to use the LatencyTOP tool
644 to find out which userspace is blocking on what kernel operations. 749 to find out which userspace is blocking on what kernel operations.
645 750
751config SYSCTL_SYSCALL_CHECK
752 bool "Sysctl checks"
753 depends on SYSCTL_SYSCALL
754 ---help---
755 sys_sysctl uses binary paths that have been found challenging
756 to properly maintain and use. This enables checks that help
757 you to keep things correct.
758
759source kernel/trace/Kconfig
760
646config PROVIDE_OHCI1394_DMA_INIT 761config PROVIDE_OHCI1394_DMA_INIT
647 bool "Remote debugging over FireWire early on boot" 762 bool "Remote debugging over FireWire early on boot"
648 depends on PCI && X86 763 depends on PCI && X86
@@ -683,6 +798,15 @@ config FIREWIRE_OHCI_REMOTE_DMA
683 798
684 If unsure, say N. 799 If unsure, say N.
685 800
801menuconfig BUILD_DOCSRC
802 bool "Build targets in Documentation/ tree"
803 depends on HEADERS_CHECK
804 help
805 This option attempts to build objects from the source files in the
806 kernel Documentation/ tree.
807
808 Say N if you are unsure.
809
686source "samples/Kconfig" 810source "samples/Kconfig"
687 811
688source "lib/Kconfig.kgdb" 812source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index a5d4b1dac2a5..9b5d1d7f2ef7 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,20 +1,20 @@
1 1
2config HAVE_ARCH_KGDB_SHADOW_INFO
3 bool
4
5config HAVE_ARCH_KGDB 2config HAVE_ARCH_KGDB
6 bool 3 bool
7 4
8menuconfig KGDB 5menuconfig KGDB
9 bool "KGDB: kernel debugging with remote gdb" 6 bool "KGDB: kernel debugging with remote gdb"
10 select FRAME_POINTER
11 depends on HAVE_ARCH_KGDB 7 depends on HAVE_ARCH_KGDB
12 depends on DEBUG_KERNEL && EXPERIMENTAL 8 depends on DEBUG_KERNEL && EXPERIMENTAL
13 help 9 help
14 If you say Y here, it will be possible to remotely debug the 10 If you say Y here, it will be possible to remotely debug the
15 kernel using gdb. Documentation of kernel debugger is available 11 kernel using gdb. It is recommended but not required, that
16 at http://kgdb.sourceforge.net as well as in DocBook form 12 you also turn on the kernel config option
17 in Documentation/DocBook/. If unsure, say N. 13 CONFIG_FRAME_POINTER to aid in producing more reliable stack
14 backtraces in the external debugger. Documentation of
15 kernel debugger is available at http://kgdb.sourceforge.net
16 as well as in DocBook form in Documentation/DocBook/. If
17 unsure, say N.
18 18
19if KGDB 19if KGDB
20 20
diff --git a/lib/Makefile b/lib/Makefile
index 74b0cfb1fcc3..44001af76a7d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -2,19 +2,25 @@
2# Makefile for some libs needed in the kernel. 2# Makefile for some libs needed in the kernel.
3# 3#
4 4
5ifdef CONFIG_FTRACE
6ORIG_CFLAGS := $(KBUILD_CFLAGS)
7KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
8endif
9
5lib-y := ctype.o string.o vsprintf.o cmdline.o \ 10lib-y := ctype.o string.o vsprintf.o cmdline.o \
6 rbtree.o radix-tree.o dump_stack.o \ 11 rbtree.o radix-tree.o dump_stack.o \
7 idr.o int_sqrt.o extable.o prio_tree.o \ 12 idr.o int_sqrt.o extable.o prio_tree.o \
8 sha1.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
9 proportions.o prio_heap.o ratelimit.o 14 proportions.o prio_heap.o ratelimit.o show_mem.o
10 15
11lib-$(CONFIG_MMU) += ioremap.o 16lib-$(CONFIG_MMU) += ioremap.o
12lib-$(CONFIG_SMP) += cpumask.o 17lib-$(CONFIG_SMP) += cpumask.o
13 18
14lib-y += kobject.o kref.o klist.o 19lib-y += kobject.o kref.o klist.o
15 20
16obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 21obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
17 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o 22 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
23 string_helpers.o
18 24
19ifeq ($(CONFIG_DEBUG_KOBJECT),y) 25ifeq ($(CONFIG_DEBUG_KOBJECT),y)
20CFLAGS_kobject.o += -DDEBUG 26CFLAGS_kobject.o += -DDEBUG
@@ -45,6 +51,7 @@ endif
45obj-$(CONFIG_BITREVERSE) += bitrev.o 51obj-$(CONFIG_BITREVERSE) += bitrev.o
46obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o 52obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
47obj-$(CONFIG_CRC16) += crc16.o 53obj-$(CONFIG_CRC16) += crc16.o
54obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
48obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o 55obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
49obj-$(CONFIG_CRC32) += crc32.o 56obj-$(CONFIG_CRC32) += crc32.o
50obj-$(CONFIG_CRC7) += crc7.o 57obj-$(CONFIG_CRC7) += crc7.o
@@ -72,6 +79,8 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o
72 79
73obj-$(CONFIG_HAVE_LMB) += lmb.o 80obj-$(CONFIG_HAVE_LMB) += lmb.o
74 81
82obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
83
75hostprogs-y := gen_crc32table 84hostprogs-y := gen_crc32table
76clean-files := crc32table.h 85clean-files := crc32table.h
77 86
diff --git a/lib/bcd.c b/lib/bcd.c
new file mode 100644
index 000000000000..d74257fd0fe7
--- /dev/null
+++ b/lib/bcd.c
@@ -0,0 +1,14 @@
1#include <linux/bcd.h>
2#include <linux/module.h>
3
4unsigned bcd2bin(unsigned char val)
5{
6 return (val & 0x0f) + (val >> 4) * 10;
7}
8EXPORT_SYMBOL(bcd2bin);
9
10unsigned char bin2bcd(unsigned val)
11{
12 return ((val / 10) << 4) + val % 10;
13}
14EXPORT_SYMBOL(bin2bcd);
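
Note: bcd2bin()/bin2bcd() pack one decimal digit per nibble, the format used by most RTC chips. A minimal userspace sketch of the same arithmetic, for illustration only (the function names here mirror the kernel ones, but the main() driver and the 0x59 example are ours, not part of the patch):

    #include <stdio.h>

    /* Same arithmetic as the kernel helpers added above. */
    static unsigned bcd2bin(unsigned char val)
    {
        return (val & 0x0f) + (val >> 4) * 10;    /* low nibble + 10 * high nibble */
    }

    static unsigned char bin2bcd(unsigned val)
    {
        return ((val / 10) << 4) + val % 10;      /* tens in high nibble, ones low */
    }

    int main(void)
    {
        /* A typical RTC seconds register holds BCD: 0x59 means 59 seconds. */
        printf("bcd2bin(0x59) = %u\n", bcd2bin(0x59));    /* prints 59 */
        printf("bin2bcd(59)   = 0x%02x\n", bin2bcd(59));  /* prints 0x59 */
        return 0;
    }
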
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 482df94ea21e..06fb57c86de0 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,6 +316,17 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
316EXPORT_SYMBOL(bitmap_scnprintf); 316EXPORT_SYMBOL(bitmap_scnprintf);
317 317
318/** 318/**
319 * bitmap_scnprintf_len - return buffer length needed to convert
320 * bitmap to an ASCII hex string
321 * @nr_bits: number of bits to be converted
322 */
323int bitmap_scnprintf_len(unsigned int nr_bits)
324{
325 unsigned int nr_nibbles = ALIGN(nr_bits, 4) / 4;
326 return nr_nibbles + ALIGN(nr_nibbles, CHUNKSZ / 4) / (CHUNKSZ / 4) - 1;
327}
328
329/**
319 * __bitmap_parse - convert an ASCII hex string into a bitmap. 330 * __bitmap_parse - convert an ASCII hex string into a bitmap.
320 * @buf: pointer to buffer containing string. 331 * @buf: pointer to buffer containing string.
321 * @buflen: buffer size in bytes. If string is smaller than this 332 * @buflen: buffer size in bytes. If string is smaller than this
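
Note: bitmap_scnprintf_len() counts one hex nibble per 4 bits plus one ',' separator for each additional CHUNKSZ-bit chunk. A userspace sketch of the calculation, assuming CHUNKSZ is 32 as in lib/bitmap.c; the ALIGN() macro below is a generic round-up stand-in rather than the kernel macro:

    #include <stdio.h>

    #define CHUNKSZ 32                               /* assumed, as in lib/bitmap.c */
    #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

    /* Mirrors the new bitmap_scnprintf_len(): nibbles plus one ',' per extra chunk. */
    static int scnprintf_len(unsigned int nr_bits)
    {
        unsigned int nr_nibbles = ALIGN(nr_bits, 4) / 4;
        return nr_nibbles + ALIGN(nr_nibbles, CHUNKSZ / 4) / (CHUNKSZ / 4) - 1;
    }

    int main(void)
    {
        /* 70 bits -> 18 hex digits in 3 comma-separated 32-bit chunks: 18 + 2 = 20 */
        printf("%d\n", scnprintf_len(70));
        return 0;
    }
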
diff --git a/lib/bug.c b/lib/bug.c
index 530f38f55787..bfeafd60ee9f 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -37,6 +37,7 @@
37 */ 37 */
38#include <linux/list.h> 38#include <linux/list.h>
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/kernel.h>
40#include <linux/bug.h> 41#include <linux/bug.h>
41#include <linux/sched.h> 42#include <linux/sched.h>
42 43
@@ -149,6 +150,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
149 (void *)bugaddr); 150 (void *)bugaddr);
150 151
151 show_regs(regs); 152 show_regs(regs);
153 add_taint(TAINT_WARN);
152 return BUG_TRAP_TYPE_WARN; 154 return BUG_TRAP_TYPE_WARN;
153 } 155 }
154 156
diff --git a/lib/cmdline.c b/lib/cmdline.c
index f596c08d213a..f5f3ad8b62ff 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -116,7 +116,7 @@ char *get_options(const char *str, int nints, int *ints)
116/** 116/**
117 * memparse - parse a string with mem suffixes into a number 117 * memparse - parse a string with mem suffixes into a number
118 * @ptr: Where parse begins 118 * @ptr: Where parse begins
119 * @retptr: (output) Pointer to next char after parse completes 119 * @retptr: (output) Optional pointer to next char after parse completes
120 * 120 *
121 * Parses a string into a number. The number stored at @ptr is 121 * Parses a string into a number. The number stored at @ptr is
122 * potentially suffixed with %K (for kilobytes, or 1024 bytes), 122 * potentially suffixed with %K (for kilobytes, or 1024 bytes),
@@ -126,11 +126,13 @@ char *get_options(const char *str, int nints, int *ints)
126 * megabyte, or one gigabyte, respectively. 126 * megabyte, or one gigabyte, respectively.
127 */ 127 */
128 128
129unsigned long long memparse (char *ptr, char **retptr) 129unsigned long long memparse(const char *ptr, char **retptr)
130{ 130{
131 unsigned long long ret = simple_strtoull (ptr, retptr, 0); 131 char *endptr; /* local pointer to end of parsed string */
132 132
133 switch (**retptr) { 133 unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
134
135 switch (*endptr) {
134 case 'G': 136 case 'G':
135 case 'g': 137 case 'g':
136 ret <<= 10; 138 ret <<= 10;
@@ -140,10 +142,14 @@ unsigned long long memparse (char *ptr, char **retptr)
140 case 'K': 142 case 'K':
141 case 'k': 143 case 'k':
142 ret <<= 10; 144 ret <<= 10;
143 (*retptr)++; 145 endptr++;
144 default: 146 default:
145 break; 147 break;
146 } 148 }
149
150 if (retptr)
151 *retptr = endptr;
152
147 return ret; 153 return ret;
148} 154}
149 155
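
Note: with this change memparse() takes a const input string and treats @retptr as optional (NULL is now allowed); the suffix cases deliberately fall through, so 'G' shifts by 10 three times, 'M' twice and 'K' once. A rough userspace approximation (parse_size() and the test strings are ours, for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    /* Userspace sketch of memparse(): parse "64K", "2M", "1G" and so on.
     * The switch falls through on purpose: each case adds one shift by 10. */
    static unsigned long long parse_size(const char *ptr, char **retptr)
    {
        char *endptr;
        unsigned long long ret = strtoull(ptr, &endptr, 0);

        switch (*endptr) {
        case 'G': case 'g':
            ret <<= 10;
        case 'M': case 'm':
            ret <<= 10;
        case 'K': case 'k':
            ret <<= 10;
            endptr++;
        default:
            break;
        }

        if (retptr)               /* as in the patched version, NULL is allowed */
            *retptr = endptr;
        return ret;
    }

    int main(void)
    {
        printf("%llu\n", parse_size("64K", NULL));   /* 65536 */
        printf("%llu\n", parse_size("2M", NULL));    /* 2097152 */
        return 0;
    }
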
diff --git a/lib/cpumask.c b/lib/cpumask.c
index bb4f76d3c3e7..5f97dc25ef9c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp)
15} 15}
16EXPORT_SYMBOL(__next_cpu); 16EXPORT_SYMBOL(__next_cpu);
17 17
18#if NR_CPUS > 64
19int __next_cpu_nr(int n, const cpumask_t *srcp)
20{
21 return min_t(int, nr_cpu_ids,
22 find_next_bit(srcp->bits, nr_cpu_ids, n+1));
23}
24EXPORT_SYMBOL(__next_cpu_nr);
25#endif
26
18int __any_online_cpu(const cpumask_t *mask) 27int __any_online_cpu(const cpumask_t *mask)
19{ 28{
20 int cpu; 29 int cpu;
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
new file mode 100644
index 000000000000..fbbd66ed86cd
--- /dev/null
+++ b/lib/crc-t10dif.c
@@ -0,0 +1,67 @@
1/*
2 * T10 Data Integrity Field CRC16 calculation
3 *
4 * Copyright (c) 2007 Oracle Corporation. All rights reserved.
5 * Written by Martin K. Petersen <martin.petersen@oracle.com>
6 *
7 * This source code is licensed under the GNU General Public License,
8 * Version 2. See the file COPYING for more details.
9 */
10
11#include <linux/types.h>
12#include <linux/module.h>
13#include <linux/crc-t10dif.h>
14
15/* Table generated using the following polynomium:
16 * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
17 * gt: 0x8bb7
18 */
19static const __u16 t10_dif_crc_table[256] = {
20 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
21 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
22 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
23 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
24 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
25 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
26 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
27 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
28 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
29 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
30 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
31 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
32 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
33 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
34 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
35 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
36 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
37 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
38 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
39 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
40 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
41 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
42 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
43 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
44 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
45 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
46 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
47 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
48 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
49 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
50 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
51 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
52};
53
54__u16 crc_t10dif(const unsigned char *buffer, size_t len)
55{
56 __u16 crc = 0;
57 unsigned int i;
58
59 for (i = 0 ; i < len ; i++)
60 crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
61
62 return crc;
63}
64EXPORT_SYMBOL(crc_t10dif);
65
66MODULE_DESCRIPTION("T10 DIF CRC calculation");
67MODULE_LICENSE("GPL");
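
Note: crc_t10dif() is a plain table-driven CRC-16 over the polynomial 0x8bb7, with initial value 0 and no bit reflection. The sketch below rebuilds the 256-entry table from that polynomial and runs the same byte-at-a-time update loop in userspace; gen_table() and the main() driver are ours, and 0xd0db is the commonly published check value for this CRC over "123456789", not something stated in the patch:

    #include <stdio.h>
    #include <string.h>

    static unsigned short table[256];

    /* Derive each table entry by running 8 bitwise steps of the 0x8bb7 CRC. */
    static void gen_table(void)
    {
        unsigned int n;
        int j;

        for (n = 0; n < 256; n++) {
            unsigned short crc = n << 8;
            for (j = 0; j < 8; j++)
                crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7 : crc << 1;
            table[n] = crc;     /* table[1] comes out as 0x8bb7, as listed above */
        }
    }

    /* Same byte-at-a-time update loop as the kernel crc_t10dif(). */
    static unsigned short crc_t10dif(const unsigned char *buf, size_t len)
    {
        unsigned short crc = 0;
        size_t i;

        for (i = 0; i < len; i++)
            crc = (crc << 8) ^ table[((crc >> 8) ^ buf[i]) & 0xff];
        return crc;
    }

    int main(void)
    {
        const unsigned char msg[] = "123456789";

        gen_table();
        printf("0x%04x\n", crc_t10dif(msg, strlen((const char *)msg)));  /* 0xd0db */
        return 0;
    }
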
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 0ef01d14727c..0218b4693dd8 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -8,6 +8,7 @@
8 * 8 *
9 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 9 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10 */ 10 */
11#include <linux/kernel.h>
11#include <linux/rwsem.h> 12#include <linux/rwsem.h>
12#include <linux/mutex.h> 13#include <linux/mutex.h>
13#include <linux/module.h> 14#include <linux/module.h>
@@ -37,6 +38,7 @@ int debug_locks_off(void)
37{ 38{
38 if (xchg(&debug_locks, 0)) { 39 if (xchg(&debug_locks, 0)) {
39 if (!debug_locks_silent) { 40 if (!debug_locks_silent) {
41 oops_in_progress = 1;
40 console_verbose(); 42 console_verbose();
41 return 1; 43 return 1;
42 } 44 }
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a76a5e122ae1..e3ab374e1334 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -68,6 +68,7 @@ static int fill_pool(void)
68{ 68{
69 gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; 69 gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
70 struct debug_obj *new; 70 struct debug_obj *new;
71 unsigned long flags;
71 72
72 if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL)) 73 if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
73 return obj_pool_free; 74 return obj_pool_free;
@@ -81,10 +82,10 @@ static int fill_pool(void)
81 if (!new) 82 if (!new)
82 return obj_pool_free; 83 return obj_pool_free;
83 84
84 spin_lock(&pool_lock); 85 spin_lock_irqsave(&pool_lock, flags);
85 hlist_add_head(&new->node, &obj_pool); 86 hlist_add_head(&new->node, &obj_pool);
86 obj_pool_free++; 87 obj_pool_free++;
87 spin_unlock(&pool_lock); 88 spin_unlock_irqrestore(&pool_lock, flags);
88 } 89 }
89 return obj_pool_free; 90 return obj_pool_free;
90} 91}
@@ -110,16 +111,14 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
110} 111}
111 112
112/* 113/*
113 * Allocate a new object. If the pool is empty and no refill possible, 114 * Allocate a new object. If the pool is empty, switch off the debugger.
114 * switch off the debugger. 115 * Must be called with interrupts disabled.
115 */ 116 */
116static struct debug_obj * 117static struct debug_obj *
117alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) 118alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
118{ 119{
119 struct debug_obj *obj = NULL; 120 struct debug_obj *obj = NULL;
120 int retry = 0;
121 121
122repeat:
123 spin_lock(&pool_lock); 122 spin_lock(&pool_lock);
124 if (obj_pool.first) { 123 if (obj_pool.first) {
125 obj = hlist_entry(obj_pool.first, typeof(*obj), node); 124 obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -141,9 +140,6 @@ repeat:
141 } 140 }
142 spin_unlock(&pool_lock); 141 spin_unlock(&pool_lock);
143 142
144 if (fill_pool() && !obj && !retry++)
145 goto repeat;
146
147 return obj; 143 return obj;
148} 144}
149 145
@@ -153,17 +149,18 @@ repeat:
153static void free_object(struct debug_obj *obj) 149static void free_object(struct debug_obj *obj)
154{ 150{
155 unsigned long idx = (unsigned long)(obj - obj_static_pool); 151 unsigned long idx = (unsigned long)(obj - obj_static_pool);
152 unsigned long flags;
156 153
157 if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) { 154 if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
158 spin_lock(&pool_lock); 155 spin_lock_irqsave(&pool_lock, flags);
159 hlist_add_head(&obj->node, &obj_pool); 156 hlist_add_head(&obj->node, &obj_pool);
160 obj_pool_free++; 157 obj_pool_free++;
161 obj_pool_used--; 158 obj_pool_used--;
162 spin_unlock(&pool_lock); 159 spin_unlock_irqrestore(&pool_lock, flags);
163 } else { 160 } else {
164 spin_lock(&pool_lock); 161 spin_lock_irqsave(&pool_lock, flags);
165 obj_pool_used--; 162 obj_pool_used--;
166 spin_unlock(&pool_lock); 163 spin_unlock_irqrestore(&pool_lock, flags);
167 kmem_cache_free(obj_cache, obj); 164 kmem_cache_free(obj_cache, obj);
168 } 165 }
169} 166}
@@ -176,6 +173,7 @@ static void debug_objects_oom(void)
176{ 173{
177 struct debug_bucket *db = obj_hash; 174 struct debug_bucket *db = obj_hash;
178 struct hlist_node *node, *tmp; 175 struct hlist_node *node, *tmp;
176 HLIST_HEAD(freelist);
179 struct debug_obj *obj; 177 struct debug_obj *obj;
180 unsigned long flags; 178 unsigned long flags;
181 int i; 179 int i;
@@ -184,11 +182,14 @@ static void debug_objects_oom(void)
184 182
185 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { 183 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
186 spin_lock_irqsave(&db->lock, flags); 184 spin_lock_irqsave(&db->lock, flags);
187 hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { 185 hlist_move_list(&db->list, &freelist);
186 spin_unlock_irqrestore(&db->lock, flags);
187
188 /* Now free them */
189 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
188 hlist_del(&obj->node); 190 hlist_del(&obj->node);
189 free_object(obj); 191 free_object(obj);
190 } 192 }
191 spin_unlock_irqrestore(&db->lock, flags);
192 } 193 }
193} 194}
194 195
@@ -210,9 +211,8 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
210 211
211 if (limit < 5 && obj->descr != descr_test) { 212 if (limit < 5 && obj->descr != descr_test) {
212 limit++; 213 limit++;
213 printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, 214 WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
214 obj_states[obj->state], obj->descr->name); 215 obj_states[obj->state], obj->descr->name);
215 WARN_ON(1);
216 } 216 }
217 debug_objects_warnings++; 217 debug_objects_warnings++;
218} 218}
@@ -231,15 +231,13 @@ debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
231 231
232static void debug_object_is_on_stack(void *addr, int onstack) 232static void debug_object_is_on_stack(void *addr, int onstack)
233{ 233{
234 void *stack = current->stack;
235 int is_on_stack; 234 int is_on_stack;
236 static int limit; 235 static int limit;
237 236
238 if (limit > 4) 237 if (limit > 4)
239 return; 238 return;
240 239
241 is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE)); 240 is_on_stack = object_is_on_stack(addr);
242
243 if (is_on_stack == onstack) 241 if (is_on_stack == onstack)
244 return; 242 return;
245 243
@@ -261,6 +259,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
261 struct debug_obj *obj; 259 struct debug_obj *obj;
262 unsigned long flags; 260 unsigned long flags;
263 261
262 fill_pool();
263
264 db = get_bucket((unsigned long) addr); 264 db = get_bucket((unsigned long) addr);
265 265
266 spin_lock_irqsave(&db->lock, flags); 266 spin_lock_irqsave(&db->lock, flags);
@@ -504,8 +504,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
504 return; 504 return;
505 default: 505 default:
506 hlist_del(&obj->node); 506 hlist_del(&obj->node);
507 spin_unlock_irqrestore(&db->lock, flags);
507 free_object(obj); 508 free_object(obj);
508 break; 509 return;
509 } 510 }
510out_unlock: 511out_unlock:
511 spin_unlock_irqrestore(&db->lock, flags); 512 spin_unlock_irqrestore(&db->lock, flags);
@@ -516,6 +517,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
516{ 517{
517 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; 518 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
518 struct hlist_node *node, *tmp; 519 struct hlist_node *node, *tmp;
520 HLIST_HEAD(freelist);
519 struct debug_obj_descr *descr; 521 struct debug_obj_descr *descr;
520 enum debug_obj_state state; 522 enum debug_obj_state state;
521 struct debug_bucket *db; 523 struct debug_bucket *db;
@@ -551,11 +553,18 @@ repeat:
551 goto repeat; 553 goto repeat;
552 default: 554 default:
553 hlist_del(&obj->node); 555 hlist_del(&obj->node);
554 free_object(obj); 556 hlist_add_head(&obj->node, &freelist);
555 break; 557 break;
556 } 558 }
557 } 559 }
558 spin_unlock_irqrestore(&db->lock, flags); 560 spin_unlock_irqrestore(&db->lock, flags);
561
562 /* Now free them */
563 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
564 hlist_del(&obj->node);
565 free_object(obj);
566 }
567
559 if (cnt > debug_objects_maxchain) 568 if (cnt > debug_objects_maxchain)
560 debug_objects_maxchain = cnt; 569 debug_objects_maxchain = cnt;
561 } 570 }
@@ -738,26 +747,22 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
738 747
739 obj = lookup_object(addr, db); 748 obj = lookup_object(addr, db);
740 if (!obj && state != ODEBUG_STATE_NONE) { 749 if (!obj && state != ODEBUG_STATE_NONE) {
741 printk(KERN_ERR "ODEBUG: selftest object not found\n"); 750 WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
742 WARN_ON(1);
743 goto out; 751 goto out;
744 } 752 }
745 if (obj && obj->state != state) { 753 if (obj && obj->state != state) {
746 printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", 754 WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
747 obj->state, state); 755 obj->state, state);
748 WARN_ON(1);
749 goto out; 756 goto out;
750 } 757 }
751 if (fixups != debug_objects_fixups) { 758 if (fixups != debug_objects_fixups) {
752 printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", 759 WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
753 fixups, debug_objects_fixups); 760 fixups, debug_objects_fixups);
754 WARN_ON(1);
755 goto out; 761 goto out;
756 } 762 }
757 if (warnings != debug_objects_warnings) { 763 if (warnings != debug_objects_warnings) {
758 printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", 764 WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
759 warnings, debug_objects_warnings); 765 warnings, debug_objects_warnings);
760 WARN_ON(1);
761 goto out; 766 goto out;
762 } 767 }
763 res = 0; 768 res = 0;
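
Note: the recurring pattern in the debugobjects changes above is to detach objects onto a private list while the bucket lock is held (hlist_move_list(), or hlist_add_head() onto a local HLIST_HEAD) and to call free_object() only after the lock is dropped, since free_object() takes pool_lock itself. A simplified userspace sketch of that detach-under-lock, free-outside pattern, using an ordinary singly linked list and a pthread mutex instead of the kernel hlist/spinlock APIs:

    #include <pthread.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
    };

    struct bucket {
        pthread_mutex_t lock;
        struct node *head;
    };

    /* Detach the whole list while the lock is held, then do the freeing
     * outside it, so the free path can take other locks safely. */
    static void drain_bucket(struct bucket *b)
    {
        struct node *freelist, *n;

        pthread_mutex_lock(&b->lock);
        freelist = b->head;          /* counterpart of hlist_move_list() */
        b->head = NULL;
        pthread_mutex_unlock(&b->lock);

        while (freelist) {           /* now free them, lock no longer held */
            n = freelist;
            freelist = n->next;
            free(n);
        }
    }

    int main(void)
    {
        struct bucket b = { PTHREAD_MUTEX_INITIALIZER, NULL };
        int i;

        for (i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->next = b.head;
            b.head = n;
        }
        drain_bucket(&b);
        return 0;
    }
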
diff --git a/lib/idr.c b/lib/idr.c
index 7a02e173f027..e728c7fccc4d 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,6 +6,8 @@
6 * Modified by George Anzinger to reuse immediately and to use 6 * Modified by George Anzinger to reuse immediately and to use
7 * find bit instructions. Also removed _irq on spinlocks. 7 * find bit instructions. Also removed _irq on spinlocks.
8 * 8 *
9 * Modified by Nadia Derbey to make it RCU safe.
10 *
9 * Small id to pointer translation service. 11 * Small id to pointer translation service.
10 * 12 *
11 * It uses a radix tree like structure as a sparse array indexed 13 * It uses a radix tree like structure as a sparse array indexed
@@ -35,7 +37,7 @@
35 37
36static struct kmem_cache *idr_layer_cache; 38static struct kmem_cache *idr_layer_cache;
37 39
38static struct idr_layer *alloc_layer(struct idr *idp) 40static struct idr_layer *get_from_free_list(struct idr *idp)
39{ 41{
40 struct idr_layer *p; 42 struct idr_layer *p;
41 unsigned long flags; 43 unsigned long flags;
@@ -50,15 +52,28 @@ static struct idr_layer *alloc_layer(struct idr *idp)
50 return(p); 52 return(p);
51} 53}
52 54
55static void idr_layer_rcu_free(struct rcu_head *head)
56{
57 struct idr_layer *layer;
58
59 layer = container_of(head, struct idr_layer, rcu_head);
60 kmem_cache_free(idr_layer_cache, layer);
61}
62
63static inline void free_layer(struct idr_layer *p)
64{
65 call_rcu(&p->rcu_head, idr_layer_rcu_free);
66}
67
53/* only called when idp->lock is held */ 68/* only called when idp->lock is held */
54static void __free_layer(struct idr *idp, struct idr_layer *p) 69static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
55{ 70{
56 p->ary[0] = idp->id_free; 71 p->ary[0] = idp->id_free;
57 idp->id_free = p; 72 idp->id_free = p;
58 idp->id_free_cnt++; 73 idp->id_free_cnt++;
59} 74}
60 75
61static void free_layer(struct idr *idp, struct idr_layer *p) 76static void move_to_free_list(struct idr *idp, struct idr_layer *p)
62{ 77{
63 unsigned long flags; 78 unsigned long flags;
64 79
@@ -66,7 +81,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
66 * Depends on the return element being zeroed. 81 * Depends on the return element being zeroed.
67 */ 82 */
68 spin_lock_irqsave(&idp->lock, flags); 83 spin_lock_irqsave(&idp->lock, flags);
69 __free_layer(idp, p); 84 __move_to_free_list(idp, p);
70 spin_unlock_irqrestore(&idp->lock, flags); 85 spin_unlock_irqrestore(&idp->lock, flags);
71} 86}
72 87
@@ -96,7 +111,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
96 * @gfp_mask: memory allocation flags 111 * @gfp_mask: memory allocation flags
97 * 112 *
98 * This function should be called prior to locking and calling the 113 * This function should be called prior to locking and calling the
99 * following function. It preallocates enough memory to satisfy 114 * idr_get_new* functions. It preallocates enough memory to satisfy
100 * the worst possible allocation. 115 * the worst possible allocation.
101 * 116 *
102 * If the system is REALLY out of memory this function returns 0, 117 * If the system is REALLY out of memory this function returns 0,
@@ -109,7 +124,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
109 new = kmem_cache_alloc(idr_layer_cache, gfp_mask); 124 new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
110 if (new == NULL) 125 if (new == NULL)
111 return (0); 126 return (0);
112 free_layer(idp, new); 127 move_to_free_list(idp, new);
113 } 128 }
114 return 1; 129 return 1;
115} 130}
@@ -143,7 +158,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
143 /* if already at the top layer, we need to grow */ 158 /* if already at the top layer, we need to grow */
144 if (!(p = pa[l])) { 159 if (!(p = pa[l])) {
145 *starting_id = id; 160 *starting_id = id;
146 return -2; 161 return IDR_NEED_TO_GROW;
147 } 162 }
148 163
149 /* If we need to go up one layer, continue the 164 /* If we need to go up one layer, continue the
@@ -160,16 +175,17 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
160 id = ((id >> sh) ^ n ^ m) << sh; 175 id = ((id >> sh) ^ n ^ m) << sh;
161 } 176 }
162 if ((id >= MAX_ID_BIT) || (id < 0)) 177 if ((id >= MAX_ID_BIT) || (id < 0))
163 return -3; 178 return IDR_NOMORE_SPACE;
164 if (l == 0) 179 if (l == 0)
165 break; 180 break;
166 /* 181 /*
167 * Create the layer below if it is missing. 182 * Create the layer below if it is missing.
168 */ 183 */
169 if (!p->ary[m]) { 184 if (!p->ary[m]) {
170 if (!(new = alloc_layer(idp))) 185 new = get_from_free_list(idp);
186 if (!new)
171 return -1; 187 return -1;
172 p->ary[m] = new; 188 rcu_assign_pointer(p->ary[m], new);
173 p->count++; 189 p->count++;
174 } 190 }
175 pa[l--] = p; 191 pa[l--] = p;
@@ -192,7 +208,7 @@ build_up:
192 p = idp->top; 208 p = idp->top;
193 layers = idp->layers; 209 layers = idp->layers;
194 if (unlikely(!p)) { 210 if (unlikely(!p)) {
195 if (!(p = alloc_layer(idp))) 211 if (!(p = get_from_free_list(idp)))
196 return -1; 212 return -1;
197 layers = 1; 213 layers = 1;
198 } 214 }
@@ -204,7 +220,7 @@ build_up:
204 layers++; 220 layers++;
205 if (!p->count) 221 if (!p->count)
206 continue; 222 continue;
207 if (!(new = alloc_layer(idp))) { 223 if (!(new = get_from_free_list(idp))) {
208 /* 224 /*
209 * The allocation failed. If we built part of 225 * The allocation failed. If we built part of
210 * the structure tear it down. 226 * the structure tear it down.
@@ -214,7 +230,7 @@ build_up:
214 p = p->ary[0]; 230 p = p->ary[0];
215 new->ary[0] = NULL; 231 new->ary[0] = NULL;
216 new->bitmap = new->count = 0; 232 new->bitmap = new->count = 0;
217 __free_layer(idp, new); 233 __move_to_free_list(idp, new);
218 } 234 }
219 spin_unlock_irqrestore(&idp->lock, flags); 235 spin_unlock_irqrestore(&idp->lock, flags);
220 return -1; 236 return -1;
@@ -225,10 +241,10 @@ build_up:
225 __set_bit(0, &new->bitmap); 241 __set_bit(0, &new->bitmap);
226 p = new; 242 p = new;
227 } 243 }
228 idp->top = p; 244 rcu_assign_pointer(idp->top, p);
229 idp->layers = layers; 245 idp->layers = layers;
230 v = sub_alloc(idp, &id, pa); 246 v = sub_alloc(idp, &id, pa);
231 if (v == -2) 247 if (v == IDR_NEED_TO_GROW)
232 goto build_up; 248 goto build_up;
233 return(v); 249 return(v);
234} 250}
@@ -244,7 +260,8 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
244 * Successfully found an empty slot. Install the user 260 * Successfully found an empty slot. Install the user
245 * pointer and mark the slot full. 261 * pointer and mark the slot full.
246 */ 262 */
247 pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr; 263 rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
264 (struct idr_layer *)ptr);
248 pa[0]->count++; 265 pa[0]->count++;
249 idr_mark_full(pa, id); 266 idr_mark_full(pa, id);
250 } 267 }
@@ -277,12 +294,8 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
277 * This is a cheap hack until the IDR code can be fixed to 294 * This is a cheap hack until the IDR code can be fixed to
278 * return proper error values. 295 * return proper error values.
279 */ 296 */
280 if (rv < 0) { 297 if (rv < 0)
281 if (rv == -1) 298 return _idr_rc_to_errno(rv);
282 return -EAGAIN;
283 else /* Will be -3 */
284 return -ENOSPC;
285 }
286 *id = rv; 299 *id = rv;
287 return 0; 300 return 0;
288} 301}
@@ -312,12 +325,8 @@ int idr_get_new(struct idr *idp, void *ptr, int *id)
312 * This is a cheap hack until the IDR code can be fixed to 325 * This is a cheap hack until the IDR code can be fixed to
313 * return proper error values. 326 * return proper error values.
314 */ 327 */
315 if (rv < 0) { 328 if (rv < 0)
316 if (rv == -1) 329 return _idr_rc_to_errno(rv);
317 return -EAGAIN;
318 else /* Will be -3 */
319 return -ENOSPC;
320 }
321 *id = rv; 330 *id = rv;
322 return 0; 331 return 0;
323} 332}
@@ -325,7 +334,8 @@ EXPORT_SYMBOL(idr_get_new);
325 334
326static void idr_remove_warning(int id) 335static void idr_remove_warning(int id)
327{ 336{
328 printk("idr_remove called for id=%d which is not allocated.\n", id); 337 printk(KERN_WARNING
338 "idr_remove called for id=%d which is not allocated.\n", id);
329 dump_stack(); 339 dump_stack();
330} 340}
331 341
@@ -334,6 +344,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
334 struct idr_layer *p = idp->top; 344 struct idr_layer *p = idp->top;
335 struct idr_layer **pa[MAX_LEVEL]; 345 struct idr_layer **pa[MAX_LEVEL];
336 struct idr_layer ***paa = &pa[0]; 346 struct idr_layer ***paa = &pa[0];
347 struct idr_layer *to_free;
337 int n; 348 int n;
338 349
339 *paa = NULL; 350 *paa = NULL;
@@ -349,13 +360,18 @@ static void sub_remove(struct idr *idp, int shift, int id)
349 n = id & IDR_MASK; 360 n = id & IDR_MASK;
350 if (likely(p != NULL && test_bit(n, &p->bitmap))){ 361 if (likely(p != NULL && test_bit(n, &p->bitmap))){
351 __clear_bit(n, &p->bitmap); 362 __clear_bit(n, &p->bitmap);
352 p->ary[n] = NULL; 363 rcu_assign_pointer(p->ary[n], NULL);
364 to_free = NULL;
353 while(*paa && ! --((**paa)->count)){ 365 while(*paa && ! --((**paa)->count)){
354 free_layer(idp, **paa); 366 if (to_free)
367 free_layer(to_free);
368 to_free = **paa;
355 **paa-- = NULL; 369 **paa-- = NULL;
356 } 370 }
357 if (!*paa) 371 if (!*paa)
358 idp->layers = 0; 372 idp->layers = 0;
373 if (to_free)
374 free_layer(to_free);
359 } else 375 } else
360 idr_remove_warning(id); 376 idr_remove_warning(id);
361} 377}
@@ -368,22 +384,34 @@ static void sub_remove(struct idr *idp, int shift, int id)
368void idr_remove(struct idr *idp, int id) 384void idr_remove(struct idr *idp, int id)
369{ 385{
370 struct idr_layer *p; 386 struct idr_layer *p;
387 struct idr_layer *to_free;
371 388
372 /* Mask off upper bits we don't use for the search. */ 389 /* Mask off upper bits we don't use for the search. */
373 id &= MAX_ID_MASK; 390 id &= MAX_ID_MASK;
374 391
375 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); 392 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
376 if (idp->top && idp->top->count == 1 && (idp->layers > 1) && 393 if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
377 idp->top->ary[0]) { // We can drop a layer 394 idp->top->ary[0]) {
378 395 /*
396 * Single child at leftmost slot: we can shrink the tree.
397 * This level is not needed anymore since when layers are
398 * inserted, they are inserted at the top of the existing
399 * tree.
400 */
401 to_free = idp->top;
379 p = idp->top->ary[0]; 402 p = idp->top->ary[0];
380 idp->top->bitmap = idp->top->count = 0; 403 rcu_assign_pointer(idp->top, p);
381 free_layer(idp, idp->top);
382 idp->top = p;
383 --idp->layers; 404 --idp->layers;
405 to_free->bitmap = to_free->count = 0;
406 free_layer(to_free);
384 } 407 }
385 while (idp->id_free_cnt >= IDR_FREE_MAX) { 408 while (idp->id_free_cnt >= IDR_FREE_MAX) {
386 p = alloc_layer(idp); 409 p = get_from_free_list(idp);
410 /*
411 * Note: we don't call the rcu callback here, since the only
412 * layers that fall into the freelist are those that have been
413 * preallocated.
414 */
387 kmem_cache_free(idr_layer_cache, p); 415 kmem_cache_free(idr_layer_cache, p);
388 } 416 }
389 return; 417 return;
@@ -424,15 +452,13 @@ void idr_remove_all(struct idr *idp)
424 452
425 id += 1 << n; 453 id += 1 << n;
426 while (n < fls(id)) { 454 while (n < fls(id)) {
427 if (p) { 455 if (p)
428 memset(p, 0, sizeof *p); 456 free_layer(p);
429 free_layer(idp, p);
430 }
431 n += IDR_BITS; 457 n += IDR_BITS;
432 p = *--paa; 458 p = *--paa;
433 } 459 }
434 } 460 }
435 idp->top = NULL; 461 rcu_assign_pointer(idp->top, NULL);
436 idp->layers = 0; 462 idp->layers = 0;
437} 463}
438EXPORT_SYMBOL(idr_remove_all); 464EXPORT_SYMBOL(idr_remove_all);
@@ -444,7 +470,7 @@ EXPORT_SYMBOL(idr_remove_all);
444void idr_destroy(struct idr *idp) 470void idr_destroy(struct idr *idp)
445{ 471{
446 while (idp->id_free_cnt) { 472 while (idp->id_free_cnt) {
447 struct idr_layer *p = alloc_layer(idp); 473 struct idr_layer *p = get_from_free_list(idp);
448 kmem_cache_free(idr_layer_cache, p); 474 kmem_cache_free(idr_layer_cache, p);
449 } 475 }
450} 476}
@@ -459,7 +485,8 @@ EXPORT_SYMBOL(idr_destroy);
459 * return indicates that @id is not valid or you passed %NULL in 485 * return indicates that @id is not valid or you passed %NULL in
460 * idr_get_new(). 486 * idr_get_new().
461 * 487 *
462 * The caller must serialize idr_find() vs idr_get_new() and idr_remove(). 488 * This function can be called under rcu_read_lock(), given that the leaf
489 * pointers lifetimes are correctly managed.
463 */ 490 */
464void *idr_find(struct idr *idp, int id) 491void *idr_find(struct idr *idp, int id)
465{ 492{
@@ -467,7 +494,7 @@ void *idr_find(struct idr *idp, int id)
467 struct idr_layer *p; 494 struct idr_layer *p;
468 495
469 n = idp->layers * IDR_BITS; 496 n = idp->layers * IDR_BITS;
470 p = idp->top; 497 p = rcu_dereference(idp->top);
471 498
472 /* Mask off upper bits we don't use for the search. */ 499 /* Mask off upper bits we don't use for the search. */
473 id &= MAX_ID_MASK; 500 id &= MAX_ID_MASK;
@@ -477,7 +504,7 @@ void *idr_find(struct idr *idp, int id)
477 504
478 while (n > 0 && p) { 505 while (n > 0 && p) {
479 n -= IDR_BITS; 506 n -= IDR_BITS;
480 p = p->ary[(id >> n) & IDR_MASK]; 507 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
481 } 508 }
482 return((void *)p); 509 return((void *)p);
483} 510}
@@ -510,7 +537,7 @@ int idr_for_each(struct idr *idp,
510 struct idr_layer **paa = &pa[0]; 537 struct idr_layer **paa = &pa[0];
511 538
512 n = idp->layers * IDR_BITS; 539 n = idp->layers * IDR_BITS;
513 p = idp->top; 540 p = rcu_dereference(idp->top);
514 max = 1 << n; 541 max = 1 << n;
515 542
516 id = 0; 543 id = 0;
@@ -518,7 +545,7 @@ int idr_for_each(struct idr *idp,
518 while (n > 0 && p) { 545 while (n > 0 && p) {
519 n -= IDR_BITS; 546 n -= IDR_BITS;
520 *paa++ = p; 547 *paa++ = p;
521 p = p->ary[(id >> n) & IDR_MASK]; 548 p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
522 } 549 }
523 550
524 if (p) { 551 if (p) {
@@ -548,7 +575,7 @@ EXPORT_SYMBOL(idr_for_each);
548 * A -ENOENT return indicates that @id was not found. 575 * A -ENOENT return indicates that @id was not found.
549 * A -EINVAL return indicates that @id was not within valid constraints. 576 * A -EINVAL return indicates that @id was not within valid constraints.
550 * 577 *
551 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove(). 578 * The caller must serialize with writers.
552 */ 579 */
553void *idr_replace(struct idr *idp, void *ptr, int id) 580void *idr_replace(struct idr *idp, void *ptr, int id)
554{ 581{
@@ -574,13 +601,13 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
574 return ERR_PTR(-ENOENT); 601 return ERR_PTR(-ENOENT);
575 602
576 old_p = p->ary[n]; 603 old_p = p->ary[n];
577 p->ary[n] = ptr; 604 rcu_assign_pointer(p->ary[n], ptr);
578 605
579 return old_p; 606 return old_p;
580} 607}
581EXPORT_SYMBOL(idr_replace); 608EXPORT_SYMBOL(idr_replace);
582 609
583static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer) 610static void idr_cache_ctor(void *idr_layer)
584{ 611{
585 memset(idr_layer, 0, sizeof(struct idr_layer)); 612 memset(idr_layer, 0, sizeof(struct idr_layer));
586} 613}
@@ -694,12 +721,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
694 restart: 721 restart:
695 /* get vacant slot */ 722 /* get vacant slot */
696 t = idr_get_empty_slot(&ida->idr, idr_id, pa); 723 t = idr_get_empty_slot(&ida->idr, idr_id, pa);
697 if (t < 0) { 724 if (t < 0)
698 if (t == -1) 725 return _idr_rc_to_errno(t);
699 return -EAGAIN;
700 else /* will be -3 */
701 return -ENOSPC;
702 }
703 726
704 if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) 727 if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
705 return -ENOSPC; 728 return -ENOSPC;
@@ -720,7 +743,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
720 return -EAGAIN; 743 return -EAGAIN;
721 744
722 memset(bitmap, 0, sizeof(struct ida_bitmap)); 745 memset(bitmap, 0, sizeof(struct ida_bitmap));
723 pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap; 746 rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
747 (void *)bitmap);
724 pa[0]->count++; 748 pa[0]->count++;
725 } 749 }
726 750
@@ -749,7 +773,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
749 * allocation. 773 * allocation.
750 */ 774 */
751 if (ida->idr.id_free_cnt || ida->free_bitmap) { 775 if (ida->idr.id_free_cnt || ida->free_bitmap) {
752 struct idr_layer *p = alloc_layer(&ida->idr); 776 struct idr_layer *p = get_from_free_list(&ida->idr);
753 if (p) 777 if (p)
754 kmem_cache_free(idr_layer_cache, p); 778 kmem_cache_free(idr_layer_cache, p);
755 } 779 }
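
Note: the idr conversion pairs rcu_assign_pointer() on every store into the tree with rcu_dereference() on every read, so idr_find() can run under rcu_read_lock() while removal defers the actual kmem_cache_free() through call_rcu(). As a loose userspace analogy only (C11 atomics are not the kernel RCU primitives, and reclamation is not modelled here), the publish/read ordering looks like this:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct layer {
        int value;
    };

    static _Atomic(struct layer *) top;

    /* Writer: fully initialise the object, then publish it with release
     * semantics -- the rough analogue of rcu_assign_pointer(). */
    static void publish(int value)
    {
        struct layer *l = malloc(sizeof(*l));

        l->value = value;
        atomic_store_explicit(&top, l, memory_order_release);
    }

    /* Reader: load with acquire semantics -- the rough analogue of
     * rcu_dereference() inside an rcu_read_lock() section. */
    static int lookup(void)
    {
        struct layer *l = atomic_load_explicit(&top, memory_order_acquire);

        return l ? l->value : -1;
    }

    int main(void)
    {
        publish(42);
        printf("%d\n", lookup());
        return 0;
    }
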
diff --git a/lib/inflate.c b/lib/inflate.c
index 9762294be062..1a8e8a978128 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -230,6 +230,45 @@ STATIC const ush mask_bits[] = {
230#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}} 230#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
231#define DUMPBITS(n) {b>>=(n);k-=(n);} 231#define DUMPBITS(n) {b>>=(n);k-=(n);}
232 232
233#ifndef NO_INFLATE_MALLOC
234/* A trivial malloc implementation, adapted from
235 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
236 */
237
238static unsigned long malloc_ptr;
239static int malloc_count;
240
241static void *malloc(int size)
242{
243 void *p;
244
245 if (size < 0)
246 error("Malloc error");
247 if (!malloc_ptr)
248 malloc_ptr = free_mem_ptr;
249
250 malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
251
252 p = (void *)malloc_ptr;
253 malloc_ptr += size;
254
255 if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
256 error("Out of memory");
257
258 malloc_count++;
259 return p;
260}
261
262static void free(void *where)
263{
264 malloc_count--;
265 if (!malloc_count)
266 malloc_ptr = free_mem_ptr;
267}
268#else
269#define malloc(a) kmalloc(a, GFP_KERNEL)
270#define free(a) kfree(a)
271#endif
233 272
234/* 273/*
235 Huffman code decoding is performed using a multi-level table lookup. 274 Huffman code decoding is performed using a multi-level table lookup.
@@ -1045,7 +1084,6 @@ STATIC int INIT inflate(void)
1045 int e; /* last block flag */ 1084 int e; /* last block flag */
1046 int r; /* result code */ 1085 int r; /* result code */
1047 unsigned h; /* maximum struct huft's malloc'ed */ 1086 unsigned h; /* maximum struct huft's malloc'ed */
1048 void *ptr;
1049 1087
1050 /* initialize window, bit buffer */ 1088 /* initialize window, bit buffer */
1051 wp = 0; 1089 wp = 0;
@@ -1057,12 +1095,12 @@ STATIC int INIT inflate(void)
1057 h = 0; 1095 h = 0;
1058 do { 1096 do {
1059 hufts = 0; 1097 hufts = 0;
1060 gzip_mark(&ptr); 1098#ifdef ARCH_HAS_DECOMP_WDOG
1061 if ((r = inflate_block(&e)) != 0) { 1099 arch_decomp_wdog();
1062 gzip_release(&ptr); 1100#endif
1063 return r; 1101 r = inflate_block(&e);
1064 } 1102 if (r)
1065 gzip_release(&ptr); 1103 return r;
1066 if (hufts > h) 1104 if (hufts > h)
1067 h = hufts; 1105 h = hufts;
1068 } while (!e); 1106 } while (!e);
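
Note: the NO_INFLATE_MALLOC allocator added above is a bump allocator: malloc() just advances a pointer through a fixed region with 4-byte alignment, free() never returns individual blocks, and the region is rewound once the outstanding allocation count reaches zero. A userspace sketch under the assumption that the region is a static array (in the real code, free_mem_ptr and free_mem_end_ptr are supplied by the boot decompressor):

    #include <stdio.h>
    #include <stdint.h>

    static unsigned char arena[4096];   /* stand-in for free_mem_ptr..free_mem_end_ptr */
    static uintptr_t malloc_ptr;
    static int malloc_count;

    static void *bump_malloc(int size)
    {
        void *p;

        if (!malloc_ptr)
            malloc_ptr = (uintptr_t)arena;

        malloc_ptr = (malloc_ptr + 3) & ~(uintptr_t)3;   /* 4-byte align, as above */
        p = (void *)malloc_ptr;
        malloc_ptr += size;

        if (malloc_ptr >= (uintptr_t)arena + sizeof(arena))
            return NULL;                 /* the original calls error("Out of memory") */

        malloc_count++;
        return p;
    }

    static void bump_free(void *where)
    {
        (void)where;                     /* individual blocks are never reclaimed... */
        if (--malloc_count == 0)
            malloc_ptr = (uintptr_t)arena;   /* ...the whole arena is rewound instead */
    }

    int main(void)
    {
        void *a = bump_malloc(10);
        void *b = bump_malloc(20);

        printf("%p %p\n", a, b);
        bump_free(b);
        bump_free(a);                    /* count hits zero -> arena rewinds */
        return 0;
    }
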
diff --git a/lib/iomap.c b/lib/iomap.c
index 37a3ea4cac9f..d32229385151 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -40,8 +40,7 @@ static void bad_io_access(unsigned long port, const char *access)
40 static int count = 10; 40 static int count = 10;
41 if (count) { 41 if (count) {
42 count--; 42 count--;
43 printk(KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access); 43 WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
44 WARN_ON(1);
45 } 44 }
46} 45}
47 46
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index a3b8d4c3f77a..5d90074dca75 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -30,8 +30,7 @@ again:
30 return index; 30 return index;
31} 31}
32 32
33static inline void set_bit_area(unsigned long *map, unsigned long i, 33void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
34 int len)
35{ 34{
36 unsigned long end = i + len; 35 unsigned long end = i + len;
37 while (i < end) { 36 while (i < end) {
@@ -64,7 +63,7 @@ again:
64 start = index + 1; 63 start = index + 1;
65 goto again; 64 goto again;
66 } 65 }
67 set_bit_area(map, index, nr); 66 iommu_area_reserve(map, index, nr);
68 } 67 }
69 return index; 68 return index;
70} 69}
diff --git a/lib/klist.c b/lib/klist.c
index cca37f96faa2..bbdd3015c2c7 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -37,6 +37,37 @@
37#include <linux/klist.h> 37#include <linux/klist.h>
38#include <linux/module.h> 38#include <linux/module.h>
39 39
40/*
41 * Use the lowest bit of n_klist to mark deleted nodes and exclude
42 * dead ones from iteration.
43 */
44#define KNODE_DEAD 1LU
45#define KNODE_KLIST_MASK ~KNODE_DEAD
46
47static struct klist *knode_klist(struct klist_node *knode)
48{
49 return (struct klist *)
50 ((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
51}
52
53static bool knode_dead(struct klist_node *knode)
54{
55 return (unsigned long)knode->n_klist & KNODE_DEAD;
56}
57
58static void knode_set_klist(struct klist_node *knode, struct klist *klist)
59{
60 knode->n_klist = klist;
61 /* no knode deserves to start its life dead */
62 WARN_ON(knode_dead(knode));
63}
64
65static void knode_kill(struct klist_node *knode)
66{
67 /* and no knode should die twice ever either, see we're very humane */
68 WARN_ON(knode_dead(knode));
69 *(unsigned long *)&knode->n_klist |= KNODE_DEAD;
70}
40 71
41/** 72/**
42 * klist_init - Initialize a klist structure. 73 * klist_init - Initialize a klist structure.
@@ -79,7 +110,7 @@ static void klist_node_init(struct klist *k, struct klist_node *n)
79 INIT_LIST_HEAD(&n->n_node); 110 INIT_LIST_HEAD(&n->n_node);
80 init_completion(&n->n_removed); 111 init_completion(&n->n_removed);
81 kref_init(&n->n_ref); 112 kref_init(&n->n_ref);
82 n->n_klist = k; 113 knode_set_klist(n, k);
83 if (k->get) 114 if (k->get)
84 k->get(n); 115 k->get(n);
85} 116}
@@ -115,7 +146,7 @@ EXPORT_SYMBOL_GPL(klist_add_tail);
115 */ 146 */
116void klist_add_after(struct klist_node *n, struct klist_node *pos) 147void klist_add_after(struct klist_node *n, struct klist_node *pos)
117{ 148{
118 struct klist *k = pos->n_klist; 149 struct klist *k = knode_klist(pos);
119 150
120 klist_node_init(k, n); 151 klist_node_init(k, n);
121 spin_lock(&k->k_lock); 152 spin_lock(&k->k_lock);
@@ -131,7 +162,7 @@ EXPORT_SYMBOL_GPL(klist_add_after);
131 */ 162 */
132void klist_add_before(struct klist_node *n, struct klist_node *pos) 163void klist_add_before(struct klist_node *n, struct klist_node *pos)
133{ 164{
134 struct klist *k = pos->n_klist; 165 struct klist *k = knode_klist(pos);
135 166
136 klist_node_init(k, n); 167 klist_node_init(k, n);
137 spin_lock(&k->k_lock); 168 spin_lock(&k->k_lock);
@@ -144,9 +175,10 @@ static void klist_release(struct kref *kref)
144{ 175{
145 struct klist_node *n = container_of(kref, struct klist_node, n_ref); 176 struct klist_node *n = container_of(kref, struct klist_node, n_ref);
146 177
178 WARN_ON(!knode_dead(n));
147 list_del(&n->n_node); 179 list_del(&n->n_node);
148 complete(&n->n_removed); 180 complete(&n->n_removed);
149 n->n_klist = NULL; 181 knode_set_klist(n, NULL);
150} 182}
151 183
152static int klist_dec_and_del(struct klist_node *n) 184static int klist_dec_and_del(struct klist_node *n)
@@ -154,22 +186,29 @@ static int klist_dec_and_del(struct klist_node *n)
154 return kref_put(&n->n_ref, klist_release); 186 return kref_put(&n->n_ref, klist_release);
155} 187}
156 188
157/** 189static void klist_put(struct klist_node *n, bool kill)
158 * klist_del - Decrement the reference count of node and try to remove.
159 * @n: node we're deleting.
160 */
161void klist_del(struct klist_node *n)
162{ 190{
163 struct klist *k = n->n_klist; 191 struct klist *k = knode_klist(n);
164 void (*put)(struct klist_node *) = k->put; 192 void (*put)(struct klist_node *) = k->put;
165 193
166 spin_lock(&k->k_lock); 194 spin_lock(&k->k_lock);
195 if (kill)
196 knode_kill(n);
167 if (!klist_dec_and_del(n)) 197 if (!klist_dec_and_del(n))
168 put = NULL; 198 put = NULL;
169 spin_unlock(&k->k_lock); 199 spin_unlock(&k->k_lock);
170 if (put) 200 if (put)
171 put(n); 201 put(n);
172} 202}
203
204/**
205 * klist_del - Decrement the reference count of node and try to remove.
206 * @n: node we're deleting.
207 */
208void klist_del(struct klist_node *n)
209{
210 klist_put(n, true);
211}
173EXPORT_SYMBOL_GPL(klist_del); 212EXPORT_SYMBOL_GPL(klist_del);
174 213
175/** 214/**
@@ -206,7 +245,6 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
206 struct klist_node *n) 245 struct klist_node *n)
207{ 246{
208 i->i_klist = k; 247 i->i_klist = k;
209 i->i_head = &k->k_list;
210 i->i_cur = n; 248 i->i_cur = n;
211 if (n) 249 if (n)
212 kref_get(&n->n_ref); 250 kref_get(&n->n_ref);
@@ -237,7 +275,7 @@ EXPORT_SYMBOL_GPL(klist_iter_init);
237void klist_iter_exit(struct klist_iter *i) 275void klist_iter_exit(struct klist_iter *i)
238{ 276{
239 if (i->i_cur) { 277 if (i->i_cur) {
240 klist_del(i->i_cur); 278 klist_put(i->i_cur, false);
241 i->i_cur = NULL; 279 i->i_cur = NULL;
242 } 280 }
243} 281}
@@ -258,27 +296,33 @@ static struct klist_node *to_klist_node(struct list_head *n)
258 */ 296 */
259struct klist_node *klist_next(struct klist_iter *i) 297struct klist_node *klist_next(struct klist_iter *i)
260{ 298{
261 struct list_head *next;
262 struct klist_node *lnode = i->i_cur;
263 struct klist_node *knode = NULL;
264 void (*put)(struct klist_node *) = i->i_klist->put; 299 void (*put)(struct klist_node *) = i->i_klist->put;
300 struct klist_node *last = i->i_cur;
301 struct klist_node *next;
265 302
266 spin_lock(&i->i_klist->k_lock); 303 spin_lock(&i->i_klist->k_lock);
267 if (lnode) { 304
268 next = lnode->n_node.next; 305 if (last) {
269 if (!klist_dec_and_del(lnode)) 306 next = to_klist_node(last->n_node.next);
307 if (!klist_dec_and_del(last))
270 put = NULL; 308 put = NULL;
271 } else 309 } else
272 next = i->i_head->next; 310 next = to_klist_node(i->i_klist->k_list.next);
273 311
274 if (next != i->i_head) { 312 i->i_cur = NULL;
275 knode = to_klist_node(next); 313 while (next != to_klist_node(&i->i_klist->k_list)) {
276 kref_get(&knode->n_ref); 314 if (likely(!knode_dead(next))) {
315 kref_get(&next->n_ref);
316 i->i_cur = next;
317 break;
318 }
319 next = to_klist_node(next->n_node.next);
277 } 320 }
278 i->i_cur = knode; 321
279 spin_unlock(&i->i_klist->k_lock); 322 spin_unlock(&i->i_klist->k_lock);
280 if (put && lnode) 323
281 put(lnode); 324 if (put && last)
282 return knode; 325 put(last);
326 return i->i_cur;
283} 327}
284EXPORT_SYMBOL_GPL(klist_next); 328EXPORT_SYMBOL_GPL(klist_next);
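
The KNODE_DEAD trick above stores a flag in the lowest bit of the klist back-pointer, which works because the pointed-to structure is word aligned, so that bit is otherwise always zero. Here is a stand-alone sketch of the same low-bit tagging with made-up names (node, node_kill, and so on), assuming the tagged pointer target is at least 2-byte aligned.

#include <stdio.h>

#define NODE_DEAD      1UL
#define NODE_PTR_MASK  (~NODE_DEAD)

struct list { int dummy; };

/* the tagged field plays the role of klist_node->n_klist */
struct node { unsigned long tagged; };

static struct list *node_list(struct node *n)
{
        return (struct list *)(n->tagged & NODE_PTR_MASK);
}

static int node_dead(struct node *n)
{
        return n->tagged & NODE_DEAD;
}

static void node_kill(struct node *n)
{
        n->tagged |= NODE_DEAD;         /* mark dead, keep the pointer */
}

int main(void)
{
        static struct list l;           /* address is at least 2-byte aligned */
        struct node n = { (unsigned long)&l };

        printf("dead=%d list=%p\n", node_dead(&n), (void *)node_list(&n));
        node_kill(&n);
        printf("dead=%d list=%p\n", node_dead(&n), (void *)node_list(&n));
        return 0;
}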
diff --git a/lib/kobject.c b/lib/kobject.c
index 718e5101c263..fbf0ae282376 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -164,9 +164,8 @@ static int kobject_add_internal(struct kobject *kobj)
164 return -ENOENT; 164 return -ENOENT;
165 165
166 if (!kobj->name || !kobj->name[0]) { 166 if (!kobj->name || !kobj->name[0]) {
167 pr_debug("kobject: (%p): attempted to be registered with empty " 167 WARN(1, "kobject: (%p): attempted to be registered with empty "
168 "name!\n", kobj); 168 "name!\n", kobj);
169 WARN_ON(1);
170 return -EINVAL; 169 return -EINVAL;
171 } 170 }
172 171
@@ -216,13 +215,18 @@ static int kobject_add_internal(struct kobject *kobj)
216static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, 215static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
217 va_list vargs) 216 va_list vargs)
218{ 217{
219 /* Free the old name, if necessary. */ 218 const char *old_name = kobj->name;
220 kfree(kobj->name); 219 char *s;
221 220
222 kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); 221 kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
223 if (!kobj->name) 222 if (!kobj->name)
224 return -ENOMEM; 223 return -ENOMEM;
225 224
225 /* ewww... some of these buggers have '/' in the name ... */
226 while ((s = strchr(kobj->name, '/')))
227 s[0] = '!';
228
229 kfree(old_name);
226 return 0; 230 return 0;
227} 231}
228 232
@@ -439,6 +443,7 @@ out:
439 443
440 return error; 444 return error;
441} 445}
446EXPORT_SYMBOL_GPL(kobject_rename);
442 447
443/** 448/**
444 * kobject_move - move object to another parent 449 * kobject_move - move object to another parent
@@ -576,12 +581,10 @@ static void kobject_release(struct kref *kref)
576void kobject_put(struct kobject *kobj) 581void kobject_put(struct kobject *kobj)
577{ 582{
578 if (kobj) { 583 if (kobj) {
579 if (!kobj->state_initialized) { 584 if (!kobj->state_initialized)
580 printk(KERN_WARNING "kobject: '%s' (%p): is not " 585 WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
581 "initialized, yet kobject_put() is being " 586 "initialized, yet kobject_put() is being "
582 "called.\n", kobject_name(kobj), kobj); 587 "called.\n", kobject_name(kobj), kobj);
583 WARN_ON(1);
584 }
585 kref_put(&kobj->kref, kobject_release); 588 kref_put(&kobj->kref, kobject_release);
586 } 589 }
587} 590}
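
kobject_set_name_vargs() now rewrites any '/' in the formatted name to '!' so the resulting sysfs entry can actually be created. A runnable user-space sketch of just that replacement loop (the sample name is made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char name[] = "eth0/1";         /* a name containing '/' */
        char *s;

        /* same replacement loop as kobject_set_name_vargs() */
        while ((s = strchr(name, '/')))
                s[0] = '!';

        printf("%s\n", name);           /* prints "eth0!1" */
        return 0;
}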
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2fa545a63160..3f914725bda8 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -245,7 +245,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
245 if (retval) 245 if (retval)
246 goto exit; 246 goto exit;
247 247
248 call_usermodehelper(argv[0], argv, env->envp, UMH_WAIT_EXEC); 248 retval = call_usermodehelper(argv[0], argv,
249 env->envp, UMH_WAIT_EXEC);
249 } 250 }
250 251
251exit: 252exit:
@@ -284,8 +285,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
284 int len; 285 int len;
285 286
286 if (env->envp_idx >= ARRAY_SIZE(env->envp)) { 287 if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
287 printk(KERN_ERR "add_uevent_var: too many keys\n"); 288 WARN(1, KERN_ERR "add_uevent_var: too many keys\n");
288 WARN_ON(1);
289 return -ENOMEM; 289 return -ENOMEM;
290 } 290 }
291 291
@@ -296,8 +296,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
296 va_end(args); 296 va_end(args);
297 297
298 if (len >= (sizeof(env->buf) - env->buflen)) { 298 if (len >= (sizeof(env->buf) - env->buflen)) {
299 printk(KERN_ERR "add_uevent_var: buffer size too small\n"); 299 WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n");
300 WARN_ON(1);
301 return -ENOMEM; 300 return -ENOMEM;
302 } 301 }
303 302
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 4350ba9655bd..1a39f4e3ae1f 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -20,18 +20,14 @@ void __list_add(struct list_head *new,
20 struct list_head *prev, 20 struct list_head *prev,
21 struct list_head *next) 21 struct list_head *next)
22{ 22{
23 if (unlikely(next->prev != prev)) { 23 WARN(next->prev != prev,
24 printk(KERN_ERR "list_add corruption. next->prev should be " 24 "list_add corruption. next->prev should be "
25 "prev (%p), but was %p. (next=%p).\n", 25 "prev (%p), but was %p. (next=%p).\n",
26 prev, next->prev, next); 26 prev, next->prev, next);
27 BUG(); 27 WARN(prev->next != next,
28 } 28 "list_add corruption. prev->next should be "
29 if (unlikely(prev->next != next)) { 29 "next (%p), but was %p. (prev=%p).\n",
30 printk(KERN_ERR "list_add corruption. prev->next should be " 30 next, prev->next, prev);
31 "next (%p), but was %p. (prev=%p).\n",
32 next, prev->next, prev);
33 BUG();
34 }
35 next->prev = new; 31 next->prev = new;
36 new->next = next; 32 new->next = next;
37 new->prev = prev; 33 new->prev = prev;
@@ -40,20 +36,6 @@ void __list_add(struct list_head *new,
40EXPORT_SYMBOL(__list_add); 36EXPORT_SYMBOL(__list_add);
41 37
42/** 38/**
43 * list_add - add a new entry
44 * @new: new entry to be added
45 * @head: list head to add it after
46 *
47 * Insert a new entry after the specified head.
48 * This is good for implementing stacks.
49 */
50void list_add(struct list_head *new, struct list_head *head)
51{
52 __list_add(new, head, head->next);
53}
54EXPORT_SYMBOL(list_add);
55
56/**
57 * list_del - deletes entry from list. 39 * list_del - deletes entry from list.
58 * @entry: the element to delete from the list. 40 * @entry: the element to delete from the list.
59 * Note: list_empty on entry does not return true after this, the entry is 41 * Note: list_empty on entry does not return true after this, the entry is
@@ -61,16 +43,12 @@ EXPORT_SYMBOL(list_add);
61 */ 43 */
62void list_del(struct list_head *entry) 44void list_del(struct list_head *entry)
63{ 45{
64 if (unlikely(entry->prev->next != entry)) { 46 WARN(entry->prev->next != entry,
65 printk(KERN_ERR "list_del corruption. prev->next should be %p, " 47 "list_del corruption. prev->next should be %p, "
66 "but was %p\n", entry, entry->prev->next); 48 "but was %p\n", entry, entry->prev->next);
67 BUG(); 49 WARN(entry->next->prev != entry,
68 } 50 "list_del corruption. next->prev should be %p, "
69 if (unlikely(entry->next->prev != entry)) { 51 "but was %p\n", entry, entry->next->prev);
70 printk(KERN_ERR "list_del corruption. next->prev should be %p, "
71 "but was %p\n", entry, entry->next->prev);
72 BUG();
73 }
74 __list_del(entry->prev, entry->next); 52 __list_del(entry->prev, entry->next);
75 entry->next = LIST_POISON1; 53 entry->next = LIST_POISON1;
76 entry->prev = LIST_POISON2; 54 entry->prev = LIST_POISON2;
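
The list debug checks now use WARN(), which evaluates its condition, prints only when the condition is true, and lets execution continue instead of killing the machine with BUG(). A user-space approximation of that behaviour, with a simplified WARN() stand-in (GNU C statement expressions assumed; this is not the kernel macro):

#include <stdio.h>

/* user-space stand-in for the kernel WARN(): report but keep going */
#define WARN(cond, fmt, ...)                                    \
        ({ int __c = !!(cond);                                  \
           if (__c) fprintf(stderr, fmt, ##__VA_ARGS__);        \
           __c; })

struct list_head { struct list_head *next, *prev; };

static void check_add(struct list_head *prev, struct list_head *next)
{
        WARN(next->prev != prev,
             "list_add corruption. next->prev should be prev (%p), but was %p.\n",
             (void *)prev, (void *)next->prev);
}

int main(void)
{
        struct list_head a, b;

        a.next = &b; b.prev = &a;       /* consistent */
        check_add(&a, &b);

        b.prev = &b;                    /* simulate corruption */
        check_add(&a, &b);              /* warns, but execution continues */
        return 0;
}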
diff --git a/lib/lmb.c b/lib/lmb.c
index 5d7b9286503e..97e547037084 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -462,6 +462,8 @@ void __init lmb_enforce_memory_limit(u64 memory_limit)
462 if (lmb.memory.region[0].size < lmb.rmo_size) 462 if (lmb.memory.region[0].size < lmb.rmo_size)
463 lmb.rmo_size = lmb.memory.region[0].size; 463 lmb.rmo_size = lmb.memory.region[0].size;
464 464
465 memory_limit = lmb_end_of_DRAM();
466
465 /* And truncate any reserves above the limit also. */ 467 /* And truncate any reserves above the limit also. */
466 for (i = 0; i < lmb.reserved.cnt; i++) { 468 for (i = 0; i < lmb.reserved.cnt; i++) {
467 p = &lmb.reserved.region[i]; 469 p = &lmb.reserved.region[i];
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c
index 77f0f9b775a9..5dc6b29c1575 100644
--- a/lib/lzo/lzo1x_decompress.c
+++ b/lib/lzo/lzo1x_decompress.c
@@ -138,8 +138,7 @@ match:
138 t += 31 + *ip++; 138 t += 31 + *ip++;
139 } 139 }
140 m_pos = op - 1; 140 m_pos = op - 1;
141 m_pos -= le16_to_cpu(get_unaligned( 141 m_pos -= get_unaligned_le16(ip) >> 2;
142 (const unsigned short *)ip)) >> 2;
143 ip += 2; 142 ip += 2;
144 } else if (t >= 16) { 143 } else if (t >= 16) {
145 m_pos = op; 144 m_pos = op;
@@ -157,8 +156,7 @@ match:
157 } 156 }
158 t += 7 + *ip++; 157 t += 7 + *ip++;
159 } 158 }
160 m_pos -= le16_to_cpu(get_unaligned( 159 m_pos -= get_unaligned_le16(ip) >> 2;
161 (const unsigned short *)ip)) >> 2;
162 ip += 2; 160 ip += 2;
163 if (m_pos == op) 161 if (m_pos == op)
164 goto eof_found; 162 goto eof_found;
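
get_unaligned_le16() reads a little-endian 16-bit value from a possibly unaligned address, replacing the old le16_to_cpu(get_unaligned(...)) cast. A portable user-space stand-in and a quick check follow; the buffer contents are made up. The decompressor then uses the value exactly as before, as m_pos -= get_unaligned_le16(ip) >> 2.

#include <stdio.h>
#include <stdint.h>

/* portable stand-in for get_unaligned_le16(): assemble from bytes */
static uint16_t get_le16(const void *p)
{
        const uint8_t *b = p;

        return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
        uint8_t buf[] = { 0x00, 0x34, 0x12 };   /* odd offset, LE 0x1234 */

        printf("%#x\n", get_le16(buf + 1));     /* prints 0x1234 */
        return 0;
}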
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 119174494cb5..a8663890a88c 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -62,7 +62,10 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
62 for_each_online_cpu(cpu) { 62 for_each_online_cpu(cpu) {
63 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); 63 s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
64 ret += *pcount; 64 ret += *pcount;
65 *pcount = 0;
65 } 66 }
67 fbc->count = ret;
68
66 spin_unlock(&fbc->lock); 69 spin_unlock(&fbc->lock);
67 return ret; 70 return ret;
68} 71}
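
With this change __percpu_counter_sum() not only sums the per-CPU deltas but also folds them back into fbc->count and zeroes them, so later reads of the central value stay accurate. A user-space sketch of that fold (struct counter and counter_sum are illustrative names, not kernel types):

#include <stdio.h>

#define NR_CPUS 4

struct counter {
        long long count;        /* central value */
        int pcpu[NR_CPUS];      /* per-cpu deltas */
};

/* same idea as the patched __percpu_counter_sum(): fold and zero the deltas */
static long long counter_sum(struct counter *c)
{
        long long ret = c->count;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                ret += c->pcpu[cpu];
                c->pcpu[cpu] = 0;
        }
        c->count = ret;
        return ret;
}

int main(void)
{
        struct counter c = { 10, { 1, -2, 3, 0 } };

        printf("%lld\n", counter_sum(&c));      /* prints 12 */
        printf("%lld\n", counter_sum(&c));      /* still 12, deltas are folded */
        return 0;
}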
diff --git a/lib/plist.c b/lib/plist.c
index 3074a02272f3..d6c64a824e1d 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -31,12 +31,13 @@
31static void plist_check_prev_next(struct list_head *t, struct list_head *p, 31static void plist_check_prev_next(struct list_head *t, struct list_head *p,
32 struct list_head *n) 32 struct list_head *n)
33{ 33{
34 if (n->prev != p || p->next != n) { 34 WARN(n->prev != p || p->next != n,
35 printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev); 35 "top: %p, n: %p, p: %p\n"
36 printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev); 36 "prev: %p, n: %p, p: %p\n"
37 printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev); 37 "next: %p, n: %p, p: %p\n",
38 WARN_ON(1); 38 t, t->next, t->prev,
39 } 39 p, p->next, p->prev,
40 n, n->next, n->prev);
40} 41}
41 42
42static void plist_check_list(struct list_head *top) 43static void plist_check_list(struct list_head *top)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 169a2f8dabcc..be86b32bc874 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 2001 Momchil Velikov 2 * Copyright (C) 2001 Momchil Velikov
3 * Portions Copyright (C) 2001 Christoph Hellwig 3 * Portions Copyright (C) 2001 Christoph Hellwig
4 * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com> 4 * Copyright (C) 2005 SGI, Christoph Lameter
5 * Copyright (C) 2006 Nick Piggin 5 * Copyright (C) 2006 Nick Piggin
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
@@ -359,18 +359,17 @@ EXPORT_SYMBOL(radix_tree_insert);
359 * Returns: the slot corresponding to the position @index in the 359 * Returns: the slot corresponding to the position @index in the
360 * radix tree @root. This is useful for update-if-exists operations. 360 * radix tree @root. This is useful for update-if-exists operations.
361 * 361 *
362 * This function cannot be called under rcu_read_lock, it must be 362 * This function can be called under rcu_read_lock iff the slot is not
363 * excluded from writers, as must the returned slot for subsequent 363 * modified by radix_tree_replace_slot, otherwise it must be called
364 * use by radix_tree_deref_slot() and radix_tree_replace slot. 364 * exclusive from other writers. Any dereference of the slot must be done
365 * Caller must hold tree write locked across slot lookup and 365 * using radix_tree_deref_slot.
366 * replace.
367 */ 366 */
368void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) 367void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
369{ 368{
370 unsigned int height, shift; 369 unsigned int height, shift;
371 struct radix_tree_node *node, **slot; 370 struct radix_tree_node *node, **slot;
372 371
373 node = root->rnode; 372 node = rcu_dereference(root->rnode);
374 if (node == NULL) 373 if (node == NULL)
375 return NULL; 374 return NULL;
376 375
@@ -390,7 +389,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
390 do { 389 do {
391 slot = (struct radix_tree_node **) 390 slot = (struct radix_tree_node **)
392 (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); 391 (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
393 node = *slot; 392 node = rcu_dereference(*slot);
394 if (node == NULL) 393 if (node == NULL)
395 return NULL; 394 return NULL;
396 395
@@ -667,7 +666,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
667EXPORT_SYMBOL(radix_tree_next_hole); 666EXPORT_SYMBOL(radix_tree_next_hole);
668 667
669static unsigned int 668static unsigned int
670__lookup(struct radix_tree_node *slot, void **results, unsigned long index, 669__lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
671 unsigned int max_items, unsigned long *next_index) 670 unsigned int max_items, unsigned long *next_index)
672{ 671{
673 unsigned int nr_found = 0; 672 unsigned int nr_found = 0;
@@ -701,11 +700,9 @@ __lookup(struct radix_tree_node *slot, void **results, unsigned long index,
701 700
702 /* Bottom level: grab some items */ 701 /* Bottom level: grab some items */
703 for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) { 702 for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
704 struct radix_tree_node *node;
705 index++; 703 index++;
706 node = slot->slots[i]; 704 if (slot->slots[i]) {
707 if (node) { 705 results[nr_found++] = &(slot->slots[i]);
708 results[nr_found++] = rcu_dereference(node);
709 if (nr_found == max_items) 706 if (nr_found == max_items)
710 goto out; 707 goto out;
711 } 708 }
@@ -759,13 +756,22 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
759 756
760 ret = 0; 757 ret = 0;
761 while (ret < max_items) { 758 while (ret < max_items) {
762 unsigned int nr_found; 759 unsigned int nr_found, slots_found, i;
763 unsigned long next_index; /* Index of next search */ 760 unsigned long next_index; /* Index of next search */
764 761
765 if (cur_index > max_index) 762 if (cur_index > max_index)
766 break; 763 break;
767 nr_found = __lookup(node, results + ret, cur_index, 764 slots_found = __lookup(node, (void ***)results + ret, cur_index,
768 max_items - ret, &next_index); 765 max_items - ret, &next_index);
766 nr_found = 0;
767 for (i = 0; i < slots_found; i++) {
768 struct radix_tree_node *slot;
769 slot = *(((void ***)results)[ret + i]);
770 if (!slot)
771 continue;
772 results[ret + nr_found] = rcu_dereference(slot);
773 nr_found++;
774 }
769 ret += nr_found; 775 ret += nr_found;
770 if (next_index == 0) 776 if (next_index == 0)
771 break; 777 break;
@@ -776,12 +782,71 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
776} 782}
777EXPORT_SYMBOL(radix_tree_gang_lookup); 783EXPORT_SYMBOL(radix_tree_gang_lookup);
778 784
785/**
786 * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
787 * @root: radix tree root
788 * @results: where the results of the lookup are placed
789 * @first_index: start the lookup from this key
790 * @max_items: place up to this many items at *results
791 *
792 * Performs an index-ascending scan of the tree for present items. Places
793 * their slots at *@results and returns the number of items which were
794 * placed at *@results.
795 *
796 * The implementation is naive.
797 *
798 * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
799 * be dereferenced with radix_tree_deref_slot, and if using only RCU
800 * protection, radix_tree_deref_slot may fail requiring a retry.
801 */
802unsigned int
803radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
804 unsigned long first_index, unsigned int max_items)
805{
806 unsigned long max_index;
807 struct radix_tree_node *node;
808 unsigned long cur_index = first_index;
809 unsigned int ret;
810
811 node = rcu_dereference(root->rnode);
812 if (!node)
813 return 0;
814
815 if (!radix_tree_is_indirect_ptr(node)) {
816 if (first_index > 0)
817 return 0;
818 results[0] = (void **)&root->rnode;
819 return 1;
820 }
821 node = radix_tree_indirect_to_ptr(node);
822
823 max_index = radix_tree_maxindex(node->height);
824
825 ret = 0;
826 while (ret < max_items) {
827 unsigned int slots_found;
828 unsigned long next_index; /* Index of next search */
829
830 if (cur_index > max_index)
831 break;
832 slots_found = __lookup(node, results + ret, cur_index,
833 max_items - ret, &next_index);
834 ret += slots_found;
835 if (next_index == 0)
836 break;
837 cur_index = next_index;
838 }
839
840 return ret;
841}
842EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
843
779/* 844/*
780 * FIXME: the two tag_get()s here should use find_next_bit() instead of 845 * FIXME: the two tag_get()s here should use find_next_bit() instead of
781 * open-coding the search. 846 * open-coding the search.
782 */ 847 */
783static unsigned int 848static unsigned int
784__lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index, 849__lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index,
785 unsigned int max_items, unsigned long *next_index, unsigned int tag) 850 unsigned int max_items, unsigned long *next_index, unsigned int tag)
786{ 851{
787 unsigned int nr_found = 0; 852 unsigned int nr_found = 0;
@@ -811,11 +876,9 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
811 unsigned long j = index & RADIX_TREE_MAP_MASK; 876 unsigned long j = index & RADIX_TREE_MAP_MASK;
812 877
813 for ( ; j < RADIX_TREE_MAP_SIZE; j++) { 878 for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
814 struct radix_tree_node *node;
815 index++; 879 index++;
816 if (!tag_get(slot, tag, j)) 880 if (!tag_get(slot, tag, j))
817 continue; 881 continue;
818 node = slot->slots[j];
819 /* 882 /*
820 * Even though the tag was found set, we need to 883 * Even though the tag was found set, we need to
821 * recheck that we have a non-NULL node, because 884 * recheck that we have a non-NULL node, because
@@ -826,9 +889,8 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
826 * lookup ->slots[x] without a lock (ie. can't 889 * lookup ->slots[x] without a lock (ie. can't
827 * rely on its value remaining the same). 890 * rely on its value remaining the same).
828 */ 891 */
829 if (node) { 892 if (slot->slots[j]) {
830 node = rcu_dereference(node); 893 results[nr_found++] = &(slot->slots[j]);
831 results[nr_found++] = node;
832 if (nr_found == max_items) 894 if (nr_found == max_items)
833 goto out; 895 goto out;
834 } 896 }
@@ -887,13 +949,22 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
887 949
888 ret = 0; 950 ret = 0;
889 while (ret < max_items) { 951 while (ret < max_items) {
890 unsigned int nr_found; 952 unsigned int nr_found, slots_found, i;
891 unsigned long next_index; /* Index of next search */ 953 unsigned long next_index; /* Index of next search */
892 954
893 if (cur_index > max_index) 955 if (cur_index > max_index)
894 break; 956 break;
895 nr_found = __lookup_tag(node, results + ret, cur_index, 957 slots_found = __lookup_tag(node, (void ***)results + ret,
896 max_items - ret, &next_index, tag); 958 cur_index, max_items - ret, &next_index, tag);
959 nr_found = 0;
960 for (i = 0; i < slots_found; i++) {
961 struct radix_tree_node *slot;
962 slot = *(((void ***)results)[ret + i]);
963 if (!slot)
964 continue;
965 results[ret + nr_found] = rcu_dereference(slot);
966 nr_found++;
967 }
897 ret += nr_found; 968 ret += nr_found;
898 if (next_index == 0) 969 if (next_index == 0)
899 break; 970 break;
@@ -905,6 +976,67 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
905EXPORT_SYMBOL(radix_tree_gang_lookup_tag); 976EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
906 977
907/** 978/**
979 * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
980 * radix tree based on a tag
981 * @root: radix tree root
982 * @results: where the results of the lookup are placed
983 * @first_index: start the lookup from this key
984 * @max_items: place up to this many items at *results
985 * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
986 *
987 * Performs an index-ascending scan of the tree for present items which
988 * have the tag indexed by @tag set. Places the slots at *@results and
989 * returns the number of slots which were placed at *@results.
990 */
991unsigned int
992radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
993 unsigned long first_index, unsigned int max_items,
994 unsigned int tag)
995{
996 struct radix_tree_node *node;
997 unsigned long max_index;
998 unsigned long cur_index = first_index;
999 unsigned int ret;
1000
1001 /* check the root's tag bit */
1002 if (!root_tag_get(root, tag))
1003 return 0;
1004
1005 node = rcu_dereference(root->rnode);
1006 if (!node)
1007 return 0;
1008
1009 if (!radix_tree_is_indirect_ptr(node)) {
1010 if (first_index > 0)
1011 return 0;
1012 results[0] = (void **)&root->rnode;
1013 return 1;
1014 }
1015 node = radix_tree_indirect_to_ptr(node);
1016
1017 max_index = radix_tree_maxindex(node->height);
1018
1019 ret = 0;
1020 while (ret < max_items) {
1021 unsigned int slots_found;
1022 unsigned long next_index; /* Index of next search */
1023
1024 if (cur_index > max_index)
1025 break;
1026 slots_found = __lookup_tag(node, results + ret,
1027 cur_index, max_items - ret, &next_index, tag);
1028 ret += slots_found;
1029 if (next_index == 0)
1030 break;
1031 cur_index = next_index;
1032 }
1033
1034 return ret;
1035}
1036EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
1037
1038
1039/**
908 * radix_tree_shrink - shrink height of a radix tree to minimal 1040 * radix_tree_shrink - shrink height of a radix tree to minimal
909 * @root radix tree root 1041 * @root radix tree root
910 */ 1042 */
@@ -1051,7 +1183,7 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
1051EXPORT_SYMBOL(radix_tree_tagged); 1183EXPORT_SYMBOL(radix_tree_tagged);
1052 1184
1053static void 1185static void
1054radix_tree_node_ctor(struct kmem_cache *cachep, void *node) 1186radix_tree_node_ctor(void *node)
1055{ 1187{
1056 memset(node, 0, sizeof(struct radix_tree_node)); 1188 memset(node, 0, sizeof(struct radix_tree_node));
1057} 1189}
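
Sketch of how a caller might use the new slot-returning gang lookup under RCU, per the comment above: each slot is dereferenced with radix_tree_deref_slot(), and a slot that was cleared concurrently is skipped (or retried). find_items() and MY_BATCH are illustrative names, not kernel APIs.

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

#define MY_BATCH 16

static unsigned int find_items(struct radix_tree_root *root,
                               unsigned long start, void **out)
{
        void **slots[MY_BATCH];
        unsigned int nr, i, ret = 0;

        rcu_read_lock();
        nr = radix_tree_gang_lookup_slot(root, slots, start, MY_BATCH);
        for (i = 0; i < nr; i++) {
                void *item = radix_tree_deref_slot(slots[i]);

                if (!item)              /* slot was cleared concurrently */
                        continue;       /* (or retry, per the comment above) */
                out[ret++] = item;
        }
        rcu_read_unlock();

        return ret;
}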
diff --git a/lib/random32.c b/lib/random32.c
index ca87d86992bd..217d5c4b666d 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -56,23 +56,12 @@ static u32 __random32(struct rnd_state *state)
56 return (state->s1 ^ state->s2 ^ state->s3); 56 return (state->s1 ^ state->s2 ^ state->s3);
57} 57}
58 58
59static void __set_random32(struct rnd_state *state, unsigned long s) 59/*
60 * Handle minimum values for seeds
61 */
62static inline u32 __seed(u32 x, u32 m)
60{ 63{
61 if (s == 0) 64 return (x < m) ? x + m : x;
62 s = 1; /* default seed is 1 */
63
64#define LCG(n) (69069 * n)
65 state->s1 = LCG(s);
66 state->s2 = LCG(state->s1);
67 state->s3 = LCG(state->s2);
68
69 /* "warm it up" */
70 __random32(state);
71 __random32(state);
72 __random32(state);
73 __random32(state);
74 __random32(state);
75 __random32(state);
76} 65}
77 66
78/** 67/**
@@ -107,7 +96,7 @@ void srandom32(u32 entropy)
107 */ 96 */
108 for_each_possible_cpu (i) { 97 for_each_possible_cpu (i) {
109 struct rnd_state *state = &per_cpu(net_rand_state, i); 98 struct rnd_state *state = &per_cpu(net_rand_state, i);
110 __set_random32(state, state->s1 ^ entropy); 99 state->s1 = __seed(state->s1 ^ entropy, 1);
111 } 100 }
112} 101}
113EXPORT_SYMBOL(srandom32); 102EXPORT_SYMBOL(srandom32);
@@ -122,7 +111,19 @@ static int __init random32_init(void)
122 111
123 for_each_possible_cpu(i) { 112 for_each_possible_cpu(i) {
124 struct rnd_state *state = &per_cpu(net_rand_state,i); 113 struct rnd_state *state = &per_cpu(net_rand_state,i);
125 __set_random32(state, i + jiffies); 114
115#define LCG(x) ((x) * 69069) /* super-duper LCG */
116 state->s1 = __seed(LCG(i + jiffies), 1);
117 state->s2 = __seed(LCG(state->s1), 7);
118 state->s3 = __seed(LCG(state->s2), 15);
119
120 /* "warm it up" */
121 __random32(state);
122 __random32(state);
123 __random32(state);
124 __random32(state);
125 __random32(state);
126 __random32(state);
126 } 127 }
127 return 0; 128 return 0;
128} 129}
@@ -135,13 +136,18 @@ core_initcall(random32_init);
135static int __init random32_reseed(void) 136static int __init random32_reseed(void)
136{ 137{
137 int i; 138 int i;
138 unsigned long seed;
139 139
140 for_each_possible_cpu(i) { 140 for_each_possible_cpu(i) {
141 struct rnd_state *state = &per_cpu(net_rand_state,i); 141 struct rnd_state *state = &per_cpu(net_rand_state,i);
142 u32 seeds[3];
143
144 get_random_bytes(&seeds, sizeof(seeds));
145 state->s1 = __seed(seeds[0], 1);
146 state->s2 = __seed(seeds[1], 7);
147 state->s3 = __seed(seeds[2], 15);
142 148
143 get_random_bytes(&seed, sizeof(seed)); 149 /* mix it in */
144 __set_random32(state, seed); 150 __random32(state);
145 } 151 }
146 return 0; 152 return 0;
147} 153}
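
The seeding now expands each per-CPU state with a 69069 LCG and clamps every component with __seed() so none of the three Tausworthe registers starts in a degenerate range. A runnable user-space mirror of the seeding path follows, assuming the taus88 step used by __random32(); seed_min() and taus_step() are local names, and the all-zero seed is the worst case the clamps guard against.

#include <stdio.h>
#include <stdint.h>

struct rnd_state { uint32_t s1, s2, s3; };

/* mirror of __seed(): bump a too-small component past its minimum */
static uint32_t seed_min(uint32_t x, uint32_t m)
{
        return (x < m) ? x + m : x;
}

/* taus88 step, mirroring __random32() */
static uint32_t taus_step(struct rnd_state *s)
{
        s->s1 = ((s->s1 & 0xFFFFFFFEU) << 12) ^ (((s->s1 << 13) ^ s->s1) >> 19);
        s->s2 = ((s->s2 & 0xFFFFFFF8U) <<  4) ^ (((s->s2 <<  2) ^ s->s2) >> 25);
        s->s3 = ((s->s3 & 0xFFFFFFF0U) << 17) ^ (((s->s3 <<  3) ^ s->s3) >> 11);
        return s->s1 ^ s->s2 ^ s->s3;
}

#define LCG(x) ((x) * 69069U)   /* "super-duper" LCG used for expansion */

int main(void)
{
        struct rnd_state st;
        uint32_t seed = 0;      /* worst case: all-zero entropy */
        int i;

        st.s1 = seed_min(LCG(seed), 1);
        st.s2 = seed_min(LCG(st.s1), 7);
        st.s3 = seed_min(LCG(st.s2), 15);

        for (i = 0; i < 6; i++)         /* "warm it up" */
                taus_step(&st);

        printf("%08x\n", taus_step(&st));
        return 0;
}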
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 485e3040dcd4..26187edcc7ea 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -3,6 +3,9 @@
3 * 3 *
4 * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com> 4 * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
5 * 5 *
6 * 2008-05-01 rewrite the function and use a ratelimit_state data struct as
7 * parameter. Now every user can use their own standalone ratelimit_state.
8 *
6 * This file is released under the GPLv2. 9 * This file is released under the GPLv2.
7 * 10 *
8 */ 11 */
@@ -11,41 +14,44 @@
11#include <linux/jiffies.h> 14#include <linux/jiffies.h>
12#include <linux/module.h> 15#include <linux/module.h>
13 16
17static DEFINE_SPINLOCK(ratelimit_lock);
18
14/* 19/*
15 * __ratelimit - rate limiting 20 * __ratelimit - rate limiting
16 * @ratelimit_jiffies: minimum time in jiffies between two callbacks 21 * @rs: ratelimit_state data
17 * @ratelimit_burst: number of callbacks we do before ratelimiting
18 * 22 *
19 * This enforces a rate limit: not more than @ratelimit_burst callbacks 23 * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks
20 * in every ratelimit_jiffies 24 * in every @rs->ratelimit_jiffies
21 */ 25 */
22int __ratelimit(int ratelimit_jiffies, int ratelimit_burst) 26int __ratelimit(struct ratelimit_state *rs)
23{ 27{
24 static DEFINE_SPINLOCK(ratelimit_lock);
25 static unsigned toks = 10 * 5 * HZ;
26 static unsigned long last_msg;
27 static int missed;
28 unsigned long flags; 28 unsigned long flags;
29 unsigned long now = jiffies;
30 29
31 spin_lock_irqsave(&ratelimit_lock, flags); 30 if (!rs->interval)
32 toks += now - last_msg;
33 last_msg = now;
34 if (toks > (ratelimit_burst * ratelimit_jiffies))
35 toks = ratelimit_burst * ratelimit_jiffies;
36 if (toks >= ratelimit_jiffies) {
37 int lost = missed;
38
39 missed = 0;
40 toks -= ratelimit_jiffies;
41 spin_unlock_irqrestore(&ratelimit_lock, flags);
42 if (lost)
43 printk(KERN_WARNING "%s: %d messages suppressed\n",
44 __func__, lost);
45 return 1; 31 return 1;
32
33 spin_lock_irqsave(&ratelimit_lock, flags);
34 if (!rs->begin)
35 rs->begin = jiffies;
36
37 if (time_is_before_jiffies(rs->begin + rs->interval)) {
38 if (rs->missed)
39 printk(KERN_WARNING "%s: %d callbacks suppressed\n",
40 __func__, rs->missed);
41 rs->begin = 0;
42 rs->printed = 0;
43 rs->missed = 0;
46 } 44 }
47 missed++; 45 if (rs->burst && rs->burst > rs->printed)
46 goto print;
47
48 rs->missed++;
48 spin_unlock_irqrestore(&ratelimit_lock, flags); 49 spin_unlock_irqrestore(&ratelimit_lock, flags);
49 return 0; 50 return 0;
51
52print:
53 rs->printed++;
54 spin_unlock_irqrestore(&ratelimit_lock, flags);
55 return 1;
50} 56}
51EXPORT_SYMBOL(__ratelimit); 57EXPORT_SYMBOL(__ratelimit);
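
The rewritten __ratelimit() keeps per-user state: up to rs->burst events pass in each rs->interval window, the rest are counted as missed and reported when the window rolls over. A runnable user-space mirror of that logic (struct rl_state and the tick-driven main() are illustrative; the kernel version uses jiffies and a spinlock):

#include <stdio.h>

struct rl_state {
        int interval, burst;
        long begin;
        int printed, missed;
};

/* allow at most 'burst' events per 'interval' ticks, count the rest */
static int rl_ok(struct rl_state *rs, long now)
{
        if (!rs->interval)
                return 1;
        if (!rs->begin)
                rs->begin = now;

        if (now - rs->begin >= rs->interval) {  /* window expired: reset */
                if (rs->missed)
                        fprintf(stderr, "%d callbacks suppressed\n", rs->missed);
                rs->begin = 0;
                rs->printed = 0;
                rs->missed = 0;
        }
        if (rs->burst > rs->printed) {
                rs->printed++;
                return 1;
        }
        rs->missed++;
        return 0;
}

int main(void)
{
        struct rl_state rs = { 100, 3, 0, 0, 0 };
        long t;
        int allowed = 0;

        for (t = 0; t < 250; t++)
                allowed += rl_ok(&rs, t);
        printf("allowed %d of 250 events\n", allowed);  /* roughly 3 per window */
        return 0;
}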
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b80c21100d78..8d2688ff1352 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -295,6 +295,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
295EXPORT_SYMBOL(sg_alloc_table); 295EXPORT_SYMBOL(sg_alloc_table);
296 296
297/** 297/**
298 * sg_miter_start - start mapping iteration over a sg list
299 * @miter: sg mapping iter to be started
300 * @sgl: sg list to iterate over
301 * @nents: number of sg entries
302 *
303 * Description:
304 * Starts mapping iterator @miter.
305 *
306 * Context:
307 * Don't care.
308 */
309void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
310 unsigned int nents, unsigned int flags)
311{
312 memset(miter, 0, sizeof(struct sg_mapping_iter));
313
314 miter->__sg = sgl;
315 miter->__nents = nents;
316 miter->__offset = 0;
317 miter->__flags = flags;
318}
319EXPORT_SYMBOL(sg_miter_start);
320
321/**
322 * sg_miter_next - proceed mapping iterator to the next mapping
323 * @miter: sg mapping iter to proceed
324 *
325 * Description:
 326 * Proceeds @miter to the next mapping. @miter should have been
 327 * started using sg_miter_start(). On successful return,
 328 * @miter->page, @miter->addr and @miter->length point to the
329 * current mapping.
330 *
331 * Context:
332 * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
 333 * @miter is stopped. May sleep if !SG_MITER_ATOMIC.
334 *
335 * Returns:
336 * true if @miter contains the next mapping. false if end of sg
337 * list is reached.
338 */
339bool sg_miter_next(struct sg_mapping_iter *miter)
340{
341 unsigned int off, len;
342
343 /* check for end and drop resources from the last iteration */
344 if (!miter->__nents)
345 return false;
346
347 sg_miter_stop(miter);
348
349 /* get to the next sg if necessary. __offset is adjusted by stop */
350 if (miter->__offset == miter->__sg->length && --miter->__nents) {
351 miter->__sg = sg_next(miter->__sg);
352 miter->__offset = 0;
353 }
354
355 /* map the next page */
356 off = miter->__sg->offset + miter->__offset;
357 len = miter->__sg->length - miter->__offset;
358
359 miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
360 off &= ~PAGE_MASK;
361 miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
362 miter->consumed = miter->length;
363
364 if (miter->__flags & SG_MITER_ATOMIC)
365 miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
366 else
367 miter->addr = kmap(miter->page) + off;
368
369 return true;
370}
371EXPORT_SYMBOL(sg_miter_next);
372
373/**
374 * sg_miter_stop - stop mapping iteration
375 * @miter: sg mapping iter to be stopped
376 *
377 * Description:
 378 * Stops mapping iterator @miter. @miter should have been started
 379 * using sg_miter_start(). A stopped iteration can be
380 * resumed by calling sg_miter_next() on it. This is useful when
381 * resources (kmap) need to be released during iteration.
382 *
383 * Context:
384 * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
385 */
386void sg_miter_stop(struct sg_mapping_iter *miter)
387{
388 WARN_ON(miter->consumed > miter->length);
389
390 /* drop resources from the last iteration */
391 if (miter->addr) {
392 miter->__offset += miter->consumed;
393
394 if (miter->__flags & SG_MITER_ATOMIC) {
395 WARN_ON(!irqs_disabled());
396 kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
397 } else
398 kunmap(miter->addr);
399
400 miter->page = NULL;
401 miter->addr = NULL;
402 miter->length = 0;
403 miter->consumed = 0;
404 }
405}
406EXPORT_SYMBOL(sg_miter_stop);
407
408/**
298 * sg_copy_buffer - Copy data between a linear buffer and an SG list 409 * sg_copy_buffer - Copy data between a linear buffer and an SG list
299 * @sgl: The SG list 410 * @sgl: The SG list
300 * @nents: Number of SG entries 411 * @nents: Number of SG entries
@@ -309,56 +420,33 @@ EXPORT_SYMBOL(sg_alloc_table);
309static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, 420static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
310 void *buf, size_t buflen, int to_buffer) 421 void *buf, size_t buflen, int to_buffer)
311{ 422{
312 struct scatterlist *sg; 423 unsigned int offset = 0;
313 size_t buf_off = 0; 424 struct sg_mapping_iter miter;
314 int i; 425 unsigned long flags;
315 426
316 WARN_ON(!irqs_disabled()); 427 sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
317 428
318 for_each_sg(sgl, sg, nents, i) { 429 local_irq_save(flags);
319 struct page *page; 430
320 int n = 0; 431 while (sg_miter_next(&miter) && offset < buflen) {
321 unsigned int sg_off = sg->offset; 432 unsigned int len;
322 unsigned int sg_copy = sg->length; 433
323 434 len = min(miter.length, buflen - offset);
324 if (sg_copy > buflen) 435
325 sg_copy = buflen; 436 if (to_buffer)
326 buflen -= sg_copy; 437 memcpy(buf + offset, miter.addr, len);
327 438 else {
328 while (sg_copy > 0) { 439 memcpy(miter.addr, buf + offset, len);
329 unsigned int page_copy; 440 flush_kernel_dcache_page(miter.page);
330 void *p;
331
332 page_copy = PAGE_SIZE - sg_off;
333 if (page_copy > sg_copy)
334 page_copy = sg_copy;
335
336 page = nth_page(sg_page(sg), n);
337 p = kmap_atomic(page, KM_BIO_SRC_IRQ);
338
339 if (to_buffer)
340 memcpy(buf + buf_off, p + sg_off, page_copy);
341 else {
342 memcpy(p + sg_off, buf + buf_off, page_copy);
343 flush_kernel_dcache_page(page);
344 }
345
346 kunmap_atomic(p, KM_BIO_SRC_IRQ);
347
348 buf_off += page_copy;
349 sg_off += page_copy;
350 if (sg_off == PAGE_SIZE) {
351 sg_off = 0;
352 n++;
353 }
354 sg_copy -= page_copy;
355 } 441 }
356 442
357 if (!buflen) 443 offset += len;
358 break;
359 } 444 }
360 445
361 return buf_off; 446 sg_miter_stop(&miter);
447
448 local_irq_restore(flags);
449 return offset;
362} 450}
363 451
364/** 452/**
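
Sketch of the intended use of the new mapping iterator from process context: sg_miter_next() maps one chunk at a time and exposes it through miter.addr and miter.length, and sg_miter_stop() drops the last mapping. sg_zero() is an illustrative caller, not a kernel API.

#include <linux/scatterlist.h>
#include <linux/string.h>

static void sg_zero(struct scatterlist *sgl, unsigned int nents)
{
        struct sg_mapping_iter miter;

        /* flags == 0: plain kmap(), may sleep; SG_MITER_ATOMIC would
         * require IRQs to stay disabled, as in sg_copy_buffer() above */
        sg_miter_start(&miter, sgl, nents, 0);

        while (sg_miter_next(&miter))
                memset(miter.addr, 0, miter.length);    /* one mapped chunk */

        sg_miter_stop(&miter);
}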
diff --git a/lib/show_mem.c b/lib/show_mem.c
new file mode 100644
index 000000000000..238e72a18ce1
--- /dev/null
+++ b/lib/show_mem.c
@@ -0,0 +1,63 @@
1/*
2 * Generic show_mem() implementation
3 *
4 * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
5 * All code subject to the GPL version 2.
6 */
7
8#include <linux/mm.h>
9#include <linux/nmi.h>
10#include <linux/quicklist.h>
11
12void show_mem(void)
13{
14 pg_data_t *pgdat;
15 unsigned long total = 0, reserved = 0, shared = 0,
16 nonshared = 0, highmem = 0;
17
18 printk(KERN_INFO "Mem-Info:\n");
19 show_free_areas();
20
21 for_each_online_pgdat(pgdat) {
22 unsigned long i, flags;
23
24 pgdat_resize_lock(pgdat, &flags);
25 for (i = 0; i < pgdat->node_spanned_pages; i++) {
26 struct page *page;
27 unsigned long pfn = pgdat->node_start_pfn + i;
28
29 if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
30 touch_nmi_watchdog();
31
32 if (!pfn_valid(pfn))
33 continue;
34
35 page = pfn_to_page(pfn);
36
37 if (PageHighMem(page))
38 highmem++;
39
40 if (PageReserved(page))
41 reserved++;
42 else if (page_count(page) == 1)
43 nonshared++;
44 else if (page_count(page) > 1)
45 shared += page_count(page) - 1;
46
47 total++;
48 }
49 pgdat_resize_unlock(pgdat, &flags);
50 }
51
52 printk(KERN_INFO "%lu pages RAM\n", total);
53#ifdef CONFIG_HIGHMEM
54 printk(KERN_INFO "%lu pages HighMem\n", highmem);
55#endif
56 printk(KERN_INFO "%lu pages reserved\n", reserved);
57 printk(KERN_INFO "%lu pages shared\n", shared);
58 printk(KERN_INFO "%lu pages non-shared\n", nonshared);
59#ifdef CONFIG_QUICKLIST
60 printk(KERN_INFO "%lu pages in pagetable cache\n",
61 quicklist_total_size());
62#endif
63}
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 6c90fb90e19c..0f8fc22ed103 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -7,11 +7,10 @@
7#include <linux/kallsyms.h> 7#include <linux/kallsyms.h>
8#include <linux/sched.h> 8#include <linux/sched.h>
9 9
10unsigned int debug_smp_processor_id(void) 10notrace unsigned int debug_smp_processor_id(void)
11{ 11{
12 unsigned long preempt_count = preempt_count(); 12 unsigned long preempt_count = preempt_count();
13 int this_cpu = raw_smp_processor_id(); 13 int this_cpu = raw_smp_processor_id();
14 cpumask_t this_mask;
15 14
16 if (likely(preempt_count)) 15 if (likely(preempt_count))
17 goto out; 16 goto out;
@@ -23,9 +22,7 @@ unsigned int debug_smp_processor_id(void)
23 * Kernel threads bound to a single CPU can safely use 22 * Kernel threads bound to a single CPU can safely use
24 * smp_processor_id(): 23 * smp_processor_id():
25 */ 24 */
26 this_mask = cpumask_of_cpu(this_cpu); 25 if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
27
28 if (cpus_equal(current->cpus_allowed, this_mask))
29 goto out; 26 goto out;
30 27
31 /* 28 /*
@@ -37,7 +34,7 @@ unsigned int debug_smp_processor_id(void)
37 /* 34 /*
38 * Avoid recursion: 35 * Avoid recursion:
39 */ 36 */
40 preempt_disable(); 37 preempt_disable_notrace();
41 38
42 if (!printk_ratelimit()) 39 if (!printk_ratelimit())
43 goto out_enable; 40 goto out_enable;
@@ -49,7 +46,7 @@ unsigned int debug_smp_processor_id(void)
49 dump_stack(); 46 dump_stack();
50 47
51out_enable: 48out_enable:
52 preempt_enable_no_resched(); 49 preempt_enable_no_resched_notrace();
53out: 50out:
54 return this_cpu; 51 return this_cpu;
55} 52}
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
new file mode 100644
index 000000000000..8347925030ff
--- /dev/null
+++ b/lib/string_helpers.c
@@ -0,0 +1,64 @@
1/*
2 * Helpers for formatting and printing strings
3 *
4 * Copyright 31 August 2008 James Bottomley
5 */
6#include <linux/kernel.h>
7#include <linux/math64.h>
8#include <linux/module.h>
9#include <linux/string_helpers.h>
10
11/**
12 * string_get_size - get the size in the specified units
13 * @size: The size to be converted
14 * @units: units to use (powers of 1000 or 1024)
15 * @buf: buffer to format to
16 * @len: length of buffer
17 *
18 * This function returns a string formatted to 3 significant figures
19 * giving the size in the required units. Returns 0 on success or
20 * error on failure. @buf is always zero terminated.
21 *
22 */
23int string_get_size(u64 size, const enum string_size_units units,
24 char *buf, int len)
25{
26 const char *units_10[] = { "B", "KB", "MB", "GB", "TB", "PB",
27 "EB", "ZB", "YB", NULL};
28 const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
29 "EiB", "ZiB", "YiB", NULL };
30 const char **units_str[] = {
31 [STRING_UNITS_10] = units_10,
32 [STRING_UNITS_2] = units_2,
33 };
34 const int divisor[] = {
35 [STRING_UNITS_10] = 1000,
36 [STRING_UNITS_2] = 1024,
37 };
38 int i, j;
39 u64 remainder = 0, sf_cap;
40 char tmp[8];
41
42 tmp[0] = '\0';
43
44 for (i = 0; size > divisor[units] && units_str[units][i]; i++)
45 remainder = do_div(size, divisor[units]);
46
47 sf_cap = size;
48 for (j = 0; sf_cap*10 < 1000; j++)
49 sf_cap *= 10;
50
51 if (j) {
52 remainder *= 1000;
53 do_div(remainder, divisor[units]);
54 snprintf(tmp, sizeof(tmp), ".%03lld",
55 (unsigned long long)remainder);
56 tmp[j+1] = '\0';
57 }
58
59 snprintf(buf, len, "%lld%s%s", (unsigned long long)size,
60 tmp, units_str[units][i]);
61
62 return 0;
63}
64EXPORT_SYMBOL(string_get_size);
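
A hedged sketch of how a driver might format a capacity with the new helper; the caller, the printout and the size value are made up for illustration, only string_get_size() and STRING_UNITS_10 come from the file above.

#include <linux/string_helpers.h>
#include <linux/kernel.h>

static void report_capacity(u64 bytes)
{
        char buf[16];

        string_get_size(bytes, STRING_UNITS_10, buf, sizeof(buf));
        printk(KERN_INFO "capacity: %s\n", buf);  /* e.g. 8000000000 -> "8.00GB" */
}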
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index d568894df8cc..f8eebd489149 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,14 @@ cleanup1:
274} 274}
275 275
276static int 276static int
277address_needs_mapping(struct device *hwdev, dma_addr_t addr) 277address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
278{ 278{
279 dma_addr_t mask = 0xffffffff; 279 return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
280 /* If the device has a mask, use it, otherwise default to 32 bits */ 280}
281 if (hwdev && hwdev->dma_mask) 281
282 mask = *hwdev->dma_mask; 282static int is_swiotlb_buffer(char *addr)
283 return (addr & ~mask) != 0; 283{
284 return addr >= io_tlb_start && addr < io_tlb_end;
284} 285}
285 286
286/* 287/*
@@ -467,15 +468,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
467 void *ret; 468 void *ret;
468 int order = get_order(size); 469 int order = get_order(size);
469 470
470 /*
471 * XXX fix me: the DMA API should pass us an explicit DMA mask
472 * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
473 * bit range instead of a 16MB one).
474 */
475 flags |= GFP_DMA;
476
477 ret = (void *)__get_free_pages(flags, order); 471 ret = (void *)__get_free_pages(flags, order);
478 if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) { 472 if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
479 /* 473 /*
480 * The allocated memory isn't reachable by the device. 474 * The allocated memory isn't reachable by the device.
481 * Fall back on swiotlb_map_single(). 475 * Fall back on swiotlb_map_single().
@@ -490,19 +484,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
490 * swiotlb_map_single(), which will grab memory from 484 * swiotlb_map_single(), which will grab memory from
491 * the lowest available address range. 485 * the lowest available address range.
492 */ 486 */
493 dma_addr_t handle; 487 ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
494 handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE); 488 if (!ret)
495 if (swiotlb_dma_mapping_error(handle))
496 return NULL; 489 return NULL;
497
498 ret = bus_to_virt(handle);
499 } 490 }
500 491
501 memset(ret, 0, size); 492 memset(ret, 0, size);
502 dev_addr = virt_to_bus(ret); 493 dev_addr = virt_to_bus(ret);
503 494
504 /* Confirm address can be DMA'd by device */ 495 /* Confirm address can be DMA'd by device */
505 if (address_needs_mapping(hwdev, dev_addr)) { 496 if (address_needs_mapping(hwdev, dev_addr, size)) {
506 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", 497 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
507 (unsigned long long)*hwdev->dma_mask, 498 (unsigned long long)*hwdev->dma_mask,
508 (unsigned long long)dev_addr); 499 (unsigned long long)dev_addr);
@@ -518,12 +509,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
518 dma_addr_t dma_handle) 509 dma_addr_t dma_handle)
519{ 510{
520 WARN_ON(irqs_disabled()); 511 WARN_ON(irqs_disabled());
521 if (!(vaddr >= (void *)io_tlb_start 512 if (!is_swiotlb_buffer(vaddr))
522 && vaddr < (void *)io_tlb_end))
523 free_pages((unsigned long) vaddr, get_order(size)); 513 free_pages((unsigned long) vaddr, get_order(size));
524 else 514 else
525 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 515 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
526 swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE); 516 unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
527} 517}
528 518
529static void 519static void
@@ -567,7 +557,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
567 * we can safely return the device addr and not worry about bounce 557 * we can safely return the device addr and not worry about bounce
568 * buffering it. 558 * buffering it.
569 */ 559 */
570 if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force) 560 if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
571 return dev_addr; 561 return dev_addr;
572 562
573 /* 563 /*
@@ -584,7 +574,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
584 /* 574 /*
585 * Ensure that the address returned is DMA'ble 575 * Ensure that the address returned is DMA'ble
586 */ 576 */
587 if (address_needs_mapping(hwdev, dev_addr)) 577 if (address_needs_mapping(hwdev, dev_addr, size))
588 panic("map_single: bounce buffer is not DMA'ble"); 578 panic("map_single: bounce buffer is not DMA'ble");
589 579
590 return dev_addr; 580 return dev_addr;
@@ -612,7 +602,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
612 char *dma_addr = bus_to_virt(dev_addr); 602 char *dma_addr = bus_to_virt(dev_addr);
613 603
614 BUG_ON(dir == DMA_NONE); 604 BUG_ON(dir == DMA_NONE);
615 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 605 if (is_swiotlb_buffer(dma_addr))
616 unmap_single(hwdev, dma_addr, size, dir); 606 unmap_single(hwdev, dma_addr, size, dir);
617 else if (dir == DMA_FROM_DEVICE) 607 else if (dir == DMA_FROM_DEVICE)
618 dma_mark_clean(dma_addr, size); 608 dma_mark_clean(dma_addr, size);
@@ -642,7 +632,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
642 char *dma_addr = bus_to_virt(dev_addr); 632 char *dma_addr = bus_to_virt(dev_addr);
643 633
644 BUG_ON(dir == DMA_NONE); 634 BUG_ON(dir == DMA_NONE);
645 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 635 if (is_swiotlb_buffer(dma_addr))
646 sync_single(hwdev, dma_addr, size, dir, target); 636 sync_single(hwdev, dma_addr, size, dir, target);
647 else if (dir == DMA_FROM_DEVICE) 637 else if (dir == DMA_FROM_DEVICE)
648 dma_mark_clean(dma_addr, size); 638 dma_mark_clean(dma_addr, size);
@@ -673,7 +663,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
673 char *dma_addr = bus_to_virt(dev_addr) + offset; 663 char *dma_addr = bus_to_virt(dev_addr) + offset;
674 664
675 BUG_ON(dir == DMA_NONE); 665 BUG_ON(dir == DMA_NONE);
676 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 666 if (is_swiotlb_buffer(dma_addr))
677 sync_single(hwdev, dma_addr, size, dir, target); 667 sync_single(hwdev, dma_addr, size, dir, target);
678 else if (dir == DMA_FROM_DEVICE) 668 else if (dir == DMA_FROM_DEVICE)
679 dma_mark_clean(dma_addr, size); 669 dma_mark_clean(dma_addr, size);
@@ -727,7 +717,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
727 for_each_sg(sgl, sg, nelems, i) { 717 for_each_sg(sgl, sg, nelems, i) {
728 addr = SG_ENT_VIRT_ADDRESS(sg); 718 addr = SG_ENT_VIRT_ADDRESS(sg);
729 dev_addr = virt_to_bus(addr); 719 dev_addr = virt_to_bus(addr);
730 if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) { 720 if (swiotlb_force ||
721 address_needs_mapping(hwdev, dev_addr, sg->length)) {
731 void *map = map_single(hwdev, addr, sg->length, dir); 722 void *map = map_single(hwdev, addr, sg->length, dir);
732 if (!map) { 723 if (!map) {
733 /* Don't panic here, we expect map_sg users 724 /* Don't panic here, we expect map_sg users
@@ -824,7 +815,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
824} 815}
825 816
826int 817int
827swiotlb_dma_mapping_error(dma_addr_t dma_addr) 818swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
828{ 819{
829 return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); 820 return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
830} 821}
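
address_needs_mapping() now takes the buffer size, so the test is whether the whole range [addr, addr + size) lies under the device's DMA mask, not just its first byte. A plain user-space restatement of that range check (buffer_dma_capable() here is illustrative, not the kernel's is_buffer_dma_capable()):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* the whole buffer, not just its start, must sit below the DMA mask */
static int buffer_dma_capable(uint64_t mask, uint64_t addr, size_t size)
{
        return addr + size - 1 <= mask;
}

int main(void)
{
        uint64_t mask32 = 0xffffffffULL;

        printf("%d\n", buffer_dma_capable(mask32, 0xfffff000ULL, 0x1000)); /* 1 */
        printf("%d\n", buffer_dma_capable(mask32, 0xfffff000ULL, 0x2000)); /* 0 */
        return 0;
}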
diff --git a/lib/syscall.c b/lib/syscall.c
new file mode 100644
index 000000000000..a4f7067f72fa
--- /dev/null
+++ b/lib/syscall.c
@@ -0,0 +1,75 @@
1#include <linux/ptrace.h>
2#include <linux/sched.h>
3#include <linux/module.h>
4#include <asm/syscall.h>
5
6static int collect_syscall(struct task_struct *target, long *callno,
7 unsigned long args[6], unsigned int maxargs,
8 unsigned long *sp, unsigned long *pc)
9{
10 struct pt_regs *regs = task_pt_regs(target);
11 if (unlikely(!regs))
12 return -EAGAIN;
13
14 *sp = user_stack_pointer(regs);
15 *pc = instruction_pointer(regs);
16
17 *callno = syscall_get_nr(target, regs);
18 if (*callno != -1L && maxargs > 0)
19 syscall_get_arguments(target, regs, 0, maxargs, args);
20
21 return 0;
22}
23
24/**
25 * task_current_syscall - Discover what a blocked task is doing.
26 * @target: thread to examine
27 * @callno: filled with system call number or -1
28 * @args: filled with @maxargs system call arguments
29 * @maxargs: number of elements in @args to fill
30 * @sp: filled with user stack pointer
31 * @pc: filled with user PC
32 *
33 * If @target is blocked in a system call, returns zero with *@callno
 34 * set to the call's number and @args filled in with its arguments.
35 * Registers not used for system call arguments may not be available and
36 * it is not kosher to use &struct user_regset calls while the system
37 * call is still in progress. Note we may get this result if @target
38 * has finished its system call but not yet returned to user mode, such
39 * as when it's stopped for signal handling or syscall exit tracing.
40 *
41 * If @target is blocked in the kernel during a fault or exception,
42 * returns zero with *@callno set to -1 and does not fill in @args.
43 * If so, it's now safe to examine @target using &struct user_regset
44 * get() calls as long as we're sure @target won't return to user mode.
45 *
46 * Returns -%EAGAIN if @target does not remain blocked.
47 *
48 * Returns -%EINVAL if @maxargs is too large (maximum is six).
49 */
50int task_current_syscall(struct task_struct *target, long *callno,
51 unsigned long args[6], unsigned int maxargs,
52 unsigned long *sp, unsigned long *pc)
53{
54 long state;
55 unsigned long ncsw;
56
57 if (unlikely(maxargs > 6))
58 return -EINVAL;
59
60 if (target == current)
61 return collect_syscall(target, callno, args, maxargs, sp, pc);
62
63 state = target->state;
64 if (unlikely(!state))
65 return -EAGAIN;
66
67 ncsw = wait_task_inactive(target, state);
68 if (unlikely(!ncsw) ||
69 unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
70 unlikely(wait_task_inactive(target, state) != ncsw))
71 return -EAGAIN;
72
73 return 0;
74}
75EXPORT_SYMBOL_GPL(task_current_syscall);
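
Hedged sketch of a caller of the new helper: report_blocked_syscall(), the printout and the chosen includes are assumptions for illustration, only task_current_syscall() and its signature come from the file above.

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/kernel.h>

static void report_blocked_syscall(struct task_struct *task)
{
        unsigned long args[6], sp, pc;
        long nr;

        if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
                return;         /* task did not stay blocked */

        if (nr != -1L)
                printk(KERN_INFO "%s[%d] blocked in syscall %ld (arg0=%#lx)\n",
                       task->comm, task->pid, nr, args[0]);
        else
                printk(KERN_INFO "%s[%d] blocked in a fault or exception\n",
                       task->comm, task->pid);
}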
diff --git a/lib/textsearch.c b/lib/textsearch.c
index be8bda3862f5..9fbcb44c554f 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -54,10 +54,13 @@
54 * USAGE 54 * USAGE
55 * 55 *
56 * Before a search can be performed, a configuration must be created 56 * Before a search can be performed, a configuration must be created
57 * by calling textsearch_prepare() specyfing the searching algorithm and 57 * by calling textsearch_prepare() specifying the searching algorithm,
58 * the pattern to look for. The returned configuration may then be used 58 * the pattern to look for and flags. As a flag, you can set TS_IGNORECASE
 59 * for an arbitary amount of times and even in parallel as long as a 59 to perform case-insensitive matching. Note that it may slow the
 60 * separate struct ts_state variable is provided to every instance. 60 algorithm down, so use it at your own risk.
 61 * The returned configuration may then be used for an arbitrary
 62 * number of times and even in parallel as long as a separate struct
63 * ts_state variable is provided to every instance.
61 * 64 *
62 * The actual search is performed by either calling textsearch_find_- 65 * The actual search is performed by either calling textsearch_find_-
63 * continuous() for linear data or by providing an own get_next_block() 66 * continuous() for linear data or by providing an own get_next_block()
@@ -89,7 +92,6 @@
89 * panic("Oh my god, dancing chickens at %d\n", pos); 92 * panic("Oh my god, dancing chickens at %d\n", pos);
90 * 93 *
91 * textsearch_destroy(conf); 94 * textsearch_destroy(conf);
92 *
93 * ========================================================================== 95 * ==========================================================================
94 */ 96 */
95 97
@@ -97,6 +99,7 @@
97#include <linux/types.h> 99#include <linux/types.h>
98#include <linux/string.h> 100#include <linux/string.h>
99#include <linux/init.h> 101#include <linux/init.h>
102#include <linux/rculist.h>
100#include <linux/rcupdate.h> 103#include <linux/rcupdate.h>
101#include <linux/err.h> 104#include <linux/err.h>
102#include <linux/textsearch.h> 105#include <linux/textsearch.h>
@@ -264,7 +267,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
264 return ERR_PTR(-EINVAL); 267 return ERR_PTR(-EINVAL);
265 268
266 ops = lookup_ts_algo(algo); 269 ops = lookup_ts_algo(algo);
267#ifdef CONFIG_KMOD 270#ifdef CONFIG_MODULES
268 /* 271 /*
269 * Why not always autoload you may ask. Some users are 272 * Why not always autoload you may ask. Some users are
270 * in a situation where requesting a module may deadlock, 273 * in a situation where requesting a module may deadlock,
@@ -279,7 +282,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
279 if (ops == NULL) 282 if (ops == NULL)
280 goto errout; 283 goto errout;
281 284
282 conf = ops->init(pattern, len, gfp_mask); 285 conf = ops->init(pattern, len, gfp_mask, flags);
283 if (IS_ERR(conf)) { 286 if (IS_ERR(conf)) {
284 err = PTR_ERR(conf); 287 err = PTR_ERR(conf);
285 goto errout; 288 goto errout;
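
Hedged sketch of a case-insensitive substring search with the new TS_IGNORECASE flag; find_token(), the haystack and the pattern are made up, while textsearch_prepare(), textsearch_find_continuous() and textsearch_destroy() are the interfaces documented above.

#include <linux/textsearch.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/err.h>

static int find_token(const char *buf, unsigned int len)
{
        struct ts_config *conf;
        struct ts_state st;
        unsigned int pos;

        conf = textsearch_prepare("bm", "Token", 5, GFP_KERNEL, TS_IGNORECASE);
        if (IS_ERR(conf))
                return PTR_ERR(conf);

        pos = textsearch_find_continuous(conf, &st, buf, len);
        textsearch_destroy(conf);

        return pos != UINT_MAX ? (int)pos : -1;  /* UINT_MAX means no match */
}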
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index d90822c378a4..9e66ee4020e9 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -39,6 +39,7 @@
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/types.h> 40#include <linux/types.h>
41#include <linux/string.h> 41#include <linux/string.h>
42#include <linux/ctype.h>
42#include <linux/textsearch.h> 43#include <linux/textsearch.h>
43 44
44/* Alphabet size, use ASCII */ 45/* Alphabet size, use ASCII */
@@ -63,7 +64,8 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
63 struct ts_bm *bm = ts_config_priv(conf); 64 struct ts_bm *bm = ts_config_priv(conf);
64 unsigned int i, text_len, consumed = state->offset; 65 unsigned int i, text_len, consumed = state->offset;
65 const u8 *text; 66 const u8 *text;
66 int shift = bm->patlen, bs; 67 int shift = bm->patlen - 1, bs;
68 const u8 icase = conf->flags & TS_IGNORECASE;
67 69
68 for (;;) { 70 for (;;) {
69 text_len = conf->get_next_block(consumed, &text, conf, state); 71 text_len = conf->get_next_block(consumed, &text, conf, state);
@@ -75,7 +77,9 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
75 DEBUGP("Searching in position %d (%c)\n", 77 DEBUGP("Searching in position %d (%c)\n",
76 shift, text[shift]); 78 shift, text[shift]);
77 for (i = 0; i < bm->patlen; i++) 79 for (i = 0; i < bm->patlen; i++)
78 if (text[shift-i] != bm->pattern[bm->patlen-1-i]) 80 if ((icase ? toupper(text[shift-i])
81 : text[shift-i])
82 != bm->pattern[bm->patlen-1-i])
79 goto next; 83 goto next;
80 84
81 /* London calling... */ 85 /* London calling... */
@@ -111,14 +115,18 @@ static int subpattern(u8 *pattern, int i, int j, int g)
111 return ret; 115 return ret;
112} 116}
113 117
114static void compute_prefix_tbl(struct ts_bm *bm) 118static void compute_prefix_tbl(struct ts_bm *bm, int flags)
115{ 119{
116 int i, j, g; 120 int i, j, g;
117 121
118 for (i = 0; i < ASIZE; i++) 122 for (i = 0; i < ASIZE; i++)
119 bm->bad_shift[i] = bm->patlen; 123 bm->bad_shift[i] = bm->patlen;
120 for (i = 0; i < bm->patlen - 1; i++) 124 for (i = 0; i < bm->patlen - 1; i++) {
121 bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i; 125 bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
126 if (flags & TS_IGNORECASE)
127 bm->bad_shift[tolower(bm->pattern[i])]
128 = bm->patlen - 1 - i;
129 }
122 130
 123 /* Compute the good shift array, used to match reoccurrences 131 /* Compute the good shift array, used to match reoccurrences
 124 * of a subpattern */ 132 * of a subpattern */
@@ -135,10 +143,11 @@ static void compute_prefix_tbl(struct ts_bm *bm)
135} 143}
136 144
137static struct ts_config *bm_init(const void *pattern, unsigned int len, 145static struct ts_config *bm_init(const void *pattern, unsigned int len,
138 gfp_t gfp_mask) 146 gfp_t gfp_mask, int flags)
139{ 147{
140 struct ts_config *conf; 148 struct ts_config *conf;
141 struct ts_bm *bm; 149 struct ts_bm *bm;
150 int i;
142 unsigned int prefix_tbl_len = len * sizeof(unsigned int); 151 unsigned int prefix_tbl_len = len * sizeof(unsigned int);
143 size_t priv_size = sizeof(*bm) + len + prefix_tbl_len; 152 size_t priv_size = sizeof(*bm) + len + prefix_tbl_len;
144 153
@@ -146,11 +155,16 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len,
146 if (IS_ERR(conf)) 155 if (IS_ERR(conf))
147 return conf; 156 return conf;
148 157
158 conf->flags = flags;
149 bm = ts_config_priv(conf); 159 bm = ts_config_priv(conf);
150 bm->patlen = len; 160 bm->patlen = len;
151 bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len; 161 bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
152 memcpy(bm->pattern, pattern, len); 162 if (flags & TS_IGNORECASE)
153 compute_prefix_tbl(bm); 163 for (i = 0; i < len; i++)
164 bm->pattern[i] = toupper(((u8 *)pattern)[i]);
165 else
166 memcpy(bm->pattern, pattern, len);
167 compute_prefix_tbl(bm, flags);
154 168
155 return conf; 169 return conf;
156} 170}
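The bm_init() and compute_prefix_tbl() hunks store the pattern upper-cased and give the lower-case alias of every pattern byte the same bad-character shift. The idea in isolation, as a stand-alone sketch (plain userspace C, not the kernel code itself):

	#include <ctype.h>

	#define ASIZE 256

	static void build_bad_shift(const unsigned char *pattern, int patlen,
				    unsigned int bad_shift[ASIZE], int icase)
	{
		int i;

		for (i = 0; i < ASIZE; i++)
			bad_shift[i] = patlen;	/* default: skip whole pattern */
		for (i = 0; i < patlen - 1; i++) {
			unsigned char c = icase ? toupper(pattern[i]) : pattern[i];

			bad_shift[c] = patlen - 1 - i;
			if (icase)	/* lower-case alias shifts the same way */
				bad_shift[tolower(c)] = patlen - 1 - i;
		}
	}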
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index af575b61526b..5696a35184e4 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -257,7 +257,7 @@ found_match:
257} 257}
258 258
259static struct ts_config *fsm_init(const void *pattern, unsigned int len, 259static struct ts_config *fsm_init(const void *pattern, unsigned int len,
260 gfp_t gfp_mask) 260 gfp_t gfp_mask, int flags)
261{ 261{
262 int i, err = -EINVAL; 262 int i, err = -EINVAL;
263 struct ts_config *conf; 263 struct ts_config *conf;
@@ -269,6 +269,9 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len,
269 if (len % sizeof(struct ts_fsm_token) || ntokens < 1) 269 if (len % sizeof(struct ts_fsm_token) || ntokens < 1)
270 goto errout; 270 goto errout;
271 271
272 if (flags & TS_IGNORECASE)
273 goto errout;
274
272 for (i = 0; i < ntokens; i++) { 275 for (i = 0; i < ntokens; i++) {
273 struct ts_fsm_token *t = &tokens[i]; 276 struct ts_fsm_token *t = &tokens[i];
274 277
@@ -284,6 +287,7 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len,
284 if (IS_ERR(conf)) 287 if (IS_ERR(conf))
285 return conf; 288 return conf;
286 289
290 conf->flags = flags;
287 fsm = ts_config_priv(conf); 291 fsm = ts_config_priv(conf);
288 fsm->ntokens = ntokens; 292 fsm->ntokens = ntokens;
289 memcpy(fsm->tokens, pattern, len); 293 memcpy(fsm->tokens, pattern, len);
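Since the finite-state-machine matcher expresses case handling through its own token types, fsm_init() simply rejects the new flag. A hedged illustration of what a caller should then expect (tokens and tokens_len are assumed to exist):

	conf = textsearch_prepare("fsm", tokens, tokens_len,
				  GFP_KERNEL, TS_IGNORECASE);
	/* IS_ERR(conf) is true here and PTR_ERR(conf) == -EINVAL */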
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 3ced628cab4b..632f783e65f1 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -33,6 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/string.h> 35#include <linux/string.h>
36#include <linux/ctype.h>
36#include <linux/textsearch.h> 37#include <linux/textsearch.h>
37 38
38struct ts_kmp 39struct ts_kmp
@@ -47,6 +48,7 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
47 struct ts_kmp *kmp = ts_config_priv(conf); 48 struct ts_kmp *kmp = ts_config_priv(conf);
48 unsigned int i, q = 0, text_len, consumed = state->offset; 49 unsigned int i, q = 0, text_len, consumed = state->offset;
49 const u8 *text; 50 const u8 *text;
51 const int icase = conf->flags & TS_IGNORECASE;
50 52
51 for (;;) { 53 for (;;) {
52 text_len = conf->get_next_block(consumed, &text, conf, state); 54 text_len = conf->get_next_block(consumed, &text, conf, state);
@@ -55,9 +57,11 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
55 break; 57 break;
56 58
57 for (i = 0; i < text_len; i++) { 59 for (i = 0; i < text_len; i++) {
58 while (q > 0 && kmp->pattern[q] != text[i]) 60 while (q > 0 && kmp->pattern[q]
61 != (icase ? toupper(text[i]) : text[i]))
59 q = kmp->prefix_tbl[q - 1]; 62 q = kmp->prefix_tbl[q - 1];
60 if (kmp->pattern[q] == text[i]) 63 if (kmp->pattern[q]
64 == (icase ? toupper(text[i]) : text[i]))
61 q++; 65 q++;
62 if (unlikely(q == kmp->pattern_len)) { 66 if (unlikely(q == kmp->pattern_len)) {
63 state->offset = consumed + i + 1; 67 state->offset = consumed + i + 1;
@@ -72,24 +76,28 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
72} 76}
73 77
74static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len, 78static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
75 unsigned int *prefix_tbl) 79 unsigned int *prefix_tbl, int flags)
76{ 80{
77 unsigned int k, q; 81 unsigned int k, q;
82 const u8 icase = flags & TS_IGNORECASE;
78 83
79 for (k = 0, q = 1; q < len; q++) { 84 for (k = 0, q = 1; q < len; q++) {
80 while (k > 0 && pattern[k] != pattern[q]) 85 while (k > 0 && (icase ? toupper(pattern[k]) : pattern[k])
86 != (icase ? toupper(pattern[q]) : pattern[q]))
81 k = prefix_tbl[k-1]; 87 k = prefix_tbl[k-1];
82 if (pattern[k] == pattern[q]) 88 if ((icase ? toupper(pattern[k]) : pattern[k])
89 == (icase ? toupper(pattern[q]) : pattern[q]))
83 k++; 90 k++;
84 prefix_tbl[q] = k; 91 prefix_tbl[q] = k;
85 } 92 }
86} 93}
87 94
88static struct ts_config *kmp_init(const void *pattern, unsigned int len, 95static struct ts_config *kmp_init(const void *pattern, unsigned int len,
89 gfp_t gfp_mask) 96 gfp_t gfp_mask, int flags)
90{ 97{
91 struct ts_config *conf; 98 struct ts_config *conf;
92 struct ts_kmp *kmp; 99 struct ts_kmp *kmp;
100 int i;
93 unsigned int prefix_tbl_len = len * sizeof(unsigned int); 101 unsigned int prefix_tbl_len = len * sizeof(unsigned int);
94 size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len; 102 size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len;
95 103
@@ -97,11 +105,16 @@ static struct ts_config *kmp_init(const void *pattern, unsigned int len,
97 if (IS_ERR(conf)) 105 if (IS_ERR(conf))
98 return conf; 106 return conf;
99 107
108 conf->flags = flags;
100 kmp = ts_config_priv(conf); 109 kmp = ts_config_priv(conf);
101 kmp->pattern_len = len; 110 kmp->pattern_len = len;
102 compute_prefix_tbl(pattern, len, kmp->prefix_tbl); 111 compute_prefix_tbl(pattern, len, kmp->prefix_tbl, flags);
103 kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len; 112 kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len;
104 memcpy(kmp->pattern, pattern, len); 113 if (flags & TS_IGNORECASE)
114 for (i = 0; i < len; i++)
115 kmp->pattern[i] = toupper(((u8 *)pattern)[i]);
116 else
117 memcpy(kmp->pattern, pattern, len);
105 118
106 return conf; 119 return conf;
107} 120}
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6021757a4496..c399bc1093cb 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -22,9 +22,12 @@
22#include <linux/string.h> 22#include <linux/string.h>
23#include <linux/ctype.h> 23#include <linux/ctype.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/kallsyms.h>
26#include <linux/uaccess.h>
25 27
26#include <asm/page.h> /* for PAGE_SIZE */ 28#include <asm/page.h> /* for PAGE_SIZE */
27#include <asm/div64.h> 29#include <asm/div64.h>
30#include <asm/sections.h> /* for dereference_function_descriptor() */
28 31
29/* Works only for digits and letters, but small and fast */ 32/* Works only for digits and letters, but small and fast */
30#define TOLOWER(x) ((x) | 0x20) 33#define TOLOWER(x) ((x) | 0x20)
@@ -218,7 +221,7 @@ int strict_strtou##type(const char *cp, unsigned int base, valtype *res)\
218 if (len == 0) \ 221 if (len == 0) \
219 return -EINVAL; \ 222 return -EINVAL; \
220 \ 223 \
221 val = simple_strtoul(cp, &tail, base); \ 224 val = simple_strtou##type(cp, &tail, base); \
222 if ((*tail == '\0') || \ 225 if ((*tail == '\0') || \
223 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {\ 226 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {\
224 *res = val; \ 227 *res = val; \
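The macro now expands to the matching simple_strtou##type() helper, so strict_strtoull() stops going through the 32-bit parser. A hedged example of the visible effect:

	unsigned long long v;

	/* previously parsed via simple_strtoul(), so a 32-bit kernel
	 * silently truncated the value; with this fix the full 64-bit
	 * result is stored */
	if (strict_strtoull("5000000000", 10, &v) == 0)
		pr_info("parsed %llu\n", v);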
@@ -482,6 +485,79 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
482 return buf; 485 return buf;
483} 486}
484 487
488static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags)
489{
490 int len, i;
491
492 if ((unsigned long)s < PAGE_SIZE)
493 s = "<NULL>";
494
495 len = strnlen(s, precision);
496
497 if (!(flags & LEFT)) {
498 while (len < field_width--) {
499 if (buf < end)
500 *buf = ' ';
501 ++buf;
502 }
503 }
504 for (i = 0; i < len; ++i) {
505 if (buf < end)
506 *buf = *s;
507 ++buf; ++s;
508 }
509 while (len < field_width--) {
510 if (buf < end)
511 *buf = ' ';
512 ++buf;
513 }
514 return buf;
515}
516
517static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags)
518{
519 unsigned long value = (unsigned long) ptr;
520#ifdef CONFIG_KALLSYMS
521 char sym[KSYM_SYMBOL_LEN];
522 sprint_symbol(sym, value);
523 return string(buf, end, sym, field_width, precision, flags);
524#else
525 field_width = 2*sizeof(void *);
526 flags |= SPECIAL | SMALL | ZEROPAD;
527 return number(buf, end, value, 16, field_width, precision, flags);
528#endif
529}
530
531/*
532 * Show a '%p' thing. A kernel extension is that the '%p' is followed
533 * by an extra set of alphanumeric characters that are extended format
534 * specifiers.
535 *
536 * Right now we just handle 'F' (for symbolic Function descriptor pointers)
537 * and 'S' (for Symbolic direct pointers), but this can easily be
538 * extended in the future (network address types etc).
539 *
540 * The difference between 'S' and 'F' is that on ia64 and ppc64 function
541 * pointers are really function descriptors, which contain a pointer the
542 * real address.
543 */
544static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
545{
546 switch (*fmt) {
547 case 'F':
548 ptr = dereference_function_descriptor(ptr);
549 /* Fallthrough */
550 case 'S':
551 return symbol_string(buf, end, ptr, field_width, precision, flags);
552 }
553 flags |= SMALL;
554 if (field_width == -1) {
555 field_width = 2*sizeof(void *);
556 flags |= ZEROPAD;
557 }
558 return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags);
559}
560
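With pointer() wired into vsnprintf() below, a caller can resolve addresses straight from the format string. Two illustrative call sites (the surrounding code and my_fops are made up, not part of this patch):

	printk(KERN_DEBUG "called from %pS\n", (void *)_RET_IP_);
	printk(KERN_DEBUG "open handler %pF\n", my_fops->open);

'%pS' symbolises the address as-is; '%pF' first calls dereference_function_descriptor(), which only makes a difference on ia64 and ppc64 where function pointers are descriptors.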
485/** 561/**
486 * vsnprintf - Format a string and place it in a buffer 562 * vsnprintf - Format a string and place it in a buffer
487 * @buf: The buffer to place the result into 563 * @buf: The buffer to place the result into
@@ -502,11 +578,9 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
502 */ 578 */
503int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) 579int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
504{ 580{
505 int len;
506 unsigned long long num; 581 unsigned long long num;
507 int i, base; 582 int base;
508 char *str, *end, c; 583 char *str, *end, c;
509 const char *s;
510 584
511 int flags; /* flags to number() */ 585 int flags; /* flags to number() */
512 586
@@ -622,43 +696,18 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
622 continue; 696 continue;
623 697
624 case 's': 698 case 's':
625 s = va_arg(args, char *); 699 str = string(str, end, va_arg(args, char *), field_width, precision, flags);
626 if ((unsigned long)s < PAGE_SIZE)
627 s = "<NULL>";
628
629 len = strnlen(s, precision);
630
631 if (!(flags & LEFT)) {
632 while (len < field_width--) {
633 if (str < end)
634 *str = ' ';
635 ++str;
636 }
637 }
638 for (i = 0; i < len; ++i) {
639 if (str < end)
640 *str = *s;
641 ++str; ++s;
642 }
643 while (len < field_width--) {
644 if (str < end)
645 *str = ' ';
646 ++str;
647 }
648 continue; 700 continue;
649 701
650 case 'p': 702 case 'p':
651 flags |= SMALL; 703 str = pointer(fmt+1, str, end,
652 if (field_width == -1) { 704 va_arg(args, void *),
653 field_width = 2*sizeof(void *); 705 field_width, precision, flags);
654 flags |= ZEROPAD; 706 /* Skip all alphanumeric pointer suffixes */
655 } 707 while (isalnum(fmt[1]))
656 str = number(str, end, 708 fmt++;
657 (unsigned long) va_arg(args, void *),
658 16, field_width, precision, flags);
659 continue; 709 continue;
660 710
661
662 case 'n': 711 case 'n':
663 /* FIXME: 712 /* FIXME:
664 * What does C99 say about the overflow case here? */ 713 * What does C99 say about the overflow case here? */