author	Ingo Molnar <mingo@elte.hu>	2008-07-31 12:43:41 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-31 12:43:41 -0400
commit	85e9ca333d03fbd56b9e123c8456f0d98e20faad (patch)
tree	7bb15ada5f536950efa23ad60ea9eea60380ca1c /lib
parent	a300bec952127d9a15e666b391bb35c9aecb3002 (diff)
parent	6e86841d05f371b5b9b86ce76c02aaee83352298 (diff)
Merge branch 'linus' into timers/hpet
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig	7
-rw-r--r--	lib/Kconfig.debug	67
-rw-r--r--	lib/Kconfig.kgdb	3
-rw-r--r--	lib/Makefile	12
-rw-r--r--	lib/bcd.c	14
-rw-r--r--	lib/bug.c	2
-rw-r--r--	lib/cmdline.c	16
-rw-r--r--	lib/cpumask.c	9
-rw-r--r--	lib/crc-t10dif.c	67
-rw-r--r--	lib/debugobjects.c	34
-rw-r--r--	lib/idr.c	142
-rw-r--r--	lib/inflate.c	52
-rw-r--r--	lib/iomap.c	3
-rw-r--r--	lib/iommu-helper.c	8
-rw-r--r--	lib/kobject.c	20
-rw-r--r--	lib/kobject_uevent.c	9
-rw-r--r--	lib/list_debug.c	50
-rw-r--r--	lib/lzo/lzo1x_decompress.c	6
-rw-r--r--	lib/percpu_counter.c	7
-rw-r--r--	lib/plist.c	13
-rw-r--r--	lib/radix-tree.c	182
-rw-r--r--	lib/ratelimit.c	56
-rw-r--r--	lib/scatterlist.c	176
-rw-r--r--	lib/show_mem.c	63
-rw-r--r--	lib/smp_processor_id.c	11
-rw-r--r--	lib/swiotlb.c	4
-rw-r--r--	lib/syscall.c	75
-rw-r--r--	lib/textsearch.c	17
-rw-r--r--	lib/ts_bm.c	28
-rw-r--r--	lib/ts_fsm.c	6
-rw-r--r--	lib/ts_kmp.c	29
-rw-r--r--	lib/vsprintf.c	128
32 files changed, 990 insertions, 326 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 8cc8e8722a3f..c7ad7a5b3535 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -29,6 +29,13 @@ config CRC16
 	  the kernel tree does.  Such modules that use library CRC16
 	  functions require M here.
 
+config CRC_T10DIF
+	tristate "CRC calculation for the T10 Data Integrity Field"
+	help
+	  This option is only needed if a module that's not in the
+	  kernel tree needs to calculate CRC checks for use with the
+	  SCSI data integrity subsystem.
+
 config CRC_ITU_T
 	tristate "CRC ITU-T V.41 functions"
 	help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d2099f41aa1e..e1d4764435ed 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -74,6 +74,9 @@ config DEBUG_FS
 	  debugging files into.  Enable this option to be able to read and
 	  write to these files.
 
+	  For detailed documentation on the debugfs API, see
+	  Documentation/DocBook/filesystems.
+
 	  If unsure, say N.
 
 config HEADERS_CHECK
@@ -147,7 +150,7 @@ config DETECT_SOFTLOCKUP
 	help
 	  Say Y here to enable the kernel to detect "soft lockups",
 	  which are bugs that cause the kernel to loop in kernel
-	  mode for more than 10 seconds, without giving other tasks a
+	  mode for more than 60 seconds, without giving other tasks a
 	  chance to run.
 
 	  When a soft-lockup is detected, the kernel will print the
@@ -159,6 +162,30 @@ config DETECT_SOFTLOCKUP
 	  can be detected via the NMI-watchdog, on platforms that
 	  support it.)
 
+config BOOTPARAM_SOFTLOCKUP_PANIC
+	bool "Panic (Reboot) On Soft Lockups"
+	depends on DETECT_SOFTLOCKUP
+	help
+	  Say Y here to enable the kernel to panic on "soft lockups",
+	  which are bugs that cause the kernel to loop in kernel
+	  mode for more than 60 seconds, without giving other tasks a
+	  chance to run.
+
+	  The panic can be used in combination with panic_timeout,
+	  to cause the system to reboot automatically after a
+	  lockup has been detected. This feature is useful for
+	  high-availability systems that have uptime guarantees and
+	  where a lockup must be resolved ASAP.
+
+	  Say N if unsure.
+
+config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
+	int
+	depends on DETECT_SOFTLOCKUP
+	range 0 1
+	default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
+	default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
+
 config SCHED_DEBUG
 	bool "Collect scheduler debugging info"
 	depends on DEBUG_KERNEL && PROC_FS
@@ -419,7 +446,6 @@ config DEBUG_LOCKING_API_SELFTESTS
 
 config STACKTRACE
 	bool
-	depends on DEBUG_KERNEL
 	depends on STACKTRACE_SUPPORT
 
 config DEBUG_KOBJECT
@@ -479,6 +505,18 @@ config DEBUG_WRITECOUNT
 
 	  If unsure, say N.
 
+config DEBUG_MEMORY_INIT
+	bool "Debug memory initialisation" if EMBEDDED
+	default !EMBEDDED
+	help
+	  Enable this for additional checks during memory initialisation.
+	  The sanity checks verify aspects of the VM such as the memory model
+	  and other information provided by the architecture. Verbose
+	  information will be printed at KERN_DEBUG loglevel depending
+	  on the mminit_loglevel= command-line option.
+
+	  If unsure, say Y
+
 config DEBUG_LIST
 	bool "Debug linked list manipulation"
 	depends on DEBUG_KERNEL
@@ -531,16 +569,34 @@ config BOOT_PRINTK_DELAY
 config RCU_TORTURE_TEST
 	tristate "torture tests for RCU"
 	depends on DEBUG_KERNEL
-	depends on m
 	default n
 	help
 	  This option provides a kernel module that runs torture tests
 	  on the RCU infrastructure.  The kernel module may be built
 	  after the fact on the running kernel to be tested, if desired.
 
+	  Say Y here if you want RCU torture tests to be built into
+	  the kernel.
 	  Say M if you want the RCU torture tests to build as a module.
 	  Say N if you are unsure.
 
+config RCU_TORTURE_TEST_RUNNABLE
+	bool "torture tests for RCU runnable by default"
+	depends on RCU_TORTURE_TEST = y
+	default n
+	help
+	  This option provides a way to build the RCU torture tests
+	  directly into the kernel without them starting up at boot
+	  time.  You can use /proc/sys/kernel/rcutorture_runnable
+	  to manually override this setting.  This /proc file is
+	  available only when the RCU torture tests have been built
+	  into the kernel.
+
+	  Say Y here if you want the RCU torture tests to start during
+	  boot (you probably don't).
+	  Say N here if you want the RCU torture tests to start only
+	  after being manually enabled via /proc.
+
 config KPROBES_SANITY_TEST
 	bool "Kprobes sanity tests"
 	depends on DEBUG_KERNEL
@@ -563,6 +619,9 @@ config BACKTRACE_SELF_TEST
 	  for distributions or general kernels, but only for kernel
 	  developers working on architecture code.
 
+	  Note that if you want to also test saved backtraces, you will
+	  have to enable STACKTRACE as well.
+
 	  Say N if you are unsure.
 
 config LKDTM
@@ -634,6 +693,8 @@ config LATENCYTOP
 	  Enable this option if you want to use the LatencyTOP tool
 	  to find out which userspace is blocking on what kernel operations.
 
+source kernel/trace/Kconfig
+
 config PROVIDE_OHCI1394_DMA_INIT
 	bool "Remote debugging over FireWire early on boot"
 	depends on PCI && X86
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index a5d4b1dac2a5..2cfd2721f7ed 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,7 +1,4 @@
 
-config HAVE_ARCH_KGDB_SHADOW_INFO
-	bool
-
 config HAVE_ARCH_KGDB
 	bool
 
diff --git a/lib/Makefile b/lib/Makefile
index 74b0cfb1fcc3..3b1f94bbe9de 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -2,18 +2,23 @@
 # Makefile for some libs needed in the kernel.
 #
 
+ifdef CONFIG_FTRACE
+ORIG_CFLAGS := $(KBUILD_CFLAGS)
+KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
+endif
+
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o \
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
-	 proportions.o prio_heap.o ratelimit.o
+	 proportions.o prio_heap.o ratelimit.o show_mem.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y	+= kobject.o kref.o klist.o
 
-obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
+obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
@@ -45,6 +50,7 @@ endif
 obj-$(CONFIG_BITREVERSE) += bitrev.o
 obj-$(CONFIG_CRC_CCITT)	+= crc-ccitt.o
 obj-$(CONFIG_CRC16)	+= crc16.o
+obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
 obj-$(CONFIG_CRC_ITU_T)	+= crc-itu-t.o
 obj-$(CONFIG_CRC32)	+= crc32.o
 obj-$(CONFIG_CRC7)	+= crc7.o
@@ -72,6 +78,8 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o
 
 obj-$(CONFIG_HAVE_LMB) += lmb.o
 
+obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/bcd.c b/lib/bcd.c
new file mode 100644
index 000000000000..d74257fd0fe7
--- /dev/null
+++ b/lib/bcd.c
@@ -0,0 +1,14 @@
+#include <linux/bcd.h>
+#include <linux/module.h>
+
+unsigned bcd2bin(unsigned char val)
+{
+	return (val & 0x0f) + (val >> 4) * 10;
+}
+EXPORT_SYMBOL(bcd2bin);
+
+unsigned char bin2bcd(unsigned val)
+{
+	return ((val / 10) << 4) + val % 10;
+}
+EXPORT_SYMBOL(bin2bcd);
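
The two helpers above are deliberately tiny. A minimal usage sketch (hypothetical caller, not part of the patch) for a driver talking to a BCD-encoded RTC register:

	#include <linux/bcd.h>

	/* Hypothetical example: an RTC stores seconds as BCD, e.g. 0x59 for 59. */
	static unsigned rtc_read_seconds(unsigned char reg_val)
	{
		return bcd2bin(reg_val);	/* 0x59 -> 59 */
	}

	static unsigned char rtc_write_seconds(unsigned sec)
	{
		return bin2bcd(sec);		/* 59 -> 0x59 */
	}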
diff --git a/lib/bug.c b/lib/bug.c
index 530f38f55787..bfeafd60ee9f 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -37,6 +37,7 @@
  */
 #include <linux/list.h>
 #include <linux/module.h>
+#include <linux/kernel.h>
 #include <linux/bug.h>
 #include <linux/sched.h>
 
@@ -149,6 +150,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 			(void *)bugaddr);
 
 		show_regs(regs);
+		add_taint(TAINT_WARN);
 		return BUG_TRAP_TYPE_WARN;
 	}
 
diff --git a/lib/cmdline.c b/lib/cmdline.c
index f596c08d213a..5ba8a942a478 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -116,7 +116,7 @@ char *get_options(const char *str, int nints, int *ints)
 /**
  *	memparse - parse a string with mem suffixes into a number
  *	@ptr: Where parse begins
- *	@retptr: (output) Pointer to next char after parse completes
+ *	@retptr: (output) Optional pointer to next char after parse completes
  *
  *	Parses a string into a number.  The number stored at @ptr is
  *	potentially suffixed with %K (for kilobytes, or 1024 bytes),
@@ -126,11 +126,13 @@ char *get_options(const char *str, int nints, int *ints)
  *	megabyte, or one gigabyte, respectively.
  */
 
-unsigned long long memparse (char *ptr, char **retptr)
+unsigned long long memparse(char *ptr, char **retptr)
 {
-	unsigned long long ret = simple_strtoull (ptr, retptr, 0);
+	char *endptr;	/* local pointer to end of parsed string */
 
-	switch (**retptr) {
+	unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
+
+	switch (*endptr) {
 	case 'G':
 	case 'g':
 		ret <<= 10;
@@ -140,10 +142,14 @@ unsigned long long memparse (char *ptr, char **retptr)
 	case 'K':
 	case 'k':
 		ret <<= 10;
-		(*retptr)++;
+		endptr++;
 	default:
 		break;
 	}
+
+	if (retptr)
+		*retptr = endptr;
+
 	return ret;
 }
 
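
With this change @retptr becomes optional, so callers that only want the parsed value can pass NULL. A minimal sketch (hypothetical caller, not part of the patch):

	/* Parse a size argument such as "64K" or "1G"; NULL means the caller
	 * does not care where parsing stopped. "64K" yields 65536. */
	static unsigned long long parse_size_arg(char *arg)
	{
		return memparse(arg, NULL);
	}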
diff --git a/lib/cpumask.c b/lib/cpumask.c
index bb4f76d3c3e7..5f97dc25ef9c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp)
 }
 EXPORT_SYMBOL(__next_cpu);
 
+#if NR_CPUS > 64
+int __next_cpu_nr(int n, const cpumask_t *srcp)
+{
+	return min_t(int, nr_cpu_ids,
+				find_next_bit(srcp->bits, nr_cpu_ids, n+1));
+}
+EXPORT_SYMBOL(__next_cpu_nr);
+#endif
+
 int __any_online_cpu(const cpumask_t *mask)
 {
 	int cpu;
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
new file mode 100644
index 000000000000..fbbd66ed86cd
--- /dev/null
+++ b/lib/crc-t10dif.c
@@ -0,0 +1,67 @@
+/*
+ * T10 Data Integrity Field CRC16 calculation
+ *
+ * Copyright (c) 2007 Oracle Corporation.  All rights reserved.
+ * Written by Martin K. Petersen <martin.petersen@oracle.com>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc-t10dif.h>
+
+/* Table generated using the following polynomium:
+ * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
+ * gt: 0x8bb7
+ */
+static const __u16 t10_dif_crc_table[256] = {
+	0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
+	0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
+	0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
+	0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
+	0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
+	0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
+	0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
+	0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
+	0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
+	0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
+	0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
+	0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
+	0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
+	0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
+	0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
+	0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
+	0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
+	0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
+	0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
+	0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
+	0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
+	0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
+	0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
+	0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
+	0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
+	0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
+	0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
+	0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
+	0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
+	0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
+	0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
+	0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
+};
+
+__u16 crc_t10dif(const unsigned char *buffer, size_t len)
+{
+	__u16 crc = 0;
+	unsigned int i;
+
+	for (i = 0 ; i < len ; i++)
+		crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
+
+	return crc;
+}
+EXPORT_SYMBOL(crc_t10dif);
+
+MODULE_DESCRIPTION("T10 DIF CRC calculation");
+MODULE_LICENSE("GPL");
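
A minimal usage sketch (hypothetical caller, not part of the patch), computing the T10 DIF guard tag over one 512-byte sector:

	#include <linux/crc-t10dif.h>

	static __u16 sector_guard_tag(const unsigned char *sector)
	{
		/* CRC16 with generator polynomial 0x8bb7 over the sector's data bytes */
		return crc_t10dif(sector, 512);
	}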
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a76a5e122ae1..45a6bde762d1 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -68,6 +68,7 @@ static int fill_pool(void)
 {
 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
 	struct debug_obj *new;
+	unsigned long flags;
 
 	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
 		return obj_pool_free;
@@ -81,10 +82,10 @@ static int fill_pool(void)
 		if (!new)
 			return obj_pool_free;
 
-		spin_lock(&pool_lock);
+		spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
 		obj_pool_free++;
-		spin_unlock(&pool_lock);
+		spin_unlock_irqrestore(&pool_lock, flags);
 	}
 	return obj_pool_free;
 }
@@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 }
 
 /*
- * Allocate a new object. If the pool is empty and no refill possible,
- * switch off the debugger.
+ * Allocate a new object. If the pool is empty, switch off the debugger.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 {
 	struct debug_obj *obj = NULL;
-	int retry = 0;
 
-repeat:
 	spin_lock(&pool_lock);
 	if (obj_pool.first) {
 		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -141,9 +139,6 @@ repeat:
 	}
 	spin_unlock(&pool_lock);
 
-	if (fill_pool() && !obj && !retry++)
-		goto repeat;
-
 	return obj;
 }
 
@@ -210,9 +205,8 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
 
 	if (limit < 5 && obj->descr != descr_test) {
 		limit++;
-		printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
+		WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
 			obj_states[obj->state], obj->descr->name);
-		WARN_ON(1);
 	}
 	debug_objects_warnings++;
 }
@@ -231,15 +225,13 @@ debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
 
 static void debug_object_is_on_stack(void *addr, int onstack)
 {
-	void *stack = current->stack;
 	int is_on_stack;
 	static int limit;
 
 	if (limit > 4)
 		return;
 
-	is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE));
-
+	is_on_stack = object_is_on_stack(addr);
 	if (is_on_stack == onstack)
 		return;
 
@@ -261,6 +253,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 	struct debug_obj *obj;
 	unsigned long flags;
 
+	fill_pool();
+
 	db = get_bucket((unsigned long) addr);
 
 	spin_lock_irqsave(&db->lock, flags);
@@ -738,26 +732,22 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
 
 	obj = lookup_object(addr, db);
 	if (!obj && state != ODEBUG_STATE_NONE) {
-		printk(KERN_ERR "ODEBUG: selftest object not found\n");
-		WARN_ON(1);
+		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
 		goto out;
 	}
 	if (obj && obj->state != state) {
-		printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
+		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
 			obj->state, state);
-		WARN_ON(1);
 		goto out;
 	}
 	if (fixups != debug_objects_fixups) {
-		printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
+		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
 			fixups, debug_objects_fixups);
-		WARN_ON(1);
 		goto out;
 	}
 	if (warnings != debug_objects_warnings) {
-		printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
+		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
 			warnings, debug_objects_warnings);
-		WARN_ON(1);
 		goto out;
 	}
 	res = 0;
diff --git a/lib/idr.c b/lib/idr.c
index 7a02e173f027..e728c7fccc4d 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,6 +6,8 @@
  * Modified by George Anzinger to reuse immediately and to use
  * find bit instructions.  Also removed _irq on spinlocks.
  *
+ * Modified by Nadia Derbey to make it RCU safe.
+ *
  * Small id to pointer translation service.
  *
  * It uses a radix tree like structure as a sparse array indexed
@@ -35,7 +37,7 @@
 
 static struct kmem_cache *idr_layer_cache;
 
-static struct idr_layer *alloc_layer(struct idr *idp)
+static struct idr_layer *get_from_free_list(struct idr *idp)
 {
 	struct idr_layer *p;
 	unsigned long flags;
@@ -50,15 +52,28 @@ static struct idr_layer *alloc_layer(struct idr *idp)
 	return(p);
 }
 
+static void idr_layer_rcu_free(struct rcu_head *head)
+{
+	struct idr_layer *layer;
+
+	layer = container_of(head, struct idr_layer, rcu_head);
+	kmem_cache_free(idr_layer_cache, layer);
+}
+
+static inline void free_layer(struct idr_layer *p)
+{
+	call_rcu(&p->rcu_head, idr_layer_rcu_free);
+}
+
 /* only called when idp->lock is held */
-static void __free_layer(struct idr *idp, struct idr_layer *p)
+static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
 {
 	p->ary[0] = idp->id_free;
 	idp->id_free = p;
 	idp->id_free_cnt++;
 }
 
-static void free_layer(struct idr *idp, struct idr_layer *p)
+static void move_to_free_list(struct idr *idp, struct idr_layer *p)
 {
 	unsigned long flags;
 
@@ -66,7 +81,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
 	 * Depends on the return element being zeroed.
 	 */
 	spin_lock_irqsave(&idp->lock, flags);
-	__free_layer(idp, p);
+	__move_to_free_list(idp, p);
 	spin_unlock_irqrestore(&idp->lock, flags);
 }
 
@@ -96,7 +111,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
  * @gfp_mask:	memory allocation flags
  *
  * This function should be called prior to locking and calling the
- * following function.  It preallocates enough memory to satisfy
+ * idr_get_new* functions. It preallocates enough memory to satisfy
  * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
@@ -109,7 +124,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
 		if (new == NULL)
 			return (0);
-		free_layer(idp, new);
+		move_to_free_list(idp, new);
 	}
 	return 1;
 }
@@ -143,7 +158,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			/* if already at the top layer, we need to grow */
 			if (!(p = pa[l])) {
 				*starting_id = id;
-				return -2;
+				return IDR_NEED_TO_GROW;
 			}
 
 			/* If we need to go up one layer, continue the
@@ -160,16 +175,17 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			id = ((id >> sh) ^ n ^ m) << sh;
 		}
 		if ((id >= MAX_ID_BIT) || (id < 0))
-			return -3;
+			return IDR_NOMORE_SPACE;
 		if (l == 0)
 			break;
 		/*
 		 * Create the layer below if it is missing.
 		 */
 		if (!p->ary[m]) {
-			if (!(new = alloc_layer(idp)))
+			new = get_from_free_list(idp);
+			if (!new)
 				return -1;
-			p->ary[m] = new;
+			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
 		}
 		pa[l--] = p;
@@ -192,7 +208,7 @@ build_up:
 	p = idp->top;
 	layers = idp->layers;
 	if (unlikely(!p)) {
-		if (!(p = alloc_layer(idp)))
+		if (!(p = get_from_free_list(idp)))
 			return -1;
 		layers = 1;
 	}
@@ -204,7 +220,7 @@ build_up:
 		layers++;
 		if (!p->count)
 			continue;
-		if (!(new = alloc_layer(idp))) {
+		if (!(new = get_from_free_list(idp))) {
 			/*
 			 * The allocation failed.  If we built part of
 			 * the structure tear it down.
@@ -214,7 +230,7 @@ build_up:
 				p = p->ary[0];
 				new->ary[0] = NULL;
 				new->bitmap = new->count = 0;
-				__free_layer(idp, new);
+				__move_to_free_list(idp, new);
 			}
 			spin_unlock_irqrestore(&idp->lock, flags);
 			return -1;
@@ -225,10 +241,10 @@ build_up:
 		__set_bit(0, &new->bitmap);
 		p = new;
 	}
-	idp->top = p;
+	rcu_assign_pointer(idp->top, p);
 	idp->layers = layers;
 	v = sub_alloc(idp, &id, pa);
-	if (v == -2)
+	if (v == IDR_NEED_TO_GROW)
 		goto build_up;
 	return(v);
 }
@@ -244,7 +260,8 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 		 * Successfully found an empty slot.  Install the user
 		 * pointer and mark the slot full.
 		 */
-		pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
+		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
+				(struct idr_layer *)ptr);
 		pa[0]->count++;
 		idr_mark_full(pa, id);
 	}
@@ -277,12 +294,8 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 	 * This is a cheap hack until the IDR code can be fixed to
 	 * return proper error values.
 	 */
-	if (rv < 0) {
-		if (rv == -1)
-			return -EAGAIN;
-		else /* Will be -3 */
-			return -ENOSPC;
-	}
+	if (rv < 0)
+		return _idr_rc_to_errno(rv);
 	*id = rv;
 	return 0;
 }
@@ -312,12 +325,8 @@ int idr_get_new(struct idr *idp, void *ptr, int *id)
 	 * This is a cheap hack until the IDR code can be fixed to
 	 * return proper error values.
 	 */
-	if (rv < 0) {
-		if (rv == -1)
-			return -EAGAIN;
-		else /* Will be -3 */
-			return -ENOSPC;
-	}
+	if (rv < 0)
+		return _idr_rc_to_errno(rv);
 	*id = rv;
 	return 0;
 }
@@ -325,7 +334,8 @@ EXPORT_SYMBOL(idr_get_new);
 
 static void idr_remove_warning(int id)
 {
-	printk("idr_remove called for id=%d which is not allocated.\n", id);
+	printk(KERN_WARNING
+		"idr_remove called for id=%d which is not allocated.\n", id);
 	dump_stack();
 }
 
@@ -334,6 +344,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 	struct idr_layer *p = idp->top;
 	struct idr_layer **pa[MAX_LEVEL];
 	struct idr_layer ***paa = &pa[0];
+	struct idr_layer *to_free;
 	int n;
 
 	*paa = NULL;
@@ -349,13 +360,18 @@ static void sub_remove(struct idr *idp, int shift, int id)
 	n = id & IDR_MASK;
 	if (likely(p != NULL && test_bit(n, &p->bitmap))){
 		__clear_bit(n, &p->bitmap);
-		p->ary[n] = NULL;
+		rcu_assign_pointer(p->ary[n], NULL);
+		to_free = NULL;
 		while(*paa && ! --((**paa)->count)){
-			free_layer(idp, **paa);
+			if (to_free)
+				free_layer(to_free);
+			to_free = **paa;
 			**paa-- = NULL;
 		}
 		if (!*paa)
 			idp->layers = 0;
+		if (to_free)
+			free_layer(to_free);
 	} else
 		idr_remove_warning(id);
 }
@@ -368,22 +384,34 @@ static void sub_remove(struct idr *idp, int shift, int id)
 void idr_remove(struct idr *idp, int id)
 {
 	struct idr_layer *p;
+	struct idr_layer *to_free;
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
 
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
 	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
-	    idp->top->ary[0]) {  // We can drop a layer
-
+	    idp->top->ary[0]) {
+		/*
+		 * Single child at leftmost slot: we can shrink the tree.
+		 * This level is not needed anymore since when layers are
+		 * inserted, they are inserted at the top of the existing
+		 * tree.
+		 */
+		to_free = idp->top;
 		p = idp->top->ary[0];
-		idp->top->bitmap = idp->top->count = 0;
-		free_layer(idp, idp->top);
-		idp->top = p;
+		rcu_assign_pointer(idp->top, p);
 		--idp->layers;
+		to_free->bitmap = to_free->count = 0;
+		free_layer(to_free);
 	}
 	while (idp->id_free_cnt >= IDR_FREE_MAX) {
-		p = alloc_layer(idp);
+		p = get_from_free_list(idp);
+		/*
+		 * Note: we don't call the rcu callback here, since the only
+		 * layers that fall into the freelist are those that have been
+		 * preallocated.
+		 */
 		kmem_cache_free(idr_layer_cache, p);
 	}
 	return;
@@ -424,15 +452,13 @@ void idr_remove_all(struct idr *idp)
 
 		id += 1 << n;
 		while (n < fls(id)) {
-			if (p) {
-				memset(p, 0, sizeof *p);
-				free_layer(idp, p);
-			}
+			if (p)
+				free_layer(p);
 			n += IDR_BITS;
 			p = *--paa;
 		}
 	}
-	idp->top = NULL;
+	rcu_assign_pointer(idp->top, NULL);
 	idp->layers = 0;
 }
 EXPORT_SYMBOL(idr_remove_all);
@@ -444,7 +470,7 @@ EXPORT_SYMBOL(idr_remove_all);
 void idr_destroy(struct idr *idp)
 {
 	while (idp->id_free_cnt) {
-		struct idr_layer *p = alloc_layer(idp);
+		struct idr_layer *p = get_from_free_list(idp);
 		kmem_cache_free(idr_layer_cache, p);
 	}
 }
@@ -459,7 +485,8 @@ EXPORT_SYMBOL(idr_destroy);
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
- * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
+ * This function can be called under rcu_read_lock(), given that the leaf
+ * pointers lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
@@ -467,7 +494,7 @@ void *idr_find(struct idr *idp, int id)
 	struct idr_layer *p;
 
 	n = idp->layers * IDR_BITS;
-	p = idp->top;
+	p = rcu_dereference(idp->top);
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
@@ -477,7 +504,7 @@ void *idr_find(struct idr *idp, int id)
 
 	while (n > 0 && p) {
 		n -= IDR_BITS;
-		p = p->ary[(id >> n) & IDR_MASK];
+		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
 }
@@ -510,7 +537,7 @@ int idr_for_each(struct idr *idp,
 	struct idr_layer **paa = &pa[0];
 
 	n = idp->layers * IDR_BITS;
-	p = idp->top;
+	p = rcu_dereference(idp->top);
 	max = 1 << n;
 
 	id = 0;
@@ -518,7 +545,7 @@ int idr_for_each(struct idr *idp,
 		while (n > 0 && p) {
 			n -= IDR_BITS;
 			*paa++ = p;
-			p = p->ary[(id >> n) & IDR_MASK];
+			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 		}
 
 		if (p) {
@@ -548,7 +575,7 @@ EXPORT_SYMBOL(idr_for_each);
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
- * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
+ * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
@@ -574,13 +601,13 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 		return ERR_PTR(-ENOENT);
 
 	old_p = p->ary[n];
-	p->ary[n] = ptr;
+	rcu_assign_pointer(p->ary[n], ptr);
 
 	return old_p;
 }
 EXPORT_SYMBOL(idr_replace);
 
-static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
+static void idr_cache_ctor(void *idr_layer)
 {
 	memset(idr_layer, 0, sizeof(struct idr_layer));
 }
@@ -694,12 +721,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 restart:
 	/* get vacant slot */
 	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
-	if (t < 0) {
-		if (t == -1)
-			return -EAGAIN;
-		else /* will be -3 */
-			return -ENOSPC;
-	}
+	if (t < 0)
+		return _idr_rc_to_errno(t);
 
 	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
 		return -ENOSPC;
@@ -720,7 +743,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 			return -EAGAIN;
 
 		memset(bitmap, 0, sizeof(struct ida_bitmap));
-		pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
+		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
+				(void *)bitmap);
 		pa[0]->count++;
 	}
 
@@ -749,7 +773,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 	 * allocation.
 	 */
 	if (ida->idr.id_free_cnt || ida->free_bitmap) {
-		struct idr_layer *p = alloc_layer(&ida->idr);
+		struct idr_layer *p = get_from_free_list(&ida->idr);
 		if (p)
 			kmem_cache_free(idr_layer_cache, p);
 	}
diff --git a/lib/inflate.c b/lib/inflate.c
index 9762294be062..1a8e8a978128 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -230,6 +230,45 @@ STATIC const ush mask_bits[] = {
 #define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
 #define DUMPBITS(n) {b>>=(n);k-=(n);}
 
+#ifndef NO_INFLATE_MALLOC
+/* A trivial malloc implementation, adapted from
+ *  malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ */
+
+static unsigned long malloc_ptr;
+static int malloc_count;
+
+static void *malloc(int size)
+{
+	void *p;
+
+	if (size < 0)
+		error("Malloc error");
+	if (!malloc_ptr)
+		malloc_ptr = free_mem_ptr;
+
+	malloc_ptr = (malloc_ptr + 3) & ~3;	/* Align */
+
+	p = (void *)malloc_ptr;
+	malloc_ptr += size;
+
+	if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
+		error("Out of memory");
+
+	malloc_count++;
+	return p;
+}
+
+static void free(void *where)
+{
+	malloc_count--;
+	if (!malloc_count)
+		malloc_ptr = free_mem_ptr;
+}
+#else
+#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define free(a) kfree(a)
+#endif
 
 /*
    Huffman code decoding is performed using a multi-level table lookup.
@@ -1045,7 +1084,6 @@ STATIC int INIT inflate(void)
 	int e;		/* last block flag */
 	int r;		/* result code */
 	unsigned h;	/* maximum struct huft's malloc'ed */
-	void *ptr;
 
 	/* initialize window, bit buffer */
 	wp = 0;
@@ -1057,12 +1095,12 @@ STATIC int INIT inflate(void)
 	h = 0;
 	do {
 		hufts = 0;
-		gzip_mark(&ptr);
-		if ((r = inflate_block(&e)) != 0) {
-			gzip_release(&ptr);
-			return r;
-		}
-		gzip_release(&ptr);
+#ifdef ARCH_HAS_DECOMP_WDOG
+		arch_decomp_wdog();
+#endif
+		r = inflate_block(&e);
+		if (r)
+			return r;
 		if (hufts > h)
 			h = hufts;
 	} while (!e);
diff --git a/lib/iomap.c b/lib/iomap.c
index 37a3ea4cac9f..d32229385151 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -40,8 +40,7 @@ static void bad_io_access(unsigned long port, const char *access)
 	static int count = 10;
 	if (count) {
 		count--;
-		printk(KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
-		WARN_ON(1);
+		WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
 	}
 }
 
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index a3b8d4c3f77a..889ddce2021e 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -80,3 +80,11 @@ void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
 	}
 }
 EXPORT_SYMBOL(iommu_area_free);
+
+unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
+{
+	unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);
+
+	return size >> PAGE_SHIFT;
+}
+EXPORT_SYMBOL(iommu_num_pages);
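
The helper simply rounds the (in-page offset + length) span up to whole pages: with 4 KiB pages, a 6000-byte buffer starting 2048 bytes into a page needs roundup(2048 + 6000, 4096) >> PAGE_SHIFT = 2 IOMMU pages. A sketch (hypothetical caller, not part of the patch):

	/* Hypothetical caller sizing an IOMMU mapping for a DMA buffer. */
	static unsigned long dma_span_pages(unsigned long dma_addr, size_t len)
	{
		return iommu_num_pages(dma_addr, len);
	}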
diff --git a/lib/kobject.c b/lib/kobject.c
index 718e5101c263..bd732ffebc85 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -164,9 +164,8 @@ static int kobject_add_internal(struct kobject *kobj)
 		return -ENOENT;
 
 	if (!kobj->name || !kobj->name[0]) {
-		pr_debug("kobject: (%p): attempted to be registered with empty "
+		WARN(1, "kobject: (%p): attempted to be registered with empty "
 			 "name!\n", kobj);
-		WARN_ON(1);
 		return -EINVAL;
 	}
 
@@ -216,13 +215,19 @@ static int kobject_add_internal(struct kobject *kobj)
 static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
 				  va_list vargs)
 {
-	/* Free the old name, if necessary. */
-	kfree(kobj->name);
+	const char *old_name = kobj->name;
+	char *s;
 
 	kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
 	if (!kobj->name)
 		return -ENOMEM;
 
+	/* ewww... some of these buggers have '/' in the name ... */
+	s = strchr(kobj->name, '/');
+	if (s)
+		s[0] = '!';
+
+	kfree(old_name);
 	return 0;
 }
 
@@ -439,6 +444,7 @@ out:
 
 	return error;
 }
+EXPORT_SYMBOL_GPL(kobject_rename);
 
 /**
  * kobject_move - move object to another parent
@@ -576,12 +582,10 @@ static void kobject_release(struct kref *kref)
 void kobject_put(struct kobject *kobj)
 {
 	if (kobj) {
-		if (!kobj->state_initialized) {
-			printk(KERN_WARNING "kobject: '%s' (%p): is not "
+		if (!kobj->state_initialized)
+			WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
 			       "initialized, yet kobject_put() is being "
 			       "called.\n", kobject_name(kobj), kobj);
-			WARN_ON(1);
-		}
 		kref_put(&kobj->kref, kobject_release);
 	}
 }
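
One user-visible effect of the kobject_set_name_vargs() change is that a '/' in a requested name no longer produces a broken sysfs path; it is rewritten to '!'. A sketch (hypothetical device name, not part of the patch):

	/* Requesting "net/0" now yields a kobject (and sysfs entry) named "net!0". */
	static int name_example(struct kobject *kobj)
	{
		return kobject_set_name(kobj, "net/%d", 0);
	}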
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2fa545a63160..3f914725bda8 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -245,7 +245,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 		if (retval)
 			goto exit;
 
-		call_usermodehelper(argv[0], argv, env->envp, UMH_WAIT_EXEC);
+		retval = call_usermodehelper(argv[0], argv,
+					     env->envp, UMH_WAIT_EXEC);
 	}
 
 exit:
@@ -284,8 +285,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
 	int len;
 
 	if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
-		printk(KERN_ERR "add_uevent_var: too many keys\n");
-		WARN_ON(1);
+		WARN(1, KERN_ERR "add_uevent_var: too many keys\n");
 		return -ENOMEM;
 	}
 
@@ -296,8 +296,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
 	va_end(args);
 
 	if (len >= (sizeof(env->buf) - env->buflen)) {
-		printk(KERN_ERR "add_uevent_var: buffer size too small\n");
-		WARN_ON(1);
+		WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n");
 		return -ENOMEM;
 	}
 
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 4350ba9655bd..1a39f4e3ae1f 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -20,18 +20,14 @@ void __list_add(struct list_head *new,
 			  struct list_head *prev,
 			  struct list_head *next)
 {
-	if (unlikely(next->prev != prev)) {
-		printk(KERN_ERR "list_add corruption. next->prev should be "
+	WARN(next->prev != prev,
+		"list_add corruption. next->prev should be "
 			"prev (%p), but was %p. (next=%p).\n",
 			prev, next->prev, next);
-		BUG();
-	}
-	if (unlikely(prev->next != next)) {
-		printk(KERN_ERR "list_add corruption. prev->next should be "
-			"next (%p), but was %p. (prev=%p).\n",
-			next, prev->next, prev);
-		BUG();
-	}
+	WARN(prev->next != next,
+		"list_add corruption. prev->next should be "
+		"next (%p), but was %p. (prev=%p).\n",
+		next, prev->next, prev);
 	next->prev = new;
 	new->next = next;
 	new->prev = prev;
@@ -40,20 +36,6 @@ void __list_add(struct list_head *new,
 EXPORT_SYMBOL(__list_add);
 
 /**
- * list_add - add a new entry
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- */
-void list_add(struct list_head *new, struct list_head *head)
-{
-	__list_add(new, head, head->next);
-}
-EXPORT_SYMBOL(list_add);
-
-/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry is
@@ -61,16 +43,12 @@ EXPORT_SYMBOL(list_add);
 */
void list_del(struct list_head *entry)
{
-	if (unlikely(entry->prev->next != entry)) {
-		printk(KERN_ERR "list_del corruption. prev->next should be %p, "
+	WARN(entry->prev->next != entry,
+		"list_del corruption. prev->next should be %p, "
 			"but was %p\n", entry, entry->prev->next);
-		BUG();
-	}
-	if (unlikely(entry->next->prev != entry)) {
-		printk(KERN_ERR "list_del corruption. next->prev should be %p, "
-			"but was %p\n", entry, entry->next->prev);
-		BUG();
-	}
+	WARN(entry->next->prev != entry,
+		"list_del corruption. next->prev should be %p, "
+		"but was %p\n", entry, entry->next->prev);
 	__list_del(entry->prev, entry->next);
 	entry->next = LIST_POISON1;
 	entry->prev = LIST_POISON2;
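
The pattern in both hunks is the same: an open-coded "if (bad) { printk(); BUG(); }" becomes a single WARN(condition, fmt, ...), which prints the message plus a backtrace and taints the kernel, but lets the machine keep running instead of halting. A generic sketch of that conversion (condition and message are hypothetical, not from this file):

	#include <linux/kernel.h>

	static void check_node(int broken, void *obj)
	{
		/* One call replaces the old printk() + BUG() pair; it warns
		 * (message, backtrace, TAINT_WARN) but does not halt. */
		WARN(broken, "corruption at %p\n", obj);
	}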
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c
index 77f0f9b775a9..5dc6b29c1575 100644
--- a/lib/lzo/lzo1x_decompress.c
+++ b/lib/lzo/lzo1x_decompress.c
@@ -138,8 +138,7 @@ match:
 			t += 31 + *ip++;
 		}
 		m_pos = op - 1;
-		m_pos -= le16_to_cpu(get_unaligned(
-			(const unsigned short *)ip)) >> 2;
+		m_pos -= get_unaligned_le16(ip) >> 2;
 		ip += 2;
 	} else if (t >= 16) {
 		m_pos = op;
@@ -157,8 +156,7 @@ match:
 			}
 			t += 7 + *ip++;
 		}
-		m_pos -= le16_to_cpu(get_unaligned(
-			(const unsigned short *)ip)) >> 2;
+		m_pos -= get_unaligned_le16(ip) >> 2;
 		ip += 2;
 		if (m_pos == op)
 			goto eof_found;
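
get_unaligned_le16() bundles the unaligned load and the little-endian conversion that were previously spelled out as le16_to_cpu(get_unaligned(...)). A sketch (hypothetical helper, not part of the patch):

	#include <linux/types.h>
	#include <asm/unaligned.h>

	/* Read a 16-bit little-endian field from a byte stream at an
	 * arbitrary, possibly unaligned, position. */
	static u16 read_le16_field(const unsigned char *ip)
	{
		return get_unaligned_le16(ip);
	}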
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 119174494cb5..4a8ba4bf5f6f 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
{
	s64 ret;
	int cpu;
@@ -62,7 +62,12 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
+		if (set)
+			*pcount = 0;
 	}
+	if (set)
+		fbc->count = ret;
+
 	spin_unlock(&fbc->lock);
 	return ret;
 }
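
The new 'set' argument lets the summation also fold the per-cpu deltas back into fbc->count (zeroing them), so a later cheap read starts from an exact value; passing 0 keeps the old read-only behaviour. A sketch (hypothetical caller, not part of the patch; the header wrappers built on top of this are not shown in this hunk):

	#include <linux/percpu_counter.h>

	static s64 counter_read_exact(struct percpu_counter *c)
	{
		return __percpu_counter_sum(c, 0);	/* sum without resetting */
	}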
diff --git a/lib/plist.c b/lib/plist.c
index 3074a02272f3..d6c64a824e1d 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -31,12 +31,13 @@
 static void plist_check_prev_next(struct list_head *t, struct list_head *p,
 					struct list_head *n)
 {
-	if (n->prev != p || p->next != n) {
-		printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev);
-		printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev);
-		printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev);
-		WARN_ON(1);
-	}
+	WARN(n->prev != p || p->next != n,
+			"top: %p, n: %p, p: %p\n"
+			"prev: %p, n: %p, p: %p\n"
+			"next: %p, n: %p, p: %p\n",
+			 t, t->next, t->prev,
+			 p, p->next, p->prev,
+			 n, n->next, n->prev);
 }
 
 static void plist_check_list(struct list_head *top)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 169a2f8dabcc..be86b32bc874 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1,7 +1,7 @@
 /*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
- * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
+ * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 *
 * This program is free software; you can redistribute it and/or
@@ -359,18 +359,17 @@ EXPORT_SYMBOL(radix_tree_insert);
 *	Returns:  the slot corresponding to the position @index in the
 *	radix tree @root. This is useful for update-if-exists operations.
 *
- *	This function cannot be called under rcu_read_lock, it must be
- *	excluded from writers, as must the returned slot for subsequent
- *	use by radix_tree_deref_slot() and radix_tree_replace slot.
- *	Caller must hold tree write locked across slot lookup and
- *	replace.
+ *	This function can be called under rcu_read_lock iff the slot is not
+ *	modified by radix_tree_replace_slot, otherwise it must be called
+ *	exclusive from other writers. Any dereference of the slot must be done
+ *	using radix_tree_deref_slot.
 */
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
	unsigned int height, shift;
	struct radix_tree_node *node, **slot;

-	node = root->rnode;
+	node = rcu_dereference(root->rnode);
	if (node == NULL)
		return NULL;

@@ -390,7 +389,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
 	do {
 		slot = (struct radix_tree_node **)
 			(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
-		node = *slot;
+		node = rcu_dereference(*slot);
 		if (node == NULL)
 			return NULL;
 
@@ -667,7 +666,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 EXPORT_SYMBOL(radix_tree_next_hole);
 
 static unsigned int
-__lookup(struct radix_tree_node *slot, void **results, unsigned long index,
+__lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
 	unsigned int max_items, unsigned long *next_index)
 {
 	unsigned int nr_found = 0;
@@ -701,11 +700,9 @@ __lookup(struct radix_tree_node *slot, void **results, unsigned long index,
 
 	/* Bottom level: grab some items */
 	for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
-		struct radix_tree_node *node;
 		index++;
-		node = slot->slots[i];
-		if (node) {
-			results[nr_found++] = rcu_dereference(node);
+		if (slot->slots[i]) {
+			results[nr_found++] = &(slot->slots[i]);
 			if (nr_found == max_items)
 				goto out;
 		}
@@ -759,13 +756,22 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 
 	ret = 0;
 	while (ret < max_items) {
-		unsigned int nr_found;
+		unsigned int nr_found, slots_found, i;
 		unsigned long next_index;	/* Index of next search */
 
 		if (cur_index > max_index)
 			break;
-		nr_found = __lookup(node, results + ret, cur_index,
+		slots_found = __lookup(node, (void ***)results + ret, cur_index,
 					max_items - ret, &next_index);
+		nr_found = 0;
+		for (i = 0; i < slots_found; i++) {
+			struct radix_tree_node *slot;
+			slot = *(((void ***)results)[ret + i]);
+			if (!slot)
+				continue;
+			results[ret + nr_found] = rcu_dereference(slot);
+			nr_found++;
+		}
 		ret += nr_found;
 		if (next_index == 0)
 			break;
@@ -776,12 +782,71 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 }
 EXPORT_SYMBOL(radix_tree_gang_lookup);
 
+/**
+ *	radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
+ *	@root:		radix tree root
+ *	@results:	where the results of the lookup are placed
+ *	@first_index:	start the lookup from this key
+ *	@max_items:	place up to this many items at *results
+ *
+ *	Performs an index-ascending scan of the tree for present items.  Places
+ *	their slots at *@results and returns the number of items which were
+ *	placed at *@results.
+ *
+ *	The implementation is naive.
+ *
+ *	Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
+ *	be dereferenced with radix_tree_deref_slot, and if using only RCU
+ *	protection, radix_tree_deref_slot may fail requiring a retry.
+ */
+unsigned int
+radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
+			unsigned long first_index, unsigned int max_items)
+{
+	unsigned long max_index;
+	struct radix_tree_node *node;
+	unsigned long cur_index = first_index;
+	unsigned int ret;
+
+	node = rcu_dereference(root->rnode);
+	if (!node)
+		return 0;
+
+	if (!radix_tree_is_indirect_ptr(node)) {
+		if (first_index > 0)
+			return 0;
+		results[0] = (void **)&root->rnode;
+		return 1;
+	}
+	node = radix_tree_indirect_to_ptr(node);
+
+	max_index = radix_tree_maxindex(node->height);
+
+	ret = 0;
+	while (ret < max_items) {
+		unsigned int slots_found;
+		unsigned long next_index;	/* Index of next search */
+
+		if (cur_index > max_index)
+			break;
+		slots_found = __lookup(node, results + ret, cur_index,
+					max_items - ret, &next_index);
+		ret += slots_found;
+		if (next_index == 0)
+			break;
+		cur_index = next_index;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
+
 /*
 * FIXME: the two tag_get()s here should use find_next_bit() instead of
 * open-coding the search.
 */
static unsigned int
-__lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
+__lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index,
	unsigned int max_items, unsigned long *next_index, unsigned int tag)
{
	unsigned int nr_found = 0;
@@ -811,11 +876,9 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
 		unsigned long j = index & RADIX_TREE_MAP_MASK;
 
 		for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
-			struct radix_tree_node *node;
 			index++;
 			if (!tag_get(slot, tag, j))
 				continue;
-			node = slot->slots[j];
 			/*
 			 * Even though the tag was found set, we need to
 			 * recheck that we have a non-NULL node, because
@@ -826,9 +889,8 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
826 * lookup ->slots[x] without a lock (ie. can't 889 * lookup ->slots[x] without a lock (ie. can't
827 * rely on its value remaining the same). 890 * rely on its value remaining the same).
828 */ 891 */
829 if (node) { 892 if (slot->slots[j]) {
830 node = rcu_dereference(node); 893 results[nr_found++] = &(slot->slots[j]);
831 results[nr_found++] = node;
832 if (nr_found == max_items) 894 if (nr_found == max_items)
833 goto out; 895 goto out;
834 } 896 }
@@ -887,13 +949,22 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
887 949
888 ret = 0; 950 ret = 0;
889 while (ret < max_items) { 951 while (ret < max_items) {
890 unsigned int nr_found; 952 unsigned int nr_found, slots_found, i;
891 unsigned long next_index; /* Index of next search */ 953 unsigned long next_index; /* Index of next search */
892 954
893 if (cur_index > max_index) 955 if (cur_index > max_index)
894 break; 956 break;
895 nr_found = __lookup_tag(node, results + ret, cur_index, 957 slots_found = __lookup_tag(node, (void ***)results + ret,
896 max_items - ret, &next_index, tag); 958 cur_index, max_items - ret, &next_index, tag);
959 nr_found = 0;
960 for (i = 0; i < slots_found; i++) {
961 struct radix_tree_node *slot;
962 slot = *(((void ***)results)[ret + i]);
963 if (!slot)
964 continue;
965 results[ret + nr_found] = rcu_dereference(slot);
966 nr_found++;
967 }
897 ret += nr_found; 968 ret += nr_found;
898 if (next_index == 0) 969 if (next_index == 0)
899 break; 970 break;
@@ -905,6 +976,67 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
905EXPORT_SYMBOL(radix_tree_gang_lookup_tag); 976EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
906 977
907/** 978/**
979 * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
980 * radix tree based on a tag
981 * @root: radix tree root
982 * @results: where the results of the lookup are placed
983 * @first_index: start the lookup from this key
984 * @max_items: place up to this many items at *results
985 * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
986 *
987 * Performs an index-ascending scan of the tree for present items which
988 * have the tag indexed by @tag set. Places the slots at *@results and
989 * returns the number of slots which were placed at *@results.
990 */
991unsigned int
992radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
993 unsigned long first_index, unsigned int max_items,
994 unsigned int tag)
995{
996 struct radix_tree_node *node;
997 unsigned long max_index;
998 unsigned long cur_index = first_index;
999 unsigned int ret;
1000
1001 /* check the root's tag bit */
1002 if (!root_tag_get(root, tag))
1003 return 0;
1004
1005 node = rcu_dereference(root->rnode);
1006 if (!node)
1007 return 0;
1008
1009 if (!radix_tree_is_indirect_ptr(node)) {
1010 if (first_index > 0)
1011 return 0;
1012 results[0] = (void **)&root->rnode;
1013 return 1;
1014 }
1015 node = radix_tree_indirect_to_ptr(node);
1016
1017 max_index = radix_tree_maxindex(node->height);
1018
1019 ret = 0;
1020 while (ret < max_items) {
1021 unsigned int slots_found;
1022 unsigned long next_index; /* Index of next search */
1023
1024 if (cur_index > max_index)
1025 break;
1026 slots_found = __lookup_tag(node, results + ret,
1027 cur_index, max_items - ret, &next_index, tag);
1028 ret += slots_found;
1029 if (next_index == 0)
1030 break;
1031 cur_index = next_index;
1032 }
1033
1034 return ret;
1035}
1036EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
1037
1038
1039/**
908 * radix_tree_shrink - shrink height of a radix tree to minimal 1040 * radix_tree_shrink - shrink height of a radix tree to minimal
909 * @root radix tree root 1041 * @root radix tree root
910 */ 1042 */
@@ -1051,7 +1183,7 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
1051EXPORT_SYMBOL(radix_tree_tagged); 1183EXPORT_SYMBOL(radix_tree_tagged);
1052 1184
1053static void 1185static void
1054radix_tree_node_ctor(struct kmem_cache *cachep, void *node) 1186radix_tree_node_ctor(void *node)
1055{ 1187{
1056 memset(node, 0, sizeof(struct radix_tree_node)); 1188 memset(node, 0, sizeof(struct radix_tree_node));
1057} 1189}
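The slot variants above return the addresses of occupied slots rather than the items themselves, so callers can later replace an entry with radix_tree_replace_slot() or dereference it with radix_tree_deref_slot() under RCU; radix_tree_gang_lookup() itself becomes a two-pass wrapper that collects slot addresses with __lookup() and then dereferences each one, skipping slots that went NULL in the meantime. Below is a minimal userspace sketch of that two-pass shape over a single illustrative leaf; the names (struct leaf, gang_lookup_slot, gang_lookup) are made up for the sketch and are not kernel APIs.

/* Two-pass gang lookup: pass 1 records slot addresses, pass 2
 * dereferences them and drops entries that have become NULL. */
#include <stdio.h>

#define LEAF_SIZE 8

struct leaf {
	void *slots[LEAF_SIZE];
};

/* First pass: record pointers to the occupied slots. */
static unsigned int gang_lookup_slot(struct leaf *leaf, void ***results,
				     unsigned int max_items)
{
	unsigned int i, nr_found = 0;

	for (i = 0; i < LEAF_SIZE && nr_found < max_items; i++)
		if (leaf->slots[i])
			results[nr_found++] = &leaf->slots[i];
	return nr_found;
}

/* Second pass: dereference the slots, skipping any that vanished. */
static unsigned int gang_lookup(struct leaf *leaf, void **results,
				unsigned int max_items)
{
	void **slots[LEAF_SIZE];
	unsigned int i, slots_found, nr_found = 0;

	slots_found = gang_lookup_slot(leaf, slots, max_items);
	for (i = 0; i < slots_found; i++) {
		void *item = *slots[i];

		if (item)
			results[nr_found++] = item;
	}
	return nr_found;
}

int main(void)
{
	struct leaf leaf = { .slots = { "a", NULL, "b", NULL, "c" } };
	void *results[LEAF_SIZE];
	unsigned int i, n = gang_lookup(&leaf, results, LEAF_SIZE);

	for (i = 0; i < n; i++)
		printf("%s\n", (char *)results[i]);
	return 0;
}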
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 485e3040dcd4..26187edcc7ea 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -3,6 +3,9 @@
3 * 3 *
4 * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com> 4 * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
5 * 5 *
6 * 2008-05-01 rewrite the function and use a ratelimit_state data struct as
7 * parameter. Now every user can use their own standalone ratelimit_state.
8 *
6 * This file is released under the GPLv2. 9 * This file is released under the GPLv2.
7 * 10 *
8 */ 11 */
@@ -11,41 +14,44 @@
11#include <linux/jiffies.h> 14#include <linux/jiffies.h>
12#include <linux/module.h> 15#include <linux/module.h>
13 16
17static DEFINE_SPINLOCK(ratelimit_lock);
18
14/* 19/*
15 * __ratelimit - rate limiting 20 * __ratelimit - rate limiting
16 * @ratelimit_jiffies: minimum time in jiffies between two callbacks 21 * @rs: ratelimit_state data
17 * @ratelimit_burst: number of callbacks we do before ratelimiting
18 * 22 *
19 * This enforces a rate limit: not more than @ratelimit_burst callbacks 23 * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks
20 * in every ratelimit_jiffies 24 * in every @rs->ratelimit_jiffies
21 */ 25 */
22int __ratelimit(int ratelimit_jiffies, int ratelimit_burst) 26int __ratelimit(struct ratelimit_state *rs)
23{ 27{
24 static DEFINE_SPINLOCK(ratelimit_lock);
25 static unsigned toks = 10 * 5 * HZ;
26 static unsigned long last_msg;
27 static int missed;
28 unsigned long flags; 28 unsigned long flags;
29 unsigned long now = jiffies;
30 29
31 spin_lock_irqsave(&ratelimit_lock, flags); 30 if (!rs->interval)
32 toks += now - last_msg;
33 last_msg = now;
34 if (toks > (ratelimit_burst * ratelimit_jiffies))
35 toks = ratelimit_burst * ratelimit_jiffies;
36 if (toks >= ratelimit_jiffies) {
37 int lost = missed;
38
39 missed = 0;
40 toks -= ratelimit_jiffies;
41 spin_unlock_irqrestore(&ratelimit_lock, flags);
42 if (lost)
43 printk(KERN_WARNING "%s: %d messages suppressed\n",
44 __func__, lost);
45 return 1; 31 return 1;
32
33 spin_lock_irqsave(&ratelimit_lock, flags);
34 if (!rs->begin)
35 rs->begin = jiffies;
36
37 if (time_is_before_jiffies(rs->begin + rs->interval)) {
38 if (rs->missed)
39 printk(KERN_WARNING "%s: %d callbacks suppressed\n",
40 __func__, rs->missed);
41 rs->begin = 0;
42 rs->printed = 0;
43 rs->missed = 0;
46 } 44 }
47 missed++; 45 if (rs->burst && rs->burst > rs->printed)
46 goto print;
47
48 rs->missed++;
48 spin_unlock_irqrestore(&ratelimit_lock, flags); 49 spin_unlock_irqrestore(&ratelimit_lock, flags);
49 return 0; 50 return 0;
51
52print:
53 rs->printed++;
54 spin_unlock_irqrestore(&ratelimit_lock, flags);
55 return 1;
50} 56}
51EXPORT_SYMBOL(__ratelimit); 57EXPORT_SYMBOL(__ratelimit);
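The reworked __ratelimit() keeps all accounting in a caller-supplied ratelimit_state: up to rs->burst callbacks are allowed per rs->interval window, everything beyond that is counted in rs->missed, and the missed total is reported when the next window opens. Below is a standalone sketch of the same windowed-burst accounting, using wall-clock seconds instead of jiffies and no locking; the struct and function names are illustrative, not the kernel's.

#include <stdio.h>
#include <time.h>

struct rl_state {
	int interval;		/* window length in seconds, 0 = no limiting */
	int burst;		/* events allowed per window */
	time_t begin;		/* start of the current window */
	int printed;		/* events allowed so far in this window */
	int missed;		/* events suppressed in this window */
};

/* Returns 1 if the event is allowed, 0 if it should be suppressed. */
static int rl_allow(struct rl_state *rs)
{
	time_t now = time(NULL);

	if (!rs->interval)
		return 1;
	if (!rs->begin)
		rs->begin = now;

	/* window expired: report what was dropped and start over */
	if (now - rs->begin >= rs->interval) {
		if (rs->missed)
			fprintf(stderr, "%d callbacks suppressed\n", rs->missed);
		rs->begin = 0;
		rs->printed = 0;
		rs->missed = 0;
	}
	if (rs->burst > rs->printed) {
		rs->printed++;
		return 1;
	}
	rs->missed++;
	return 0;
}

int main(void)
{
	struct rl_state rs = { .interval = 5, .burst = 3 };
	int i;

	for (i = 0; i < 10; i++)
		printf("event %d: %s\n", i, rl_allow(&rs) ? "allowed" : "suppressed");
	return 0;
}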
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b80c21100d78..876ba6d5b670 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -295,6 +295,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
295EXPORT_SYMBOL(sg_alloc_table); 295EXPORT_SYMBOL(sg_alloc_table);
296 296
297/** 297/**
298 * sg_miter_start - start mapping iteration over a sg list
299 * @miter: sg mapping iter to be started
300 * @sgl: sg list to iterate over
301 * @nents: number of sg entries
302 *
303 * Description:
304 * Starts mapping iterator @miter.
305 *
306 * Context:
307 * Don't care.
308 */
309void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
310 unsigned int nents, unsigned int flags)
311{
312 memset(miter, 0, sizeof(struct sg_mapping_iter));
313
314 miter->__sg = sgl;
315 miter->__nents = nents;
316 miter->__offset = 0;
317 miter->__flags = flags;
318}
319EXPORT_SYMBOL(sg_miter_start);
320
321/**
322 * sg_miter_next - proceed mapping iterator to the next mapping
323 * @miter: sg mapping iter to proceed
324 *
325 * Description:
326 * Proceeds @miter@ to the next mapping. @miter@ should have been
327 * started using sg_miter_start(). On successful return,
328 * @miter@->page, @miter@->addr and @miter@->length point to the
329 * current mapping.
330 *
331 * Context:
332 * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
333 * @miter@ is stopped. May sleep if !SG_MITER_ATOMIC.
334 *
335 * Returns:
336 * true if @miter contains the next mapping. false if end of sg
337 * list is reached.
338 */
339bool sg_miter_next(struct sg_mapping_iter *miter)
340{
341 unsigned int off, len;
342
343 /* check for end and drop resources from the last iteration */
344 if (!miter->__nents)
345 return false;
346
347 sg_miter_stop(miter);
348
349 /* get to the next sg if necessary. __offset is adjusted by stop */
350 if (miter->__offset == miter->__sg->length && --miter->__nents) {
351 miter->__sg = sg_next(miter->__sg);
352 miter->__offset = 0;
353 }
354
355 /* map the next page */
356 off = miter->__sg->offset + miter->__offset;
357 len = miter->__sg->length - miter->__offset;
358
359 miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
360 off &= ~PAGE_MASK;
361 miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
362 miter->consumed = miter->length;
363
364 if (miter->__flags & SG_MITER_ATOMIC)
365 miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
366 else
367 miter->addr = kmap(miter->page) + off;
368
369 return true;
370}
371EXPORT_SYMBOL(sg_miter_next);
372
373/**
374 * sg_miter_stop - stop mapping iteration
375 * @miter: sg mapping iter to be stopped
376 *
377 * Description:
378 * Stops mapping iterator @miter. @miter should have been
379 * started using sg_miter_start(). A stopped iteration can be
380 * resumed by calling sg_miter_next() on it. This is useful when
381 * resources (kmap) need to be released during iteration.
382 *
383 * Context:
384 * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
385 */
386void sg_miter_stop(struct sg_mapping_iter *miter)
387{
388 WARN_ON(miter->consumed > miter->length);
389
390 /* drop resources from the last iteration */
391 if (miter->addr) {
392 miter->__offset += miter->consumed;
393
394 if (miter->__flags & SG_MITER_ATOMIC) {
395 WARN_ON(!irqs_disabled());
396 kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
397 } else
398 kunmap(miter->addr);
399
400 miter->page = NULL;
401 miter->addr = NULL;
402 miter->length = 0;
403 miter->consumed = 0;
404 }
405}
406EXPORT_SYMBOL(sg_miter_stop);
407
408/**
298 * sg_copy_buffer - Copy data between a linear buffer and an SG list 409 * sg_copy_buffer - Copy data between a linear buffer and an SG list
299 * @sgl: The SG list 410 * @sgl: The SG list
300 * @nents: Number of SG entries 411 * @nents: Number of SG entries
@@ -309,56 +420,29 @@ EXPORT_SYMBOL(sg_alloc_table);
309static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, 420static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
310 void *buf, size_t buflen, int to_buffer) 421 void *buf, size_t buflen, int to_buffer)
311{ 422{
312 struct scatterlist *sg; 423 unsigned int offset = 0;
313 size_t buf_off = 0; 424 struct sg_mapping_iter miter;
314 int i; 425
315 426 sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
316 WARN_ON(!irqs_disabled()); 427
317 428 while (sg_miter_next(&miter) && offset < buflen) {
318 for_each_sg(sgl, sg, nents, i) { 429 unsigned int len;
319 struct page *page; 430
320 int n = 0; 431 len = min(miter.length, buflen - offset);
321 unsigned int sg_off = sg->offset; 432
322 unsigned int sg_copy = sg->length; 433 if (to_buffer)
323 434 memcpy(buf + offset, miter.addr, len);
324 if (sg_copy > buflen) 435 else {
325 sg_copy = buflen; 436 memcpy(miter.addr, buf + offset, len);
326 buflen -= sg_copy; 437 flush_kernel_dcache_page(miter.page);
327
328 while (sg_copy > 0) {
329 unsigned int page_copy;
330 void *p;
331
332 page_copy = PAGE_SIZE - sg_off;
333 if (page_copy > sg_copy)
334 page_copy = sg_copy;
335
336 page = nth_page(sg_page(sg), n);
337 p = kmap_atomic(page, KM_BIO_SRC_IRQ);
338
339 if (to_buffer)
340 memcpy(buf + buf_off, p + sg_off, page_copy);
341 else {
342 memcpy(p + sg_off, buf + buf_off, page_copy);
343 flush_kernel_dcache_page(page);
344 }
345
346 kunmap_atomic(p, KM_BIO_SRC_IRQ);
347
348 buf_off += page_copy;
349 sg_off += page_copy;
350 if (sg_off == PAGE_SIZE) {
351 sg_off = 0;
352 n++;
353 }
354 sg_copy -= page_copy;
355 } 438 }
356 439
357 if (!buflen) 440 offset += len;
358 break;
359 } 441 }
360 442
361 return buf_off; 443 sg_miter_stop(&miter);
444
445 return offset;
362} 446}
363 447
364/** 448/**
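sg_copy_buffer() above is rebuilt on top of the new mapping iterator: start it once, take one mapped chunk per sg_miter_next() call, and stop it to release the kmap. The sketch below shows the same start/next/stop shape over plain userspace buffers; there is nothing to map, the chunk size stands in for PAGE_SIZE, and the 'consumed' bookkeeping that sg_miter_stop() normally performs is folded into next() for brevity. All names here are illustrative, not kernel APIs.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

#define CHUNK 4		/* stand-in for PAGE_SIZE */

struct buf {
	const char *data;
	size_t len;
};

struct miter {
	const struct buf *bufs;
	size_t nbufs, cur, off;	/* current buffer and offset into it */
	const char *addr;	/* start of the current chunk */
	size_t length;		/* length of the current chunk */
};

static void miter_start(struct miter *m, const struct buf *bufs, size_t nbufs)
{
	memset(m, 0, sizeof(*m));
	m->bufs = bufs;
	m->nbufs = nbufs;
}

static int miter_next(struct miter *m)
{
	/* advance to the next buffer once the current one is exhausted */
	while (m->cur < m->nbufs && m->off == m->bufs[m->cur].len) {
		m->cur++;
		m->off = 0;
	}
	if (m->cur == m->nbufs)
		return 0;

	m->addr = m->bufs[m->cur].data + m->off;
	m->length = m->bufs[m->cur].len - m->off;
	if (m->length > CHUNK)
		m->length = CHUNK;
	m->off += m->length;
	return 1;
}

static void miter_stop(struct miter *m)
{
	m->addr = NULL;		/* nothing to kunmap in this sketch */
	m->length = 0;
}

int main(void)
{
	struct buf bufs[] = { { "hello ", 6 }, { "world", 5 } };
	struct miter m;

	miter_start(&m, bufs, 2);
	while (miter_next(&m))
		printf("chunk: %.*s\n", (int)m.length, m.addr);
	miter_stop(&m);
	return 0;
}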
diff --git a/lib/show_mem.c b/lib/show_mem.c
new file mode 100644
index 000000000000..238e72a18ce1
--- /dev/null
+++ b/lib/show_mem.c
@@ -0,0 +1,63 @@
1/*
2 * Generic show_mem() implementation
3 *
4 * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
5 * All code subject to the GPL version 2.
6 */
7
8#include <linux/mm.h>
9#include <linux/nmi.h>
10#include <linux/quicklist.h>
11
12void show_mem(void)
13{
14 pg_data_t *pgdat;
15 unsigned long total = 0, reserved = 0, shared = 0,
16 nonshared = 0, highmem = 0;
17
18 printk(KERN_INFO "Mem-Info:\n");
19 show_free_areas();
20
21 for_each_online_pgdat(pgdat) {
22 unsigned long i, flags;
23
24 pgdat_resize_lock(pgdat, &flags);
25 for (i = 0; i < pgdat->node_spanned_pages; i++) {
26 struct page *page;
27 unsigned long pfn = pgdat->node_start_pfn + i;
28
29 if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
30 touch_nmi_watchdog();
31
32 if (!pfn_valid(pfn))
33 continue;
34
35 page = pfn_to_page(pfn);
36
37 if (PageHighMem(page))
38 highmem++;
39
40 if (PageReserved(page))
41 reserved++;
42 else if (page_count(page) == 1)
43 nonshared++;
44 else if (page_count(page) > 1)
45 shared += page_count(page) - 1;
46
47 total++;
48 }
49 pgdat_resize_unlock(pgdat, &flags);
50 }
51
52 printk(KERN_INFO "%lu pages RAM\n", total);
53#ifdef CONFIG_HIGHMEM
54 printk(KERN_INFO "%lu pages HighMem\n", highmem);
55#endif
56 printk(KERN_INFO "%lu pages reserved\n", reserved);
57 printk(KERN_INFO "%lu pages shared\n", shared);
58 printk(KERN_INFO "%lu pages non-shared\n", nonshared);
59#ifdef CONFIG_QUICKLIST
60 printk(KERN_INFO "%lu pages in pagetable cache\n",
61 quicklist_total_size());
62#endif
63}
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 6c90fb90e19c..0f8fc22ed103 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -7,11 +7,10 @@
7#include <linux/kallsyms.h> 7#include <linux/kallsyms.h>
8#include <linux/sched.h> 8#include <linux/sched.h>
9 9
10unsigned int debug_smp_processor_id(void) 10notrace unsigned int debug_smp_processor_id(void)
11{ 11{
12 unsigned long preempt_count = preempt_count(); 12 unsigned long preempt_count = preempt_count();
13 int this_cpu = raw_smp_processor_id(); 13 int this_cpu = raw_smp_processor_id();
14 cpumask_t this_mask;
15 14
16 if (likely(preempt_count)) 15 if (likely(preempt_count))
17 goto out; 16 goto out;
@@ -23,9 +22,7 @@ unsigned int debug_smp_processor_id(void)
23 * Kernel threads bound to a single CPU can safely use 22 * Kernel threads bound to a single CPU can safely use
24 * smp_processor_id(): 23 * smp_processor_id():
25 */ 24 */
26 this_mask = cpumask_of_cpu(this_cpu); 25 if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
27
28 if (cpus_equal(current->cpus_allowed, this_mask))
29 goto out; 26 goto out;
30 27
31 /* 28 /*
@@ -37,7 +34,7 @@ unsigned int debug_smp_processor_id(void)
37 /* 34 /*
38 * Avoid recursion: 35 * Avoid recursion:
39 */ 36 */
40 preempt_disable(); 37 preempt_disable_notrace();
41 38
42 if (!printk_ratelimit()) 39 if (!printk_ratelimit())
43 goto out_enable; 40 goto out_enable;
@@ -49,7 +46,7 @@ unsigned int debug_smp_processor_id(void)
49 dump_stack(); 46 dump_stack();
50 47
51out_enable: 48out_enable:
52 preempt_enable_no_resched(); 49 preempt_enable_no_resched_notrace();
53out: 50out:
54 return this_cpu; 51 return this_cpu;
55} 52}
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index d568894df8cc..977edbdbc1de 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -492,7 +492,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
492 */ 492 */
493 dma_addr_t handle; 493 dma_addr_t handle;
494 handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE); 494 handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
495 if (swiotlb_dma_mapping_error(handle)) 495 if (swiotlb_dma_mapping_error(hwdev, handle))
496 return NULL; 496 return NULL;
497 497
498 ret = bus_to_virt(handle); 498 ret = bus_to_virt(handle);
@@ -824,7 +824,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
824} 824}
825 825
826int 826int
827swiotlb_dma_mapping_error(dma_addr_t dma_addr) 827swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
828{ 828{
829 return (dma_addr == virt_to_bus(io_tlb_overflow_buffer)); 829 return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
830} 830}
diff --git a/lib/syscall.c b/lib/syscall.c
new file mode 100644
index 000000000000..a4f7067f72fa
--- /dev/null
+++ b/lib/syscall.c
@@ -0,0 +1,75 @@
1#include <linux/ptrace.h>
2#include <linux/sched.h>
3#include <linux/module.h>
4#include <asm/syscall.h>
5
6static int collect_syscall(struct task_struct *target, long *callno,
7 unsigned long args[6], unsigned int maxargs,
8 unsigned long *sp, unsigned long *pc)
9{
10 struct pt_regs *regs = task_pt_regs(target);
11 if (unlikely(!regs))
12 return -EAGAIN;
13
14 *sp = user_stack_pointer(regs);
15 *pc = instruction_pointer(regs);
16
17 *callno = syscall_get_nr(target, regs);
18 if (*callno != -1L && maxargs > 0)
19 syscall_get_arguments(target, regs, 0, maxargs, args);
20
21 return 0;
22}
23
24/**
25 * task_current_syscall - Discover what a blocked task is doing.
26 * @target: thread to examine
27 * @callno: filled with system call number or -1
28 * @args: filled with @maxargs system call arguments
29 * @maxargs: number of elements in @args to fill
30 * @sp: filled with user stack pointer
31 * @pc: filled with user PC
32 *
33 * If @target is blocked in a system call, returns zero with *@callno
34 * set to the call's number and @args filled in with its arguments.
35 * Registers not used for system call arguments may not be available and
36 * it is not kosher to use &struct user_regset calls while the system
37 * call is still in progress. Note we may get this result if @target
38 * has finished its system call but not yet returned to user mode, such
39 * as when it's stopped for signal handling or syscall exit tracing.
40 *
41 * If @target is blocked in the kernel during a fault or exception,
42 * returns zero with *@callno set to -1 and does not fill in @args.
43 * If so, it's now safe to examine @target using &struct user_regset
44 * get() calls as long as we're sure @target won't return to user mode.
45 *
46 * Returns -%EAGAIN if @target does not remain blocked.
47 *
48 * Returns -%EINVAL if @maxargs is too large (maximum is six).
49 */
50int task_current_syscall(struct task_struct *target, long *callno,
51 unsigned long args[6], unsigned int maxargs,
52 unsigned long *sp, unsigned long *pc)
53{
54 long state;
55 unsigned long ncsw;
56
57 if (unlikely(maxargs > 6))
58 return -EINVAL;
59
60 if (target == current)
61 return collect_syscall(target, callno, args, maxargs, sp, pc);
62
63 state = target->state;
64 if (unlikely(!state))
65 return -EAGAIN;
66
67 ncsw = wait_task_inactive(target, state);
68 if (unlikely(!ncsw) ||
69 unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
70 unlikely(wait_task_inactive(target, state) != ncsw))
71 return -EAGAIN;
72
73 return 0;
74}
75EXPORT_SYMBOL_GPL(task_current_syscall);
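task_current_syscall() guards its register snapshot with wait_task_inactive(): it records the context-switch count, samples the registers, then rechecks the count and returns -EAGAIN if the target ran in between, since the sample could then mix old and new state. Below is a small userspace sketch of that snapshot-and-verify pattern; the struct, its fields and the counter are made up for illustration.

#include <stdio.h>
#include <errno.h>

struct target {
	unsigned long nvcsw;	/* bumped whenever the "task" runs again */
	long syscall_nr;
	unsigned long arg0;
};

/* Sample the fields, then verify the counter did not move meanwhile. */
static int sample(const struct target *t, long *nr, unsigned long *arg0)
{
	unsigned long ncsw = t->nvcsw;

	*nr = t->syscall_nr;
	*arg0 = t->arg0;

	return t->nvcsw == ncsw ? 0 : -EAGAIN;
}

int main(void)
{
	struct target t = { .nvcsw = 1, .syscall_nr = 4, .arg0 = 0x1000 };
	long nr;
	unsigned long arg0;

	if (sample(&t, &nr, &arg0) == 0)
		printf("syscall %ld, arg0 %#lx\n", nr, arg0);
	return 0;
}

In the kernel the verification matters because the target may start running again between the two reads of the switch count; callers typically retry or give up on -EAGAIN.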
diff --git a/lib/textsearch.c b/lib/textsearch.c
index be8bda3862f5..9fbcb44c554f 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -54,10 +54,13 @@
54 * USAGE 54 * USAGE
55 * 55 *
56 * Before a search can be performed, a configuration must be created 56 * Before a search can be performed, a configuration must be created
57 * by calling textsearch_prepare() specyfing the searching algorithm and 57 * by calling textsearch_prepare() specifying the searching algorithm,
58 * the pattern to look for. The returned configuration may then be used 58 * the pattern to look for and flags. As a flag, you can set TS_IGNORECASE
59 * for an arbitary amount of times and even in parallel as long as a 59 * to perform case insensitive matching. But it might slow down
60 * separate struct ts_state variable is provided to every instance. 60 * performance of the algorithm, so you should use it at your own risk.
61 * The returned configuration may then be used for an arbitrary
62 * amount of times and even in parallel as long as a separate struct
63 * ts_state variable is provided to every instance.
61 * 64 *
62 * The actual search is performed by either calling textsearch_find_- 65 * The actual search is performed by either calling textsearch_find_-
63 * continuous() for linear data or by providing an own get_next_block() 66 * continuous() for linear data or by providing an own get_next_block()
@@ -89,7 +92,6 @@
89 * panic("Oh my god, dancing chickens at %d\n", pos); 92 * panic("Oh my god, dancing chickens at %d\n", pos);
90 * 93 *
91 * textsearch_destroy(conf); 94 * textsearch_destroy(conf);
92 *
93 * ========================================================================== 95 * ==========================================================================
94 */ 96 */
95 97
@@ -97,6 +99,7 @@
97#include <linux/types.h> 99#include <linux/types.h>
98#include <linux/string.h> 100#include <linux/string.h>
99#include <linux/init.h> 101#include <linux/init.h>
102#include <linux/rculist.h>
100#include <linux/rcupdate.h> 103#include <linux/rcupdate.h>
101#include <linux/err.h> 104#include <linux/err.h>
102#include <linux/textsearch.h> 105#include <linux/textsearch.h>
@@ -264,7 +267,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
264 return ERR_PTR(-EINVAL); 267 return ERR_PTR(-EINVAL);
265 268
266 ops = lookup_ts_algo(algo); 269 ops = lookup_ts_algo(algo);
267#ifdef CONFIG_KMOD 270#ifdef CONFIG_MODULES
268 /* 271 /*
269 * Why not always autoload you may ask. Some users are 272 * Why not always autoload you may ask. Some users are
270 * in a situation where requesting a module may deadlock, 273 * in a situation where requesting a module may deadlock,
@@ -279,7 +282,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
279 if (ops == NULL) 282 if (ops == NULL)
280 goto errout; 283 goto errout;
281 284
282 conf = ops->init(pattern, len, gfp_mask); 285 conf = ops->init(pattern, len, gfp_mask, flags);
283 if (IS_ERR(conf)) { 286 if (IS_ERR(conf)) {
284 err = PTR_ERR(conf); 287 err = PTR_ERR(conf);
285 goto errout; 288 goto errout;
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index d90822c378a4..9e66ee4020e9 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -39,6 +39,7 @@
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/types.h> 40#include <linux/types.h>
41#include <linux/string.h> 41#include <linux/string.h>
42#include <linux/ctype.h>
42#include <linux/textsearch.h> 43#include <linux/textsearch.h>
43 44
44/* Alphabet size, use ASCII */ 45/* Alphabet size, use ASCII */
@@ -63,7 +64,8 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
63 struct ts_bm *bm = ts_config_priv(conf); 64 struct ts_bm *bm = ts_config_priv(conf);
64 unsigned int i, text_len, consumed = state->offset; 65 unsigned int i, text_len, consumed = state->offset;
65 const u8 *text; 66 const u8 *text;
66 int shift = bm->patlen, bs; 67 int shift = bm->patlen - 1, bs;
68 const u8 icase = conf->flags & TS_IGNORECASE;
67 69
68 for (;;) { 70 for (;;) {
69 text_len = conf->get_next_block(consumed, &text, conf, state); 71 text_len = conf->get_next_block(consumed, &text, conf, state);
@@ -75,7 +77,9 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
75 DEBUGP("Searching in position %d (%c)\n", 77 DEBUGP("Searching in position %d (%c)\n",
76 shift, text[shift]); 78 shift, text[shift]);
77 for (i = 0; i < bm->patlen; i++) 79 for (i = 0; i < bm->patlen; i++)
78 if (text[shift-i] != bm->pattern[bm->patlen-1-i]) 80 if ((icase ? toupper(text[shift-i])
81 : text[shift-i])
82 != bm->pattern[bm->patlen-1-i])
79 goto next; 83 goto next;
80 84
81 /* London calling... */ 85 /* London calling... */
@@ -111,14 +115,18 @@ static int subpattern(u8 *pattern, int i, int j, int g)
111 return ret; 115 return ret;
112} 116}
113 117
114static void compute_prefix_tbl(struct ts_bm *bm) 118static void compute_prefix_tbl(struct ts_bm *bm, int flags)
115{ 119{
116 int i, j, g; 120 int i, j, g;
117 121
118 for (i = 0; i < ASIZE; i++) 122 for (i = 0; i < ASIZE; i++)
119 bm->bad_shift[i] = bm->patlen; 123 bm->bad_shift[i] = bm->patlen;
120 for (i = 0; i < bm->patlen - 1; i++) 124 for (i = 0; i < bm->patlen - 1; i++) {
121 bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i; 125 bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
126 if (flags & TS_IGNORECASE)
127 bm->bad_shift[tolower(bm->pattern[i])]
128 = bm->patlen - 1 - i;
129 }
122 130
123 /* Compute the good shift array, used to match reocurrences 131 /* Compute the good shift array, used to match reocurrences
124 * of a subpattern */ 132 * of a subpattern */
@@ -135,10 +143,11 @@ static void compute_prefix_tbl(struct ts_bm *bm)
135} 143}
136 144
137static struct ts_config *bm_init(const void *pattern, unsigned int len, 145static struct ts_config *bm_init(const void *pattern, unsigned int len,
138 gfp_t gfp_mask) 146 gfp_t gfp_mask, int flags)
139{ 147{
140 struct ts_config *conf; 148 struct ts_config *conf;
141 struct ts_bm *bm; 149 struct ts_bm *bm;
150 int i;
142 unsigned int prefix_tbl_len = len * sizeof(unsigned int); 151 unsigned int prefix_tbl_len = len * sizeof(unsigned int);
143 size_t priv_size = sizeof(*bm) + len + prefix_tbl_len; 152 size_t priv_size = sizeof(*bm) + len + prefix_tbl_len;
144 153
@@ -146,11 +155,16 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len,
146 if (IS_ERR(conf)) 155 if (IS_ERR(conf))
147 return conf; 156 return conf;
148 157
158 conf->flags = flags;
149 bm = ts_config_priv(conf); 159 bm = ts_config_priv(conf);
150 bm->patlen = len; 160 bm->patlen = len;
151 bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len; 161 bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
152 memcpy(bm->pattern, pattern, len); 162 if (flags & TS_IGNORECASE)
153 compute_prefix_tbl(bm); 163 for (i = 0; i < len; i++)
164 bm->pattern[i] = toupper(((u8 *)pattern)[i]);
165 else
166 memcpy(bm->pattern, pattern, len);
167 compute_prefix_tbl(bm, flags);
154 168
155 return conf; 169 return conf;
156} 170}
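For TS_IGNORECASE, bm_init() stores an upper-cased copy of the pattern, compute_prefix_tbl() adds a bad-shift entry for the lower-case form of every pattern byte, and bm_find() folds only the text side with toupper() at compare time (the hunk above also corrects the initial shift from patlen to patlen - 1). The sketch below applies the same folding to a plain Horspool-style bad-character search; the kernel version additionally keeps a good-suffix table, which is omitted here, and when icase is set the pattern is expected to be upper-cased already, mirroring what bm_init() does. All names are illustrative.

#include <stdio.h>
#include <ctype.h>

#define ASIZE 256	/* alphabet size, plain bytes */

static void build_bad_shift(const unsigned char *pat, size_t len,
			    size_t bad_shift[ASIZE], int icase)
{
	size_t i;

	for (i = 0; i < ASIZE; i++)
		bad_shift[i] = len;
	for (i = 0; i + 1 < len; i++) {
		bad_shift[pat[i]] = len - 1 - i;
		if (icase)	/* lower-case text bytes shift the same way */
			bad_shift[tolower(pat[i])] = len - 1 - i;
	}
}

/* Bad-character-only search; returns the match offset or -1. */
static long bm_find(const unsigned char *text, size_t tlen,
		    const unsigned char *pat, size_t plen, int icase)
{
	size_t bad_shift[ASIZE], shift = plen - 1, i;

	build_bad_shift(pat, plen, bad_shift, icase);
	while (shift < tlen) {
		for (i = 0; i < plen; i++) {
			unsigned char c = text[shift - i];

			if ((icase ? toupper(c) : c) != pat[plen - 1 - i])
				break;
		}
		if (i == plen)
			return (long)(shift - plen + 1);
		shift += bad_shift[text[shift]];
	}
	return -1;
}

int main(void)
{
	printf("%ld\n", bm_find((const unsigned char *)"Dancing Chickens", 16,
				(const unsigned char *)"CHICK", 5, 1));
	return 0;
}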
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index af575b61526b..5696a35184e4 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -257,7 +257,7 @@ found_match:
257} 257}
258 258
259static struct ts_config *fsm_init(const void *pattern, unsigned int len, 259static struct ts_config *fsm_init(const void *pattern, unsigned int len,
260 gfp_t gfp_mask) 260 gfp_t gfp_mask, int flags)
261{ 261{
262 int i, err = -EINVAL; 262 int i, err = -EINVAL;
263 struct ts_config *conf; 263 struct ts_config *conf;
@@ -269,6 +269,9 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len,
269 if (len % sizeof(struct ts_fsm_token) || ntokens < 1) 269 if (len % sizeof(struct ts_fsm_token) || ntokens < 1)
270 goto errout; 270 goto errout;
271 271
272 if (flags & TS_IGNORECASE)
273 goto errout;
274
272 for (i = 0; i < ntokens; i++) { 275 for (i = 0; i < ntokens; i++) {
273 struct ts_fsm_token *t = &tokens[i]; 276 struct ts_fsm_token *t = &tokens[i];
274 277
@@ -284,6 +287,7 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len,
284 if (IS_ERR(conf)) 287 if (IS_ERR(conf))
285 return conf; 288 return conf;
286 289
290 conf->flags = flags;
287 fsm = ts_config_priv(conf); 291 fsm = ts_config_priv(conf);
288 fsm->ntokens = ntokens; 292 fsm->ntokens = ntokens;
289 memcpy(fsm->tokens, pattern, len); 293 memcpy(fsm->tokens, pattern, len);
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 3ced628cab4b..632f783e65f1 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -33,6 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/string.h> 35#include <linux/string.h>
36#include <linux/ctype.h>
36#include <linux/textsearch.h> 37#include <linux/textsearch.h>
37 38
38struct ts_kmp 39struct ts_kmp
@@ -47,6 +48,7 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
47 struct ts_kmp *kmp = ts_config_priv(conf); 48 struct ts_kmp *kmp = ts_config_priv(conf);
48 unsigned int i, q = 0, text_len, consumed = state->offset; 49 unsigned int i, q = 0, text_len, consumed = state->offset;
49 const u8 *text; 50 const u8 *text;
51 const int icase = conf->flags & TS_IGNORECASE;
50 52
51 for (;;) { 53 for (;;) {
52 text_len = conf->get_next_block(consumed, &text, conf, state); 54 text_len = conf->get_next_block(consumed, &text, conf, state);
@@ -55,9 +57,11 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
55 break; 57 break;
56 58
57 for (i = 0; i < text_len; i++) { 59 for (i = 0; i < text_len; i++) {
58 while (q > 0 && kmp->pattern[q] != text[i]) 60 while (q > 0 && kmp->pattern[q]
61 != (icase ? toupper(text[i]) : text[i]))
59 q = kmp->prefix_tbl[q - 1]; 62 q = kmp->prefix_tbl[q - 1];
60 if (kmp->pattern[q] == text[i]) 63 if (kmp->pattern[q]
64 == (icase ? toupper(text[i]) : text[i]))
61 q++; 65 q++;
62 if (unlikely(q == kmp->pattern_len)) { 66 if (unlikely(q == kmp->pattern_len)) {
63 state->offset = consumed + i + 1; 67 state->offset = consumed + i + 1;
@@ -72,24 +76,28 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
72} 76}
73 77
74static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len, 78static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
75 unsigned int *prefix_tbl) 79 unsigned int *prefix_tbl, int flags)
76{ 80{
77 unsigned int k, q; 81 unsigned int k, q;
82 const u8 icase = flags & TS_IGNORECASE;
78 83
79 for (k = 0, q = 1; q < len; q++) { 84 for (k = 0, q = 1; q < len; q++) {
80 while (k > 0 && pattern[k] != pattern[q]) 85 while (k > 0 && (icase ? toupper(pattern[k]) : pattern[k])
86 != (icase ? toupper(pattern[q]) : pattern[q]))
81 k = prefix_tbl[k-1]; 87 k = prefix_tbl[k-1];
82 if (pattern[k] == pattern[q]) 88 if ((icase ? toupper(pattern[k]) : pattern[k])
89 == (icase ? toupper(pattern[q]) : pattern[q]))
83 k++; 90 k++;
84 prefix_tbl[q] = k; 91 prefix_tbl[q] = k;
85 } 92 }
86} 93}
87 94
88static struct ts_config *kmp_init(const void *pattern, unsigned int len, 95static struct ts_config *kmp_init(const void *pattern, unsigned int len,
89 gfp_t gfp_mask) 96 gfp_t gfp_mask, int flags)
90{ 97{
91 struct ts_config *conf; 98 struct ts_config *conf;
92 struct ts_kmp *kmp; 99 struct ts_kmp *kmp;
100 int i;
93 unsigned int prefix_tbl_len = len * sizeof(unsigned int); 101 unsigned int prefix_tbl_len = len * sizeof(unsigned int);
94 size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len; 102 size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len;
95 103
@@ -97,11 +105,16 @@ static struct ts_config *kmp_init(const void *pattern, unsigned int len,
97 if (IS_ERR(conf)) 105 if (IS_ERR(conf))
98 return conf; 106 return conf;
99 107
108 conf->flags = flags;
100 kmp = ts_config_priv(conf); 109 kmp = ts_config_priv(conf);
101 kmp->pattern_len = len; 110 kmp->pattern_len = len;
102 compute_prefix_tbl(pattern, len, kmp->prefix_tbl); 111 compute_prefix_tbl(pattern, len, kmp->prefix_tbl, flags);
103 kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len; 112 kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len;
104 memcpy(kmp->pattern, pattern, len); 113 if (flags & TS_IGNORECASE)
114 for (i = 0; i < len; i++)
115 kmp->pattern[i] = toupper(((u8 *)pattern)[i]);
116 else
117 memcpy(kmp->pattern, pattern, len);
105 118
106 return conf; 119 return conf;
107} 120}
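The KMP variant gets the same treatment: kmp_init() stores an upper-cased pattern and kmp_find() folds the text with toupper(), while compute_prefix_tbl() applies the folding when building the failure table so the two stay consistent. The standalone sketch below folds both sides at compare time instead of pre-upper-casing the stored pattern, assumes patterns of at most 64 bytes for the on-stack table, and uses illustrative names throughout.

#include <stdio.h>
#include <ctype.h>

static void compute_prefix_tbl(const unsigned char *pat, size_t len,
			       size_t *prefix_tbl, int icase)
{
	size_t k = 0, q;

	for (q = 1; q < len; q++) {
		while (k > 0 && (icase ? toupper(pat[k]) : pat[k]) !=
				(icase ? toupper(pat[q]) : pat[q]))
			k = prefix_tbl[k - 1];
		if ((icase ? toupper(pat[k]) : pat[k]) ==
		    (icase ? toupper(pat[q]) : pat[q]))
			k++;
		prefix_tbl[q] = k;
	}
}

/* Returns the offset just past the first match, or -1 if none. */
static long kmp_find(const unsigned char *text, size_t tlen,
		     const unsigned char *pat, size_t plen, int icase)
{
	size_t prefix_tbl[64], i, q = 0;	/* assumes plen <= 64 */

	compute_prefix_tbl(pat, plen, prefix_tbl, icase);
	for (i = 0; i < tlen; i++) {
		unsigned char c = icase ? toupper(text[i]) : text[i];

		while (q > 0 && (icase ? toupper(pat[q]) : pat[q]) != c)
			q = prefix_tbl[q - 1];
		if ((icase ? toupper(pat[q]) : pat[q]) == c)
			q++;
		if (q == plen)
			return (long)(i + 1);
	}
	return -1;
}

int main(void)
{
	printf("%ld\n", kmp_find((const unsigned char *)"dancing CHICKENS", 16,
				 (const unsigned char *)"chickens", 8, 1));
	return 0;
}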
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6021757a4496..1dc2d1d18fa8 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -22,6 +22,8 @@
22#include <linux/string.h> 22#include <linux/string.h>
23#include <linux/ctype.h> 23#include <linux/ctype.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/kallsyms.h>
26#include <linux/uaccess.h>
25 27
26#include <asm/page.h> /* for PAGE_SIZE */ 28#include <asm/page.h> /* for PAGE_SIZE */
27#include <asm/div64.h> 29#include <asm/div64.h>
@@ -482,6 +484,89 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
482 return buf; 484 return buf;
483} 485}
484 486
487static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags)
488{
489 int len, i;
490
491 if ((unsigned long)s < PAGE_SIZE)
492 s = "<NULL>";
493
494 len = strnlen(s, precision);
495
496 if (!(flags & LEFT)) {
497 while (len < field_width--) {
498 if (buf < end)
499 *buf = ' ';
500 ++buf;
501 }
502 }
503 for (i = 0; i < len; ++i) {
504 if (buf < end)
505 *buf = *s;
506 ++buf; ++s;
507 }
508 while (len < field_width--) {
509 if (buf < end)
510 *buf = ' ';
511 ++buf;
512 }
513 return buf;
514}
515
516static inline void *dereference_function_descriptor(void *ptr)
517{
518#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
519 void *p;
520 if (!probe_kernel_address(ptr, p))
521 ptr = p;
522#endif
523 return ptr;
524}
525
526static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags)
527{
528 unsigned long value = (unsigned long) ptr;
529#ifdef CONFIG_KALLSYMS
530 char sym[KSYM_SYMBOL_LEN];
531 sprint_symbol(sym, value);
532 return string(buf, end, sym, field_width, precision, flags);
533#else
534 field_width = 2*sizeof(void *);
535 flags |= SPECIAL | SMALL | ZEROPAD;
536 return number(buf, end, value, 16, field_width, precision, flags);
537#endif
538}
539
540/*
541 * Show a '%p' thing. A kernel extension is that the '%p' is followed
542 * by an extra set of alphanumeric characters that are extended format
543 * specifiers.
544 *
545 * Right now we just handle 'F' (for symbolic Function descriptor pointers)
546 * and 'S' (for Symbolic direct pointers), but this can easily be
547 * extended in the future (network address types etc).
548 *
549 * The difference between 'S' and 'F' is that on ia64 and ppc64 function
550 * pointers are really function descriptors, which contain a pointer to the
551 * real address.
552 */
553static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
554{
555 switch (*fmt) {
556 case 'F':
557 ptr = dereference_function_descriptor(ptr);
558 /* Fallthrough */
559 case 'S':
560 return symbol_string(buf, end, ptr, field_width, precision, flags);
561 }
562 flags |= SMALL;
563 if (field_width == -1) {
564 field_width = 2*sizeof(void *);
565 flags |= ZEROPAD;
566 }
567 return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags);
568}
569
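The comment above describes the new convention: '%p' may be followed by alphanumeric extension characters ('F' and 'S' so far), pointer() picks the rendering, and vsnprintf() then skips the suffix so it is not echoed into the output. Below is a toy userspace sketch of that dispatch-and-skip shape, with a one-entry lookup standing in for sprint_symbol(); none of the names are the kernel's and the formatter handles only this one conversion.

#include <stdio.h>
#include <ctype.h>

static const char msg_buffer[] = "hello";

/* stand-in for sprint_symbol(): map one known pointer to a name */
static const char *toy_symbol(const void *ptr)
{
	return ptr == (const void *)msg_buffer ? "msg_buffer" : NULL;
}

static void toy_format(const char *fmt, const void *ptr)
{
	while (*fmt) {
		if (fmt[0] == '%' && fmt[1] == 'p') {
			const char *sym;

			fmt += 2;
			sym = (*fmt == 'S') ? toy_symbol(ptr) : NULL;
			if (sym)
				fputs(sym, stdout);
			else
				printf("%p", (void *)ptr);
			/* skip all alphanumeric pointer suffixes */
			while (isalnum((unsigned char)*fmt))
				fmt++;
			continue;
		}
		putchar(*fmt++);
	}
}

int main(void)
{
	toy_format("plain:    %p\n", msg_buffer);
	toy_format("symbolic: %pS\n", msg_buffer);
	return 0;
}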
485/** 570/**
486 * vsnprintf - Format a string and place it in a buffer 571 * vsnprintf - Format a string and place it in a buffer
487 * @buf: The buffer to place the result into 572 * @buf: The buffer to place the result into
@@ -502,11 +587,9 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
502 */ 587 */
503int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) 588int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
504{ 589{
505 int len;
506 unsigned long long num; 590 unsigned long long num;
507 int i, base; 591 int base;
508 char *str, *end, c; 592 char *str, *end, c;
509 const char *s;
510 593
511 int flags; /* flags to number() */ 594 int flags; /* flags to number() */
512 595
@@ -622,43 +705,18 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
622 continue; 705 continue;
623 706
624 case 's': 707 case 's':
625 s = va_arg(args, char *); 708 str = string(str, end, va_arg(args, char *), field_width, precision, flags);
626 if ((unsigned long)s < PAGE_SIZE)
627 s = "<NULL>";
628
629 len = strnlen(s, precision);
630
631 if (!(flags & LEFT)) {
632 while (len < field_width--) {
633 if (str < end)
634 *str = ' ';
635 ++str;
636 }
637 }
638 for (i = 0; i < len; ++i) {
639 if (str < end)
640 *str = *s;
641 ++str; ++s;
642 }
643 while (len < field_width--) {
644 if (str < end)
645 *str = ' ';
646 ++str;
647 }
648 continue; 709 continue;
649 710
650 case 'p': 711 case 'p':
651 flags |= SMALL; 712 str = pointer(fmt+1, str, end,
652 if (field_width == -1) { 713 va_arg(args, void *),
653 field_width = 2*sizeof(void *); 714 field_width, precision, flags);
654 flags |= ZEROPAD; 715 /* Skip all alphanumeric pointer suffixes */
655 } 716 while (isalnum(fmt[1]))
656 str = number(str, end, 717 fmt++;
657 (unsigned long) va_arg(args, void *),
658 16, field_width, precision, flags);
659 continue; 718 continue;
660 719
661
662 case 'n': 720 case 'n':
663 /* FIXME: 721 /* FIXME:
664 * What does C99 say about the overflow case here? */ 722 * What does C99 say about the overflow case here? */