path: root/lib
author    David Woodhouse <David.Woodhouse@intel.com>	2012-10-09 10:03:21 -0400
committer David Woodhouse <David.Woodhouse@intel.com>	2012-10-09 10:04:25 -0400
commit    ffe315012510165ce82e4dd4767f0a5dba9edbf7 (patch)
tree      f601cd980af9d0ced5ca9aedecef4fa0d2ca0e15 /lib
parent    e2d3a35ee427aaba99b6c68a56609ce276c51270 (diff)
parent    4a8e43feeac7996b8de2d5b2823e316917493df4 (diff)
Merge tag 'disintegrate-mtd-20121009' of git://git.infradead.org/users/dhowells/linux-headers
UAPI Disintegration 2012-10-09

Conflicts:
	MAINTAINERS
	arch/arm/configs/bcmring_defconfig
	arch/arm/mach-imx/clk-imx51-imx53.c
	drivers/mtd/nand/Kconfig
	drivers/mtd/nand/bcm_umi_nand.c
	drivers/mtd/nand/nand_bcm_umi.h
	drivers/mtd/nand/orion_nand.c
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug               59
-rw-r--r--  lib/Makefile                     7
-rw-r--r--  lib/bcd.c                        8
-rw-r--r--  lib/crc32.c                      9
-rw-r--r--  lib/decompress.c                 9
-rw-r--r--  lib/digsig.c                     6
-rw-r--r--  lib/dma-debug.c                  5
-rw-r--r--  lib/dynamic_debug.c             56
-rw-r--r--  lib/flex_proportions.c           2
-rw-r--r--  lib/gcd.c                        3
-rw-r--r--  lib/gen_crc32table.c             6
-rw-r--r--  lib/genalloc.c                  88
-rw-r--r--  lib/idr.c                       32
-rw-r--r--  lib/interval_tree.c             10
-rw-r--r--  lib/interval_tree_test_main.c  105
-rw-r--r--  lib/kobject_uevent.c             5
-rw-r--r--  lib/nlattr.c                     4
-rw-r--r--  lib/parser.c                    10
-rw-r--r--  lib/plist.c                      4
-rw-r--r--  lib/prio_tree.c                466
-rw-r--r--  lib/rbtree.c                   656
-rw-r--r--  lib/rbtree_test.c              234
-rw-r--r--  lib/scatterlist.c               16
-rw-r--r--  lib/spinlock_debug.c            32
-rw-r--r--  lib/swiotlb.c                   33
-rw-r--r--  lib/vsprintf.c                 139
26 files changed, 1057 insertions(+), 947 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2403a63b5da5..28e9d6c98941 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -196,12 +196,13 @@ config LOCKUP_DETECTOR
196 thresholds can be controlled through the sysctl watchdog_thresh. 196 thresholds can be controlled through the sysctl watchdog_thresh.
197 197
198config HARDLOCKUP_DETECTOR 198config HARDLOCKUP_DETECTOR
199 def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \ 199 def_bool y
200 !HAVE_NMI_WATCHDOG 200 depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
201 depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
201 202
202config BOOTPARAM_HARDLOCKUP_PANIC 203config BOOTPARAM_HARDLOCKUP_PANIC
203 bool "Panic (Reboot) On Hard Lockups" 204 bool "Panic (Reboot) On Hard Lockups"
204 depends on LOCKUP_DETECTOR 205 depends on HARDLOCKUP_DETECTOR
205 help 206 help
206 Say Y here to enable the kernel to panic on "hard lockups", 207 Say Y here to enable the kernel to panic on "hard lockups",
207 which are bugs that cause the kernel to loop in kernel 208 which are bugs that cause the kernel to loop in kernel
@@ -212,7 +213,7 @@ config BOOTPARAM_HARDLOCKUP_PANIC
212 213
213config BOOTPARAM_HARDLOCKUP_PANIC_VALUE 214config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
214 int 215 int
215 depends on LOCKUP_DETECTOR 216 depends on HARDLOCKUP_DETECTOR
216 range 0 1 217 range 0 1
217 default 0 if !BOOTPARAM_HARDLOCKUP_PANIC 218 default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
218 default 1 if BOOTPARAM_HARDLOCKUP_PANIC 219 default 1 if BOOTPARAM_HARDLOCKUP_PANIC
@@ -449,11 +450,12 @@ config SLUB_STATS
449 out which slabs are relevant to a particular load. 450 out which slabs are relevant to a particular load.
450 Try running: slabinfo -DA 451 Try running: slabinfo -DA
451 452
453config HAVE_DEBUG_KMEMLEAK
454 bool
455
452config DEBUG_KMEMLEAK 456config DEBUG_KMEMLEAK
453 bool "Kernel memory leak detector" 457 bool "Kernel memory leak detector"
454 depends on DEBUG_KERNEL && EXPERIMENTAL && \ 458 depends on DEBUG_KERNEL && EXPERIMENTAL && HAVE_DEBUG_KMEMLEAK
455 (X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE)
456
457 select DEBUG_FS 459 select DEBUG_FS
458 select STACKTRACE if STACKTRACE_SUPPORT 460 select STACKTRACE if STACKTRACE_SUPPORT
459 select KALLSYMS 461 select KALLSYMS
@@ -629,6 +631,20 @@ config PROVE_RCU_REPEATEDLY
629 631
630 Say N if you are unsure. 632 Say N if you are unsure.
631 633
634config PROVE_RCU_DELAY
635 bool "RCU debugging: preemptible RCU race provocation"
636 depends on DEBUG_KERNEL && PREEMPT_RCU
637 default n
638 help
639 There is a class of races that involve an unlikely preemption
640 of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
641 been set to INT_MIN. This feature inserts a delay at that
642 point to increase the probability of these races.
643
644 Say Y to increase probability of preemption of __rcu_read_unlock().
645
646 Say N if you are unsure.
647
632config SPARSE_RCU_POINTER 648config SPARSE_RCU_POINTER
633 bool "RCU debugging: sparse-based checks for pointer usage" 649 bool "RCU debugging: sparse-based checks for pointer usage"
634 default n 650 default n
@@ -735,11 +751,12 @@ config DEBUG_HIGHMEM
735 This options enables addition error checking for high memory systems. 751 This options enables addition error checking for high memory systems.
736 Disable for production systems. 752 Disable for production systems.
737 753
754config HAVE_DEBUG_BUGVERBOSE
755 bool
756
738config DEBUG_BUGVERBOSE 757config DEBUG_BUGVERBOSE
739 bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT 758 bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
740 depends on BUG 759 depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
741 depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
742 FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || TILE
743 default y 760 default y
744 help 761 help
745 Say Y here to make BUG() panics output the file name and line number 762 Say Y here to make BUG() panics output the file name and line number
@@ -781,6 +798,15 @@ config DEBUG_VM
781 798
782 If unsure, say N. 799 If unsure, say N.
783 800
801config DEBUG_VM_RB
802 bool "Debug VM red-black trees"
803 depends on DEBUG_VM
804 help
805 Enable this to turn on more extended checks in the virtual-memory
806 system that may impact performance.
807
808 If unsure, say N.
809
784config DEBUG_VIRTUAL 810config DEBUG_VIRTUAL
785 bool "Debug VM translations" 811 bool "Debug VM translations"
786 depends on DEBUG_KERNEL && X86 812 depends on DEBUG_KERNEL && X86
@@ -1265,6 +1291,19 @@ config LATENCYTOP
1265source mm/Kconfig.debug 1291source mm/Kconfig.debug
1266source kernel/trace/Kconfig 1292source kernel/trace/Kconfig
1267 1293
1294config RBTREE_TEST
1295 tristate "Red-Black tree test"
1296 depends on m && DEBUG_KERNEL
1297 help
1298 A benchmark measuring the performance of the rbtree library.
1299 Also includes rbtree invariant checks.
1300
1301config INTERVAL_TREE_TEST
1302 tristate "Interval tree test"
1303 depends on m && DEBUG_KERNEL
1304 help
1305 A benchmark measuring the performance of the interval tree library
1306
1268config PROVIDE_OHCI1394_DMA_INIT 1307config PROVIDE_OHCI1394_DMA_INIT
1269 bool "Remote debugging over FireWire early on boot" 1308 bool "Remote debugging over FireWire early on boot"
1270 depends on PCI && X86 1309 depends on PCI && X86
diff --git a/lib/Makefile b/lib/Makefile
index 42d283edc4d3..3128e357e286 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -9,7 +9,7 @@ endif
9 9
10lib-y := ctype.o string.o vsprintf.o cmdline.o \ 10lib-y := ctype.o string.o vsprintf.o cmdline.o \
11 rbtree.o radix-tree.o dump_stack.o timerqueue.o\ 11 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
12 idr.o int_sqrt.o extable.o prio_tree.o \ 12 idr.o int_sqrt.o extable.o \
13 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
14 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \ 14 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
15 is_single_threaded.o plist.o decompress.o 15 is_single_threaded.o plist.o decompress.o
@@ -140,6 +140,11 @@ $(foreach file, $(libfdt_files), \
140 $(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt)) 140 $(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
141lib-$(CONFIG_LIBFDT) += $(libfdt_files) 141lib-$(CONFIG_LIBFDT) += $(libfdt_files)
142 142
143obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
144obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
145
146interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
147
143hostprogs-y := gen_crc32table 148hostprogs-y := gen_crc32table
144clean-files := crc32table.h 149clean-files := crc32table.h
145 150
diff --git a/lib/bcd.c b/lib/bcd.c
index 55efaf742346..40d304efe272 100644
--- a/lib/bcd.c
+++ b/lib/bcd.c
@@ -1,14 +1,14 @@
1#include <linux/bcd.h> 1#include <linux/bcd.h>
2#include <linux/export.h> 2#include <linux/export.h>
3 3
4unsigned bcd2bin(unsigned char val) 4unsigned _bcd2bin(unsigned char val)
5{ 5{
6 return (val & 0x0f) + (val >> 4) * 10; 6 return (val & 0x0f) + (val >> 4) * 10;
7} 7}
8EXPORT_SYMBOL(bcd2bin); 8EXPORT_SYMBOL(_bcd2bin);
9 9
10unsigned char bin2bcd(unsigned val) 10unsigned char _bin2bcd(unsigned val)
11{ 11{
12 return ((val / 10) << 4) + val % 10; 12 return ((val / 10) << 4) + val % 10;
13} 13}
14EXPORT_SYMBOL(bin2bcd); 14EXPORT_SYMBOL(_bin2bcd);
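
The bcd.c change only renames the out-of-line helpers to _bcd2bin()/_bin2bcd(); the arithmetic is untouched, presumably so that the header side (not part of this lib/ diff) can provide bcd2bin()/bin2bcd() wrappers. A minimal userspace sketch of what the two conversions compute; the main() harness and sample values are illustrative only:

	#include <assert.h>

	/* BCD packs one decimal digit per nibble: 0x42 encodes decimal 42. */
	static unsigned bcd2bin(unsigned char val)
	{
		return (val & 0x0f) + (val >> 4) * 10;
	}

	static unsigned char bin2bcd(unsigned val)
	{
		return ((val / 10) << 4) + val % 10;
	}

	int main(void)
	{
		assert(bcd2bin(0x42) == 42);	/* low nibble 2 + high nibble 4 * 10 */
		assert(bin2bcd(59) == 0x59);	/* RTC second/minute registers are typical users */
		return 0;
	}
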
diff --git a/lib/crc32.c b/lib/crc32.c
index 61774b8db4de..072fbd8234d5 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -188,11 +188,13 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
188#else 188#else
189u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) 189u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
190{ 190{
191 return crc32_le_generic(crc, p, len, crc32table_le, CRCPOLY_LE); 191 return crc32_le_generic(crc, p, len,
192 (const u32 (*)[256])crc32table_le, CRCPOLY_LE);
192} 193}
193u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) 194u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
194{ 195{
195 return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE); 196 return crc32_le_generic(crc, p, len,
197 (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
196} 198}
197#endif 199#endif
198EXPORT_SYMBOL(crc32_le); 200EXPORT_SYMBOL(crc32_le);
@@ -253,7 +255,8 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
253#else 255#else
254u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) 256u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
255{ 257{
256 return crc32_be_generic(crc, p, len, crc32table_be, CRCPOLY_BE); 258 return crc32_be_generic(crc, p, len,
259 (const u32 (*)[256])crc32table_be, CRCPOLY_BE);
257} 260}
258#endif 261#endif
259EXPORT_SYMBOL(crc32_be); 262EXPORT_SYMBOL(crc32_be);
diff --git a/lib/decompress.c b/lib/decompress.c
index 3d766b7f60ab..31a804277282 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/init.h>
17 18
18#ifndef CONFIG_DECOMPRESS_GZIP 19#ifndef CONFIG_DECOMPRESS_GZIP
19# define gunzip NULL 20# define gunzip NULL
@@ -31,11 +32,13 @@
31# define unlzo NULL 32# define unlzo NULL
32#endif 33#endif
33 34
34static const struct compress_format { 35struct compress_format {
35 unsigned char magic[2]; 36 unsigned char magic[2];
36 const char *name; 37 const char *name;
37 decompress_fn decompressor; 38 decompress_fn decompressor;
38} compressed_formats[] = { 39};
40
41static const struct compress_format compressed_formats[] __initdata = {
39 { {037, 0213}, "gzip", gunzip }, 42 { {037, 0213}, "gzip", gunzip },
40 { {037, 0236}, "gzip", gunzip }, 43 { {037, 0236}, "gzip", gunzip },
41 { {0x42, 0x5a}, "bzip2", bunzip2 }, 44 { {0x42, 0x5a}, "bzip2", bunzip2 },
@@ -45,7 +48,7 @@ static const struct compress_format {
45 { {0, 0}, NULL, NULL } 48 { {0, 0}, NULL, NULL }
46}; 49};
47 50
48decompress_fn decompress_method(const unsigned char *inbuf, int len, 51decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
49 const char **name) 52 const char **name)
50{ 53{
51 const struct compress_format *cf; 54 const struct compress_format *cf;
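
decompress_method() picks a decompressor by comparing the first two bytes of the image against a table of magic values; this hunk only moves the table into __initdata and the function into __init. A hedged userspace sketch of the same table-driven lookup, using only the magic pairs visible in this hunk (the struct name, function name and main() harness below are made up for illustration):

	#include <stdio.h>

	/* Mirrors the compress_format lookup: match on the first two magic bytes. */
	struct format {
		unsigned char magic[2];
		const char *name;
	};

	static const struct format formats[] = {
		{ {037, 0213}, "gzip" },	/* 0x1f 0x8b */
		{ {037, 0236}, "gzip" },	/* 0x1f 0x9e, old gzip */
		{ {0x42, 0x5a}, "bzip2" },	/* "BZ" */
		{ {0, 0}, NULL }		/* the kernel table has further entries */
	};

	static const char *guess_format(const unsigned char *buf, int len)
	{
		const struct format *f;

		if (len < 2)
			return "none";
		for (f = formats; f->name; f++)
			if (buf[0] == f->magic[0] && buf[1] == f->magic[1])
				return f->name;
		return "none";
	}

	int main(void)
	{
		const unsigned char img[] = { 0x1f, 0x8b, 0x08 };

		printf("%s\n", guess_format(img, sizeof(img)));	/* prints "gzip" */
		return 0;
	}
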
diff --git a/lib/digsig.c b/lib/digsig.c
index 286d558033e2..8c0e62975c88 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -163,9 +163,11 @@ static int digsig_verify_rsa(struct key *key,
163 memcpy(out1 + head, p, l); 163 memcpy(out1 + head, p, l);
164 164
165 err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len); 165 err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
166 if (err)
167 goto err;
166 168
167 if (!err && len == hlen) 169 if (len != hlen || memcmp(out2, h, hlen))
168 err = memcmp(out2, h, hlen); 170 err = -EINVAL;
169 171
170err: 172err:
171 mpi_free(in); 173 mpi_free(in);
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 66ce41489133..b9087bff008b 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -120,11 +120,6 @@ static const char *type2name[4] = { "single", "page",
120static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", 120static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
121 "DMA_FROM_DEVICE", "DMA_NONE" }; 121 "DMA_FROM_DEVICE", "DMA_NONE" };
122 122
123/* little merge helper - remove it after the merge window */
124#ifndef BUS_NOTIFY_UNBOUND_DRIVER
125#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
126#endif
127
128/* 123/*
129 * The access to some variables in this macro is racy. We can't use atomic_t 124 * The access to some variables in this macro is racy. We can't use atomic_t
130 * here because all these variables are exported to debugfs. Some of them even 125 * here because all these variables are exported to debugfs. Some of them even
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 7ca29a0a3019..e7f7d993357a 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -521,25 +521,25 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
521 int pos_after_tid; 521 int pos_after_tid;
522 int pos = 0; 522 int pos = 0;
523 523
524 pos += snprintf(buf + pos, remaining(pos), "%s", KERN_DEBUG); 524 *buf = '\0';
525
525 if (desc->flags & _DPRINTK_FLAGS_INCL_TID) { 526 if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
526 if (in_interrupt()) 527 if (in_interrupt())
527 pos += snprintf(buf + pos, remaining(pos), "%s ", 528 pos += snprintf(buf + pos, remaining(pos), "<intr> ");
528 "<intr>");
529 else 529 else
530 pos += snprintf(buf + pos, remaining(pos), "[%d] ", 530 pos += snprintf(buf + pos, remaining(pos), "[%d] ",
531 task_pid_vnr(current)); 531 task_pid_vnr(current));
532 } 532 }
533 pos_after_tid = pos; 533 pos_after_tid = pos;
534 if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME) 534 if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME)
535 pos += snprintf(buf + pos, remaining(pos), "%s:", 535 pos += snprintf(buf + pos, remaining(pos), "%s:",
536 desc->modname); 536 desc->modname);
537 if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME) 537 if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
538 pos += snprintf(buf + pos, remaining(pos), "%s:", 538 pos += snprintf(buf + pos, remaining(pos), "%s:",
539 desc->function); 539 desc->function);
540 if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO) 540 if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO)
541 pos += snprintf(buf + pos, remaining(pos), "%d:", 541 pos += snprintf(buf + pos, remaining(pos), "%d:",
542 desc->lineno); 542 desc->lineno);
543 if (pos - pos_after_tid) 543 if (pos - pos_after_tid)
544 pos += snprintf(buf + pos, remaining(pos), " "); 544 pos += snprintf(buf + pos, remaining(pos), " ");
545 if (pos >= PREFIX_SIZE) 545 if (pos >= PREFIX_SIZE)
@@ -559,9 +559,13 @@ int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
559 BUG_ON(!fmt); 559 BUG_ON(!fmt);
560 560
561 va_start(args, fmt); 561 va_start(args, fmt);
562
562 vaf.fmt = fmt; 563 vaf.fmt = fmt;
563 vaf.va = &args; 564 vaf.va = &args;
564 res = printk("%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf); 565
566 res = printk(KERN_DEBUG "%s%pV",
567 dynamic_emit_prefix(descriptor, buf), &vaf);
568
565 va_end(args); 569 va_end(args);
566 570
567 return res; 571 return res;
@@ -574,15 +578,26 @@ int __dynamic_dev_dbg(struct _ddebug *descriptor,
574 struct va_format vaf; 578 struct va_format vaf;
575 va_list args; 579 va_list args;
576 int res; 580 int res;
577 char buf[PREFIX_SIZE];
578 581
579 BUG_ON(!descriptor); 582 BUG_ON(!descriptor);
580 BUG_ON(!fmt); 583 BUG_ON(!fmt);
581 584
582 va_start(args, fmt); 585 va_start(args, fmt);
586
583 vaf.fmt = fmt; 587 vaf.fmt = fmt;
584 vaf.va = &args; 588 vaf.va = &args;
585 res = __dev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf); 589
590 if (!dev) {
591 res = printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
592 } else {
593 char buf[PREFIX_SIZE];
594
595 res = dev_printk_emit(7, dev, "%s%s %s: %pV",
596 dynamic_emit_prefix(descriptor, buf),
597 dev_driver_string(dev), dev_name(dev),
598 &vaf);
599 }
600
586 va_end(args); 601 va_end(args);
587 602
588 return res; 603 return res;
@@ -592,20 +607,35 @@ EXPORT_SYMBOL(__dynamic_dev_dbg);
592#ifdef CONFIG_NET 607#ifdef CONFIG_NET
593 608
594int __dynamic_netdev_dbg(struct _ddebug *descriptor, 609int __dynamic_netdev_dbg(struct _ddebug *descriptor,
595 const struct net_device *dev, const char *fmt, ...) 610 const struct net_device *dev, const char *fmt, ...)
596{ 611{
597 struct va_format vaf; 612 struct va_format vaf;
598 va_list args; 613 va_list args;
599 int res; 614 int res;
600 char buf[PREFIX_SIZE];
601 615
602 BUG_ON(!descriptor); 616 BUG_ON(!descriptor);
603 BUG_ON(!fmt); 617 BUG_ON(!fmt);
604 618
605 va_start(args, fmt); 619 va_start(args, fmt);
620
606 vaf.fmt = fmt; 621 vaf.fmt = fmt;
607 vaf.va = &args; 622 vaf.va = &args;
608 res = __netdev_printk(dynamic_emit_prefix(descriptor, buf), dev, &vaf); 623
624 if (dev && dev->dev.parent) {
625 char buf[PREFIX_SIZE];
626
627 res = dev_printk_emit(7, dev->dev.parent,
628 "%s%s %s %s: %pV",
629 dynamic_emit_prefix(descriptor, buf),
630 dev_driver_string(dev->dev.parent),
631 dev_name(dev->dev.parent),
632 netdev_name(dev), &vaf);
633 } else if (dev) {
634 res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf);
635 } else {
636 res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
637 }
638
609 va_end(args); 639 va_end(args);
610 640
611 return res; 641 return res;
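
All three call sites now pass KERN_DEBUG (or loglevel 7 to dev_printk_emit()) explicitly instead of burying it in the prefix buffer, and they rely on the %pV/struct va_format idiom to splice the caller's format string into a single printk(). A small sketch of that idiom; the helper name is illustrative, not part of the patch:

	#include <linux/kernel.h>

	/*
	 * Illustrative helper: wrap the caller's format string and va_list in a
	 * struct va_format so one printk() can emit a locally built prefix
	 * followed by the caller's message, as __dynamic_pr_debug() does above.
	 */
	static int emit_prefixed(const char *prefix, const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;
		int res;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		res = printk(KERN_DEBUG "%s%pV", prefix, &vaf);
		va_end(args);

		return res;
	}
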
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index c785554f9523..ebf3bac460b0 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -62,7 +62,7 @@ void fprop_global_destroy(struct fprop_global *p)
62 */ 62 */
63bool fprop_new_period(struct fprop_global *p, int periods) 63bool fprop_new_period(struct fprop_global *p, int periods)
64{ 64{
65 u64 events; 65 s64 events;
66 unsigned long flags; 66 unsigned long flags;
67 67
68 local_irq_save(flags); 68 local_irq_save(flags);
diff --git a/lib/gcd.c b/lib/gcd.c
index cce4f3cd14b3..3657f129d7b8 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)
9 9
10 if (a < b) 10 if (a < b)
11 swap(a, b); 11 swap(a, b);
12
13 if (!b)
14 return a;
12 while ((r = a % b) != 0) { 15 while ((r = a % b) != 0) {
13 a = b; 16 a = b;
14 b = r; 17 b = r;
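
The new guard makes gcd(x, 0) return x instead of evaluating a % b with b == 0. A standalone sketch of the resulting behaviour; the main() harness and the open-coded swap are illustrative (the kernel uses swap()):

	#include <assert.h>

	static unsigned long gcd(unsigned long a, unsigned long b)
	{
		unsigned long r, t;

		if (a < b) {			/* keep a >= b */
			t = a;
			a = b;
			b = t;
		}
		if (!b)				/* the new guard: gcd(x, 0) == x */
			return a;
		while ((r = a % b) != 0) {	/* Euclid's algorithm, as before */
			a = b;
			b = r;
		}
		return b;
	}

	int main(void)
	{
		assert(gcd(12, 0) == 12);	/* previously tripped a division by zero */
		assert(gcd(12, 18) == 6);
		return 0;
	}
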
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index 8f8d5439e2d9..71fcfcd96410 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -109,7 +109,7 @@ int main(int argc, char** argv)
109 109
110 if (CRC_LE_BITS > 1) { 110 if (CRC_LE_BITS > 1) {
111 crc32init_le(); 111 crc32init_le();
112 printf("static const u32 __cacheline_aligned " 112 printf("static u32 __cacheline_aligned "
113 "crc32table_le[%d][%d] = {", 113 "crc32table_le[%d][%d] = {",
114 LE_TABLE_ROWS, LE_TABLE_SIZE); 114 LE_TABLE_ROWS, LE_TABLE_SIZE);
115 output_table(crc32table_le, LE_TABLE_ROWS, 115 output_table(crc32table_le, LE_TABLE_ROWS,
@@ -119,7 +119,7 @@ int main(int argc, char** argv)
119 119
120 if (CRC_BE_BITS > 1) { 120 if (CRC_BE_BITS > 1) {
121 crc32init_be(); 121 crc32init_be();
122 printf("static const u32 __cacheline_aligned " 122 printf("static u32 __cacheline_aligned "
123 "crc32table_be[%d][%d] = {", 123 "crc32table_be[%d][%d] = {",
124 BE_TABLE_ROWS, BE_TABLE_SIZE); 124 BE_TABLE_ROWS, BE_TABLE_SIZE);
125 output_table(crc32table_be, LE_TABLE_ROWS, 125 output_table(crc32table_be, LE_TABLE_ROWS,
@@ -128,7 +128,7 @@ int main(int argc, char** argv)
128 } 128 }
129 if (CRC_LE_BITS > 1) { 129 if (CRC_LE_BITS > 1) {
130 crc32cinit_le(); 130 crc32cinit_le();
131 printf("static const u32 __cacheline_aligned " 131 printf("static u32 __cacheline_aligned "
132 "crc32ctable_le[%d][%d] = {", 132 "crc32ctable_le[%d][%d] = {",
133 LE_TABLE_ROWS, LE_TABLE_SIZE); 133 LE_TABLE_ROWS, LE_TABLE_SIZE);
134 output_table(crc32ctable_le, LE_TABLE_ROWS, 134 output_table(crc32ctable_le, LE_TABLE_ROWS,
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 6bc04aab6ec7..ca208a92628c 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -152,6 +152,8 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
152 spin_lock_init(&pool->lock); 152 spin_lock_init(&pool->lock);
153 INIT_LIST_HEAD(&pool->chunks); 153 INIT_LIST_HEAD(&pool->chunks);
154 pool->min_alloc_order = min_alloc_order; 154 pool->min_alloc_order = min_alloc_order;
155 pool->algo = gen_pool_first_fit;
156 pool->data = NULL;
155 } 157 }
156 return pool; 158 return pool;
157} 159}
@@ -255,8 +257,9 @@ EXPORT_SYMBOL(gen_pool_destroy);
255 * @size: number of bytes to allocate from the pool 257 * @size: number of bytes to allocate from the pool
256 * 258 *
257 * Allocate the requested number of bytes from the specified pool. 259 * Allocate the requested number of bytes from the specified pool.
258 * Uses a first-fit algorithm. Can not be used in NMI handler on 260 * Uses the pool allocation function (with first-fit algorithm by default).
259 * architectures without NMI-safe cmpxchg implementation. 261 * Can not be used in NMI handler on architectures without
262 * NMI-safe cmpxchg implementation.
260 */ 263 */
261unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) 264unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
262{ 265{
@@ -280,8 +283,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
280 283
281 end_bit = (chunk->end_addr - chunk->start_addr) >> order; 284 end_bit = (chunk->end_addr - chunk->start_addr) >> order;
282retry: 285retry:
283 start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 286 start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
284 start_bit, nbits, 0); 287 pool->data);
285 if (start_bit >= end_bit) 288 if (start_bit >= end_bit)
286 continue; 289 continue;
287 remain = bitmap_set_ll(chunk->bits, start_bit, nbits); 290 remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -400,3 +403,80 @@ size_t gen_pool_size(struct gen_pool *pool)
400 return size; 403 return size;
401} 404}
402EXPORT_SYMBOL_GPL(gen_pool_size); 405EXPORT_SYMBOL_GPL(gen_pool_size);
406
407/**
408 * gen_pool_set_algo - set the allocation algorithm
409 * @pool: pool to change allocation algorithm
410 * @algo: custom algorithm function
411 * @data: additional data used by @algo
412 *
413 * Call @algo for each memory allocation in the pool.
414 * If @algo is NULL use gen_pool_first_fit as default
415 * memory allocation function.
416 */
417void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
418{
419 rcu_read_lock();
420
421 pool->algo = algo;
422 if (!pool->algo)
423 pool->algo = gen_pool_first_fit;
424
425 pool->data = data;
426
427 rcu_read_unlock();
428}
429EXPORT_SYMBOL(gen_pool_set_algo);
430
431/**
432 * gen_pool_first_fit - find the first available region
433 * of memory matching the size requirement (no alignment constraint)
434 * @map: The address to base the search on
435 * @size: The bitmap size in bits
436 * @start: The bitnumber to start searching at
437 * @nr: The number of zeroed bits we're looking for
438 * @data: additional data - unused
439 */
440unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
441 unsigned long start, unsigned int nr, void *data)
442{
443 return bitmap_find_next_zero_area(map, size, start, nr, 0);
444}
445EXPORT_SYMBOL(gen_pool_first_fit);
446
447/**
448 * gen_pool_best_fit - find the best fitting region of memory
449 * matching the size requirement (no alignment constraint)
450 * @map: The address to base the search on
451 * @size: The bitmap size in bits
452 * @start: The bitnumber to start searching at
453 * @nr: The number of zeroed bits we're looking for
454 * @data: additional data - unused
455 *
456 * Iterate over the bitmap to find the smallest free region
457 * from which we can allocate the memory.
458 */
459unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
460 unsigned long start, unsigned int nr, void *data)
461{
462 unsigned long start_bit = size;
463 unsigned long len = size + 1;
464 unsigned long index;
465
466 index = bitmap_find_next_zero_area(map, size, start, nr, 0);
467
468 while (index < size) {
469 int next_bit = find_next_bit(map, size, index + nr);
470 if ((next_bit - index) < len) {
471 len = next_bit - index;
472 start_bit = index;
473 if (len == nr)
474 return start_bit;
475 }
476 index = bitmap_find_next_zero_area(map, size,
477 next_bit + 1, nr, 0);
478 }
479
480 return start_bit;
481}
482EXPORT_SYMBOL(gen_pool_best_fit);
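
A pool created by gen_pool_create() keeps the default gen_pool_first_fit() until a caller installs another allocator with gen_pool_set_algo(). A hedged kernel-style sketch of switching a pool to the new gen_pool_best_fit(); the pool geometry, the example_setup() name and the error handling are illustrative, not from this patch:

	#include <linux/genalloc.h>
	#include <linux/errno.h>

	/* Illustrative only: manage [base, base + size) with best-fit allocation. */
	static int example_setup(unsigned long base, size_t size)
	{
		struct gen_pool *pool;
		unsigned long addr;

		pool = gen_pool_create(5, -1);		/* 32-byte minimum allocation, any node */
		if (!pool)
			return -ENOMEM;
		if (gen_pool_add(pool, base, size, -1)) {
			gen_pool_destroy(pool);
			return -ENOMEM;
		}

		/* prefer the smallest free region that fits instead of the first one found */
		gen_pool_set_algo(pool, gen_pool_best_fit, NULL);

		addr = gen_pool_alloc(pool, 128);	/* dispatched through pool->algo */
		if (addr)
			gen_pool_free(pool, addr, 128);

		gen_pool_destroy(pool);
		return 0;
	}
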
diff --git a/lib/idr.c b/lib/idr.c
index 4046e29c0a99..648239079dd2 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -20,7 +20,7 @@
20 * that id to this code and it returns your pointer. 20 * that id to this code and it returns your pointer.
21 21
22 * You can release ids at any time. When all ids are released, most of 22 * You can release ids at any time. When all ids are released, most of
23 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we 23 * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
24 * don't need to go to the memory "store" during an id allocate, just 24 * don't need to go to the memory "store" during an id allocate, just
25 * so you don't need to be too concerned about locking and conflicts 25 * so you don't need to be too concerned about locking and conflicts
26 * with the slab allocator. 26 * with the slab allocator.
@@ -122,7 +122,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
122 */ 122 */
123int idr_pre_get(struct idr *idp, gfp_t gfp_mask) 123int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
124{ 124{
125 while (idp->id_free_cnt < IDR_FREE_MAX) { 125 while (idp->id_free_cnt < MAX_IDR_FREE) {
126 struct idr_layer *new; 126 struct idr_layer *new;
127 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); 127 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
128 if (new == NULL) 128 if (new == NULL)
@@ -179,7 +179,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
179 sh = IDR_BITS*l; 179 sh = IDR_BITS*l;
180 id = ((id >> sh) ^ n ^ m) << sh; 180 id = ((id >> sh) ^ n ^ m) << sh;
181 } 181 }
182 if ((id >= MAX_ID_BIT) || (id < 0)) 182 if ((id >= MAX_IDR_BIT) || (id < 0))
183 return IDR_NOMORE_SPACE; 183 return IDR_NOMORE_SPACE;
184 if (l == 0) 184 if (l == 0)
185 break; 185 break;
@@ -223,7 +223,7 @@ build_up:
223 * Add a new layer to the top of the tree if the requested 223 * Add a new layer to the top of the tree if the requested
224 * id is larger than the currently allocated space. 224 * id is larger than the currently allocated space.
225 */ 225 */
226 while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { 226 while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
227 layers++; 227 layers++;
228 if (!p->count) { 228 if (!p->count) {
229 /* special case: if the tree is currently empty, 229 /* special case: if the tree is currently empty,
@@ -265,7 +265,7 @@ build_up:
265 265
266static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) 266static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
267{ 267{
268 struct idr_layer *pa[MAX_LEVEL]; 268 struct idr_layer *pa[MAX_IDR_LEVEL];
269 int id; 269 int id;
270 270
271 id = idr_get_empty_slot(idp, starting_id, pa); 271 id = idr_get_empty_slot(idp, starting_id, pa);
@@ -357,7 +357,7 @@ static void idr_remove_warning(int id)
357static void sub_remove(struct idr *idp, int shift, int id) 357static void sub_remove(struct idr *idp, int shift, int id)
358{ 358{
359 struct idr_layer *p = idp->top; 359 struct idr_layer *p = idp->top;
360 struct idr_layer **pa[MAX_LEVEL]; 360 struct idr_layer **pa[MAX_IDR_LEVEL];
361 struct idr_layer ***paa = &pa[0]; 361 struct idr_layer ***paa = &pa[0];
362 struct idr_layer *to_free; 362 struct idr_layer *to_free;
363 int n; 363 int n;
@@ -402,7 +402,7 @@ void idr_remove(struct idr *idp, int id)
402 struct idr_layer *to_free; 402 struct idr_layer *to_free;
403 403
404 /* Mask off upper bits we don't use for the search. */ 404 /* Mask off upper bits we don't use for the search. */
405 id &= MAX_ID_MASK; 405 id &= MAX_IDR_MASK;
406 406
407 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); 407 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
408 if (idp->top && idp->top->count == 1 && (idp->layers > 1) && 408 if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
@@ -420,7 +420,7 @@ void idr_remove(struct idr *idp, int id)
420 to_free->bitmap = to_free->count = 0; 420 to_free->bitmap = to_free->count = 0;
421 free_layer(to_free); 421 free_layer(to_free);
422 } 422 }
423 while (idp->id_free_cnt >= IDR_FREE_MAX) { 423 while (idp->id_free_cnt >= MAX_IDR_FREE) {
424 p = get_from_free_list(idp); 424 p = get_from_free_list(idp);
425 /* 425 /*
426 * Note: we don't call the rcu callback here, since the only 426 * Note: we don't call the rcu callback here, since the only
@@ -451,7 +451,7 @@ void idr_remove_all(struct idr *idp)
451 int n, id, max; 451 int n, id, max;
452 int bt_mask; 452 int bt_mask;
453 struct idr_layer *p; 453 struct idr_layer *p;
454 struct idr_layer *pa[MAX_LEVEL]; 454 struct idr_layer *pa[MAX_IDR_LEVEL];
455 struct idr_layer **paa = &pa[0]; 455 struct idr_layer **paa = &pa[0];
456 456
457 n = idp->layers * IDR_BITS; 457 n = idp->layers * IDR_BITS;
@@ -517,7 +517,7 @@ void *idr_find(struct idr *idp, int id)
517 n = (p->layer+1) * IDR_BITS; 517 n = (p->layer+1) * IDR_BITS;
518 518
519 /* Mask off upper bits we don't use for the search. */ 519 /* Mask off upper bits we don't use for the search. */
520 id &= MAX_ID_MASK; 520 id &= MAX_IDR_MASK;
521 521
522 if (id >= (1 << n)) 522 if (id >= (1 << n))
523 return NULL; 523 return NULL;
@@ -555,7 +555,7 @@ int idr_for_each(struct idr *idp,
555{ 555{
556 int n, id, max, error = 0; 556 int n, id, max, error = 0;
557 struct idr_layer *p; 557 struct idr_layer *p;
558 struct idr_layer *pa[MAX_LEVEL]; 558 struct idr_layer *pa[MAX_IDR_LEVEL];
559 struct idr_layer **paa = &pa[0]; 559 struct idr_layer **paa = &pa[0];
560 560
561 n = idp->layers * IDR_BITS; 561 n = idp->layers * IDR_BITS;
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(idr_for_each);
601 */ 601 */
602void *idr_get_next(struct idr *idp, int *nextidp) 602void *idr_get_next(struct idr *idp, int *nextidp)
603{ 603{
604 struct idr_layer *p, *pa[MAX_LEVEL]; 604 struct idr_layer *p, *pa[MAX_IDR_LEVEL];
605 struct idr_layer **paa = &pa[0]; 605 struct idr_layer **paa = &pa[0];
606 int id = *nextidp; 606 int id = *nextidp;
607 int n, max; 607 int n, max;
@@ -659,7 +659,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
659 659
660 n = (p->layer+1) * IDR_BITS; 660 n = (p->layer+1) * IDR_BITS;
661 661
662 id &= MAX_ID_MASK; 662 id &= MAX_IDR_MASK;
663 663
664 if (id >= (1 << n)) 664 if (id >= (1 << n))
665 return ERR_PTR(-EINVAL); 665 return ERR_PTR(-EINVAL);
@@ -780,7 +780,7 @@ EXPORT_SYMBOL(ida_pre_get);
780 */ 780 */
781int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) 781int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
782{ 782{
783 struct idr_layer *pa[MAX_LEVEL]; 783 struct idr_layer *pa[MAX_IDR_LEVEL];
784 struct ida_bitmap *bitmap; 784 struct ida_bitmap *bitmap;
785 unsigned long flags; 785 unsigned long flags;
786 int idr_id = starting_id / IDA_BITMAP_BITS; 786 int idr_id = starting_id / IDA_BITMAP_BITS;
@@ -793,7 +793,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
793 if (t < 0) 793 if (t < 0)
794 return _idr_rc_to_errno(t); 794 return _idr_rc_to_errno(t);
795 795
796 if (t * IDA_BITMAP_BITS >= MAX_ID_BIT) 796 if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
797 return -ENOSPC; 797 return -ENOSPC;
798 798
799 if (t != idr_id) 799 if (t != idr_id)
@@ -827,7 +827,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
827 } 827 }
828 828
829 id = idr_id * IDA_BITMAP_BITS + t; 829 id = idr_id * IDA_BITMAP_BITS + t;
830 if (id >= MAX_ID_BIT) 830 if (id >= MAX_IDR_BIT)
831 return -ENOSPC; 831 return -ENOSPC;
832 832
833 __set_bit(t, bitmap->bitmap); 833 __set_bit(t, bitmap->bitmap);
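
The idr.c hunks are pure renames (MAX_ID_* becomes MAX_IDR_*, MAX_LEVEL becomes MAX_IDR_LEVEL, IDR_FREE_MAX becomes MAX_IDR_FREE); the allocation protocol described in the top-of-file comment is unchanged. A hedged sketch of that era's two-step caller pattern (example_idr and example_store() are illustrative names):

	#include <linux/idr.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	static DEFINE_IDR(example_idr);

	/* Illustrative caller of the 3.7-era two-step API implemented in this file. */
	static int example_store(void *ptr, int *id)
	{
		int ret;

		do {
			if (!idr_pre_get(&example_idr, GFP_KERNEL))	/* preload free layers */
				return -ENOMEM;
			ret = idr_get_new(&example_idr, ptr, id);	/* -EAGAIN: preload again */
		} while (ret == -EAGAIN);

		return ret;	/* 0 on success; *id can later be passed to idr_find()/idr_remove() */
	}
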
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
new file mode 100644
index 000000000000..e6eb406f2d65
--- /dev/null
+++ b/lib/interval_tree.c
@@ -0,0 +1,10 @@
1#include <linux/init.h>
2#include <linux/interval_tree.h>
3#include <linux/interval_tree_generic.h>
4
5#define START(node) ((node)->start)
6#define LAST(node) ((node)->last)
7
8INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
9 unsigned long, __subtree_last,
10 START, LAST,, interval_tree)
diff --git a/lib/interval_tree_test_main.c b/lib/interval_tree_test_main.c
new file mode 100644
index 000000000000..b25903987f7a
--- /dev/null
+++ b/lib/interval_tree_test_main.c
@@ -0,0 +1,105 @@
1#include <linux/module.h>
2#include <linux/interval_tree.h>
3#include <linux/random.h>
4#include <asm/timex.h>
5
6#define NODES 100
7#define PERF_LOOPS 100000
8#define SEARCHES 100
9#define SEARCH_LOOPS 10000
10
11static struct rb_root root = RB_ROOT;
12static struct interval_tree_node nodes[NODES];
13static u32 queries[SEARCHES];
14
15static struct rnd_state rnd;
16
17static inline unsigned long
18search(unsigned long query, struct rb_root *root)
19{
20 struct interval_tree_node *node;
21 unsigned long results = 0;
22
23 for (node = interval_tree_iter_first(root, query, query); node;
24 node = interval_tree_iter_next(node, query, query))
25 results++;
26 return results;
27}
28
29static void init(void)
30{
31 int i;
32 for (i = 0; i < NODES; i++) {
33 u32 a = prandom32(&rnd), b = prandom32(&rnd);
34 if (a <= b) {
35 nodes[i].start = a;
36 nodes[i].last = b;
37 } else {
38 nodes[i].start = b;
39 nodes[i].last = a;
40 }
41 }
42 for (i = 0; i < SEARCHES; i++)
43 queries[i] = prandom32(&rnd);
44}
45
46static int interval_tree_test_init(void)
47{
48 int i, j;
49 unsigned long results;
50 cycles_t time1, time2, time;
51
52 printk(KERN_ALERT "interval tree insert/remove");
53
54 prandom32_seed(&rnd, 3141592653589793238ULL);
55 init();
56
57 time1 = get_cycles();
58
59 for (i = 0; i < PERF_LOOPS; i++) {
60 for (j = 0; j < NODES; j++)
61 interval_tree_insert(nodes + j, &root);
62 for (j = 0; j < NODES; j++)
63 interval_tree_remove(nodes + j, &root);
64 }
65
66 time2 = get_cycles();
67 time = time2 - time1;
68
69 time = div_u64(time, PERF_LOOPS);
70 printk(" -> %llu cycles\n", (unsigned long long)time);
71
72 printk(KERN_ALERT "interval tree search");
73
74 for (j = 0; j < NODES; j++)
75 interval_tree_insert(nodes + j, &root);
76
77 time1 = get_cycles();
78
79 results = 0;
80 for (i = 0; i < SEARCH_LOOPS; i++)
81 for (j = 0; j < SEARCHES; j++)
82 results += search(queries[j], &root);
83
84 time2 = get_cycles();
85 time = time2 - time1;
86
87 time = div_u64(time, SEARCH_LOOPS);
88 results = div_u64(results, SEARCH_LOOPS);
89 printk(" -> %llu cycles (%lu results)\n",
90 (unsigned long long)time, results);
91
92 return -EAGAIN; /* Fail will directly unload the module */
93}
94
95static void interval_tree_test_exit(void)
96{
97 printk(KERN_ALERT "test exit\n");
98}
99
100module_init(interval_tree_test_init)
101module_exit(interval_tree_test_exit)
102
103MODULE_LICENSE("GPL");
104MODULE_AUTHOR("Michel Lespinasse");
105MODULE_DESCRIPTION("Interval Tree test");
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 0401d2916d9f..52e5abbc41db 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -375,14 +375,14 @@ static int uevent_net_init(struct net *net)
375 struct uevent_sock *ue_sk; 375 struct uevent_sock *ue_sk;
376 struct netlink_kernel_cfg cfg = { 376 struct netlink_kernel_cfg cfg = {
377 .groups = 1, 377 .groups = 1,
378 .flags = NL_CFG_F_NONROOT_RECV,
378 }; 379 };
379 380
380 ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL); 381 ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
381 if (!ue_sk) 382 if (!ue_sk)
382 return -ENOMEM; 383 return -ENOMEM;
383 384
384 ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, 385 ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
385 THIS_MODULE, &cfg);
386 if (!ue_sk->sk) { 386 if (!ue_sk->sk) {
387 printk(KERN_ERR 387 printk(KERN_ERR
388 "kobject_uevent: unable to create netlink socket!\n"); 388 "kobject_uevent: unable to create netlink socket!\n");
@@ -422,7 +422,6 @@ static struct pernet_operations uevent_net_ops = {
422 422
423static int __init kobject_uevent_init(void) 423static int __init kobject_uevent_init(void)
424{ 424{
425 netlink_set_nonroot(NETLINK_KOBJECT_UEVENT, NL_NONROOT_RECV);
426 return register_pernet_subsys(&uevent_net_ops); 425 return register_pernet_subsys(&uevent_net_ops);
427} 426}
428 427
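
The per-socket NL_CFG_F_NONROOT_RECV flag replaces the global netlink_set_nonroot() call, and netlink_kernel_create() no longer takes a THIS_MODULE argument (presumably supplied by the wrapper itself). A hedged sketch of the resulting creation pattern; the helper name is illustrative and error handling is trimmed:

	#include <linux/netlink.h>
	#include <net/net_namespace.h>
	#include <net/sock.h>

	static struct sock *example_uevent_sock(struct net *net)
	{
		struct netlink_kernel_cfg cfg = {
			.groups	= 1,
			.flags	= NL_CFG_F_NONROOT_RECV,  /* unprivileged listeners may join groups */
		};

		/* no THIS_MODULE argument any more */
		return netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
	}
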
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 4226dfeb5178..18eca7809b08 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -22,6 +22,10 @@ static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
22 [NLA_U64] = sizeof(u64), 22 [NLA_U64] = sizeof(u64),
23 [NLA_MSECS] = sizeof(u64), 23 [NLA_MSECS] = sizeof(u64),
24 [NLA_NESTED] = NLA_HDRLEN, 24 [NLA_NESTED] = NLA_HDRLEN,
25 [NLA_S8] = sizeof(s8),
26 [NLA_S16] = sizeof(s16),
27 [NLA_S32] = sizeof(s32),
28 [NLA_S64] = sizeof(s64),
25}; 29};
26 30
27static int validate_nla(const struct nlattr *nla, int maxtype, 31static int validate_nla(const struct nlattr *nla, int maxtype,
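
The new table entries give validate_nla() minimum lengths for the signed attribute types. A hedged sketch of how a policy might use one of them, assuming the matching nla_get_s32() accessor from the same netlink series; the attribute names are made up:

	#include <net/netlink.h>

	enum {
		EXAMPLE_ATTR_UNSPEC,
		EXAMPLE_ATTR_TEMPERATURE,	/* signed value, may be negative */
		__EXAMPLE_ATTR_MAX,
	};
	#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

	static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
		[EXAMPLE_ATTR_TEMPERATURE] = { .type = NLA_S32 },
	};

	/* attrs[] as filled by nla_parse(..., example_policy): validate_nla() has
	 * already checked the payload is at least sizeof(s32) thanks to the new
	 * nla_attr_minlen entry. */
	static s32 example_read_temperature(struct nlattr **attrs)
	{
		return nla_get_s32(attrs[EXAMPLE_ATTR_TEMPERATURE]);
	}
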
diff --git a/lib/parser.c b/lib/parser.c
index c43410084838..52cfa69f73df 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -122,13 +122,14 @@ int match_token(char *s, const match_table_t table, substring_t args[])
122 * 122 *
123 * Description: Given a &substring_t and a base, attempts to parse the substring 123 * Description: Given a &substring_t and a base, attempts to parse the substring
124 * as a number in that base. On success, sets @result to the integer represented 124 * as a number in that base. On success, sets @result to the integer represented
125 * by the string and returns 0. Returns either -ENOMEM or -EINVAL on failure. 125 * by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
126 */ 126 */
127static int match_number(substring_t *s, int *result, int base) 127static int match_number(substring_t *s, int *result, int base)
128{ 128{
129 char *endp; 129 char *endp;
130 char *buf; 130 char *buf;
131 int ret; 131 int ret;
132 long val;
132 size_t len = s->to - s->from; 133 size_t len = s->to - s->from;
133 134
134 buf = kmalloc(len + 1, GFP_KERNEL); 135 buf = kmalloc(len + 1, GFP_KERNEL);
@@ -136,10 +137,15 @@ static int match_number(substring_t *s, int *result, int base)
136 return -ENOMEM; 137 return -ENOMEM;
137 memcpy(buf, s->from, len); 138 memcpy(buf, s->from, len);
138 buf[len] = '\0'; 139 buf[len] = '\0';
139 *result = simple_strtol(buf, &endp, base); 140
140 ret = 0; 141 ret = 0;
142 val = simple_strtol(buf, &endp, base);
141 if (endp == buf) 143 if (endp == buf)
142 ret = -EINVAL; 144 ret = -EINVAL;
145 else if (val < (long)INT_MIN || val > (long)INT_MAX)
146 ret = -ERANGE;
147 else
148 *result = (int) val;
143 kfree(buf); 149 kfree(buf);
144 return ret; 150 return ret;
145} 151}
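
match_number() now parses into a long and rejects values that do not fit in an int with -ERANGE instead of silently truncating. A userspace sketch of the same boundary check, with strtol() standing in for simple_strtol(); the sample inputs are illustrative, and the overflow assert assumes long is wider than int (LP64), the same situation in which the kernel check catches anything:

	#include <assert.h>
	#include <errno.h>
	#include <limits.h>
	#include <stdlib.h>

	static int parse_int(const char *buf, int base, int *result)
	{
		char *endp;
		long val;

		val = strtol(buf, &endp, base);
		if (endp == buf)
			return -EINVAL;				/* no digits consumed */
		if (val < (long)INT_MIN || val > (long)INT_MAX)
			return -ERANGE;				/* fits in long but not in int */
		*result = (int)val;
		return 0;
	}

	int main(void)
	{
		int v;

		assert(parse_int("42", 10, &v) == 0 && v == 42);
		assert(parse_int("2147483648", 10, &v) == -ERANGE);	/* INT_MAX + 1 */
		assert(parse_int("x", 10, &v) == -EINVAL);
		return 0;
	}
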
diff --git a/lib/plist.c b/lib/plist.c
index 6ab0e521c48b..1ebc95f7a46f 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -175,7 +175,7 @@ static int __init plist_test(void)
175 int nr_expect = 0, i, loop; 175 int nr_expect = 0, i, loop;
176 unsigned int r = local_clock(); 176 unsigned int r = local_clock();
177 177
178 printk(KERN_INFO "start plist test\n"); 178 pr_debug("start plist test\n");
179 plist_head_init(&test_head); 179 plist_head_init(&test_head);
180 for (i = 0; i < ARRAY_SIZE(test_node); i++) 180 for (i = 0; i < ARRAY_SIZE(test_node); i++)
181 plist_node_init(test_node + i, 0); 181 plist_node_init(test_node + i, 0);
@@ -203,7 +203,7 @@ static int __init plist_test(void)
203 plist_test_check(nr_expect); 203 plist_test_check(nr_expect);
204 } 204 }
205 205
206 printk(KERN_INFO "end plist test\n"); 206 pr_debug("end plist test\n");
207 return 0; 207 return 0;
208} 208}
209 209
diff --git a/lib/prio_tree.c b/lib/prio_tree.c
deleted file mode 100644
index 8d443af03b4c..000000000000
--- a/lib/prio_tree.c
+++ /dev/null
@@ -1,466 +0,0 @@
1/*
2 * lib/prio_tree.c - priority search tree
3 *
4 * Copyright (C) 2004, Rajesh Venkatasubramanian <vrajesh@umich.edu>
5 *
6 * This file is released under the GPL v2.
7 *
8 * Based on the radix priority search tree proposed by Edward M. McCreight
9 * SIAM Journal of Computing, vol. 14, no.2, pages 257-276, May 1985
10 *
11 * 02Feb2004 Initial version
12 */
13
14#include <linux/init.h>
15#include <linux/mm.h>
16#include <linux/prio_tree.h>
17
18/*
19 * A clever mix of heap and radix trees forms a radix priority search tree (PST)
20 * which is useful for storing intervals, e.g, we can consider a vma as a closed
21 * interval of file pages [offset_begin, offset_end], and store all vmas that
22 * map a file in a PST. Then, using the PST, we can answer a stabbing query,
23 * i.e., selecting a set of stored intervals (vmas) that overlap with (map) a
24 * given input interval X (a set of consecutive file pages), in "O(log n + m)"
25 * time where 'log n' is the height of the PST, and 'm' is the number of stored
26 * intervals (vmas) that overlap (map) with the input interval X (the set of
27 * consecutive file pages).
28 *
29 * In our implementation, we store closed intervals of the form [radix_index,
30 * heap_index]. We assume that always radix_index <= heap_index. McCreight's PST
31 * is designed for storing intervals with unique radix indices, i.e., each
32 * interval have different radix_index. However, this limitation can be easily
33 * overcome by using the size, i.e., heap_index - radix_index, as part of the
34 * index, so we index the tree using [(radix_index,size), heap_index].
35 *
36 * When the above-mentioned indexing scheme is used, theoretically, in a 32 bit
37 * machine, the maximum height of a PST can be 64. We can use a balanced version
38 * of the priority search tree to optimize the tree height, but the balanced
39 * tree proposed by McCreight is too complex and memory-hungry for our purpose.
40 */
41
42/*
43 * The following macros are used for implementing prio_tree for i_mmap
44 */
45
46#define RADIX_INDEX(vma) ((vma)->vm_pgoff)
47#define VMA_SIZE(vma) (((vma)->vm_end - (vma)->vm_start) >> PAGE_SHIFT)
48/* avoid overflow */
49#define HEAP_INDEX(vma) ((vma)->vm_pgoff + (VMA_SIZE(vma) - 1))
50
51
52static void get_index(const struct prio_tree_root *root,
53 const struct prio_tree_node *node,
54 unsigned long *radix, unsigned long *heap)
55{
56 if (root->raw) {
57 struct vm_area_struct *vma = prio_tree_entry(
58 node, struct vm_area_struct, shared.prio_tree_node);
59
60 *radix = RADIX_INDEX(vma);
61 *heap = HEAP_INDEX(vma);
62 }
63 else {
64 *radix = node->start;
65 *heap = node->last;
66 }
67}
68
69static unsigned long index_bits_to_maxindex[BITS_PER_LONG];
70
71void __init prio_tree_init(void)
72{
73 unsigned int i;
74
75 for (i = 0; i < ARRAY_SIZE(index_bits_to_maxindex) - 1; i++)
76 index_bits_to_maxindex[i] = (1UL << (i + 1)) - 1;
77 index_bits_to_maxindex[ARRAY_SIZE(index_bits_to_maxindex) - 1] = ~0UL;
78}
79
80/*
81 * Maximum heap_index that can be stored in a PST with index_bits bits
82 */
83static inline unsigned long prio_tree_maxindex(unsigned int bits)
84{
85 return index_bits_to_maxindex[bits - 1];
86}
87
88static void prio_set_parent(struct prio_tree_node *parent,
89 struct prio_tree_node *child, bool left)
90{
91 if (left)
92 parent->left = child;
93 else
94 parent->right = child;
95
96 child->parent = parent;
97}
98
99/*
100 * Extend a priority search tree so that it can store a node with heap_index
101 * max_heap_index. In the worst case, this algorithm takes O((log n)^2).
102 * However, this function is used rarely and the common case performance is
103 * not bad.
104 */
105static struct prio_tree_node *prio_tree_expand(struct prio_tree_root *root,
106 struct prio_tree_node *node, unsigned long max_heap_index)
107{
108 struct prio_tree_node *prev;
109
110 if (max_heap_index > prio_tree_maxindex(root->index_bits))
111 root->index_bits++;
112
113 prev = node;
114 INIT_PRIO_TREE_NODE(node);
115
116 while (max_heap_index > prio_tree_maxindex(root->index_bits)) {
117 struct prio_tree_node *tmp = root->prio_tree_node;
118
119 root->index_bits++;
120
121 if (prio_tree_empty(root))
122 continue;
123
124 prio_tree_remove(root, root->prio_tree_node);
125 INIT_PRIO_TREE_NODE(tmp);
126
127 prio_set_parent(prev, tmp, true);
128 prev = tmp;
129 }
130
131 if (!prio_tree_empty(root))
132 prio_set_parent(prev, root->prio_tree_node, true);
133
134 root->prio_tree_node = node;
135 return node;
136}
137
138/*
139 * Replace a prio_tree_node with a new node and return the old node
140 */
141struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root,
142 struct prio_tree_node *old, struct prio_tree_node *node)
143{
144 INIT_PRIO_TREE_NODE(node);
145
146 if (prio_tree_root(old)) {
147 BUG_ON(root->prio_tree_node != old);
148 /*
149 * We can reduce root->index_bits here. However, it is complex
150 * and does not help much to improve performance (IMO).
151 */
152 root->prio_tree_node = node;
153 } else
154 prio_set_parent(old->parent, node, old->parent->left == old);
155
156 if (!prio_tree_left_empty(old))
157 prio_set_parent(node, old->left, true);
158
159 if (!prio_tree_right_empty(old))
160 prio_set_parent(node, old->right, false);
161
162 return old;
163}
164
165/*
166 * Insert a prio_tree_node @node into a radix priority search tree @root. The
167 * algorithm typically takes O(log n) time where 'log n' is the number of bits
168 * required to represent the maximum heap_index. In the worst case, the algo
169 * can take O((log n)^2) - check prio_tree_expand.
170 *
171 * If a prior node with same radix_index and heap_index is already found in
172 * the tree, then returns the address of the prior node. Otherwise, inserts
173 * @node into the tree and returns @node.
174 */
175struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
176 struct prio_tree_node *node)
177{
178 struct prio_tree_node *cur, *res = node;
179 unsigned long radix_index, heap_index;
180 unsigned long r_index, h_index, index, mask;
181 int size_flag = 0;
182
183 get_index(root, node, &radix_index, &heap_index);
184
185 if (prio_tree_empty(root) ||
186 heap_index > prio_tree_maxindex(root->index_bits))
187 return prio_tree_expand(root, node, heap_index);
188
189 cur = root->prio_tree_node;
190 mask = 1UL << (root->index_bits - 1);
191
192 while (mask) {
193 get_index(root, cur, &r_index, &h_index);
194
195 if (r_index == radix_index && h_index == heap_index)
196 return cur;
197
198 if (h_index < heap_index ||
199 (h_index == heap_index && r_index > radix_index)) {
200 struct prio_tree_node *tmp = node;
201 node = prio_tree_replace(root, cur, node);
202 cur = tmp;
203 /* swap indices */
204 index = r_index;
205 r_index = radix_index;
206 radix_index = index;
207 index = h_index;
208 h_index = heap_index;
209 heap_index = index;
210 }
211
212 if (size_flag)
213 index = heap_index - radix_index;
214 else
215 index = radix_index;
216
217 if (index & mask) {
218 if (prio_tree_right_empty(cur)) {
219 INIT_PRIO_TREE_NODE(node);
220 prio_set_parent(cur, node, false);
221 return res;
222 } else
223 cur = cur->right;
224 } else {
225 if (prio_tree_left_empty(cur)) {
226 INIT_PRIO_TREE_NODE(node);
227 prio_set_parent(cur, node, true);
228 return res;
229 } else
230 cur = cur->left;
231 }
232
233 mask >>= 1;
234
235 if (!mask) {
236 mask = 1UL << (BITS_PER_LONG - 1);
237 size_flag = 1;
238 }
239 }
240 /* Should not reach here */
241 BUG();
242 return NULL;
243}
244
245/*
246 * Remove a prio_tree_node @node from a radix priority search tree @root. The
247 * algorithm takes O(log n) time where 'log n' is the number of bits required
248 * to represent the maximum heap_index.
249 */
250void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node)
251{
252 struct prio_tree_node *cur;
253 unsigned long r_index, h_index_right, h_index_left;
254
255 cur = node;
256
257 while (!prio_tree_left_empty(cur) || !prio_tree_right_empty(cur)) {
258 if (!prio_tree_left_empty(cur))
259 get_index(root, cur->left, &r_index, &h_index_left);
260 else {
261 cur = cur->right;
262 continue;
263 }
264
265 if (!prio_tree_right_empty(cur))
266 get_index(root, cur->right, &r_index, &h_index_right);
267 else {
268 cur = cur->left;
269 continue;
270 }
271
272 /* both h_index_left and h_index_right cannot be 0 */
273 if (h_index_left >= h_index_right)
274 cur = cur->left;
275 else
276 cur = cur->right;
277 }
278
279 if (prio_tree_root(cur)) {
280 BUG_ON(root->prio_tree_node != cur);
281 __INIT_PRIO_TREE_ROOT(root, root->raw);
282 return;
283 }
284
285 if (cur->parent->right == cur)
286 cur->parent->right = cur->parent;
287 else
288 cur->parent->left = cur->parent;
289
290 while (cur != node)
291 cur = prio_tree_replace(root, cur->parent, cur);
292}
293
294static void iter_walk_down(struct prio_tree_iter *iter)
295{
296 iter->mask >>= 1;
297 if (iter->mask) {
298 if (iter->size_level)
299 iter->size_level++;
300 return;
301 }
302
303 if (iter->size_level) {
304 BUG_ON(!prio_tree_left_empty(iter->cur));
305 BUG_ON(!prio_tree_right_empty(iter->cur));
306 iter->size_level++;
307 iter->mask = ULONG_MAX;
308 } else {
309 iter->size_level = 1;
310 iter->mask = 1UL << (BITS_PER_LONG - 1);
311 }
312}
313
314static void iter_walk_up(struct prio_tree_iter *iter)
315{
316 if (iter->mask == ULONG_MAX)
317 iter->mask = 1UL;
318 else if (iter->size_level == 1)
319 iter->mask = 1UL;
320 else
321 iter->mask <<= 1;
322 if (iter->size_level)
323 iter->size_level--;
324 if (!iter->size_level && (iter->value & iter->mask))
325 iter->value ^= iter->mask;
326}
327
328/*
329 * Following functions help to enumerate all prio_tree_nodes in the tree that
330 * overlap with the input interval X [radix_index, heap_index]. The enumeration
331 * takes O(log n + m) time where 'log n' is the height of the tree (which is
332 * proportional to # of bits required to represent the maximum heap_index) and
333 * 'm' is the number of prio_tree_nodes that overlap the interval X.
334 */
335
336static struct prio_tree_node *prio_tree_left(struct prio_tree_iter *iter,
337 unsigned long *r_index, unsigned long *h_index)
338{
339 if (prio_tree_left_empty(iter->cur))
340 return NULL;
341
342 get_index(iter->root, iter->cur->left, r_index, h_index);
343
344 if (iter->r_index <= *h_index) {
345 iter->cur = iter->cur->left;
346 iter_walk_down(iter);
347 return iter->cur;
348 }
349
350 return NULL;
351}
352
353static struct prio_tree_node *prio_tree_right(struct prio_tree_iter *iter,
354 unsigned long *r_index, unsigned long *h_index)
355{
356 unsigned long value;
357
358 if (prio_tree_right_empty(iter->cur))
359 return NULL;
360
361 if (iter->size_level)
362 value = iter->value;
363 else
364 value = iter->value | iter->mask;
365
366 if (iter->h_index < value)
367 return NULL;
368
369 get_index(iter->root, iter->cur->right, r_index, h_index);
370
371 if (iter->r_index <= *h_index) {
372 iter->cur = iter->cur->right;
373 iter_walk_down(iter);
374 return iter->cur;
375 }
376
377 return NULL;
378}
379
380static struct prio_tree_node *prio_tree_parent(struct prio_tree_iter *iter)
381{
382 iter->cur = iter->cur->parent;
383 iter_walk_up(iter);
384 return iter->cur;
385}
386
387static inline int overlap(struct prio_tree_iter *iter,
388 unsigned long r_index, unsigned long h_index)
389{
390 return iter->h_index >= r_index && iter->r_index <= h_index;
391}
392
393/*
394 * prio_tree_first:
395 *
396 * Get the first prio_tree_node that overlaps with the interval [radix_index,
397 * heap_index]. Note that always radix_index <= heap_index. We do a pre-order
398 * traversal of the tree.
399 */
400static struct prio_tree_node *prio_tree_first(struct prio_tree_iter *iter)
401{
402 struct prio_tree_root *root;
403 unsigned long r_index, h_index;
404
405 INIT_PRIO_TREE_ITER(iter);
406
407 root = iter->root;
408 if (prio_tree_empty(root))
409 return NULL;
410
411 get_index(root, root->prio_tree_node, &r_index, &h_index);
412
413 if (iter->r_index > h_index)
414 return NULL;
415
416 iter->mask = 1UL << (root->index_bits - 1);
417 iter->cur = root->prio_tree_node;
418
419 while (1) {
420 if (overlap(iter, r_index, h_index))
421 return iter->cur;
422
423 if (prio_tree_left(iter, &r_index, &h_index))
424 continue;
425
426 if (prio_tree_right(iter, &r_index, &h_index))
427 continue;
428
429 break;
430 }
431 return NULL;
432}
433
434/*
435 * prio_tree_next:
436 *
437 * Get the next prio_tree_node that overlaps with the input interval in iter
438 */
439struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter)
440{
441 unsigned long r_index, h_index;
442
443 if (iter->cur == NULL)
444 return prio_tree_first(iter);
445
446repeat:
447 while (prio_tree_left(iter, &r_index, &h_index))
448 if (overlap(iter, r_index, h_index))
449 return iter->cur;
450
451 while (!prio_tree_right(iter, &r_index, &h_index)) {
452 while (!prio_tree_root(iter->cur) &&
453 iter->cur->parent->right == iter->cur)
454 prio_tree_parent(iter);
455
456 if (prio_tree_root(iter->cur))
457 return NULL;
458
459 prio_tree_parent(iter);
460 }
461
462 if (overlap(iter, r_index, h_index))
463 return iter->cur;
464
465 goto repeat;
466}
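
The priority search tree's users move to the interval tree built from the augmented rbtree code that follows; the public rbtree insertion API is unchanged by that rewrite. A hedged sketch of the standard caller-side pattern that the rewritten rb_insert_color() continues to serve (example_node and example_insert() are illustrative names):

	#include <linux/rbtree.h>

	struct example_node {
		struct rb_node rb;
		unsigned long key;
	};

	/* Standard caller-side insertion: walk down to the insertion point, link
	 * the new node there, then let rb_insert_color() restore the red-black
	 * invariants described in the comment block below. */
	static void example_insert(struct rb_root *root, struct example_node *new)
	{
		struct rb_node **link = &root->rb_node, *parent = NULL;

		while (*link) {
			struct example_node *cur = rb_entry(*link, struct example_node, rb);

			parent = *link;
			if (new->key < cur->key)
				link = &(*link)->rb_left;
			else
				link = &(*link)->rb_right;
		}

		rb_link_node(&new->rb, parent, link);
		rb_insert_color(&new->rb, root);
	}
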
diff --git a/lib/rbtree.c b/lib/rbtree.c
index d4175565dc2c..4f56a11d67fa 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -2,7 +2,8 @@
2 Red Black Trees 2 Red Black Trees
3 (C) 1999 Andrea Arcangeli <andrea@suse.de> 3 (C) 1999 Andrea Arcangeli <andrea@suse.de>
4 (C) 2002 David Woodhouse <dwmw2@infradead.org> 4 (C) 2002 David Woodhouse <dwmw2@infradead.org>
5 5 (C) 2012 Michel Lespinasse <walken@google.com>
6
6 This program is free software; you can redistribute it and/or modify 7 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 8 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or 9 the Free Software Foundation; either version 2 of the License, or
@@ -20,339 +21,382 @@
20 linux/lib/rbtree.c 21 linux/lib/rbtree.c
21*/ 22*/
22 23
23#include <linux/rbtree.h> 24#include <linux/rbtree_augmented.h>
24#include <linux/export.h> 25#include <linux/export.h>
25 26
26static void __rb_rotate_left(struct rb_node *node, struct rb_root *root) 27/*
27{ 28 * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
28 struct rb_node *right = node->rb_right; 29 *
29 struct rb_node *parent = rb_parent(node); 30 * 1) A node is either red or black
30 31 * 2) The root is black
31 if ((node->rb_right = right->rb_left)) 32 * 3) All leaves (NULL) are black
32 rb_set_parent(right->rb_left, node); 33 * 4) Both children of every red node are black
33 right->rb_left = node; 34 * 5) Every simple path from root to leaves contains the same number
34 35 * of black nodes.
35 rb_set_parent(right, parent); 36 *
37 * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
38 * consecutive red nodes in a path and every red node is therefore followed by
39 * a black. So if B is the number of black nodes on every simple path (as per
40 * 5), then the longest possible path due to 4 is 2B.
41 *
42 * We shall indicate color with case, where black nodes are uppercase and red
43 * nodes will be lowercase. Unknown color nodes shall be drawn as red within
44 * parentheses and have some accompanying text comment.
45 */
36 46
37 if (parent) 47static inline void rb_set_black(struct rb_node *rb)
38 { 48{
39 if (node == parent->rb_left) 49 rb->__rb_parent_color |= RB_BLACK;
40 parent->rb_left = right;
41 else
42 parent->rb_right = right;
43 }
44 else
45 root->rb_node = right;
46 rb_set_parent(node, right);
47} 50}
48 51
49static void __rb_rotate_right(struct rb_node *node, struct rb_root *root) 52static inline struct rb_node *rb_red_parent(struct rb_node *red)
50{ 53{
51 struct rb_node *left = node->rb_left; 54 return (struct rb_node *)red->__rb_parent_color;
52 struct rb_node *parent = rb_parent(node); 55}
53
54 if ((node->rb_left = left->rb_right))
55 rb_set_parent(left->rb_right, node);
56 left->rb_right = node;
57
58 rb_set_parent(left, parent);
59 56
60 if (parent) 57/*
61 { 58 * Helper function for rotations:
62 if (node == parent->rb_right) 59 * - old's parent and color get assigned to new
63 parent->rb_right = left; 60 * - old gets assigned new as a parent and 'color' as a color.
64 else 61 */
65 parent->rb_left = left; 62static inline void
66 } 63__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
67 else 64 struct rb_root *root, int color)
68 root->rb_node = left; 65{
69 rb_set_parent(node, left); 66 struct rb_node *parent = rb_parent(old);
67 new->__rb_parent_color = old->__rb_parent_color;
68 rb_set_parent_color(old, new, color);
69 __rb_change_child(old, new, parent, root);
70} 70}
71 71
72void rb_insert_color(struct rb_node *node, struct rb_root *root) 72static __always_inline void
73__rb_insert(struct rb_node *node, struct rb_root *root,
74 void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
73{ 75{
74 struct rb_node *parent, *gparent; 76 struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
75 77
76 while ((parent = rb_parent(node)) && rb_is_red(parent)) 78 while (true) {
77 { 79 /*
78 gparent = rb_parent(parent); 80 * Loop invariant: node is red
79 81 *
80 if (parent == gparent->rb_left) 82 * If there is a black parent, we are done.
81 { 83 * Otherwise, take some corrective action as we don't
82 { 84 * want a red root or two consecutive red nodes.
83 register struct rb_node *uncle = gparent->rb_right; 85 */
84 if (uncle && rb_is_red(uncle)) 86 if (!parent) {
85 { 87 rb_set_parent_color(node, NULL, RB_BLACK);
86 rb_set_black(uncle); 88 break;
87 rb_set_black(parent); 89 } else if (rb_is_black(parent))
88 rb_set_red(gparent); 90 break;
89 node = gparent; 91
90 continue; 92 gparent = rb_red_parent(parent);
91 } 93
94 tmp = gparent->rb_right;
95 if (parent != tmp) { /* parent == gparent->rb_left */
96 if (tmp && rb_is_red(tmp)) {
97 /*
98 * Case 1 - color flips
99 *
100 * G g
101 * / \ / \
102 * p u --> P U
103 * / /
104 * n N
105 *
106 * However, since g's parent might be red, and
107 * 4) does not allow this, we need to recurse
108 * at g.
109 */
110 rb_set_parent_color(tmp, gparent, RB_BLACK);
111 rb_set_parent_color(parent, gparent, RB_BLACK);
112 node = gparent;
113 parent = rb_parent(node);
114 rb_set_parent_color(node, parent, RB_RED);
115 continue;
92 } 116 }
93 117
94 if (parent->rb_right == node) 118 tmp = parent->rb_right;
95 { 119 if (node == tmp) {
96 register struct rb_node *tmp; 120 /*
97 __rb_rotate_left(parent, root); 121 * Case 2 - left rotate at parent
98 tmp = parent; 122 *
123 * G G
124 * / \ / \
125 * p U --> n U
126 * \ /
127 * n p
128 *
129 * This still leaves us in violation of 4), the
130 * continuation into Case 3 will fix that.
131 */
132 parent->rb_right = tmp = node->rb_left;
133 node->rb_left = parent;
134 if (tmp)
135 rb_set_parent_color(tmp, parent,
136 RB_BLACK);
137 rb_set_parent_color(parent, node, RB_RED);
138 augment_rotate(parent, node);
99 parent = node; 139 parent = node;
100 node = tmp; 140 tmp = node->rb_right;
101 } 141 }
102 142
103 rb_set_black(parent); 143 /*
104 rb_set_red(gparent); 144 * Case 3 - right rotate at gparent
105 __rb_rotate_right(gparent, root); 145 *
146 * G P
147 * / \ / \
148 * p U --> n g
149 * / \
150 * n U
151 */
152 gparent->rb_left = tmp; /* == parent->rb_right */
153 parent->rb_right = gparent;
154 if (tmp)
155 rb_set_parent_color(tmp, gparent, RB_BLACK);
156 __rb_rotate_set_parents(gparent, parent, root, RB_RED);
157 augment_rotate(gparent, parent);
158 break;
106 } else { 159 } else {
107 { 160 tmp = gparent->rb_left;
108 register struct rb_node *uncle = gparent->rb_left; 161 if (tmp && rb_is_red(tmp)) {
109 if (uncle && rb_is_red(uncle)) 162 /* Case 1 - color flips */
110 { 163 rb_set_parent_color(tmp, gparent, RB_BLACK);
111 rb_set_black(uncle); 164 rb_set_parent_color(parent, gparent, RB_BLACK);
112 rb_set_black(parent); 165 node = gparent;
113 rb_set_red(gparent); 166 parent = rb_parent(node);
114 node = gparent; 167 rb_set_parent_color(node, parent, RB_RED);
115 continue; 168 continue;
116 }
117 } 169 }
118 170
119 if (parent->rb_left == node) 171 tmp = parent->rb_left;
120 { 172 if (node == tmp) {
121 register struct rb_node *tmp; 173 /* Case 2 - right rotate at parent */
122 __rb_rotate_right(parent, root); 174 parent->rb_left = tmp = node->rb_right;
123 tmp = parent; 175 node->rb_right = parent;
176 if (tmp)
177 rb_set_parent_color(tmp, parent,
178 RB_BLACK);
179 rb_set_parent_color(parent, node, RB_RED);
180 augment_rotate(parent, node);
124 parent = node; 181 parent = node;
125 node = tmp; 182 tmp = node->rb_left;
126 } 183 }
127 184
128 rb_set_black(parent); 185 /* Case 3 - left rotate at gparent */
129 rb_set_red(gparent); 186 gparent->rb_right = tmp; /* == parent->rb_left */
130 __rb_rotate_left(gparent, root); 187 parent->rb_left = gparent;
188 if (tmp)
189 rb_set_parent_color(tmp, gparent, RB_BLACK);
190 __rb_rotate_set_parents(gparent, parent, root, RB_RED);
191 augment_rotate(gparent, parent);
192 break;
131 } 193 }
132 } 194 }
133
134 rb_set_black(root->rb_node);
135} 195}
136EXPORT_SYMBOL(rb_insert_color);
137 196
138static void __rb_erase_color(struct rb_node *node, struct rb_node *parent, 197__always_inline void
139 struct rb_root *root) 198__rb_erase_color(struct rb_node *parent, struct rb_root *root,
199 void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
140{ 200{
141 struct rb_node *other; 201 struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
142 202
143 while ((!node || rb_is_black(node)) && node != root->rb_node) 203 while (true) {
144 { 204 /*
145 if (parent->rb_left == node) 205 * Loop invariants:
146 { 206 * - node is black (or NULL on first iteration)
147 other = parent->rb_right; 207 * - node is not the root (parent is not NULL)
148 if (rb_is_red(other)) 208 * - All leaf paths going through parent and node have a
149 { 209 * black node count that is 1 lower than other leaf paths.
150 rb_set_black(other); 210 */
151 rb_set_red(parent); 211 sibling = parent->rb_right;
152 __rb_rotate_left(parent, root); 212 if (node != sibling) { /* node == parent->rb_left */
153 other = parent->rb_right; 213 if (rb_is_red(sibling)) {
214 /*
215 * Case 1 - left rotate at parent
216 *
217 * P S
218 * / \ / \
219 * N s --> p Sr
220 * / \ / \
221 * Sl Sr N Sl
222 */
223 parent->rb_right = tmp1 = sibling->rb_left;
224 sibling->rb_left = parent;
225 rb_set_parent_color(tmp1, parent, RB_BLACK);
226 __rb_rotate_set_parents(parent, sibling, root,
227 RB_RED);
228 augment_rotate(parent, sibling);
229 sibling = tmp1;
154 } 230 }
155 if ((!other->rb_left || rb_is_black(other->rb_left)) && 231 tmp1 = sibling->rb_right;
156 (!other->rb_right || rb_is_black(other->rb_right))) 232 if (!tmp1 || rb_is_black(tmp1)) {
157 { 233 tmp2 = sibling->rb_left;
158 rb_set_red(other); 234 if (!tmp2 || rb_is_black(tmp2)) {
159 node = parent; 235 /*
160 parent = rb_parent(node); 236 * Case 2 - sibling color flip
161 } 237 * (p could be either color here)
162 else 238 *
163 { 239 * (p) (p)
164 if (!other->rb_right || rb_is_black(other->rb_right)) 240 * / \ / \
165 { 241 * N S --> N s
166 rb_set_black(other->rb_left); 242 * / \ / \
167 rb_set_red(other); 243 * Sl Sr Sl Sr
168 __rb_rotate_right(other, root); 244 *
169 other = parent->rb_right; 245 * This leaves us violating 5) which
246 * can be fixed by flipping p to black
247 * if it was red, or by recursing at p.
248 * p is red when coming from Case 1.
249 */
250 rb_set_parent_color(sibling, parent,
251 RB_RED);
252 if (rb_is_red(parent))
253 rb_set_black(parent);
254 else {
255 node = parent;
256 parent = rb_parent(node);
257 if (parent)
258 continue;
259 }
260 break;
170 } 261 }
171 rb_set_color(other, rb_color(parent)); 262 /*
172 rb_set_black(parent); 263 * Case 3 - right rotate at sibling
173 rb_set_black(other->rb_right); 264 * (p could be either color here)
174 __rb_rotate_left(parent, root); 265 *
175 node = root->rb_node; 266 * (p) (p)
176 break; 267 * / \ / \
177 } 268 * N S --> N Sl
178 } 269 * / \ \
179 else 270 * sl Sr s
180 { 271 * \
181 other = parent->rb_left; 272 * Sr
182 if (rb_is_red(other)) 273 */
183 { 274 sibling->rb_left = tmp1 = tmp2->rb_right;
184 rb_set_black(other); 275 tmp2->rb_right = sibling;
185 rb_set_red(parent); 276 parent->rb_right = tmp2;
186 __rb_rotate_right(parent, root); 277 if (tmp1)
187 other = parent->rb_left; 278 rb_set_parent_color(tmp1, sibling,
279 RB_BLACK);
280 augment_rotate(sibling, tmp2);
281 tmp1 = sibling;
282 sibling = tmp2;
188 } 283 }
189 if ((!other->rb_left || rb_is_black(other->rb_left)) && 284 /*
190 (!other->rb_right || rb_is_black(other->rb_right))) 285 * Case 4 - left rotate at parent + color flips
191 { 286 * (p and sl could be either color here.
192 rb_set_red(other); 287 * After rotation, p becomes black, s acquires
193 node = parent; 288 * p's color, and sl keeps its color)
194 parent = rb_parent(node); 289 *
290 * (p) (s)
291 * / \ / \
292 * N S --> P Sr
293 * / \ / \
294 * (sl) sr N (sl)
295 */
296 parent->rb_right = tmp2 = sibling->rb_left;
297 sibling->rb_left = parent;
298 rb_set_parent_color(tmp1, sibling, RB_BLACK);
299 if (tmp2)
300 rb_set_parent(tmp2, parent);
301 __rb_rotate_set_parents(parent, sibling, root,
302 RB_BLACK);
303 augment_rotate(parent, sibling);
304 break;
305 } else {
306 sibling = parent->rb_left;
307 if (rb_is_red(sibling)) {
308 /* Case 1 - right rotate at parent */
309 parent->rb_left = tmp1 = sibling->rb_right;
310 sibling->rb_right = parent;
311 rb_set_parent_color(tmp1, parent, RB_BLACK);
312 __rb_rotate_set_parents(parent, sibling, root,
313 RB_RED);
314 augment_rotate(parent, sibling);
315 sibling = tmp1;
195 } 316 }
196 else 317 tmp1 = sibling->rb_left;
197 { 318 if (!tmp1 || rb_is_black(tmp1)) {
198 if (!other->rb_left || rb_is_black(other->rb_left)) 319 tmp2 = sibling->rb_right;
199 { 320 if (!tmp2 || rb_is_black(tmp2)) {
200 rb_set_black(other->rb_right); 321 /* Case 2 - sibling color flip */
201 rb_set_red(other); 322 rb_set_parent_color(sibling, parent,
202 __rb_rotate_left(other, root); 323 RB_RED);
203 other = parent->rb_left; 324 if (rb_is_red(parent))
325 rb_set_black(parent);
326 else {
327 node = parent;
328 parent = rb_parent(node);
329 if (parent)
330 continue;
331 }
332 break;
204 } 333 }
205 rb_set_color(other, rb_color(parent)); 334 /* Case 3 - right rotate at sibling */
206 rb_set_black(parent); 335 sibling->rb_right = tmp1 = tmp2->rb_left;
207 rb_set_black(other->rb_left); 336 tmp2->rb_left = sibling;
208 __rb_rotate_right(parent, root); 337 parent->rb_left = tmp2;
209 node = root->rb_node; 338 if (tmp1)
210 break; 339 rb_set_parent_color(tmp1, sibling,
340 RB_BLACK);
341 augment_rotate(sibling, tmp2);
342 tmp1 = sibling;
343 sibling = tmp2;
211 } 344 }
345 /* Case 4 - left rotate at parent + color flips */
346 parent->rb_left = tmp2 = sibling->rb_right;
347 sibling->rb_right = parent;
348 rb_set_parent_color(tmp1, sibling, RB_BLACK);
349 if (tmp2)
350 rb_set_parent(tmp2, parent);
351 __rb_rotate_set_parents(parent, sibling, root,
352 RB_BLACK);
353 augment_rotate(parent, sibling);
354 break;
212 } 355 }
213 } 356 }
214 if (node)
215 rb_set_black(node);
216} 357}
358EXPORT_SYMBOL(__rb_erase_color);
217 359
218void rb_erase(struct rb_node *node, struct rb_root *root) 360/*
219{ 361 * Non-augmented rbtree manipulation functions.
220 struct rb_node *child, *parent; 362 *
221 int color; 363 * We use dummy augmented callbacks here, and have the compiler optimize them
222 364 * out of the rb_insert_color() and rb_erase() function definitions.
223 if (!node->rb_left) 365 */
224 child = node->rb_right;
225 else if (!node->rb_right)
226 child = node->rb_left;
227 else
228 {
229 struct rb_node *old = node, *left;
230
231 node = node->rb_right;
232 while ((left = node->rb_left) != NULL)
233 node = left;
234
235 if (rb_parent(old)) {
236 if (rb_parent(old)->rb_left == old)
237 rb_parent(old)->rb_left = node;
238 else
239 rb_parent(old)->rb_right = node;
240 } else
241 root->rb_node = node;
242
243 child = node->rb_right;
244 parent = rb_parent(node);
245 color = rb_color(node);
246
247 if (parent == old) {
248 parent = node;
249 } else {
250 if (child)
251 rb_set_parent(child, parent);
252 parent->rb_left = child;
253
254 node->rb_right = old->rb_right;
255 rb_set_parent(old->rb_right, node);
256 }
257
258 node->rb_parent_color = old->rb_parent_color;
259 node->rb_left = old->rb_left;
260 rb_set_parent(old->rb_left, node);
261 366
262 goto color; 367static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
263 } 368static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
369static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
264 370
265 parent = rb_parent(node); 371static const struct rb_augment_callbacks dummy_callbacks = {
266 color = rb_color(node); 372 dummy_propagate, dummy_copy, dummy_rotate
267 373};
268 if (child)
269 rb_set_parent(child, parent);
270 if (parent)
271 {
272 if (parent->rb_left == node)
273 parent->rb_left = child;
274 else
275 parent->rb_right = child;
276 }
277 else
278 root->rb_node = child;
279 374
280 color: 375void rb_insert_color(struct rb_node *node, struct rb_root *root)
281 if (color == RB_BLACK)
282 __rb_erase_color(child, parent, root);
283}
284EXPORT_SYMBOL(rb_erase);
285
286static void rb_augment_path(struct rb_node *node, rb_augment_f func, void *data)
287{ 376{
288 struct rb_node *parent; 377 __rb_insert(node, root, dummy_rotate);
289
290up:
291 func(node, data);
292 parent = rb_parent(node);
293 if (!parent)
294 return;
295
296 if (node == parent->rb_left && parent->rb_right)
297 func(parent->rb_right, data);
298 else if (parent->rb_left)
299 func(parent->rb_left, data);
300
301 node = parent;
302 goto up;
303} 378}
379EXPORT_SYMBOL(rb_insert_color);
304 380
305/* 381void rb_erase(struct rb_node *node, struct rb_root *root)
306 * after inserting @node into the tree, update the tree to account for
307 * both the new entry and any damage done by rebalance
308 */
309void rb_augment_insert(struct rb_node *node, rb_augment_f func, void *data)
310{ 382{
311 if (node->rb_left) 383 rb_erase_augmented(node, root, &dummy_callbacks);
312 node = node->rb_left;
313 else if (node->rb_right)
314 node = node->rb_right;
315
316 rb_augment_path(node, func, data);
317} 384}
318EXPORT_SYMBOL(rb_augment_insert); 385EXPORT_SYMBOL(rb_erase);
319 386
320/* 387/*
321 * before removing the node, find the deepest node on the rebalance path 388 * Augmented rbtree manipulation functions.
322 * that will still be there after @node gets removed 389 *
390 * This instantiates the same __always_inline functions as in the non-augmented
391 * case, but this time with user-defined callbacks.
323 */ 392 */
324struct rb_node *rb_augment_erase_begin(struct rb_node *node)
325{
326 struct rb_node *deepest;
327
328 if (!node->rb_right && !node->rb_left)
329 deepest = rb_parent(node);
330 else if (!node->rb_right)
331 deepest = node->rb_left;
332 else if (!node->rb_left)
333 deepest = node->rb_right;
334 else {
335 deepest = rb_next(node);
336 if (deepest->rb_right)
337 deepest = deepest->rb_right;
338 else if (rb_parent(deepest) != node)
339 deepest = rb_parent(deepest);
340 }
341
342 return deepest;
343}
344EXPORT_SYMBOL(rb_augment_erase_begin);
345 393
346/* 394void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
347 * after removal, update the tree to account for the removed entry 395 void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
348 * and any rebalance damage.
349 */
350void rb_augment_erase_end(struct rb_node *node, rb_augment_f func, void *data)
351{ 396{
352 if (node) 397 __rb_insert(node, root, augment_rotate);
353 rb_augment_path(node, func, data);
354} 398}
355EXPORT_SYMBOL(rb_augment_erase_end); 399EXPORT_SYMBOL(__rb_insert_augmented);
356 400
357/* 401/*
358 * This function returns the first node (in sort order) of the tree. 402 * This function returns the first node (in sort order) of the tree.
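The rewrite above folds the old open-coded rotations into __always_inline __rb_insert() and __rb_erase_color() workers that take an augment_rotate() callback, then instantiates them with no-op dummy callbacks so the plain rb_insert_color()/rb_erase() keep their old behaviour without indirect-call overhead. A minimal userspace sketch of that dummy-callback technique (all names here are illustrative, not kernel API):

#include <stdio.h>

struct item { int val; };

/* Worker parameterized on a callback; always_inline means every caller
 * gets its own specialized copy. */
static inline __attribute__((always_inline))
void worker(struct item *it, void (*hook)(struct item *))
{
	it->val++;
	hook(it);	/* a call to a known no-op can be optimized away */
}

static void dummy_hook(struct item *it) { }
static void real_hook(struct item *it) { printf("hook: %d\n", it->val); }

void plain_op(struct item *it)     { worker(it, dummy_hook); }
void augmented_op(struct item *it) { worker(it, real_hook); }

int main(void)
{
	struct item it = { 0 };

	plain_op(&it);		/* no visible side effect */
	augmented_op(&it);	/* prints "hook: 2" */
	return 0;
}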
@@ -387,11 +431,13 @@ struct rb_node *rb_next(const struct rb_node *node)
387{ 431{
388 struct rb_node *parent; 432 struct rb_node *parent;
389 433
390 if (rb_parent(node) == node) 434 if (RB_EMPTY_NODE(node))
391 return NULL; 435 return NULL;
392 436
393 /* If we have a right-hand child, go down and then left as far 437 /*
394 as we can. */ 438 * If we have a right-hand child, go down and then left as far
439 * as we can.
440 */
395 if (node->rb_right) { 441 if (node->rb_right) {
396 node = node->rb_right; 442 node = node->rb_right;
397 while (node->rb_left) 443 while (node->rb_left)
@@ -399,12 +445,13 @@ struct rb_node *rb_next(const struct rb_node *node)
399 return (struct rb_node *)node; 445 return (struct rb_node *)node;
400 } 446 }
401 447
402 /* No right-hand children. Everything down and left is 448 /*
403 smaller than us, so any 'next' node must be in the general 449 * No right-hand children. Everything down and left is smaller than us,
404 direction of our parent. Go up the tree; any time the 450 * so any 'next' node must be in the general direction of our parent.
405 ancestor is a right-hand child of its parent, keep going 451 * Go up the tree; any time the ancestor is a right-hand child of its
406 up. First time it's a left-hand child of its parent, said 452 * parent, keep going up. First time it's a left-hand child of its
407 parent is our 'next' node. */ 453 * parent, said parent is our 'next' node.
454 */
408 while ((parent = rb_parent(node)) && node == parent->rb_right) 455 while ((parent = rb_parent(node)) && node == parent->rb_right)
409 node = parent; 456 node = parent;
410 457
@@ -416,11 +463,13 @@ struct rb_node *rb_prev(const struct rb_node *node)
416{ 463{
417 struct rb_node *parent; 464 struct rb_node *parent;
418 465
419 if (rb_parent(node) == node) 466 if (RB_EMPTY_NODE(node))
420 return NULL; 467 return NULL;
421 468
422 /* If we have a left-hand child, go down and then right as far 469 /*
423 as we can. */ 470 * If we have a left-hand child, go down and then right as far
471 * as we can.
472 */
424 if (node->rb_left) { 473 if (node->rb_left) {
425 node = node->rb_left; 474 node = node->rb_left;
426 while (node->rb_right) 475 while (node->rb_right)
@@ -428,8 +477,10 @@ struct rb_node *rb_prev(const struct rb_node *node)
428 return (struct rb_node *)node; 477 return (struct rb_node *)node;
429 } 478 }
430 479
431 /* No left-hand children. Go up till we find an ancestor which 480 /*
432 is a right-hand child of its parent */ 481 * No left-hand children. Go up till we find an ancestor which
482 * is a right-hand child of its parent.
483 */
433 while ((parent = rb_parent(node)) && node == parent->rb_left) 484 while ((parent = rb_parent(node)) && node == parent->rb_left)
434 node = parent; 485 node = parent;
435 486
@@ -443,14 +494,7 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
443 struct rb_node *parent = rb_parent(victim); 494 struct rb_node *parent = rb_parent(victim);
444 495
445 /* Set the surrounding nodes to point to the replacement */ 496 /* Set the surrounding nodes to point to the replacement */
446 if (parent) { 497 __rb_change_child(victim, new, parent, root);
447 if (victim == parent->rb_left)
448 parent->rb_left = new;
449 else
450 parent->rb_right = new;
451 } else {
452 root->rb_node = new;
453 }
454 if (victim->rb_left) 498 if (victim->rb_left)
455 rb_set_parent(victim->rb_left, new); 499 rb_set_parent(victim->rb_left, new);
456 if (victim->rb_right) 500 if (victim->rb_right)
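For context on the rb_next()/rb_prev() cleanups above, the usual consumer-side pattern is an in-order walk with rb_first()/rb_next(); a small kernel-style sketch (struct my_item and its fields are illustrative):

#include <linux/rbtree.h>
#include <linux/printk.h>

struct my_item {
	struct rb_node node;
	unsigned long key;
};

/* Visit every item in ascending key order. */
static void walk_tree(struct rb_root *root)
{
	struct rb_node *rb;

	for (rb = rb_first(root); rb; rb = rb_next(rb)) {
		struct my_item *item = rb_entry(rb, struct my_item, node);

		pr_info("key=%lu\n", item->key);
	}
}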
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
new file mode 100644
index 000000000000..268b23951fec
--- /dev/null
+++ b/lib/rbtree_test.c
@@ -0,0 +1,234 @@
1#include <linux/module.h>
2#include <linux/rbtree_augmented.h>
3#include <linux/random.h>
4#include <asm/timex.h>
5
6#define NODES 100
7#define PERF_LOOPS 100000
8#define CHECK_LOOPS 100
9
10struct test_node {
11 struct rb_node rb;
12 u32 key;
13
14 /* following fields used for testing augmented rbtree functionality */
15 u32 val;
16 u32 augmented;
17};
18
19static struct rb_root root = RB_ROOT;
20static struct test_node nodes[NODES];
21
22static struct rnd_state rnd;
23
24static void insert(struct test_node *node, struct rb_root *root)
25{
26 struct rb_node **new = &root->rb_node, *parent = NULL;
27 u32 key = node->key;
28
29 while (*new) {
30 parent = *new;
31 if (key < rb_entry(parent, struct test_node, rb)->key)
32 new = &parent->rb_left;
33 else
34 new = &parent->rb_right;
35 }
36
37 rb_link_node(&node->rb, parent, new);
38 rb_insert_color(&node->rb, root);
39}
40
41static inline void erase(struct test_node *node, struct rb_root *root)
42{
43 rb_erase(&node->rb, root);
44}
45
46static inline u32 augment_recompute(struct test_node *node)
47{
48 u32 max = node->val, child_augmented;
49 if (node->rb.rb_left) {
50 child_augmented = rb_entry(node->rb.rb_left, struct test_node,
51 rb)->augmented;
52 if (max < child_augmented)
53 max = child_augmented;
54 }
55 if (node->rb.rb_right) {
56 child_augmented = rb_entry(node->rb.rb_right, struct test_node,
57 rb)->augmented;
58 if (max < child_augmented)
59 max = child_augmented;
60 }
61 return max;
62}
63
64RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb,
65 u32, augmented, augment_recompute)
66
67static void insert_augmented(struct test_node *node, struct rb_root *root)
68{
69 struct rb_node **new = &root->rb_node, *rb_parent = NULL;
70 u32 key = node->key;
71 u32 val = node->val;
72 struct test_node *parent;
73
74 while (*new) {
75 rb_parent = *new;
76 parent = rb_entry(rb_parent, struct test_node, rb);
77 if (parent->augmented < val)
78 parent->augmented = val;
79 if (key < parent->key)
80 new = &parent->rb.rb_left;
81 else
82 new = &parent->rb.rb_right;
83 }
84
85 node->augmented = val;
86 rb_link_node(&node->rb, rb_parent, new);
87 rb_insert_augmented(&node->rb, root, &augment_callbacks);
88}
89
90static void erase_augmented(struct test_node *node, struct rb_root *root)
91{
92 rb_erase_augmented(&node->rb, root, &augment_callbacks);
93}
94
95static void init(void)
96{
97 int i;
98 for (i = 0; i < NODES; i++) {
99 nodes[i].key = prandom32(&rnd);
100 nodes[i].val = prandom32(&rnd);
101 }
102}
103
104static bool is_red(struct rb_node *rb)
105{
106 return !(rb->__rb_parent_color & 1);
107}
108
109static int black_path_count(struct rb_node *rb)
110{
111 int count;
112 for (count = 0; rb; rb = rb_parent(rb))
113 count += !is_red(rb);
114 return count;
115}
116
117static void check(int nr_nodes)
118{
119 struct rb_node *rb;
120 int count = 0;
121 int blacks;
122 u32 prev_key = 0;
123
124 for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
125 struct test_node *node = rb_entry(rb, struct test_node, rb);
126 WARN_ON_ONCE(node->key < prev_key);
127 WARN_ON_ONCE(is_red(rb) &&
128 (!rb_parent(rb) || is_red(rb_parent(rb))));
129 if (!count)
130 blacks = black_path_count(rb);
131 else
132 WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) &&
133 blacks != black_path_count(rb));
134 prev_key = node->key;
135 count++;
136 }
137 WARN_ON_ONCE(count != nr_nodes);
138}
139
140static void check_augmented(int nr_nodes)
141{
142 struct rb_node *rb;
143
144 check(nr_nodes);
145 for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
146 struct test_node *node = rb_entry(rb, struct test_node, rb);
147 WARN_ON_ONCE(node->augmented != augment_recompute(node));
148 }
149}
150
151static int rbtree_test_init(void)
152{
153 int i, j;
154 cycles_t time1, time2, time;
155
156 printk(KERN_ALERT "rbtree testing");
157
158 prandom32_seed(&rnd, 3141592653589793238ULL);
159 init();
160
161 time1 = get_cycles();
162
163 for (i = 0; i < PERF_LOOPS; i++) {
164 for (j = 0; j < NODES; j++)
165 insert(nodes + j, &root);
166 for (j = 0; j < NODES; j++)
167 erase(nodes + j, &root);
168 }
169
170 time2 = get_cycles();
171 time = time2 - time1;
172
173 time = div_u64(time, PERF_LOOPS);
174 printk(" -> %llu cycles\n", (unsigned long long)time);
175
176 for (i = 0; i < CHECK_LOOPS; i++) {
177 init();
178 for (j = 0; j < NODES; j++) {
179 check(j);
180 insert(nodes + j, &root);
181 }
182 for (j = 0; j < NODES; j++) {
183 check(NODES - j);
184 erase(nodes + j, &root);
185 }
186 check(0);
187 }
188
189 printk(KERN_ALERT "augmented rbtree testing");
190
191 init();
192
193 time1 = get_cycles();
194
195 for (i = 0; i < PERF_LOOPS; i++) {
196 for (j = 0; j < NODES; j++)
197 insert_augmented(nodes + j, &root);
198 for (j = 0; j < NODES; j++)
199 erase_augmented(nodes + j, &root);
200 }
201
202 time2 = get_cycles();
203 time = time2 - time1;
204
205 time = div_u64(time, PERF_LOOPS);
206 printk(" -> %llu cycles\n", (unsigned long long)time);
207
208 for (i = 0; i < CHECK_LOOPS; i++) {
209 init();
210 for (j = 0; j < NODES; j++) {
211 check_augmented(j);
212 insert_augmented(nodes + j, &root);
213 }
214 for (j = 0; j < NODES; j++) {
215 check_augmented(NODES - j);
216 erase_augmented(nodes + j, &root);
217 }
218 check_augmented(0);
219 }
220
221 return -EAGAIN; /* Fail will directly unload the module */
222}
223
224static void rbtree_test_exit(void)
225{
226 printk(KERN_ALERT "test exit\n");
227}
228
229module_init(rbtree_test_init)
230module_exit(rbtree_test_exit)
231
232MODULE_LICENSE("GPL");
233MODULE_AUTHOR("Michel Lespinasse");
234MODULE_DESCRIPTION("Red Black Tree test");
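The test relies on RB_DECLARE_CALLBACKS to turn augment_recompute() into the propagate/copy/rotate callbacks passed to rb_insert_augmented()/rb_erase_augmented(). Roughly, and leaving out the copy and rotate halves, the generated propagate callback walks back up the tree and stops as soon as a node's cached maximum is already correct (a simplified sketch of the macro's effect, not its literal expansion):

static void augment_callbacks_propagate(struct rb_node *rb,
					struct rb_node *stop)
{
	while (rb != stop) {
		struct test_node *node = rb_entry(rb, struct test_node, rb);
		u32 augmented = augment_recompute(node);

		if (node->augmented == augmented)
			break;	/* ancestors are already up to date */
		node->augmented = augmented;
		rb = rb_parent(&node->rb);
	}
}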
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index fadae774a20c..e76d85cf3175 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -404,14 +404,13 @@ EXPORT_SYMBOL(sg_miter_start);
404 * @miter: sg mapping iter to proceed 404 * @miter: sg mapping iter to proceed
405 * 405 *
406 * Description: 406 * Description:
407 * Proceeds @miter@ to the next mapping. @miter@ should have been 407 * Proceeds @miter to the next mapping. @miter should have been started
408 * started using sg_miter_start(). On successful return, 408 * using sg_miter_start(). On successful return, @miter->page,
409 * @miter@->page, @miter@->addr and @miter@->length point to the 409 * @miter->addr and @miter->length point to the current mapping.
410 * current mapping.
411 * 410 *
412 * Context: 411 * Context:
413 * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till 412 * Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
414 * @miter@ is stopped. May sleep if !SG_MITER_ATOMIC. 413 * till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
415 * 414 *
416 * Returns: 415 * Returns:
417 * true if @miter contains the next mapping. false if end of sg 416 * true if @miter contains the next mapping. false if end of sg
@@ -465,7 +464,8 @@ EXPORT_SYMBOL(sg_miter_next);
465 * resources (kmap) need to be released during iteration. 464 * resources (kmap) need to be released during iteration.
466 * 465 *
467 * Context: 466 * Context:
468 * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise. 467 * Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
468 * otherwise.
469 */ 469 */
470void sg_miter_stop(struct sg_mapping_iter *miter) 470void sg_miter_stop(struct sg_mapping_iter *miter)
471{ 471{
@@ -479,7 +479,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
479 flush_kernel_dcache_page(miter->page); 479 flush_kernel_dcache_page(miter->page);
480 480
481 if (miter->__flags & SG_MITER_ATOMIC) { 481 if (miter->__flags & SG_MITER_ATOMIC) {
482 WARN_ON(!irqs_disabled()); 482 WARN_ON_ONCE(preemptible());
483 kunmap_atomic(miter->addr); 483 kunmap_atomic(miter->addr);
484 } else 484 } else
485 kunmap(miter->page); 485 kunmap(miter->page);
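The context change above (IRQs disabled becomes preemption disabled) matters to SG_MITER_ATOMIC users, since the kmap_atomic()-based mapping only requires preemption to stay off between sg_miter_next() and sg_miter_stop(). A typical copy-out loop looks roughly like this; the kernel already provides sg_copy_to_buffer() for this job, so the sketch only illustrates the iterator contract:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Copy up to buflen bytes out of an sg list via an atomic mapping
 * iterator; preemption stays disabled until sg_miter_stop(). */
static size_t copy_from_sg(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	struct sg_mapping_iter miter;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (copied < buflen && sg_miter_next(&miter)) {
		size_t len = min(miter.length, buflen - copied);

		memcpy(buf + copied, miter.addr, len);
		copied += len;
	}
	sg_miter_stop(&miter);

	return copied;
}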
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index eb10578ae055..0374a596cffa 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -107,23 +107,27 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
107{ 107{
108 u64 i; 108 u64 i;
109 u64 loops = loops_per_jiffy * HZ; 109 u64 loops = loops_per_jiffy * HZ;
110 int print_once = 1;
111 110
112 for (;;) { 111 for (i = 0; i < loops; i++) {
113 for (i = 0; i < loops; i++) { 112 if (arch_spin_trylock(&lock->raw_lock))
114 if (arch_spin_trylock(&lock->raw_lock)) 113 return;
115 return; 114 __delay(1);
116 __delay(1); 115 }
117 } 116 /* lockup suspected: */
118 /* lockup suspected: */ 117 spin_dump(lock, "lockup suspected");
119 if (print_once) {
120 print_once = 0;
121 spin_dump(lock, "lockup suspected");
122#ifdef CONFIG_SMP 118#ifdef CONFIG_SMP
123 trigger_all_cpu_backtrace(); 119 trigger_all_cpu_backtrace();
124#endif 120#endif
125 } 121
126 } 122 /*
123 * The trylock above was causing a livelock. Give the lower level arch
124 * specific lock code a chance to acquire the lock. We have already
125 * printed a warning/backtrace at this point. The non-debug arch
126 * specific code might actually succeed in acquiring the lock. If it is
127 * not successful, the end-result is the same - there is no forward
128 * progress.
129 */
130 arch_spin_lock(&lock->raw_lock);
127} 131}
128 132
129void do_raw_spin_lock(raw_spinlock_t *lock) 133void do_raw_spin_lock(raw_spinlock_t *lock)
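The restructured __spin_lock_debug() is the familiar "spin with a bound, complain once, then block for real" pattern. A userspace analogue with POSIX spinlocks, purely illustrative (loops_per_jiffy * HZ has no userspace counterpart, so the bound here is arbitrary):

#include <pthread.h>
#include <stdio.h>

#define SPIN_ATTEMPTS (1 << 24)	/* arbitrary stand-in for ~1s of spinning */

static void debug_spin_lock(pthread_spinlock_t *lock)
{
	long i;

	for (i = 0; i < SPIN_ATTEMPTS; i++)
		if (pthread_spin_trylock(lock) == 0)
			return;

	/* Lockup suspected: warn once, then fall back to a blocking
	 * acquisition so we still make progress if the owner lets go. */
	fprintf(stderr, "lockup suspected on %p\n", (void *)lock);
	pthread_spin_lock(lock);
}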
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 45bc1f83a5ad..f114bf6a8e13 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -170,7 +170,7 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
170 * Statically reserve bounce buffer space and initialize bounce buffer data 170 * Statically reserve bounce buffer space and initialize bounce buffer data
171 * structures for the software IO TLB used to implement the DMA API. 171 * structures for the software IO TLB used to implement the DMA API.
172 */ 172 */
173void __init 173static void __init
174swiotlb_init_with_default_size(size_t default_size, int verbose) 174swiotlb_init_with_default_size(size_t default_size, int verbose)
175{ 175{
176 unsigned long bytes; 176 unsigned long bytes;
@@ -206,8 +206,9 @@ swiotlb_init(int verbose)
206int 206int
207swiotlb_late_init_with_default_size(size_t default_size) 207swiotlb_late_init_with_default_size(size_t default_size)
208{ 208{
209 unsigned long i, bytes, req_nslabs = io_tlb_nslabs; 209 unsigned long bytes, req_nslabs = io_tlb_nslabs;
210 unsigned int order; 210 unsigned int order;
211 int rc = 0;
211 212
212 if (!io_tlb_nslabs) { 213 if (!io_tlb_nslabs) {
213 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); 214 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
@@ -229,16 +230,32 @@ swiotlb_late_init_with_default_size(size_t default_size)
229 order--; 230 order--;
230 } 231 }
231 232
232 if (!io_tlb_start) 233 if (!io_tlb_start) {
233 goto cleanup1; 234 io_tlb_nslabs = req_nslabs;
234 235 return -ENOMEM;
236 }
235 if (order != get_order(bytes)) { 237 if (order != get_order(bytes)) {
236 printk(KERN_WARNING "Warning: only able to allocate %ld MB " 238 printk(KERN_WARNING "Warning: only able to allocate %ld MB "
237 "for software IO TLB\n", (PAGE_SIZE << order) >> 20); 239 "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
238 io_tlb_nslabs = SLABS_PER_PAGE << order; 240 io_tlb_nslabs = SLABS_PER_PAGE << order;
239 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
240 } 241 }
242 rc = swiotlb_late_init_with_tbl(io_tlb_start, io_tlb_nslabs);
243 if (rc)
244 free_pages((unsigned long)io_tlb_start, order);
245 return rc;
246}
247
248int
249swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
250{
251 unsigned long i, bytes;
252
253 bytes = nslabs << IO_TLB_SHIFT;
254
255 io_tlb_nslabs = nslabs;
256 io_tlb_start = tlb;
241 io_tlb_end = io_tlb_start + bytes; 257 io_tlb_end = io_tlb_start + bytes;
258
242 memset(io_tlb_start, 0, bytes); 259 memset(io_tlb_start, 0, bytes);
243 260
244 /* 261 /*
@@ -288,10 +305,8 @@ cleanup3:
288 io_tlb_list = NULL; 305 io_tlb_list = NULL;
289cleanup2: 306cleanup2:
290 io_tlb_end = NULL; 307 io_tlb_end = NULL;
291 free_pages((unsigned long)io_tlb_start, order);
292 io_tlb_start = NULL; 308 io_tlb_start = NULL;
293cleanup1: 309 io_tlb_nslabs = 0;
294 io_tlb_nslabs = req_nslabs;
295 return -ENOMEM; 310 return -ENOMEM;
296} 311}
297 312
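Splitting the tail of the late-init path into swiotlb_late_init_with_tbl() lets a caller hand the core an externally allocated bounce buffer. A condensed sketch of that caller pattern, following the allocation/fallback logic already visible in swiotlb_late_init_with_default_size() above (the function name is illustrative):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swiotlb.h>

/* Allocate a bounce buffer ourselves and register it with the core. */
static int my_swiotlb_setup(unsigned long nslabs)
{
	unsigned long bytes = nslabs << IO_TLB_SHIFT;
	unsigned int order = get_order(bytes);
	char *tlb;
	int rc;

	tlb = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
	if (!tlb)
		return -ENOMEM;

	rc = swiotlb_late_init_with_tbl(tlb, nslabs);
	if (rc)
		free_pages((unsigned long)tlb, order);
	return rc;
}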
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0e337541f005..39c99fea7c03 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -174,35 +174,25 @@ char *put_dec_trunc8(char *buf, unsigned r)
174 unsigned q; 174 unsigned q;
175 175
176 /* Copy of previous function's body with added early returns */ 176 /* Copy of previous function's body with added early returns */
177 q = (r * (uint64_t)0x1999999a) >> 32; 177 while (r >= 10000) {
178 *buf++ = (r - 10 * q) + '0'; /* 2 */ 178 q = r + '0';
179 if (q == 0) 179 r = (r * (uint64_t)0x1999999a) >> 32;
180 return buf; 180 *buf++ = q - 10*r;
181 r = (q * (uint64_t)0x1999999a) >> 32; 181 }
182 *buf++ = (q - 10 * r) + '0'; /* 3 */ 182
183 if (r == 0) 183 q = (r * 0x199a) >> 16; /* r <= 9999 */
184 return buf; 184 *buf++ = (r - 10 * q) + '0';
185 q = (r * (uint64_t)0x1999999a) >> 32;
186 *buf++ = (r - 10 * q) + '0'; /* 4 */
187 if (q == 0)
188 return buf;
189 r = (q * (uint64_t)0x1999999a) >> 32;
190 *buf++ = (q - 10 * r) + '0'; /* 5 */
191 if (r == 0)
192 return buf;
193 q = (r * 0x199a) >> 16;
194 *buf++ = (r - 10 * q) + '0'; /* 6 */
195 if (q == 0) 185 if (q == 0)
196 return buf; 186 return buf;
197 r = (q * 0xcd) >> 11; 187 r = (q * 0xcd) >> 11; /* q <= 999 */
198 *buf++ = (q - 10 * r) + '0'; /* 7 */ 188 *buf++ = (q - 10 * r) + '0';
199 if (r == 0) 189 if (r == 0)
200 return buf; 190 return buf;
201 q = (r * 0xcd) >> 11; 191 q = (r * 0xcd) >> 11; /* r <= 99 */
202 *buf++ = (r - 10 * q) + '0'; /* 8 */ 192 *buf++ = (r - 10 * q) + '0';
203 if (q == 0) 193 if (q == 0)
204 return buf; 194 return buf;
205 *buf++ = q + '0'; /* 9 */ 195 *buf++ = q + '0'; /* q <= 9 */
206 return buf; 196 return buf;
207} 197}
208 198
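The rewritten loop divides by ten with a reciprocal multiplication: 0x1999999a is (2^32 + 4) / 10, so (r * 0x1999999a) >> 32 equals r / 10 for every r below 2^30, which comfortably covers the at-most-eight-digit values put_dec_trunc8() is given. A standalone userspace check over the caller's range:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t r;

	/* 0x1999999a == (2^32 + 4) / 10, a rounded-up reciprocal of 10. */
	for (r = 0; r < 100000000; r++)		/* eight decimal digits */
		assert((uint32_t)(((uint64_t)r * 0x1999999a) >> 32) == r / 10);

	puts("ok");
	return 0;
}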
@@ -243,18 +233,34 @@ char *put_dec(char *buf, unsigned long long n)
243 233
244/* Second algorithm: valid only for 64-bit long longs */ 234/* Second algorithm: valid only for 64-bit long longs */
245 235
236/* See comment in put_dec_full9 for choice of constants */
246static noinline_for_stack 237static noinline_for_stack
247char *put_dec_full4(char *buf, unsigned q) 238void put_dec_full4(char *buf, unsigned q)
248{ 239{
249 unsigned r; 240 unsigned r;
250 r = (q * 0xcccd) >> 19; 241 r = (q * 0xccd) >> 15;
251 *buf++ = (q - 10 * r) + '0'; 242 buf[0] = (q - 10 * r) + '0';
252 q = (r * 0x199a) >> 16; 243 q = (r * 0xcd) >> 11;
253 *buf++ = (r - 10 * q) + '0'; 244 buf[1] = (r - 10 * q) + '0';
254 r = (q * 0xcd) >> 11; 245 r = (q * 0xcd) >> 11;
255 *buf++ = (q - 10 * r) + '0'; 246 buf[2] = (q - 10 * r) + '0';
256 *buf++ = r + '0'; 247 buf[3] = r + '0';
257 return buf; 248}
249
250/*
251 * Call put_dec_full4 on x % 10000, return x / 10000.
252 * The approximation x/10000 == (x * 0x346DC5D7) >> 43
253 * holds for all x < 1,128,869,999. The largest value this
254 * helper will ever be asked to convert is 1,125,520,955.
255 * (d1 in the put_dec code, assuming n is all-ones).
256 */
257static
258unsigned put_dec_helper4(char *buf, unsigned x)
259{
260 uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43;
261
262 put_dec_full4(buf, x - q * 10000);
263 return q;
258} 264}
259 265
260/* Based on code by Douglas W. Jones found at 266/* Based on code by Douglas W. Jones found at
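The 0x346DC5D7 constant works the same way for division by 10000, and the comment's bound can be checked exhaustively in a few seconds of userspace time (the first value for which the approximation fails is exactly 1,128,869,999):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t x;

	/* Verify x / 10000 == (x * 0x346DC5D7) >> 43 over the claimed range. */
	for (x = 0; x < 1128869999u; x++)
		assert((uint32_t)(((uint64_t)x * 0x346DC5D7) >> 43) == x / 10000);

	puts("ok");
	return 0;
}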
@@ -276,28 +282,19 @@ char *put_dec(char *buf, unsigned long long n)
276 d3 = (h >> 16); /* implicit "& 0xffff" */ 282 d3 = (h >> 16); /* implicit "& 0xffff" */
277 283
278 q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff); 284 q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
285 q = put_dec_helper4(buf, q);
286
287 q += 7671 * d3 + 9496 * d2 + 6 * d1;
288 q = put_dec_helper4(buf+4, q);
289
290 q += 4749 * d3 + 42 * d2;
291 q = put_dec_helper4(buf+8, q);
279 292
280 buf = put_dec_full4(buf, q % 10000); 293 q += 281 * d3;
281 q = q / 10000; 294 buf += 12;
282 295 if (q)
283 d1 = q + 7671 * d3 + 9496 * d2 + 6 * d1; 296 buf = put_dec_trunc8(buf, q);
284 buf = put_dec_full4(buf, d1 % 10000); 297 else while (buf[-1] == '0')
285 q = d1 / 10000;
286
287 d2 = q + 4749 * d3 + 42 * d2;
288 buf = put_dec_full4(buf, d2 % 10000);
289 q = d2 / 10000;
290
291 d3 = q + 281 * d3;
292 if (!d3)
293 goto done;
294 buf = put_dec_full4(buf, d3 % 10000);
295 q = d3 / 10000;
296 if (!q)
297 goto done;
298 buf = put_dec_full4(buf, q);
299 done:
300 while (buf[-1] == '0')
301 --buf; 298 --buf;
302 299
303 return buf; 300 return buf;
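The otherwise magic multipliers in put_dec() fall out of writing n = d3*2^48 + d2*2^32 + d1*2^16 + d0 and expanding each power of two in base 10000: 2^16 = 6'5536, 2^32 = 42'9496'7296 and 2^48 = 281'4749'7671'0656, which is exactly where the 5536/6, 7296/9496/42 and 656/7671/4749/281 coefficients come from. A trivial program that prints those digit groups:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int shift;

	/* Print 2^16, 2^32 and 2^48 as base-10000 digit groups,
	 * least-significant group first. */
	for (shift = 16; shift <= 48; shift += 16) {
		uint64_t p = (uint64_t)1 << shift;

		printf("2^%d:", shift);
		for (; p; p /= 10000)
			printf(" %04" PRIu64, p % 10000);
		printf("\n");
	}
	return 0;
}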
@@ -990,7 +987,7 @@ int kptr_restrict __read_mostly;
990 * - 'm' For a 6-byte MAC address, it prints the hex address without colons 987 * - 'm' For a 6-byte MAC address, it prints the hex address without colons
991 * - 'MF' For a 6-byte MAC FDDI address, it prints the address 988 * - 'MF' For a 6-byte MAC FDDI address, it prints the address
992 * with a dash-separated hex notation 989 * with a dash-separated hex notation
993 * - '[mM]R For a 6-byte MAC address, Reverse order (Bluetooth) 990 * - '[mM]R' For a 6-byte MAC address, Reverse order (Bluetooth)
994 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way 991 * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
995 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) 992 * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
996 * IPv6 uses colon separated network-order 16 bit hex with leading 0's 993 * IPv6 uses colon separated network-order 16 bit hex with leading 0's
@@ -1341,7 +1338,10 @@ qualifier:
1341 * %pR output the address range in a struct resource with decoded flags 1338 * %pR output the address range in a struct resource with decoded flags
1342 * %pr output the address range in a struct resource with raw flags 1339 * %pr output the address range in a struct resource with raw flags
1343 * %pM output a 6-byte MAC address with colons 1340 * %pM output a 6-byte MAC address with colons
1341 * %pMR output a 6-byte MAC address with colons in reversed order
1342 * %pMF output a 6-byte MAC address with dashes
1344 * %pm output a 6-byte MAC address without colons 1343 * %pm output a 6-byte MAC address without colons
1344 * %pmR output a 6-byte MAC address without colons in reversed order
1345 * %pI4 print an IPv4 address without leading zeros 1345 * %pI4 print an IPv4 address without leading zeros
1346 * %pi4 print an IPv4 address with leading zeros 1346 * %pi4 print an IPv4 address with leading zeros
1347 * %pI6 print an IPv6 address with colons 1347 * %pI6 print an IPv6 address with colons
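For reference, the newly documented MAC specifiers in use (the address is made up):

#include <linux/printk.h>
#include <linux/types.h>

static void show_mac_formats(void)
{
	u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	pr_info("%pM\n",  mac);		/* 00:11:22:33:44:55            */
	pr_info("%pMR\n", mac);		/* 55:44:33:22:11:00 (reversed) */
	pr_info("%pMF\n", mac);		/* 00-11-22-33-44-55 (FDDI)     */
	pr_info("%pm\n",  mac);		/* 001122334455                 */
}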
@@ -2017,7 +2017,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
2017 s16 field_width; 2017 s16 field_width;
2018 bool is_sign; 2018 bool is_sign;
2019 2019
2020 while (*fmt && *str) { 2020 while (*fmt) {
2021 /* skip any white space in format */ 2021 /* skip any white space in format */
2022 /* white space in format matchs any amount of 2022 /* white space in format matchs any amount of
2023 * white space, including none, in the input. 2023 * white space, including none, in the input.
@@ -2042,6 +2042,8 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
2042 * advance both strings to next white space 2042 * advance both strings to next white space
2043 */ 2043 */
2044 if (*fmt == '*') { 2044 if (*fmt == '*') {
2045 if (!*str)
2046 break;
2045 while (!isspace(*fmt) && *fmt != '%' && *fmt) 2047 while (!isspace(*fmt) && *fmt != '%' && *fmt)
2046 fmt++; 2048 fmt++;
2047 while (!isspace(*str) && *str) 2049 while (!isspace(*str) && *str)
@@ -2070,7 +2072,17 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
2070 } 2072 }
2071 } 2073 }
2072 2074
2073 if (!*fmt || !*str) 2075 if (!*fmt)
2076 break;
2077
2078 if (*fmt == 'n') {
2079 /* return number of characters read so far */
2080 *va_arg(args, int *) = str - buf;
2081 ++fmt;
2082 continue;
2083 }
2084
2085 if (!*str)
2074 break; 2086 break;
2075 2087
2076 base = 10; 2088 base = 10;
@@ -2103,13 +2115,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
2103 num++; 2115 num++;
2104 } 2116 }
2105 continue; 2117 continue;
2106 case 'n':
2107 /* return number of characters read so far */
2108 {
2109 int *i = (int *)va_arg(args, int*);
2110 *i = str - buf;
2111 }
2112 continue;
2113 case 'o': 2118 case 'o':
2114 base = 8; 2119 base = 8;
2115 break; 2120 break;
@@ -2210,16 +2215,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
2210 str = next; 2215 str = next;
2211 } 2216 }
2212 2217
2213 /*
2214 * Now we've come all the way through so either the input string or the
2215 * format ended. In the former case, there can be a %n at the current
2216 * position in the format that needs to be filled.
2217 */
2218 if (*fmt == '%' && *(fmt + 1) == 'n') {
2219 int *p = (int *)va_arg(args, int *);
2220 *p = str - buf;
2221 }
2222
2223 return num; 2218 return num;
2224} 2219}
2225EXPORT_SYMBOL(vsscanf); 2220EXPORT_SYMBOL(vsscanf);
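The vsscanf() changes move %n handling into the main loop, ahead of the end-of-input check, and drop the old special case after the loop; a %n conversion is therefore honoured even once the input string has been fully consumed, regardless of what follows it in the format. A small illustration with the kernel's sscanf() (values are made up):

#include <linux/kernel.h>

static void scanf_n_example(void)
{
	int val = 0, consumed = 0;

	/* %d consumes the whole input, so the string is already exhausted
	 * when %n is reached; it is still filled in (here with 3). */
	if (sscanf("123", "%d%n", &val, &consumed) == 1)
		pr_info("val=%d, consumed=%d\n", val, consumed);
}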