Diffstat (limited to 'lib')
 lib/Kconfig              |  13
 lib/Kconfig.debug        |  32
 lib/Makefile             |   2
 lib/audit.c              |  15
 lib/clz_ctz.c            |   7
 lib/compat_audit.c       |  50
 lib/decompress.c         |   3
 lib/decompress_inflate.c |   1
 lib/devres.c             |  16
 lib/dma-debug.c          | 131
 lib/fonts/Kconfig        |   6
 lib/idr.c                |  34
 lib/iomap.c              |   4
 lib/kobject.c            |   2
 lib/kobject_uevent.c     |  42
 lib/nlattr.c             |  10
 lib/percpu_counter.c     |   2
 lib/percpu_ida.c         |   7
 lib/radix-tree.c         | 389
 lib/random32.c           |  89
 lib/smp_processor_id.c   |  18
 lib/string.c             |   2
 lib/syscall.c            |   1
 lib/vsprintf.c           |  58
 24 files changed, 544 insertions(+), 390 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 991c98bc4a3f..4771fb3f4da4 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -182,6 +182,15 @@ config AUDIT_GENERIC
 	depends on AUDIT && !AUDIT_ARCH
 	default y
 
+config AUDIT_ARCH_COMPAT_GENERIC
+	bool
+	default n
+
+config AUDIT_COMPAT_GENERIC
+	bool
+	depends on AUDIT_GENERIC && AUDIT_ARCH_COMPAT_GENERIC && COMPAT
+	default y
+
 config RANDOM32_SELFTEST
 	bool "PRNG perform self test on init"
 	default n
@@ -342,9 +351,9 @@ config HAS_IOMEM
 	select GENERIC_IO
 	default y
 
-config HAS_IOPORT
+config HAS_IOPORT_MAP
 	boolean
-	depends on HAS_IOMEM && !NO_IOPORT
+	depends on HAS_IOMEM && !NO_IOPORT_MAP
 	default y
 
 config HAS_DMA
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dbf94a7d25a8..140b66a874c1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -119,7 +119,7 @@ menu "Compile-time checks and compiler options"
 
 config DEBUG_INFO
 	bool "Compile the kernel with debug info"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && !COMPILE_TEST
 	help
 	  If you say Y here the resulting kernel image will include
 	  debugging info resulting in a larger kernel image.
@@ -980,6 +980,21 @@ config DEBUG_LOCKING_API_SELFTESTS
 	  The following locking APIs are covered: spinlocks, rwlocks,
 	  mutexes and rwsems.
 
+config LOCK_TORTURE_TEST
+	tristate "torture tests for locking"
+	depends on DEBUG_KERNEL
+	select TORTURE_TEST
+	default n
+	help
+	  This option provides a kernel module that runs torture tests
+	  on kernel locking primitives.  The kernel module may be built
+	  after the fact on the running kernel to be tested, if desired.
+
+	  Say Y here if you want kernel locking-primitive torture tests
+	  to be built into the kernel.
+	  Say M if you want these torture tests to build as a module.
+	  Say N if you are unsure.
+
 endmenu # lock debugging
 
 config TRACE_IRQFLAGS
@@ -1030,16 +1045,6 @@ config DEBUG_BUGVERBOSE
 	  of the BUG call as well as the EIP and oops trace.  This aids
 	  debugging but costs about 70-100K of memory.
 
-config DEBUG_WRITECOUNT
-	bool "Debug filesystem writers count"
-	depends on DEBUG_KERNEL
-	help
-	  Enable this to catch wrong use of the writers count in struct
-	  vfsmount. This will increase the size of each file struct by
-	  32 bits.
-
-	  If unsure, say N.
-
 config DEBUG_LIST
 	bool "Debug linked list manipulation"
 	depends on DEBUG_KERNEL
@@ -1141,9 +1146,14 @@ config SPARSE_RCU_POINTER
 
 	  Say N if you are unsure.
 
+config TORTURE_TEST
+	tristate
+	default n
+
 config RCU_TORTURE_TEST
 	tristate "torture tests for RCU"
 	depends on DEBUG_KERNEL
+	select TORTURE_TEST
 	default n
 	help
 	  This option provides a kernel module that runs torture tests
diff --git a/lib/Makefile b/lib/Makefile
index 126b34f2eb16..0cd7b68e1382 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 
+GCOV_PROFILE_hweight.o := n
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
@@ -95,6 +96,7 @@ obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
 obj-$(CONFIG_SMP) += percpu_counter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
 
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
diff --git a/lib/audit.c b/lib/audit.c
index 76bbed4a20e5..1d726a22565b 100644
--- a/lib/audit.c
+++ b/lib/audit.c
@@ -30,11 +30,17 @@ static unsigned signal_class[] = {
 
 int audit_classify_arch(int arch)
 {
-	return 0;
+	if (audit_is_compat(arch))
+		return 1;
+	else
+		return 0;
 }
 
 int audit_classify_syscall(int abi, unsigned syscall)
 {
+	if (audit_is_compat(abi))
+		return audit_classify_compat_syscall(abi, syscall);
+
 	switch(syscall) {
 #ifdef __NR_open
 	case __NR_open:
@@ -57,6 +63,13 @@ int audit_classify_syscall(int abi, unsigned syscall)
 
 static int __init audit_classes_init(void)
 {
+#ifdef CONFIG_AUDIT_COMPAT_GENERIC
+	audit_register_class(AUDIT_CLASS_WRITE_32, compat_write_class);
+	audit_register_class(AUDIT_CLASS_READ_32, compat_read_class);
+	audit_register_class(AUDIT_CLASS_DIR_WRITE_32, compat_dir_class);
+	audit_register_class(AUDIT_CLASS_CHATTR_32, compat_chattr_class);
+	audit_register_class(AUDIT_CLASS_SIGNAL_32, compat_signal_class);
+#endif
 	audit_register_class(AUDIT_CLASS_WRITE, write_class);
 	audit_register_class(AUDIT_CLASS_READ, read_class);
 	audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
index a8f8379eb49f..2e11e48446ab 100644
--- a/lib/clz_ctz.c
+++ b/lib/clz_ctz.c
@@ -6,6 +6,9 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
+ * The functions in this file aren't called directly, but are required by
+ * GCC builtins such as __builtin_ctz, and therefore they can't be removed
+ * despite appearing unreferenced in kernel source.
  *
  * __c[lt]z[sd]i2 can be overridden by linking arch-specific versions.
  */
@@ -13,18 +16,22 @@
 #include <linux/export.h>
 #include <linux/kernel.h>
 
+int __weak __ctzsi2(int val);
 int __weak __ctzsi2(int val)
 {
 	return __ffs(val);
 }
 EXPORT_SYMBOL(__ctzsi2);
 
+int __weak __clzsi2(int val);
 int __weak __clzsi2(int val)
 {
 	return 32 - fls(val);
 }
 EXPORT_SYMBOL(__clzsi2);
 
+int __weak __clzdi2(long val);
+int __weak __ctzdi2(long val);
 #if BITS_PER_LONG == 32
 
 int __weak __clzdi2(long val)
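
The new comment is the key point of this hunk: GCC may lower __builtin_ctz() and __builtin_clz() to calls to __ctzsi2()/__clzsi2() on targets without a native bit-scan instruction, so these weak definitions must stay even though nothing in the tree calls them by name. A minimal userspace sketch of what the generic fallbacks compute (illustrative only, not kernel code; the kernel versions use __ffs() and fls() instead of loops):

	#include <stdio.h>

	/* count trailing zeros: index of the lowest set bit (val != 0) */
	static int sketch_ctzsi2(unsigned int val)
	{
		int i = 0;

		while (!(val & 1)) {
			val >>= 1;
			i++;
		}
		return i;
	}

	/* count leading zeros: 32 minus the position of the highest set bit */
	static int sketch_clzsi2(unsigned int val)
	{
		int i = 32;

		while (val) {
			val >>= 1;
			i--;
		}
		return i;
	}

	int main(void)
	{
		printf("%d %d\n", sketch_ctzsi2(0x50), sketch_clzsi2(0x50)); /* 4 25 */
		return 0;
	}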
diff --git a/lib/compat_audit.c b/lib/compat_audit.c
new file mode 100644
index 000000000000..873f75b640ab
--- /dev/null
+++ b/lib/compat_audit.c
@@ -0,0 +1,50 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <asm/unistd32.h>
+
+unsigned compat_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned compat_read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+unsigned compat_write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+unsigned compat_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+unsigned compat_signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int audit_classify_compat_syscall(int abi, unsigned syscall)
+{
+	switch (syscall) {
+#ifdef __NR_open
+	case __NR_open:
+		return 2;
+#endif
+#ifdef __NR_openat
+	case __NR_openat:
+		return 3;
+#endif
+#ifdef __NR_socketcall
+	case __NR_socketcall:
+		return 4;
+#endif
+	case __NR_execve:
+		return 5;
+	default:
+		return 1;
+	}
+}
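
Taken together with the lib/audit.c hunk above, the classification codes can be read directly off the two switch statements; no other values are produced by either path:

	/* audit_classify_syscall() and audit_classify_compat_syscall()
	 * return values, as implemented above:
	 *   0  native syscall, no special class
	 *   1  compat syscall, no special class
	 *   2  open        3  openat
	 *   4  socketcall  5  execve
	 */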
diff --git a/lib/decompress.c b/lib/decompress.c
index 4d1cd0397aab..86069d74c062 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -16,6 +16,7 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/init.h>
+#include <linux/printk.h>
 
 #ifndef CONFIG_DECOMPRESS_GZIP
 # define gunzip NULL
@@ -61,6 +62,8 @@ decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
 	if (len < 2)
 		return NULL;	/* Need at least this much... */
 
+	pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]);
+
 	for (cf = compressed_formats; cf->name; cf++) {
 		if (!memcmp(inbuf, cf->magic, 2))
 			break;
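
decompress_method() selects a decompressor purely from the first two magic bytes of the buffer, which is exactly what the new pr_debug() line prints. A standalone sketch of the same dispatch (the gzip magic 0x1f 0x8b and the xz header prefix 0xfd 0x37 are real; the table layout is illustrative):

	#include <stdio.h>
	#include <string.h>

	struct fmt { const char *name; unsigned char magic[2]; };

	static const struct fmt formats[] = {
		{ "gzip", { 0x1f, 0x8b } },
		{ "xz",   { 0xfd, 0x37 } },
		{ NULL,   { 0, 0 } },
	};

	static const char *detect(const unsigned char *buf, int len)
	{
		const struct fmt *f;

		if (len < 2)
			return NULL;	/* need at least the two magic bytes */
		for (f = formats; f->name; f++)
			if (!memcmp(buf, f->magic, 2))
				return f->name;
		return NULL;
	}

	int main(void)
	{
		const unsigned char gz[] = { 0x1f, 0x8b, 0x08 };

		printf("%s\n", detect(gz, sizeof(gz)));	/* gzip */
		return 0;
	}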
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index d619b28c456f..0edfd742a154 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -19,6 +19,7 @@
 #include "zlib_inflate/inflate.h"
 
 #include "zlib_inflate/infutil.h"
+#include <linux/decompress/inflate.h>
 
 #endif /* STATIC */
 
diff --git a/lib/devres.c b/lib/devres.c
index 823533138fa0..2f16c133fd36 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -81,11 +81,13 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
 void devm_iounmap(struct device *dev, void __iomem *addr)
 {
 	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
-			       (void *)addr));
+			       (__force void *)addr));
 	iounmap(addr);
 }
 EXPORT_SYMBOL(devm_iounmap);
 
+#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
+
 /**
  * devm_ioremap_resource() - check, request region, and ioremap resource
  * @dev: generic device to handle the resource for
@@ -114,7 +116,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
 
 	if (!res || resource_type(res) != IORESOURCE_MEM) {
 		dev_err(dev, "invalid resource\n");
-		return ERR_PTR(-EINVAL);
+		return IOMEM_ERR_PTR(-EINVAL);
 	}
 
 	size = resource_size(res);
@@ -122,7 +124,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
 
 	if (!devm_request_mem_region(dev, res->start, size, name)) {
 		dev_err(dev, "can't request region for resource %pR\n", res);
-		return ERR_PTR(-EBUSY);
+		return IOMEM_ERR_PTR(-EBUSY);
 	}
 
 	if (res->flags & IORESOURCE_CACHEABLE)
@@ -133,7 +135,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
 	if (!dest_ptr) {
 		dev_err(dev, "ioremap failed for resource %pR\n", res);
 		devm_release_mem_region(dev, res->start, size);
-		dest_ptr = ERR_PTR(-ENOMEM);
+		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
 	}
 
 	return dest_ptr;
@@ -168,7 +170,7 @@ void __iomem *devm_request_and_ioremap(struct device *device,
 }
 EXPORT_SYMBOL(devm_request_and_ioremap);
 
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 /*
  * Generic iomap devres
  */
@@ -224,10 +226,10 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
 {
 	ioport_unmap(addr);
 	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
-			       devm_ioport_map_match, (void *)addr));
+			       devm_ioport_map_match, (__force void *)addr));
 }
 EXPORT_SYMBOL(devm_ioport_unmap);
-#endif /* CONFIG_HAS_IOPORT */
+#endif /* CONFIG_HAS_IOPORT_MAP */
 
 #ifdef CONFIG_PCI
 /*
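
The IOMEM_ERR_PTR() wrapper carries the __iomem address space through ERR_PTR() so sparse stays quiet, and it is why callers must test the returned cookie with IS_ERR() rather than against NULL. A typical probe-path sketch (pdev, res and base are illustrative names):

	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* -EINVAL, -EBUSY or -ENOMEM, per above */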
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 2defd1308b04..98f2d7e91a91 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -424,111 +424,134 @@ void debug_dma_dump_mappings(struct device *dev)
 EXPORT_SYMBOL(debug_dma_dump_mappings);
 
 /*
- * For each page mapped (initial page in the case of
- * dma_alloc_coherent/dma_map_{single|page}, or each page in a
- * scatterlist) insert into this tree using the pfn as the key.  At
+ * For each mapping (initial cacheline in the case of
+ * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
+ * scatterlist, or the cacheline specified in dma_map_single) insert
+ * into this tree using the cacheline as the key.  At
  * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
- * the pfn already exists at insertion time add a tag as a reference
+ * the entry already exists at insertion time add a tag as a reference
  * count for the overlapping mappings.  For now, the overlap tracking
- * just ensures that 'unmaps' balance 'maps' before marking the pfn
- * idle, but we should also be flagging overlaps as an API violation.
+ * just ensures that 'unmaps' balance 'maps' before marking the
+ * cacheline idle, but we should also be flagging overlaps as an API
+ * violation.
  *
  * Memory usage is mostly constrained by the maximum number of available
  * dma-debug entries in that we need a free dma_debug_entry before
- * inserting into the tree.  In the case of dma_map_{single|page} and
- * dma_alloc_coherent there is only one dma_debug_entry and one pfn to
- * track per event.  dma_map_sg(), on the other hand,
- * consumes a single dma_debug_entry, but inserts 'nents' entries into
- * the tree.
+ * inserting into the tree.  In the case of dma_map_page and
+ * dma_alloc_coherent there is only one dma_debug_entry and one
+ * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
+ * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+ * entries into the tree.
  *
  * At any time debug_dma_assert_idle() can be called to trigger a
- * warning if the given page is in the active set.
+ * warning if any cachelines in the given page are in the active set.
  */
-static RADIX_TREE(dma_active_pfn, GFP_NOWAIT);
+static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
 static DEFINE_SPINLOCK(radix_lock);
-#define ACTIVE_PFN_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
 
-static int active_pfn_read_overlap(unsigned long pfn)
+static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
+{
+	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
+		(entry->offset >> L1_CACHE_SHIFT);
+}
+
+static int active_cacheline_read_overlap(phys_addr_t cln)
 {
 	int overlap = 0, i;
 
 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
-		if (radix_tree_tag_get(&dma_active_pfn, pfn, i))
+		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
 			overlap |= 1 << i;
 	return overlap;
 }
 
-static int active_pfn_set_overlap(unsigned long pfn, int overlap)
+static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
 {
 	int i;
 
-	if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0)
+	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
 		return overlap;
 
 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
 		if (overlap & 1 << i)
-			radix_tree_tag_set(&dma_active_pfn, pfn, i);
+			radix_tree_tag_set(&dma_active_cacheline, cln, i);
 		else
-			radix_tree_tag_clear(&dma_active_pfn, pfn, i);
+			radix_tree_tag_clear(&dma_active_cacheline, cln, i);
 
 	return overlap;
 }
 
-static void active_pfn_inc_overlap(unsigned long pfn)
+static void active_cacheline_inc_overlap(phys_addr_t cln)
 {
-	int overlap = active_pfn_read_overlap(pfn);
+	int overlap = active_cacheline_read_overlap(cln);
 
-	overlap = active_pfn_set_overlap(pfn, ++overlap);
+	overlap = active_cacheline_set_overlap(cln, ++overlap);
 
 	/* If we overflowed the overlap counter then we're potentially
 	 * leaking dma-mappings.  Otherwise, if maps and unmaps are
 	 * balanced then this overflow may cause false negatives in
-	 * debug_dma_assert_idle() as the pfn may be marked idle
+	 * debug_dma_assert_idle() as the cacheline may be marked idle
 	 * prematurely.
 	 */
-	WARN_ONCE(overlap > ACTIVE_PFN_MAX_OVERLAP,
-		  "DMA-API: exceeded %d overlapping mappings of pfn %lx\n",
-		  ACTIVE_PFN_MAX_OVERLAP, pfn);
+	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
+		  "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
+		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
 }
 
-static int active_pfn_dec_overlap(unsigned long pfn)
+static int active_cacheline_dec_overlap(phys_addr_t cln)
 {
-	int overlap = active_pfn_read_overlap(pfn);
+	int overlap = active_cacheline_read_overlap(cln);
 
-	return active_pfn_set_overlap(pfn, --overlap);
+	return active_cacheline_set_overlap(cln, --overlap);
 }
 
-static int active_pfn_insert(struct dma_debug_entry *entry)
+static int active_cacheline_insert(struct dma_debug_entry *entry)
 {
+	phys_addr_t cln = to_cacheline_number(entry);
 	unsigned long flags;
 	int rc;
 
+	/* If the device is not writing memory then we don't have any
+	 * concerns about the cpu consuming stale data.  This mitigates
+	 * legitimate usages of overlapping mappings.
+	 */
+	if (entry->direction == DMA_TO_DEVICE)
+		return 0;
+
 	spin_lock_irqsave(&radix_lock, flags);
-	rc = radix_tree_insert(&dma_active_pfn, entry->pfn, entry);
+	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
 	if (rc == -EEXIST)
-		active_pfn_inc_overlap(entry->pfn);
+		active_cacheline_inc_overlap(cln);
 	spin_unlock_irqrestore(&radix_lock, flags);
 
 	return rc;
 }
 
-static void active_pfn_remove(struct dma_debug_entry *entry)
+static void active_cacheline_remove(struct dma_debug_entry *entry)
 {
+	phys_addr_t cln = to_cacheline_number(entry);
 	unsigned long flags;
 
+	/* ...mirror the insert case */
+	if (entry->direction == DMA_TO_DEVICE)
+		return;
+
 	spin_lock_irqsave(&radix_lock, flags);
 	/* since we are counting overlaps the final put of the
-	 * entry->pfn will occur when the overlap count is 0.
-	 * active_pfn_dec_overlap() returns -1 in that case
+	 * cacheline will occur when the overlap count is 0.
+	 * active_cacheline_dec_overlap() returns -1 in that case
 	 */
-	if (active_pfn_dec_overlap(entry->pfn) < 0)
-		radix_tree_delete(&dma_active_pfn, entry->pfn);
+	if (active_cacheline_dec_overlap(cln) < 0)
+		radix_tree_delete(&dma_active_cacheline, cln);
 	spin_unlock_irqrestore(&radix_lock, flags);
 }
 
 /**
  * debug_dma_assert_idle() - assert that a page is not undergoing dma
- * @page: page to lookup in the dma_active_pfn tree
+ * @page: page to lookup in the dma_active_cacheline tree
  *
  * Place a call to this routine in cases where the cpu touching the page
  * before the dma completes (page is dma_unmapped) will lead to data
@@ -536,22 +559,38 @@ static void active_pfn_remove(struct dma_debug_entry *entry)
  */
 void debug_dma_assert_idle(struct page *page)
 {
+	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
+	struct dma_debug_entry *entry = NULL;
+	void **results = (void **) &ents;
+	unsigned int nents, i;
 	unsigned long flags;
-	struct dma_debug_entry *entry;
+	phys_addr_t cln;
 
 	if (!page)
 		return;
 
+	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
 	spin_lock_irqsave(&radix_lock, flags);
-	entry = radix_tree_lookup(&dma_active_pfn, page_to_pfn(page));
+	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
+				       CACHELINES_PER_PAGE);
+	for (i = 0; i < nents; i++) {
+		phys_addr_t ent_cln = to_cacheline_number(ents[i]);
+
+		if (ent_cln == cln) {
+			entry = ents[i];
+			break;
+		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
+			break;
+	}
 	spin_unlock_irqrestore(&radix_lock, flags);
 
 	if (!entry)
 		return;
 
+	cln = to_cacheline_number(entry);
 	err_printk(entry->dev, entry,
-		   "DMA-API: cpu touching an active dma mapped page "
-		   "[pfn=0x%lx]\n", entry->pfn);
+		   "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
+		   &cln);
 }
 
 /*
@@ -568,9 +607,9 @@ static void add_dma_entry(struct dma_debug_entry *entry)
 	hash_bucket_add(bucket, entry);
 	put_hash_bucket(bucket, &flags);
 
-	rc = active_pfn_insert(entry);
+	rc = active_cacheline_insert(entry);
 	if (rc == -ENOMEM) {
-		pr_err("DMA-API: pfn tracking ENOMEM, dma-debug disabled\n");
+		pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
 		global_disable = true;
 	}
 
@@ -631,7 +670,7 @@ static void dma_entry_free(struct dma_debug_entry *entry)
 {
 	unsigned long flags;
 
-	active_pfn_remove(entry);
+	active_cacheline_remove(entry);
 
 	/*
 	 * add to beginning of the list - this way the entries are
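
The granularity change is easiest to see in to_cacheline_number(): a mapping's key is its absolute cacheline index, computed from the pfn and the byte offset within the page. A standalone worked example assuming 4K pages and 64-byte cachelines (PAGE_SHIFT = 12, L1_CACHE_SHIFT = 6, i.e. 64 cachelines per page; both are configuration-dependent in a real kernel):

	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define L1_CACHE_SHIFT		6
	#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)

	/* same arithmetic as to_cacheline_number() above */
	static unsigned long long cacheline_number(unsigned long pfn,
						   unsigned long offset)
	{
		return ((unsigned long long)pfn << CACHELINE_PER_PAGE_SHIFT) +
		       (offset >> L1_CACHE_SHIFT);
	}

	int main(void)
	{
		/* pfn 2, byte offset 0x80 in the page -> 2*64 + 2 = 130 */
		printf("%llu\n", cacheline_number(2, 0x80));
		return 0;
	}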
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 4dc1b990aa23..34fd931b54b5 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -9,7 +9,7 @@ if FONT_SUPPORT
 
 config FONTS
 	bool "Select compiled-in fonts"
-	depends on FRAMEBUFFER_CONSOLE
+	depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
 	help
 	  Say Y here if you would like to use fonts other than the default
 	  your frame buffer console usually use.
@@ -22,7 +22,7 @@ config FONTS
 
 config FONT_8x8
 	bool "VGA 8x8 font" if FONTS
-	depends on FRAMEBUFFER_CONSOLE
+	depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
 	default y if !SPARC && !FONTS
 	help
 	  This is the "high resolution" font for the VGA frame buffer (the one
@@ -45,7 +45,7 @@ config FONT_8x16
 
 config FONT_6x11
 	bool "Mac console 6x11 font (not supported by all drivers)" if FONTS
-	depends on FRAMEBUFFER_CONSOLE
+	depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
 	default y if !SPARC && !FONTS && MAC
 	help
 	  Small console font with Macintosh-style high-half glyphs.  Some Mac
diff --git a/lib/idr.c b/lib/idr.c
index bfe4db4e165f..2642fa8e424d 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -196,7 +196,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 	}
 }
 
-int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < MAX_IDR_FREE) {
 		struct idr_layer *new;
@@ -207,7 +207,6 @@ int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 	}
 	return 1;
 }
-EXPORT_SYMBOL(__idr_pre_get);
 
 /**
  * sub_alloc - try to allocate an id without growing the tree depth
@@ -374,20 +373,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 	idr_mark_full(pa, id);
 }
 
-int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
-{
-	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
-	int rv;
-
-	rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
-	if (rv < 0)
-		return rv == -ENOMEM ? -EAGAIN : rv;
-
-	idr_fill_slot(idp, ptr, rv, pa);
-	*id = rv;
-	return 0;
-}
-EXPORT_SYMBOL(__idr_get_new_above);
 
 /**
  * idr_preload - preload for idr_alloc()
@@ -548,7 +533,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 	n = id & IDR_MASK;
 	if (likely(p != NULL && test_bit(n, p->bitmap))) {
 		__clear_bit(n, p->bitmap);
-		rcu_assign_pointer(p->ary[n], NULL);
+		RCU_INIT_POINTER(p->ary[n], NULL);
 		to_free = NULL;
 		while(*paa && ! --((**paa)->count)){
 			if (to_free)
@@ -607,7 +592,7 @@ void idr_remove(struct idr *idp, int id)
 }
 EXPORT_SYMBOL(idr_remove);
 
-void __idr_remove_all(struct idr *idp)
+static void __idr_remove_all(struct idr *idp)
 {
 	int n, id, max;
 	int bt_mask;
@@ -617,7 +602,7 @@ void __idr_remove_all(struct idr *idp)
 
 	n = idp->layers * IDR_BITS;
 	p = idp->top;
-	rcu_assign_pointer(idp->top, NULL);
+	RCU_INIT_POINTER(idp->top, NULL);
 	max = idr_max(idp->layers);
 
 	id = 0;
@@ -640,7 +625,6 @@ void __idr_remove_all(struct idr *idp)
 	}
 	idp->layers = 0;
 }
-EXPORT_SYMBOL(__idr_remove_all);
 
 /**
  * idr_destroy - release all cached layers within an idr tree
@@ -869,6 +853,16 @@ void idr_init(struct idr *idp)
 }
 EXPORT_SYMBOL(idr_init);
 
+static int idr_has_entry(int id, void *p, void *data)
+{
+	return 1;
+}
+
+bool idr_is_empty(struct idr *idp)
+{
+	return !idr_for_each(idp, idr_has_entry, NULL);
+}
+EXPORT_SYMBOL(idr_is_empty);
 
 /**
  * DOC: IDA description
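
idr_is_empty() is defined as "idr_for_each() finds nothing": the constant-true callback fires on the first live entry, so a negated result means no IDs are currently allocated. A hedged usage sketch (my_idr is an illustrative name):

	static DEFINE_IDR(my_idr);

	/* ... IDs allocated with idr_alloc() and released with idr_remove() ... */

	if (idr_is_empty(&my_idr))
		idr_destroy(&my_idr);	/* only cached layers remain to free */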
diff --git a/lib/iomap.c b/lib/iomap.c
index 2c08f36862eb..fc3dcb4b238e 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -224,7 +224,7 @@ EXPORT_SYMBOL(iowrite8_rep);
 EXPORT_SYMBOL(iowrite16_rep);
 EXPORT_SYMBOL(iowrite32_rep);
 
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 /* Create a virtual mapping cookie for an IO port range */
 void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
@@ -239,7 +239,7 @@ void ioport_unmap(void __iomem *addr)
 }
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
-#endif /* CONFIG_HAS_IOPORT */
+#endif /* CONFIG_HAS_IOPORT_MAP */
 
 #ifdef CONFIG_PCI
 /* Hide the details if this is a MMIO or PIO address space and just do what
diff --git a/lib/kobject.c b/lib/kobject.c
index cb14aeac4cca..58751bb80a7c 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -94,7 +94,7 @@ static int create_dir(struct kobject *kobj)
 		BUG_ON(ops->type >= KOBJ_NS_TYPES);
 		BUG_ON(!kobj_ns_type_registered(ops->type));
 
-		kernfs_enable_ns(kobj->sd);
+		sysfs_enable_ns(kobj->sd);
 	}
 
 	return 0;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 5f72767ddd9b..4e3bd71bd949 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -124,6 +124,30 @@ static int kobj_usermode_filter(struct kobject *kobj)
 	return 0;
 }
 
+static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
+{
+	int len;
+
+	len = strlcpy(&env->buf[env->buflen], subsystem,
+		      sizeof(env->buf) - env->buflen);
+	if (len >= (sizeof(env->buf) - env->buflen)) {
+		WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
+		return -ENOMEM;
+	}
+
+	env->argv[0] = uevent_helper;
+	env->argv[1] = &env->buf[env->buflen];
+	env->argv[2] = NULL;
+
+	env->buflen += len + 1;
+	return 0;
+}
+
+static void cleanup_uevent_env(struct subprocess_info *info)
+{
+	kfree(info->data);
+}
+
 /**
  * kobject_uevent_env - send an uevent with environmental data
  *
@@ -301,11 +325,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 
 	/* call uevent_helper, usually only enabled during early boot */
 	if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
-		char *argv [3];
+		struct subprocess_info *info;
 
-		argv [0] = uevent_helper;
-		argv [1] = (char *)subsystem;
-		argv [2] = NULL;
 		retval = add_uevent_var(env, "HOME=/");
 		if (retval)
 			goto exit;
@@ -313,9 +334,18 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
 				       "PATH=/sbin:/bin:/usr/sbin:/usr/bin");
 		if (retval)
 			goto exit;
+		retval = init_uevent_argv(env, subsystem);
+		if (retval)
+			goto exit;
 
-		retval = call_usermodehelper(argv[0], argv,
-					     env->envp, UMH_WAIT_EXEC);
+		retval = -ENOMEM;
+		info = call_usermodehelper_setup(env->argv[0], env->argv,
+						 env->envp, GFP_KERNEL,
+						 NULL, cleanup_uevent_env, env);
+		if (info) {
+			retval = call_usermodehelper_exec(info, UMH_NO_WAIT);
+			env = NULL;	/* freed by cleanup_uevent_env */
+		}
 	}
 
 exit:
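
The switch from call_usermodehelper() (which bundled setup and exec) to the two-step API is what makes the cleanup callback possible: env must now outlive kobject_uevent_env(), so ownership passes to the helper machinery and cleanup_uevent_env() frees it after the helper has run. The general shape of the pattern, extracted from the hunk above:

	struct subprocess_info *info;

	info = call_usermodehelper_setup(env->argv[0], env->argv, env->envp,
					 GFP_KERNEL, NULL /* no init */,
					 cleanup_uevent_env, env);
	if (!info)
		return -ENOMEM;
	/* on success, env now belongs to the helper and must not be freed here */
	return call_usermodehelper_exec(info, UMH_NO_WAIT);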
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 18eca7809b08..fc6754720ced 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -303,9 +303,15 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
  */
 int nla_strcmp(const struct nlattr *nla, const char *str)
 {
-	int len = strlen(str) + 1;
-	int d = nla_len(nla) - len;
+	int len = strlen(str);
+	char *buf = nla_data(nla);
+	int attrlen = nla_len(nla);
+	int d;
 
+	if (attrlen > 0 && buf[attrlen - 1] == '\0')
+		attrlen--;
+
+	d = attrlen - len;
 	if (d == 0)
 		d = memcmp(nla_data(nla), str, len);
 
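
The effect of the nla_strcmp() change: a netlink string attribute may arrive with or without a trailing NUL, and both forms now compare equal to the same C string. A small userspace model of the new comparison:

	#include <string.h>

	/* Mirrors the fixed nla_strcmp(): ignore one trailing NUL in the
	 * attribute payload, then compare length, then bytes. */
	static int nla_strcmp_model(const char *buf, int attrlen, const char *str)
	{
		int len = strlen(str);

		if (attrlen > 0 && buf[attrlen - 1] == '\0')
			attrlen--;
		if (attrlen != len)
			return attrlen - len;
		return memcmp(buf, str, len);
	}

With this, both {"eth0", 4} and {"eth0\0", 5} match "eth0", whereas the old code only matched the NUL-terminated form.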
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 8280a5dd1727..7dd33577b905 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -169,7 +169,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
 	struct percpu_counter *fbc;
 
 	compute_batch_value();
-	if (action != CPU_DEAD)
+	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
 		return NOTIFY_OK;
 
 	cpu = (unsigned long)hcpu;
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 7be235f1a70b..93d145e5539c 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -54,9 +54,7 @@ static inline void move_tags(unsigned *dst, unsigned *dst_nr,
 /*
  * Try to steal tags from a remote cpu's percpu freelist.
  *
- * We first check how many percpu freelists have tags - we don't steal tags
- * unless enough percpu freelists have tags on them that it's possible more than
- * half the total tags could be stuck on remote percpu freelists.
+ * We first check how many percpu freelists have tags
  *
  * Then we iterate through the cpus until we find some tags - we don't attempt
  * to find the "best" cpu to steal from, to keep cacheline bouncing to a
@@ -69,8 +67,7 @@ static inline void steal_tags(struct percpu_ida *pool,
 	struct percpu_ida_cpu *remote;
 
 	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
-	     cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2;
-	     cpus_have_tags--) {
+	     cpus_have_tags; cpus_have_tags--) {
 		cpu = cpumask_next(cpu, &pool->cpus_have_tags);
 
 		if (cpu >= nr_cpu_ids) {
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 7811ed3b4e70..9599aa72d7a0 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -35,33 +35,6 @@
 #include <linux/hardirq.h>		/* in_interrupt() */
 
 
-#ifdef __KERNEL__
-#define RADIX_TREE_MAP_SHIFT	(CONFIG_BASE_SMALL ? 4 : 6)
-#else
-#define RADIX_TREE_MAP_SHIFT	3	/* For more stressful testing */
-#endif
-
-#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
-#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)
-
-#define RADIX_TREE_TAG_LONGS	\
-	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
-
-struct radix_tree_node {
-	unsigned int	height;		/* Height from the bottom */
-	unsigned int	count;
-	union {
-		struct radix_tree_node *parent;	/* Used when ascending tree */
-		struct rcu_head	rcu_head;	/* Used when freeing node */
-	};
-	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
-	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
-};
-
-#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
-#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
-					  RADIX_TREE_MAP_SHIFT))
-
 /*
  * The height_to_maxindex array needs to be one deeper than the maximum
  * path as height 0 holds only 1 entry.
@@ -369,7 +342,8 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 
 	/* Increase the height.  */
 	newheight = root->height+1;
-	node->height = newheight;
+	BUG_ON(newheight & ~RADIX_TREE_HEIGHT_MASK);
+	node->path = newheight;
 	node->count = 1;
 	node->parent = NULL;
 	slot = root->rnode;
@@ -387,23 +361,28 @@ out:
 }
 
 /**
- *	radix_tree_insert    -    insert into a radix tree
+ *	__radix_tree_create	-	create a slot in a radix tree
  *	@root:		radix tree root
  *	@index:		index key
- *	@item:		item to insert
+ *	@nodep:		returns node
+ *	@slotp:		returns slot
  *
- *	Insert an item into the radix tree at position @index.
+ *	Create, if necessary, and return the node and slot for an item
+ *	at position @index in the radix tree @root.
+ *
+ *	Until there is more than one item in the tree, no nodes are
+ *	allocated and @root->rnode is used as a direct slot instead of
+ *	pointing to a node, in which case *@nodep will be NULL.
+ *
+ *	Returns -ENOMEM, or 0 for success.
  */
-int radix_tree_insert(struct radix_tree_root *root,
-			unsigned long index, void *item)
+int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
+			struct radix_tree_node **nodep, void ***slotp)
 {
 	struct radix_tree_node *node = NULL, *slot;
-	unsigned int height, shift;
-	int offset;
+	unsigned int height, shift, offset;
 	int error;
 
-	BUG_ON(radix_tree_is_indirect_ptr(item));
-
 	/* Make sure the tree is high enough.  */
 	if (index > radix_tree_maxindex(root->height)) {
 		error = radix_tree_extend(root, index);
@@ -422,11 +401,12 @@ int radix_tree_insert(struct radix_tree_root *root,
 		/* Have to add a child node.  */
 		if (!(slot = radix_tree_node_alloc(root)))
 			return -ENOMEM;
-		slot->height = height;
+		slot->path = height;
 		slot->parent = node;
 		if (node) {
 			rcu_assign_pointer(node->slots[offset], slot);
 			node->count++;
+			slot->path |= offset << RADIX_TREE_HEIGHT_SHIFT;
 		} else
 			rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
 	}
@@ -439,16 +419,42 @@ int radix_tree_insert(struct radix_tree_root *root,
439 height--; 419 height--;
440 } 420 }
441 421
442 if (slot != NULL) 422 if (nodep)
423 *nodep = node;
424 if (slotp)
425 *slotp = node ? node->slots + offset : (void **)&root->rnode;
426 return 0;
427}
428
429/**
430 * radix_tree_insert - insert into a radix tree
431 * @root: radix tree root
432 * @index: index key
433 * @item: item to insert
434 *
435 * Insert an item into the radix tree at position @index.
436 */
437int radix_tree_insert(struct radix_tree_root *root,
438 unsigned long index, void *item)
439{
440 struct radix_tree_node *node;
441 void **slot;
442 int error;
443
444 BUG_ON(radix_tree_is_indirect_ptr(item));
445
446 error = __radix_tree_create(root, index, &node, &slot);
447 if (error)
448 return error;
449 if (*slot != NULL)
443 return -EEXIST; 450 return -EEXIST;
451 rcu_assign_pointer(*slot, item);
444 452
445 if (node) { 453 if (node) {
446 node->count++; 454 node->count++;
447 rcu_assign_pointer(node->slots[offset], item); 455 BUG_ON(tag_get(node, 0, index & RADIX_TREE_MAP_MASK));
448 BUG_ON(tag_get(node, 0, offset)); 456 BUG_ON(tag_get(node, 1, index & RADIX_TREE_MAP_MASK));
449 BUG_ON(tag_get(node, 1, offset));
450 } else { 457 } else {
451 rcu_assign_pointer(root->rnode, item);
452 BUG_ON(root_tag_get(root, 0)); 458 BUG_ON(root_tag_get(root, 0));
453 BUG_ON(root_tag_get(root, 1)); 459 BUG_ON(root_tag_get(root, 1));
454 } 460 }
@@ -457,15 +463,26 @@ int radix_tree_insert(struct radix_tree_root *root,
 }
 EXPORT_SYMBOL(radix_tree_insert);
 
-/*
- * is_slot == 1 : search for the slot.
- * is_slot == 0 : search for the node.
+/**
+ *	__radix_tree_lookup	-	lookup an item in a radix tree
+ *	@root:		radix tree root
+ *	@index:		index key
+ *	@nodep:		returns node
+ *	@slotp:		returns slot
+ *
+ *	Lookup and return the item at position @index in the radix
+ *	tree @root.
+ *
+ *	Until there is more than one item in the tree, no nodes are
+ *	allocated and @root->rnode is used as a direct slot instead of
+ *	pointing to a node, in which case *@nodep will be NULL.
  */
-static void *radix_tree_lookup_element(struct radix_tree_root *root,
-				unsigned long index, int is_slot)
+void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
+			  struct radix_tree_node **nodep, void ***slotp)
 {
+	struct radix_tree_node *node, *parent;
 	unsigned int height, shift;
-	struct radix_tree_node *node, **slot;
+	void **slot;
 
 	node = rcu_dereference_raw(root->rnode);
 	if (node == NULL)
@@ -474,19 +491,24 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
 	if (!radix_tree_is_indirect_ptr(node)) {
 		if (index > 0)
 			return NULL;
-		return is_slot ? (void *)&root->rnode : node;
+
+		if (nodep)
+			*nodep = NULL;
+		if (slotp)
+			*slotp = (void **)&root->rnode;
+		return node;
 	}
 	node = indirect_to_ptr(node);
 
-	height = node->height;
+	height = node->path & RADIX_TREE_HEIGHT_MASK;
 	if (index > radix_tree_maxindex(height))
 		return NULL;
 
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
 
 	do {
-		slot = (struct radix_tree_node **)
-			(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
+		parent = node;
+		slot = node->slots + ((index >> shift) & RADIX_TREE_MAP_MASK);
 		node = rcu_dereference_raw(*slot);
 		if (node == NULL)
 			return NULL;
@@ -495,7 +517,11 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
 		height--;
 	} while (height > 0);
 
-	return is_slot ? (void *)slot : indirect_to_ptr(node);
+	if (nodep)
+		*nodep = parent;
+	if (slotp)
+		*slotp = slot;
+	return node;
 }
 
 /**
@@ -513,7 +539,11 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
  */
 void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
 {
-	return (void **)radix_tree_lookup_element(root, index, 1);
+	void **slot;
+
+	if (!__radix_tree_lookup(root, index, NULL, &slot))
+		return NULL;
+	return slot;
 }
 EXPORT_SYMBOL(radix_tree_lookup_slot);
 
@@ -531,7 +561,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
  */
 void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
 {
-	return radix_tree_lookup_element(root, index, 0);
+	return __radix_tree_lookup(root, index, NULL, NULL);
 }
 EXPORT_SYMBOL(radix_tree_lookup);
 
@@ -676,7 +706,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
 		return (index == 0);
 	node = indirect_to_ptr(node);
 
-	height = node->height;
+	height = node->path & RADIX_TREE_HEIGHT_MASK;
 	if (index > radix_tree_maxindex(height))
 		return 0;
 
@@ -713,7 +743,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
 {
 	unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK;
 	struct radix_tree_node *rnode, *node;
-	unsigned long index, offset;
+	unsigned long index, offset, height;
 
 	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
 		return NULL;
@@ -744,7 +774,8 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
 		return NULL;
 
 restart:
-	shift = (rnode->height - 1) * RADIX_TREE_MAP_SHIFT;
+	height = rnode->path & RADIX_TREE_HEIGHT_MASK;
+	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	offset = index >> shift;
 
 	/* Index outside of the tree */
@@ -946,81 +977,6 @@ next:
 }
 EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
 
-
-/**
- * radix_tree_next_hole    -    find the next hole (not-present entry)
- * @root:	tree root
- * @index:	index key
- * @max_scan:	maximum range to search
- *
- * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the lowest
- * indexed hole.
- *
- * Returns: the index of the hole if found, otherwise returns an index
- * outside of the set specified (in which case 'return - index >= max_scan'
- * will be true).  In rare cases of index wrap-around, 0 will be returned.
- *
- * radix_tree_next_hole may be called under rcu_read_lock.  However, like
- * radix_tree_gang_lookup, this will not atomically search a snapshot of
- * the tree at a single point in time.  For example, if a hole is created
- * at index 5, then subsequently a hole is created at index 10,
- * radix_tree_next_hole covering both indexes may return 10 if called
- * under rcu_read_lock.
- */
-unsigned long radix_tree_next_hole(struct radix_tree_root *root,
-				unsigned long index, unsigned long max_scan)
-{
-	unsigned long i;
-
-	for (i = 0; i < max_scan; i++) {
-		if (!radix_tree_lookup(root, index))
-			break;
-		index++;
-		if (index == 0)
-			break;
-	}
-
-	return index;
-}
-EXPORT_SYMBOL(radix_tree_next_hole);
-
-/**
- * radix_tree_prev_hole - find the prev hole (not-present entry)
- * @root:	tree root
- * @index:	index key
- * @max_scan:	maximum range to search
- *
- * Search backwards in the range [max(index-max_scan+1, 0), index]
- * for the first hole.
- *
- * Returns: the index of the hole if found, otherwise returns an index
- * outside of the set specified (in which case 'index - return >= max_scan'
- * will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
- *
- * radix_tree_next_hole may be called under rcu_read_lock.  However, like
- * radix_tree_gang_lookup, this will not atomically search a snapshot of
- * the tree at a single point in time.  For example, if a hole is created
- * at index 10, then subsequently a hole is created at index 5,
- * radix_tree_prev_hole covering both indexes may return 5 if called under
- * rcu_read_lock.
- */
-unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
-				   unsigned long index, unsigned long max_scan)
-{
-	unsigned long i;
-
-	for (i = 0; i < max_scan; i++) {
-		if (!radix_tree_lookup(root, index))
-			break;
-		index--;
-		if (index == ULONG_MAX)
-			break;
-	}
-
-	return index;
-}
-EXPORT_SYMBOL(radix_tree_prev_hole);
-
 /**
  * radix_tree_gang_lookup - perform multiple lookup on a radix tree
  * @root:		radix tree root
@@ -1189,7 +1145,7 @@ static unsigned long __locate(struct radix_tree_node *slot, void *item,
 	unsigned int shift, height;
 	unsigned long i;
 
-	height = slot->height;
+	height = slot->path & RADIX_TREE_HEIGHT_MASK;
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
 
 	for ( ; height > 1; height--) {
@@ -1252,9 +1208,12 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
 		}
 
 		node = indirect_to_ptr(node);
-		max_index = radix_tree_maxindex(node->height);
-		if (cur_index > max_index)
+		max_index = radix_tree_maxindex(node->path &
+						RADIX_TREE_HEIGHT_MASK);
+		if (cur_index > max_index) {
+			rcu_read_unlock();
 			break;
+		}
 
 		cur_index = __locate(node, item, cur_index, &found_index);
 		rcu_read_unlock();
@@ -1335,48 +1294,90 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
 }
 
 /**
- *	radix_tree_delete    -    delete an item from a radix tree
+ *	__radix_tree_delete_node    -    try to free node after clearing a slot
  *	@root:		radix tree root
  *	@index:		index key
+ *	@node:		node containing @index
  *
- *	Remove the item at @index from the radix tree rooted at @root.
+ *	After clearing the slot at @index in @node from radix tree
+ *	rooted at @root, call this function to attempt freeing the
+ *	node and shrinking the tree.
  *
- *	Returns the address of the deleted item, or NULL if it was not present.
+ *	Returns %true if @node was freed, %false otherwise.
  */
-void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+bool __radix_tree_delete_node(struct radix_tree_root *root,
+			      struct radix_tree_node *node)
 {
-	struct radix_tree_node *node = NULL;
-	struct radix_tree_node *slot = NULL;
-	struct radix_tree_node *to_free;
-	unsigned int height, shift;
+	bool deleted = false;
+
+	do {
+		struct radix_tree_node *parent;
+
+		if (node->count) {
+			if (node == indirect_to_ptr(root->rnode)) {
+				radix_tree_shrink(root);
+				if (root->height == 0)
+					deleted = true;
+			}
+			return deleted;
+		}
+
+		parent = node->parent;
+		if (parent) {
+			unsigned int offset;
+
+			offset = node->path >> RADIX_TREE_HEIGHT_SHIFT;
+			parent->slots[offset] = NULL;
+			parent->count--;
+		} else {
+			root_tag_clear_all(root);
+			root->height = 0;
+			root->rnode = NULL;
+		}
+
+		radix_tree_node_free(node);
+		deleted = true;
+
+		node = parent;
+	} while (node);
+
+	return deleted;
+}
+
+/**
+ *	radix_tree_delete_item    -    delete an item from a radix tree
+ *	@root:		radix tree root
+ *	@index:		index key
+ *	@item:		expected item
+ *
+ *	Remove @item at @index from the radix tree rooted at @root.
+ *
+ *	Returns the address of the deleted item, or NULL if it was not present
+ *	or the entry at the given @index was not @item.
+ */
+void *radix_tree_delete_item(struct radix_tree_root *root,
+			     unsigned long index, void *item)
+{
+	struct radix_tree_node *node;
+	unsigned int offset;
+	void **slot;
+	void *entry;
 	int tag;
-	int uninitialized_var(offset);
 
-	height = root->height;
-	if (index > radix_tree_maxindex(height))
-		goto out;
+	entry = __radix_tree_lookup(root, index, &node, &slot);
+	if (!entry)
+		return NULL;
 
-	slot = root->rnode;
-	if (height == 0) {
+	if (item && entry != item)
+		return NULL;
+
+	if (!node) {
 		root_tag_clear_all(root);
 		root->rnode = NULL;
-		goto out;
+		return entry;
 	}
-	slot = indirect_to_ptr(slot);
-	shift = height * RADIX_TREE_MAP_SHIFT;
 
-	do {
-		if (slot == NULL)
-			goto out;
-
-		shift -= RADIX_TREE_MAP_SHIFT;
-		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		node = slot;
-		slot = slot->slots[offset];
-	} while (shift);
-
-	if (slot == NULL)
-		goto out;
+	offset = index & RADIX_TREE_MAP_MASK;
 
 	/*
 	 * Clear all tags associated with the item to be deleted.
@@ -1387,40 +1388,27 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 		radix_tree_tag_clear(root, index, tag);
 	}
 
-	to_free = NULL;
-	/* Now free the nodes we do not need anymore */
-	while (node) {
-		node->slots[offset] = NULL;
-		node->count--;
-		/*
-		 * Queue the node for deferred freeing after the
-		 * last reference to it disappears (set NULL, above).
-		 */
-		if (to_free)
-			radix_tree_node_free(to_free);
-
-		if (node->count) {
-			if (node == indirect_to_ptr(root->rnode))
-				radix_tree_shrink(root);
-			goto out;
-		}
-
-		/* Node with zero slots in use so free it */
-		to_free = node;
+	node->slots[offset] = NULL;
+	node->count--;
 
-		index >>= RADIX_TREE_MAP_SHIFT;
-		offset = index & RADIX_TREE_MAP_MASK;
-		node = node->parent;
-	}
+	__radix_tree_delete_node(root, node);
 
-	root_tag_clear_all(root);
-	root->height = 0;
-	root->rnode = NULL;
-	if (to_free)
-		radix_tree_node_free(to_free);
+	return entry;
+}
+EXPORT_SYMBOL(radix_tree_delete_item);
 
-out:
-	return slot;
+/**
+ *	radix_tree_delete    -    delete an item from a radix tree
+ *	@root:		radix tree root
+ *	@index:		index key
+ *
+ *	Remove the item at @index from the radix tree rooted at @root.
+ *
+ *	Returns the address of the deleted item, or NULL if it was not present.
+ */
+void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+{
+	return radix_tree_delete_item(root, index, NULL);
 }
 EXPORT_SYMBOL(radix_tree_delete);
 
@@ -1436,9 +1424,12 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
 EXPORT_SYMBOL(radix_tree_tagged);
 
 static void
-radix_tree_node_ctor(void *node)
+radix_tree_node_ctor(void *arg)
 {
-	memset(node, 0, sizeof(struct radix_tree_node));
+	struct radix_tree_node *node = arg;
+
+	memset(node, 0, sizeof(*node));
+	INIT_LIST_HEAD(&node->private_list);
 }
 
 static __init unsigned long __maxindex(unsigned int height)
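The net effect of the radix-tree changes above: node freeing and tree shrinking move into __radix_tree_delete_node(), and radix_tree_delete() becomes a thin wrapper around the new radix_tree_delete_item(), which deletes only if the slot still holds the expected entry. A minimal caller sketch; the tree my_cache and helper my_cache_erase() are illustrative names, not part of this patch, and the caller is assumed to hold whatever lock guards the tree:

#include <linux/radix-tree.h>

static RADIX_TREE(my_cache, GFP_ATOMIC);	/* hypothetical tree */

/*
 * Remove @entry at @index only if it is still the current occupant,
 * e.g. after the lock protecting the tree was dropped and re-taken.
 * Passing item == NULL makes the delete unconditional, which is
 * exactly what radix_tree_delete() now does.
 */
static bool my_cache_erase(unsigned long index, void *entry)
{
	return radix_tree_delete_item(&my_cache, index, entry) == entry;
}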
diff --git a/lib/random32.c b/lib/random32.c
index 1e5b2df44291..fa5da61ce7ad 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -1,37 +1,35 @@
 /*
-  This is a maximally equidistributed combined Tausworthe generator
-  based on code from GNU Scientific Library 1.5 (30 Jun 2004)
-
-  lfsr113 version:
-
-  x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
-
-  s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13))
-  s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27))
-  s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21))
-  s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12))
-
-  The period of this generator is about 2^113 (see erratum paper).
-
-  From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
-  Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
-  http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
-  ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
-
-  There is an erratum in the paper "Tables of Maximally
-  Equidistributed Combined LFSR Generators", Mathematics of
-  Computation, 68, 225 (1999), 261--269:
-  http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
-
-      ... the k_j most significant bits of z_j must be non-
-      zero, for each j. (Note: this restriction also applies to the
-      computer code given in [4], but was mistakenly not mentioned in
-      that paper.)
-
-  This affects the seeding procedure by imposing the requirement
-  s1 > 1, s2 > 7, s3 > 15, s4 > 127.
-
-*/
+ * This is a maximally equidistributed combined Tausworthe generator
+ * based on code from GNU Scientific Library 1.5 (30 Jun 2004)
+ *
+ * lfsr113 version:
+ *
+ * x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
+ *
+ * s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13))
+ * s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27))
+ * s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21))
+ * s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12))
+ *
+ * The period of this generator is about 2^113 (see erratum paper).
+ *
+ * From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
+ * Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
+ * http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
+ * ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
+ *
+ * There is an erratum in the paper "Tables of Maximally Equidistributed
+ * Combined LFSR Generators", Mathematics of Computation, 68, 225 (1999),
+ * 261--269: http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
+ *
+ *      ... the k_j most significant bits of z_j must be non-zero,
+ *      for each j. (Note: this restriction also applies to the
+ *      computer code given in [4], but was mistakenly not mentioned
+ *      in that paper.)
+ *
+ * This affects the seeding procedure by imposing the requirement
+ * s1 > 1, s2 > 7, s3 > 15, s4 > 127.
+ */
 
 #include <linux/types.h>
 #include <linux/percpu.h>
@@ -75,15 +73,17 @@ EXPORT_SYMBOL(prandom_u32_state);
  */
 u32 prandom_u32(void)
 {
-	unsigned long r;
 	struct rnd_state *state = &get_cpu_var(net_rand_state);
-	r = prandom_u32_state(state);
+	u32 res;
+
+	res = prandom_u32_state(state);
 	put_cpu_var(state);
-	return r;
+
+	return res;
 }
 EXPORT_SYMBOL(prandom_u32);
 
-/*
+/**
  * prandom_bytes_state - get the requested number of pseudo-random bytes
  *
  * @state: pointer to state structure holding seeded state.
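Unlike prandom_u32() above, which draws from the per-cpu net_rand_state, the *_state() variants operate on caller-supplied state, which is what makes reproducible streams possible. A sketch of that pattern; fill_test_pattern() is an illustrative helper, not part of this patch:

#include <linux/random.h>

/* Deterministic bytes for a self-test: the same seed always yields
 * the same sequence, independent of the global per-cpu generator.
 */
static void fill_test_pattern(void *buf, int len, u64 seed)
{
	struct rnd_state state;

	prandom_seed_state(&state, seed);
	prandom_bytes_state(&state, buf, len);
}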
@@ -204,6 +204,7 @@ static int __init prandom_init(void)
 		prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy());
 		prandom_warmup(state);
 	}
+
 	return 0;
 }
 core_initcall(prandom_init);
@@ -244,10 +245,22 @@ static void __prandom_reseed(bool late)
 	static bool latch = false;
 	static DEFINE_SPINLOCK(lock);
 
+	/* Asking for random bytes might result in bytes getting
+	 * moved into the nonblocking pool and thus marking it
+	 * as initialized. In this case we would double back into
+	 * this function and attempt to do a late reseed.
+	 * Ignore the pointless attempt to reseed again if we're
+	 * already waiting for bytes when the nonblocking pool
+	 * got initialized.
+	 */
+
 	/* only allow initial seeding (late == false) once */
-	spin_lock_irqsave(&lock, flags);
+	if (!spin_trylock_irqsave(&lock, flags))
+		return;
+
 	if (latch && !late)
 		goto out;
+
 	latch = true;
 
 	for_each_possible_cpu(i) {
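The spin_trylock_irqsave() conversion above is the substantive fix: asking for random bytes can re-enter __prandom_reseed() on the same CPU, and a plain spin_lock_irqsave() there would self-deadlock. The guard pattern in isolation, with my_reseed() as an illustrative stand-in:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(reseed_lock);

static void my_reseed(void)
{
	unsigned long flags;

	/* Re-entered while a reseed is already in flight (possibly
	 * on this very CPU)? Skip the redundant attempt instead of
	 * spinning forever on a lock we may already hold.
	 */
	if (!spin_trylock_irqsave(&reseed_lock, flags))
		return;

	/* ... actual reseeding work ... */

	spin_unlock_irqrestore(&reseed_lock, flags);
}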
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 04abe53f12a1..1afec32de6f2 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -7,7 +7,8 @@
 #include <linux/kallsyms.h>
 #include <linux/sched.h>
 
-notrace unsigned int debug_smp_processor_id(void)
+notrace static unsigned int check_preemption_disabled(const char *what1,
+						      const char *what2)
 {
 	int this_cpu = raw_smp_processor_id();
 
@@ -38,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void)
 	if (!printk_ratelimit())
 		goto out_enable;
 
-	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
-			"code: %s/%d\n",
-			preempt_count() - 1, current->comm, current->pid);
+	printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
+		what1, what2, preempt_count() - 1, current->comm, current->pid);
+
 	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
 	dump_stack();
 
@@ -50,5 +51,14 @@ out:
 	return this_cpu;
 }
 
+notrace unsigned int debug_smp_processor_id(void)
+{
+	return check_preemption_disabled("smp_processor_id", "");
+}
 EXPORT_SYMBOL(debug_smp_processor_id);
 
+notrace void __this_cpu_preempt_check(const char *op)
+{
+	check_preemption_disabled("__this_cpu_", op);
+}
+EXPORT_SYMBOL(__this_cpu_preempt_check);
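With this refactoring, check_preemption_disabled() backs both debug_smp_processor_id() and the new __this_cpu_preempt_check(), so raw __this_cpu operations gain the same CONFIG_DEBUG_PREEMPT diagnostics. An illustrative sketch of what now warns and how to silence it legitimately; my_counter and both functions are made-up examples:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, my_counter);

static void buggy(void)
{
	/* Preemptible context: the task can migrate between CPUs
	 * mid-update, so this now warns via check_preemption_disabled().
	 */
	__this_cpu_inc(my_counter);
}

static void fixed(void)
{
	preempt_disable();
	__this_cpu_inc(my_counter);	/* CPU is stable here */
	preempt_enable();
}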
diff --git a/lib/string.c b/lib/string.c
index e5878de4f101..9b1f9062a202 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -648,7 +648,7 @@ EXPORT_SYMBOL(memmove);
  * @count: The size of the area.
  */
 #undef memcmp
-int memcmp(const void *cs, const void *ct, size_t count)
+__visible int memcmp(const void *cs, const void *ct, size_t count)
 {
 	const unsigned char *su1, *su2;
 	int res = 0;
diff --git a/lib/syscall.c b/lib/syscall.c
index 58710eefeac8..e30e03932480 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -72,4 +72,3 @@ int task_current_syscall(struct task_struct *target, long *callno,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(task_current_syscall);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 185b6d300ebc..0648291cdafe 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -364,7 +364,6 @@ enum format_type {
 	FORMAT_TYPE_SHORT,
 	FORMAT_TYPE_UINT,
 	FORMAT_TYPE_INT,
-	FORMAT_TYPE_NRCHARS,
 	FORMAT_TYPE_SIZE_T,
 	FORMAT_TYPE_PTRDIFF
 };
@@ -719,10 +718,15 @@ char *resource_string(char *buf, char *end, struct resource *res,
 		specp = &mem_spec;
 		decode = 0;
 	}
-	p = number(p, pend, res->start, *specp);
-	if (res->start != res->end) {
-		*p++ = '-';
-		p = number(p, pend, res->end, *specp);
+	if (decode && res->flags & IORESOURCE_UNSET) {
+		p = string(p, pend, "size ", str_spec);
+		p = number(p, pend, resource_size(res), *specp);
+	} else {
+		p = number(p, pend, res->start, *specp);
+		if (res->start != res->end) {
+			*p++ = '-';
+			p = number(p, pend, res->end, *specp);
+		}
 	}
 	if (decode) {
 		if (res->flags & IORESOURCE_MEM_64)
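The new branch changes what %pR prints for a resource whose address has not been assigned yet (IORESOURCE_UNSET): only the size is meaningful, so only the size is shown. An illustrative example; the structure contents are made up and the exact output may carry additional flag suffixes:

#include <linux/ioport.h>
#include <linux/printk.h>

static struct resource unassigned = {
	.start = 0,
	.end   = 0xfff,		/* only the 4K size means anything yet */
	.flags = IORESOURCE_MEM | IORESOURCE_UNSET,
};

static void show(void)
{
	/* before: something like [mem 0x00000000-0x00000fff]
	 * after:  something like [mem size 0x1000]
	 */
	printk(KERN_INFO "%pR\n", &unassigned);
}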
@@ -1533,10 +1537,6 @@ qualifier:
 		return fmt - start;
 	/* skip alnum */
 
-	case 'n':
-		spec->type = FORMAT_TYPE_NRCHARS;
-		return ++fmt - start;
-
 	case '%':
 		spec->type = FORMAT_TYPE_PERCENT_CHAR;
 		return ++fmt - start;
@@ -1559,6 +1559,15 @@ qualifier:
 	case 'u':
 		break;
 
+	case 'n':
+		/*
+		 * Since %n poses a greater security risk than utility, treat
+		 * it as an invalid format specifier. Warn about its use so
+		 * that new instances don't get added.
+		 */
+		WARN_ONCE(1, "Please remove ignored %%n in '%s'\n", fmt);
+		/* Fall-through */
+
 	default:
 		spec->type = FORMAT_TYPE_INVALID;
 		return fmt - start;
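Taken together with the vsnprintf()/bstr_printf() hunks below, this removes %n support outright: format_decode() now classifies it as FORMAT_TYPE_INVALID, warns once per call site, and no longer consumes a varargs pointer, so nothing is ever written through a %n argument. An illustrative (and intentionally buggy) caller:

#include <linux/kernel.h>

static void demo(void)
{
	char buf[32];
	int written = 0;

	/* Fires the WARN_ONCE() above; @written stays 0 because the
	 * argument is neither consumed nor dereferenced anymore.
	 */
	snprintf(buf, sizeof(buf), "hello%n", &written);
}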
@@ -1732,20 +1741,6 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 			++str;
 		break;
 
-	case FORMAT_TYPE_NRCHARS: {
-		/*
-		 * Since %n poses a greater security risk than
-		 * utility, ignore %n and skip its argument.
-		 */
-		void *skip_arg;
-
-		WARN_ONCE(1, "Please remove ignored %%n in '%s'\n",
-				old_fmt);
-
-		skip_arg = va_arg(args, void *);
-		break;
-	}
-
 	default:
 		switch (spec.type) {
 		case FORMAT_TYPE_LONG_LONG:
@@ -2020,19 +2015,6 @@ do { \
 			fmt++;
 		break;
 
-	case FORMAT_TYPE_NRCHARS: {
-		/* skip %n 's argument */
-		u8 qualifier = spec.qualifier;
-		void *skip_arg;
-		if (qualifier == 'l')
-			skip_arg = va_arg(args, long *);
-		else if (_tolower(qualifier) == 'z')
-			skip_arg = va_arg(args, size_t *);
-		else
-			skip_arg = va_arg(args, int *);
-		break;
-	}
-
 	default:
 		switch (spec.type) {
 
@@ -2191,10 +2173,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
 			++str;
 		break;
 
-	case FORMAT_TYPE_NRCHARS:
-		/* skip */
-		break;
-
 	default: {
 		unsigned long long num;
 