Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig               |   4
-rw-r--r--  lib/clz_ctz.c             |   7
-rw-r--r--  lib/decompress.c          |   3
-rw-r--r--  lib/decompress_inflate.c  |   1
-rw-r--r--  lib/devres.c              |  16
-rw-r--r--  lib/idr.c                 |  34
-rw-r--r--  lib/iomap.c               |   4
-rw-r--r--  lib/kobject.c             |   2
-rw-r--r--  lib/kobject_uevent.c      |  42
-rw-r--r--  lib/nlattr.c              |  10
-rw-r--r--  lib/radix-tree.c          | 385
-rw-r--r--  lib/random32.c            |  76
-rw-r--r--  lib/smp_processor_id.c    |  18
-rw-r--r--  lib/syscall.c             |   1
-rw-r--r--  lib/vsprintf.c            |  58
15 files changed, 341 insertions(+), 320 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 991c98bc4a3f..5d4984c505f8 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -342,9 +342,9 @@ config HAS_IOMEM
342 select GENERIC_IO 342 select GENERIC_IO
343 default y 343 default y
344 344
345config HAS_IOPORT 345config HAS_IOPORT_MAP
346 boolean 346 boolean
347 depends on HAS_IOMEM && !NO_IOPORT 347 depends on HAS_IOMEM && !NO_IOPORT_MAP
348 default y 348 default y
349 349
350config HAS_DMA 350config HAS_DMA
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
index a8f8379eb49f..2e11e48446ab 100644
--- a/lib/clz_ctz.c
+++ b/lib/clz_ctz.c
@@ -6,6 +6,9 @@
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 * The functions in this file aren't called directly, but are required by
10 * GCC builtins such as __builtin_ctz, and therefore they can't be removed
11 * despite appearing unreferenced in kernel source.
9 * 12 *
10 * __c[lt]z[sd]i2 can be overridden by linking arch-specific versions. 13 * __c[lt]z[sd]i2 can be overridden by linking arch-specific versions.
11 */ 14 */
@@ -13,18 +16,22 @@
13#include <linux/export.h> 16#include <linux/export.h>
14#include <linux/kernel.h> 17#include <linux/kernel.h>
15 18
19int __weak __ctzsi2(int val);
16int __weak __ctzsi2(int val) 20int __weak __ctzsi2(int val)
17{ 21{
18 return __ffs(val); 22 return __ffs(val);
19} 23}
20EXPORT_SYMBOL(__ctzsi2); 24EXPORT_SYMBOL(__ctzsi2);
21 25
26int __weak __clzsi2(int val);
22int __weak __clzsi2(int val) 27int __weak __clzsi2(int val)
23{ 28{
24 return 32 - fls(val); 29 return 32 - fls(val);
25} 30}
26EXPORT_SYMBOL(__clzsi2); 31EXPORT_SYMBOL(__clzsi2);
27 32
33int __weak __clzdi2(long val);
34int __weak __ctzdi2(long val);
28#if BITS_PER_LONG == 32 35#if BITS_PER_LONG == 32
29 36
30int __weak __clzdi2(long val) 37int __weak __clzdi2(long val)
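The new prototypes and the added comment make the same point: these helpers are never called by name in kernel code, only through GCC builtins. A minimal sketch (illustrative, not from the patch) of code that ends up needing __ctzdi2() on a 32-bit target without a native count-trailing-zeros instruction:

    #include <linux/kernel.h>

    static int first_set_bit64(unsigned long long mask)
    {
            /* GCC may lower this builtin to a libgcc-style call to __ctzdi2(),
             * which is why the seemingly unreferenced functions in clz_ctz.c
             * (or their arch-specific overrides) have to stay linked in. */
            return mask ? __builtin_ctzll(mask) : -1;
    }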
diff --git a/lib/decompress.c b/lib/decompress.c
index 4d1cd0397aab..86069d74c062 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -16,6 +16,7 @@
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/printk.h>
19 20
20#ifndef CONFIG_DECOMPRESS_GZIP 21#ifndef CONFIG_DECOMPRESS_GZIP
21# define gunzip NULL 22# define gunzip NULL
@@ -61,6 +62,8 @@ decompress_fn __init decompress_method(const unsigned char *inbuf, int len,
61 if (len < 2) 62 if (len < 2)
62 return NULL; /* Need at least this much... */ 63 return NULL; /* Need at least this much... */
63 64
65 pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]);
66
64 for (cf = compressed_formats; cf->name; cf++) { 67 for (cf = compressed_formats; cf->name; cf++) {
65 if (!memcmp(inbuf, cf->magic, 2)) 68 if (!memcmp(inbuf, cf->magic, 2))
66 break; 69 break;
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index d619b28c456f..0edfd742a154 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -19,6 +19,7 @@
19#include "zlib_inflate/inflate.h" 19#include "zlib_inflate/inflate.h"
20 20
21#include "zlib_inflate/infutil.h" 21#include "zlib_inflate/infutil.h"
22#include <linux/decompress/inflate.h>
22 23
23#endif /* STATIC */ 24#endif /* STATIC */
24 25
diff --git a/lib/devres.c b/lib/devres.c
index 823533138fa0..2f16c133fd36 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -81,11 +81,13 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
81void devm_iounmap(struct device *dev, void __iomem *addr) 81void devm_iounmap(struct device *dev, void __iomem *addr)
82{ 82{
83 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match, 83 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
84 (void *)addr)); 84 (__force void *)addr));
85 iounmap(addr); 85 iounmap(addr);
86} 86}
87EXPORT_SYMBOL(devm_iounmap); 87EXPORT_SYMBOL(devm_iounmap);
88 88
89#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
90
89/** 91/**
90 * devm_ioremap_resource() - check, request region, and ioremap resource 92 * devm_ioremap_resource() - check, request region, and ioremap resource
91 * @dev: generic device to handle the resource for 93 * @dev: generic device to handle the resource for
@@ -114,7 +116,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
114 116
115 if (!res || resource_type(res) != IORESOURCE_MEM) { 117 if (!res || resource_type(res) != IORESOURCE_MEM) {
116 dev_err(dev, "invalid resource\n"); 118 dev_err(dev, "invalid resource\n");
117 return ERR_PTR(-EINVAL); 119 return IOMEM_ERR_PTR(-EINVAL);
118 } 120 }
119 121
120 size = resource_size(res); 122 size = resource_size(res);
@@ -122,7 +124,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
122 124
123 if (!devm_request_mem_region(dev, res->start, size, name)) { 125 if (!devm_request_mem_region(dev, res->start, size, name)) {
124 dev_err(dev, "can't request region for resource %pR\n", res); 126 dev_err(dev, "can't request region for resource %pR\n", res);
125 return ERR_PTR(-EBUSY); 127 return IOMEM_ERR_PTR(-EBUSY);
126 } 128 }
127 129
128 if (res->flags & IORESOURCE_CACHEABLE) 130 if (res->flags & IORESOURCE_CACHEABLE)
@@ -133,7 +135,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
133 if (!dest_ptr) { 135 if (!dest_ptr) {
134 dev_err(dev, "ioremap failed for resource %pR\n", res); 136 dev_err(dev, "ioremap failed for resource %pR\n", res);
135 devm_release_mem_region(dev, res->start, size); 137 devm_release_mem_region(dev, res->start, size);
136 dest_ptr = ERR_PTR(-ENOMEM); 138 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
137 } 139 }
138 140
139 return dest_ptr; 141 return dest_ptr;
@@ -168,7 +170,7 @@ void __iomem *devm_request_and_ioremap(struct device *device,
168} 170}
169EXPORT_SYMBOL(devm_request_and_ioremap); 171EXPORT_SYMBOL(devm_request_and_ioremap);
170 172
171#ifdef CONFIG_HAS_IOPORT 173#ifdef CONFIG_HAS_IOPORT_MAP
172/* 174/*
173 * Generic iomap devres 175 * Generic iomap devres
174 */ 176 */
@@ -224,10 +226,10 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
224{ 226{
225 ioport_unmap(addr); 227 ioport_unmap(addr);
226 WARN_ON(devres_destroy(dev, devm_ioport_map_release, 228 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
227 devm_ioport_map_match, (void *)addr)); 229 devm_ioport_map_match, (__force void *)addr));
228} 230}
229EXPORT_SYMBOL(devm_ioport_unmap); 231EXPORT_SYMBOL(devm_ioport_unmap);
230#endif /* CONFIG_HAS_IOPORT */ 232#endif /* CONFIG_HAS_IOPORT_MAP */
231 233
232#ifdef CONFIG_PCI 234#ifdef CONFIG_PCI
233/* 235/*
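IOMEM_ERR_PTR() only adds the __force __iomem cast so that sparse accepts returning an error pointer through a void __iomem * return type; callers keep checking the result with IS_ERR()/PTR_ERR(). A minimal probe-time sketch (hypothetical driver, assumed platform device):

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))               /* error pointer, never NULL, on failure */
                    return PTR_ERR(base);

            return 0;
    }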
diff --git a/lib/idr.c b/lib/idr.c
index bfe4db4e165f..2642fa8e424d 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -196,7 +196,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
196 } 196 }
197} 197}
198 198
199int __idr_pre_get(struct idr *idp, gfp_t gfp_mask) 199static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
200{ 200{
201 while (idp->id_free_cnt < MAX_IDR_FREE) { 201 while (idp->id_free_cnt < MAX_IDR_FREE) {
202 struct idr_layer *new; 202 struct idr_layer *new;
@@ -207,7 +207,6 @@ int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
207 } 207 }
208 return 1; 208 return 1;
209} 209}
210EXPORT_SYMBOL(__idr_pre_get);
211 210
212/** 211/**
213 * sub_alloc - try to allocate an id without growing the tree depth 212 * sub_alloc - try to allocate an id without growing the tree depth
@@ -374,20 +373,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
374 idr_mark_full(pa, id); 373 idr_mark_full(pa, id);
375} 374}
376 375
377int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
378{
379 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
380 int rv;
381
382 rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
383 if (rv < 0)
384 return rv == -ENOMEM ? -EAGAIN : rv;
385
386 idr_fill_slot(idp, ptr, rv, pa);
387 *id = rv;
388 return 0;
389}
390EXPORT_SYMBOL(__idr_get_new_above);
391 376
392/** 377/**
393 * idr_preload - preload for idr_alloc() 378 * idr_preload - preload for idr_alloc()
@@ -548,7 +533,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
548 n = id & IDR_MASK; 533 n = id & IDR_MASK;
549 if (likely(p != NULL && test_bit(n, p->bitmap))) { 534 if (likely(p != NULL && test_bit(n, p->bitmap))) {
550 __clear_bit(n, p->bitmap); 535 __clear_bit(n, p->bitmap);
551 rcu_assign_pointer(p->ary[n], NULL); 536 RCU_INIT_POINTER(p->ary[n], NULL);
552 to_free = NULL; 537 to_free = NULL;
553 while(*paa && ! --((**paa)->count)){ 538 while(*paa && ! --((**paa)->count)){
554 if (to_free) 539 if (to_free)
@@ -607,7 +592,7 @@ void idr_remove(struct idr *idp, int id)
607} 592}
608EXPORT_SYMBOL(idr_remove); 593EXPORT_SYMBOL(idr_remove);
609 594
610void __idr_remove_all(struct idr *idp) 595static void __idr_remove_all(struct idr *idp)
611{ 596{
612 int n, id, max; 597 int n, id, max;
613 int bt_mask; 598 int bt_mask;
@@ -617,7 +602,7 @@ void __idr_remove_all(struct idr *idp)
617 602
618 n = idp->layers * IDR_BITS; 603 n = idp->layers * IDR_BITS;
619 p = idp->top; 604 p = idp->top;
620 rcu_assign_pointer(idp->top, NULL); 605 RCU_INIT_POINTER(idp->top, NULL);
621 max = idr_max(idp->layers); 606 max = idr_max(idp->layers);
622 607
623 id = 0; 608 id = 0;
@@ -640,7 +625,6 @@ void __idr_remove_all(struct idr *idp)
640 } 625 }
641 idp->layers = 0; 626 idp->layers = 0;
642} 627}
643EXPORT_SYMBOL(__idr_remove_all);
644 628
645/** 629/**
646 * idr_destroy - release all cached layers within an idr tree 630 * idr_destroy - release all cached layers within an idr tree
@@ -869,6 +853,16 @@ void idr_init(struct idr *idp)
869} 853}
870EXPORT_SYMBOL(idr_init); 854EXPORT_SYMBOL(idr_init);
871 855
856static int idr_has_entry(int id, void *p, void *data)
857{
858 return 1;
859}
860
861bool idr_is_empty(struct idr *idp)
862{
863 return !idr_for_each(idp, idr_has_entry, NULL);
864}
865EXPORT_SYMBOL(idr_is_empty);
872 866
873/** 867/**
874 * DOC: IDA description 868 * DOC: IDA description
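idr_is_empty() is the only new external interface here; __idr_pre_get() becomes static and __idr_get_new_above() is removed, both belonging to the legacy pre-idr_alloc() path. A small sketch (illustrative names) of the new helper next to the idr_alloc() API:

    #include <linux/idr.h>

    static DEFINE_IDR(demo_idr);

    static void demo_idr_usage(void *obj)
    {
            int id;

            idr_preload(GFP_KERNEL);
            id = idr_alloc(&demo_idr, obj, 0, 0, GFP_NOWAIT);
            idr_preload_end();
            if (id < 0)
                    return;

            idr_remove(&demo_idr, id);

            /* New in this series: true once no entries remain. */
            WARN_ON(!idr_is_empty(&demo_idr));
    }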
diff --git a/lib/iomap.c b/lib/iomap.c
index 2c08f36862eb..fc3dcb4b238e 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -224,7 +224,7 @@ EXPORT_SYMBOL(iowrite8_rep);
224EXPORT_SYMBOL(iowrite16_rep); 224EXPORT_SYMBOL(iowrite16_rep);
225EXPORT_SYMBOL(iowrite32_rep); 225EXPORT_SYMBOL(iowrite32_rep);
226 226
227#ifdef CONFIG_HAS_IOPORT 227#ifdef CONFIG_HAS_IOPORT_MAP
228/* Create a virtual mapping cookie for an IO port range */ 228/* Create a virtual mapping cookie for an IO port range */
229void __iomem *ioport_map(unsigned long port, unsigned int nr) 229void __iomem *ioport_map(unsigned long port, unsigned int nr)
230{ 230{
@@ -239,7 +239,7 @@ void ioport_unmap(void __iomem *addr)
239} 239}
240EXPORT_SYMBOL(ioport_map); 240EXPORT_SYMBOL(ioport_map);
241EXPORT_SYMBOL(ioport_unmap); 241EXPORT_SYMBOL(ioport_unmap);
242#endif /* CONFIG_HAS_IOPORT */ 242#endif /* CONFIG_HAS_IOPORT_MAP */
243 243
244#ifdef CONFIG_PCI 244#ifdef CONFIG_PCI
245/* Hide the details if this is a MMIO or PIO address space and just do what 245/* Hide the details if this is a MMIO or PIO address space and just do what
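The HAS_IOPORT -> HAS_IOPORT_MAP rename only changes the config symbol guarding ioport_map()/ioport_unmap(); the functions themselves are untouched. A hedged sketch of a port-I/O mapping under the new guard:

    #include <linux/io.h>

    #ifdef CONFIG_HAS_IOPORT_MAP
    static void __iomem *demo_map_pio(unsigned long port)
    {
            /* Map 8 consecutive I/O ports into an iomem-style cookie. */
            return ioport_map(port, 8);
    }
    #endif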
diff --git a/lib/kobject.c b/lib/kobject.c
index cb14aeac4cca..58751bb80a7c 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -94,7 +94,7 @@ static int create_dir(struct kobject *kobj)
94 BUG_ON(ops->type >= KOBJ_NS_TYPES); 94 BUG_ON(ops->type >= KOBJ_NS_TYPES);
95 BUG_ON(!kobj_ns_type_registered(ops->type)); 95 BUG_ON(!kobj_ns_type_registered(ops->type));
96 96
97 kernfs_enable_ns(kobj->sd); 97 sysfs_enable_ns(kobj->sd);
98 } 98 }
99 99
100 return 0; 100 return 0;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 5f72767ddd9b..4e3bd71bd949 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -124,6 +124,30 @@ static int kobj_usermode_filter(struct kobject *kobj)
124 return 0; 124 return 0;
125} 125}
126 126
127static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
128{
129 int len;
130
131 len = strlcpy(&env->buf[env->buflen], subsystem,
132 sizeof(env->buf) - env->buflen);
133 if (len >= (sizeof(env->buf) - env->buflen)) {
134 WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
135 return -ENOMEM;
136 }
137
138 env->argv[0] = uevent_helper;
139 env->argv[1] = &env->buf[env->buflen];
140 env->argv[2] = NULL;
141
142 env->buflen += len + 1;
143 return 0;
144}
145
146static void cleanup_uevent_env(struct subprocess_info *info)
147{
148 kfree(info->data);
149}
150
127/** 151/**
128 * kobject_uevent_env - send an uevent with environmental data 152 * kobject_uevent_env - send an uevent with environmental data
129 * 153 *
@@ -301,11 +325,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
301 325
302 /* call uevent_helper, usually only enabled during early boot */ 326 /* call uevent_helper, usually only enabled during early boot */
303 if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { 327 if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
304 char *argv [3]; 328 struct subprocess_info *info;
305 329
306 argv [0] = uevent_helper;
307 argv [1] = (char *)subsystem;
308 argv [2] = NULL;
309 retval = add_uevent_var(env, "HOME=/"); 330 retval = add_uevent_var(env, "HOME=/");
310 if (retval) 331 if (retval)
311 goto exit; 332 goto exit;
@@ -313,9 +334,18 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
313 "PATH=/sbin:/bin:/usr/sbin:/usr/bin"); 334 "PATH=/sbin:/bin:/usr/sbin:/usr/bin");
314 if (retval) 335 if (retval)
315 goto exit; 336 goto exit;
337 retval = init_uevent_argv(env, subsystem);
338 if (retval)
339 goto exit;
316 340
317 retval = call_usermodehelper(argv[0], argv, 341 retval = -ENOMEM;
318 env->envp, UMH_WAIT_EXEC); 342 info = call_usermodehelper_setup(env->argv[0], env->argv,
343 env->envp, GFP_KERNEL,
344 NULL, cleanup_uevent_env, env);
345 if (info) {
346 retval = call_usermodehelper_exec(info, UMH_NO_WAIT);
347 env = NULL; /* freed by cleanup_uevent_env */
348 }
319 } 349 }
320 350
321exit: 351exit:
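Switching from call_usermodehelper() to the setup/exec pair lets the uevent env buffer outlive kobject_uevent_env(): with UMH_NO_WAIT the helper may run after the caller has returned, so ownership of env passes to cleanup_uevent_env(). The general shape of that pattern, as a sketch rather than a copy of the patch:

    #include <linux/kmod.h>
    #include <linux/slab.h>

    static void demo_cleanup(struct subprocess_info *info)
    {
            kfree(info->data);      /* data must stay valid until the helper has run */
    }

    static int demo_spawn(char *path, char **argv, char **envp, void *data)
    {
            struct subprocess_info *info;

            info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL,
                                             NULL, demo_cleanup, data);
            if (!info)
                    return -ENOMEM;

            /* Fire and forget; demo_cleanup() frees @data afterwards. */
            return call_usermodehelper_exec(info, UMH_NO_WAIT);
    }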
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 18eca7809b08..fc6754720ced 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -303,9 +303,15 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
303 */ 303 */
304int nla_strcmp(const struct nlattr *nla, const char *str) 304int nla_strcmp(const struct nlattr *nla, const char *str)
305{ 305{
306 int len = strlen(str) + 1; 306 int len = strlen(str);
307 int d = nla_len(nla) - len; 307 char *buf = nla_data(nla);
308 int attrlen = nla_len(nla);
309 int d;
308 310
311 if (attrlen > 0 && buf[attrlen - 1] == '\0')
312 attrlen--;
313
314 d = attrlen - len;
309 if (d == 0) 315 if (d == 0)
310 d = memcmp(nla_data(nla), str, len); 316 d = memcmp(nla_data(nla), str, len);
311 317
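The nla_strcmp() change makes the comparison tolerant of whether the sender included the terminating NUL in the string attribute payload. A small sketch of a validation helper relying on that (attribute and interface name are illustrative):

    #include <linux/types.h>
    #include <net/netlink.h>

    static bool demo_is_eth0(const struct nlattr *name_attr)
    {
            /* Matches whether the payload is "eth0" or "eth0\0". */
            return nla_strcmp(name_attr, "eth0") == 0;
    }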
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bd4a8dfdf0b8..9599aa72d7a0 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -35,33 +35,6 @@
35#include <linux/hardirq.h> /* in_interrupt() */ 35#include <linux/hardirq.h> /* in_interrupt() */
36 36
37 37
38#ifdef __KERNEL__
39#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
40#else
41#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */
42#endif
43
44#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
45#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
46
47#define RADIX_TREE_TAG_LONGS \
48 ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
49
50struct radix_tree_node {
51 unsigned int height; /* Height from the bottom */
52 unsigned int count;
53 union {
54 struct radix_tree_node *parent; /* Used when ascending tree */
55 struct rcu_head rcu_head; /* Used when freeing node */
56 };
57 void __rcu *slots[RADIX_TREE_MAP_SIZE];
58 unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
59};
60
61#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
62#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
63 RADIX_TREE_MAP_SHIFT))
64
65/* 38/*
66 * The height_to_maxindex array needs to be one deeper than the maximum 39 * The height_to_maxindex array needs to be one deeper than the maximum
67 * path as height 0 holds only 1 entry. 40 * path as height 0 holds only 1 entry.
@@ -369,7 +342,8 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
369 342
370 /* Increase the height. */ 343 /* Increase the height. */
371 newheight = root->height+1; 344 newheight = root->height+1;
372 node->height = newheight; 345 BUG_ON(newheight & ~RADIX_TREE_HEIGHT_MASK);
346 node->path = newheight;
373 node->count = 1; 347 node->count = 1;
374 node->parent = NULL; 348 node->parent = NULL;
375 slot = root->rnode; 349 slot = root->rnode;
@@ -387,23 +361,28 @@ out:
387} 361}
388 362
389/** 363/**
390 * radix_tree_insert - insert into a radix tree 364 * __radix_tree_create - create a slot in a radix tree
391 * @root: radix tree root 365 * @root: radix tree root
392 * @index: index key 366 * @index: index key
393 * @item: item to insert 367 * @nodep: returns node
368 * @slotp: returns slot
394 * 369 *
395 * Insert an item into the radix tree at position @index. 370 * Create, if necessary, and return the node and slot for an item
371 * at position @index in the radix tree @root.
372 *
373 * Until there is more than one item in the tree, no nodes are
374 * allocated and @root->rnode is used as a direct slot instead of
375 * pointing to a node, in which case *@nodep will be NULL.
376 *
377 * Returns -ENOMEM, or 0 for success.
396 */ 378 */
397int radix_tree_insert(struct radix_tree_root *root, 379int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
398 unsigned long index, void *item) 380 struct radix_tree_node **nodep, void ***slotp)
399{ 381{
400 struct radix_tree_node *node = NULL, *slot; 382 struct radix_tree_node *node = NULL, *slot;
401 unsigned int height, shift; 383 unsigned int height, shift, offset;
402 int offset;
403 int error; 384 int error;
404 385
405 BUG_ON(radix_tree_is_indirect_ptr(item));
406
407 /* Make sure the tree is high enough. */ 386 /* Make sure the tree is high enough. */
408 if (index > radix_tree_maxindex(root->height)) { 387 if (index > radix_tree_maxindex(root->height)) {
409 error = radix_tree_extend(root, index); 388 error = radix_tree_extend(root, index);
@@ -422,11 +401,12 @@ int radix_tree_insert(struct radix_tree_root *root,
422 /* Have to add a child node. */ 401 /* Have to add a child node. */
423 if (!(slot = radix_tree_node_alloc(root))) 402 if (!(slot = radix_tree_node_alloc(root)))
424 return -ENOMEM; 403 return -ENOMEM;
425 slot->height = height; 404 slot->path = height;
426 slot->parent = node; 405 slot->parent = node;
427 if (node) { 406 if (node) {
428 rcu_assign_pointer(node->slots[offset], slot); 407 rcu_assign_pointer(node->slots[offset], slot);
429 node->count++; 408 node->count++;
409 slot->path |= offset << RADIX_TREE_HEIGHT_SHIFT;
430 } else 410 } else
431 rcu_assign_pointer(root->rnode, ptr_to_indirect(slot)); 411 rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
432 } 412 }
@@ -439,16 +419,42 @@ int radix_tree_insert(struct radix_tree_root *root,
439 height--; 419 height--;
440 } 420 }
441 421
442 if (slot != NULL) 422 if (nodep)
423 *nodep = node;
424 if (slotp)
425 *slotp = node ? node->slots + offset : (void **)&root->rnode;
426 return 0;
427}
428
429/**
430 * radix_tree_insert - insert into a radix tree
431 * @root: radix tree root
432 * @index: index key
433 * @item: item to insert
434 *
435 * Insert an item into the radix tree at position @index.
436 */
437int radix_tree_insert(struct radix_tree_root *root,
438 unsigned long index, void *item)
439{
440 struct radix_tree_node *node;
441 void **slot;
442 int error;
443
444 BUG_ON(radix_tree_is_indirect_ptr(item));
445
446 error = __radix_tree_create(root, index, &node, &slot);
447 if (error)
448 return error;
449 if (*slot != NULL)
443 return -EEXIST; 450 return -EEXIST;
451 rcu_assign_pointer(*slot, item);
444 452
445 if (node) { 453 if (node) {
446 node->count++; 454 node->count++;
447 rcu_assign_pointer(node->slots[offset], item); 455 BUG_ON(tag_get(node, 0, index & RADIX_TREE_MAP_MASK));
448 BUG_ON(tag_get(node, 0, offset)); 456 BUG_ON(tag_get(node, 1, index & RADIX_TREE_MAP_MASK));
449 BUG_ON(tag_get(node, 1, offset));
450 } else { 457 } else {
451 rcu_assign_pointer(root->rnode, item);
452 BUG_ON(root_tag_get(root, 0)); 458 BUG_ON(root_tag_get(root, 0));
453 BUG_ON(root_tag_get(root, 1)); 459 BUG_ON(root_tag_get(root, 1));
454 } 460 }
@@ -457,15 +463,26 @@ int radix_tree_insert(struct radix_tree_root *root,
457} 463}
458EXPORT_SYMBOL(radix_tree_insert); 464EXPORT_SYMBOL(radix_tree_insert);
459 465
460/* 466/**
461 * is_slot == 1 : search for the slot. 467 * __radix_tree_lookup - lookup an item in a radix tree
462 * is_slot == 0 : search for the node. 468 * @root: radix tree root
469 * @index: index key
470 * @nodep: returns node
471 * @slotp: returns slot
472 *
473 * Lookup and return the item at position @index in the radix
474 * tree @root.
475 *
476 * Until there is more than one item in the tree, no nodes are
477 * allocated and @root->rnode is used as a direct slot instead of
478 * pointing to a node, in which case *@nodep will be NULL.
463 */ 479 */
464static void *radix_tree_lookup_element(struct radix_tree_root *root, 480void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
465 unsigned long index, int is_slot) 481 struct radix_tree_node **nodep, void ***slotp)
466{ 482{
483 struct radix_tree_node *node, *parent;
467 unsigned int height, shift; 484 unsigned int height, shift;
468 struct radix_tree_node *node, **slot; 485 void **slot;
469 486
470 node = rcu_dereference_raw(root->rnode); 487 node = rcu_dereference_raw(root->rnode);
471 if (node == NULL) 488 if (node == NULL)
@@ -474,19 +491,24 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
474 if (!radix_tree_is_indirect_ptr(node)) { 491 if (!radix_tree_is_indirect_ptr(node)) {
475 if (index > 0) 492 if (index > 0)
476 return NULL; 493 return NULL;
477 return is_slot ? (void *)&root->rnode : node; 494
495 if (nodep)
496 *nodep = NULL;
497 if (slotp)
498 *slotp = (void **)&root->rnode;
499 return node;
478 } 500 }
479 node = indirect_to_ptr(node); 501 node = indirect_to_ptr(node);
480 502
481 height = node->height; 503 height = node->path & RADIX_TREE_HEIGHT_MASK;
482 if (index > radix_tree_maxindex(height)) 504 if (index > radix_tree_maxindex(height))
483 return NULL; 505 return NULL;
484 506
485 shift = (height-1) * RADIX_TREE_MAP_SHIFT; 507 shift = (height-1) * RADIX_TREE_MAP_SHIFT;
486 508
487 do { 509 do {
488 slot = (struct radix_tree_node **) 510 parent = node;
489 (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); 511 slot = node->slots + ((index >> shift) & RADIX_TREE_MAP_MASK);
490 node = rcu_dereference_raw(*slot); 512 node = rcu_dereference_raw(*slot);
491 if (node == NULL) 513 if (node == NULL)
492 return NULL; 514 return NULL;
@@ -495,7 +517,11 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
495 height--; 517 height--;
496 } while (height > 0); 518 } while (height > 0);
497 519
498 return is_slot ? (void *)slot : indirect_to_ptr(node); 520 if (nodep)
521 *nodep = parent;
522 if (slotp)
523 *slotp = slot;
524 return node;
499} 525}
500 526
501/** 527/**
@@ -513,7 +539,11 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
513 */ 539 */
514void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) 540void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
515{ 541{
516 return (void **)radix_tree_lookup_element(root, index, 1); 542 void **slot;
543
544 if (!__radix_tree_lookup(root, index, NULL, &slot))
545 return NULL;
546 return slot;
517} 547}
518EXPORT_SYMBOL(radix_tree_lookup_slot); 548EXPORT_SYMBOL(radix_tree_lookup_slot);
519 549
@@ -531,7 +561,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
531 */ 561 */
532void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) 562void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
533{ 563{
534 return radix_tree_lookup_element(root, index, 0); 564 return __radix_tree_lookup(root, index, NULL, NULL);
535} 565}
536EXPORT_SYMBOL(radix_tree_lookup); 566EXPORT_SYMBOL(radix_tree_lookup);
537 567
@@ -676,7 +706,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
676 return (index == 0); 706 return (index == 0);
677 node = indirect_to_ptr(node); 707 node = indirect_to_ptr(node);
678 708
679 height = node->height; 709 height = node->path & RADIX_TREE_HEIGHT_MASK;
680 if (index > radix_tree_maxindex(height)) 710 if (index > radix_tree_maxindex(height))
681 return 0; 711 return 0;
682 712
@@ -713,7 +743,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
713{ 743{
714 unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK; 744 unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK;
715 struct radix_tree_node *rnode, *node; 745 struct radix_tree_node *rnode, *node;
716 unsigned long index, offset; 746 unsigned long index, offset, height;
717 747
718 if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag)) 748 if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
719 return NULL; 749 return NULL;
@@ -744,7 +774,8 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
744 return NULL; 774 return NULL;
745 775
746restart: 776restart:
747 shift = (rnode->height - 1) * RADIX_TREE_MAP_SHIFT; 777 height = rnode->path & RADIX_TREE_HEIGHT_MASK;
778 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
748 offset = index >> shift; 779 offset = index >> shift;
749 780
750 /* Index outside of the tree */ 781 /* Index outside of the tree */
@@ -946,81 +977,6 @@ next:
946} 977}
947EXPORT_SYMBOL(radix_tree_range_tag_if_tagged); 978EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
948 979
949
950/**
951 * radix_tree_next_hole - find the next hole (not-present entry)
952 * @root: tree root
953 * @index: index key
954 * @max_scan: maximum range to search
955 *
956 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the lowest
957 * indexed hole.
958 *
959 * Returns: the index of the hole if found, otherwise returns an index
960 * outside of the set specified (in which case 'return - index >= max_scan'
961 * will be true). In rare cases of index wrap-around, 0 will be returned.
962 *
963 * radix_tree_next_hole may be called under rcu_read_lock. However, like
964 * radix_tree_gang_lookup, this will not atomically search a snapshot of
965 * the tree at a single point in time. For example, if a hole is created
966 * at index 5, then subsequently a hole is created at index 10,
967 * radix_tree_next_hole covering both indexes may return 10 if called
968 * under rcu_read_lock.
969 */
970unsigned long radix_tree_next_hole(struct radix_tree_root *root,
971 unsigned long index, unsigned long max_scan)
972{
973 unsigned long i;
974
975 for (i = 0; i < max_scan; i++) {
976 if (!radix_tree_lookup(root, index))
977 break;
978 index++;
979 if (index == 0)
980 break;
981 }
982
983 return index;
984}
985EXPORT_SYMBOL(radix_tree_next_hole);
986
987/**
988 * radix_tree_prev_hole - find the prev hole (not-present entry)
989 * @root: tree root
990 * @index: index key
991 * @max_scan: maximum range to search
992 *
993 * Search backwards in the range [max(index-max_scan+1, 0), index]
994 * for the first hole.
995 *
996 * Returns: the index of the hole if found, otherwise returns an index
997 * outside of the set specified (in which case 'index - return >= max_scan'
998 * will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
999 *
1000 * radix_tree_next_hole may be called under rcu_read_lock. However, like
1001 * radix_tree_gang_lookup, this will not atomically search a snapshot of
1002 * the tree at a single point in time. For example, if a hole is created
1003 * at index 10, then subsequently a hole is created at index 5,
1004 * radix_tree_prev_hole covering both indexes may return 5 if called under
1005 * rcu_read_lock.
1006 */
1007unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
1008 unsigned long index, unsigned long max_scan)
1009{
1010 unsigned long i;
1011
1012 for (i = 0; i < max_scan; i++) {
1013 if (!radix_tree_lookup(root, index))
1014 break;
1015 index--;
1016 if (index == ULONG_MAX)
1017 break;
1018 }
1019
1020 return index;
1021}
1022EXPORT_SYMBOL(radix_tree_prev_hole);
1023
1024/** 980/**
1025 * radix_tree_gang_lookup - perform multiple lookup on a radix tree 981 * radix_tree_gang_lookup - perform multiple lookup on a radix tree
1026 * @root: radix tree root 982 * @root: radix tree root
@@ -1189,7 +1145,7 @@ static unsigned long __locate(struct radix_tree_node *slot, void *item,
1189 unsigned int shift, height; 1145 unsigned int shift, height;
1190 unsigned long i; 1146 unsigned long i;
1191 1147
1192 height = slot->height; 1148 height = slot->path & RADIX_TREE_HEIGHT_MASK;
1193 shift = (height-1) * RADIX_TREE_MAP_SHIFT; 1149 shift = (height-1) * RADIX_TREE_MAP_SHIFT;
1194 1150
1195 for ( ; height > 1; height--) { 1151 for ( ; height > 1; height--) {
@@ -1252,7 +1208,8 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
1252 } 1208 }
1253 1209
1254 node = indirect_to_ptr(node); 1210 node = indirect_to_ptr(node);
1255 max_index = radix_tree_maxindex(node->height); 1211 max_index = radix_tree_maxindex(node->path &
1212 RADIX_TREE_HEIGHT_MASK);
1256 if (cur_index > max_index) { 1213 if (cur_index > max_index) {
1257 rcu_read_unlock(); 1214 rcu_read_unlock();
1258 break; 1215 break;
@@ -1337,48 +1294,90 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
1337} 1294}
1338 1295
1339/** 1296/**
1340 * radix_tree_delete - delete an item from a radix tree 1297 * __radix_tree_delete_node - try to free node after clearing a slot
1341 * @root: radix tree root 1298 * @root: radix tree root
1342 * @index: index key 1299 * @index: index key
1300 * @node: node containing @index
1343 * 1301 *
1344 * Remove the item at @index from the radix tree rooted at @root. 1302 * After clearing the slot at @index in @node from radix tree
1303 * rooted at @root, call this function to attempt freeing the
1304 * node and shrinking the tree.
1345 * 1305 *
1346 * Returns the address of the deleted item, or NULL if it was not present. 1306 * Returns %true if @node was freed, %false otherwise.
1347 */ 1307 */
1348void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) 1308bool __radix_tree_delete_node(struct radix_tree_root *root,
1309 struct radix_tree_node *node)
1349{ 1310{
1350 struct radix_tree_node *node = NULL; 1311 bool deleted = false;
1351 struct radix_tree_node *slot = NULL; 1312
1352 struct radix_tree_node *to_free; 1313 do {
1353 unsigned int height, shift; 1314 struct radix_tree_node *parent;
1315
1316 if (node->count) {
1317 if (node == indirect_to_ptr(root->rnode)) {
1318 radix_tree_shrink(root);
1319 if (root->height == 0)
1320 deleted = true;
1321 }
1322 return deleted;
1323 }
1324
1325 parent = node->parent;
1326 if (parent) {
1327 unsigned int offset;
1328
1329 offset = node->path >> RADIX_TREE_HEIGHT_SHIFT;
1330 parent->slots[offset] = NULL;
1331 parent->count--;
1332 } else {
1333 root_tag_clear_all(root);
1334 root->height = 0;
1335 root->rnode = NULL;
1336 }
1337
1338 radix_tree_node_free(node);
1339 deleted = true;
1340
1341 node = parent;
1342 } while (node);
1343
1344 return deleted;
1345}
1346
1347/**
1348 * radix_tree_delete_item - delete an item from a radix tree
1349 * @root: radix tree root
1350 * @index: index key
1351 * @item: expected item
1352 *
1353 * Remove @item at @index from the radix tree rooted at @root.
1354 *
1355 * Returns the address of the deleted item, or NULL if it was not present
1356 * or the entry at the given @index was not @item.
1357 */
1358void *radix_tree_delete_item(struct radix_tree_root *root,
1359 unsigned long index, void *item)
1360{
1361 struct radix_tree_node *node;
1362 unsigned int offset;
1363 void **slot;
1364 void *entry;
1354 int tag; 1365 int tag;
1355 int uninitialized_var(offset);
1356 1366
1357 height = root->height; 1367 entry = __radix_tree_lookup(root, index, &node, &slot);
1358 if (index > radix_tree_maxindex(height)) 1368 if (!entry)
1359 goto out; 1369 return NULL;
1360 1370
1361 slot = root->rnode; 1371 if (item && entry != item)
1362 if (height == 0) { 1372 return NULL;
1373
1374 if (!node) {
1363 root_tag_clear_all(root); 1375 root_tag_clear_all(root);
1364 root->rnode = NULL; 1376 root->rnode = NULL;
1365 goto out; 1377 return entry;
1366 } 1378 }
1367 slot = indirect_to_ptr(slot);
1368 shift = height * RADIX_TREE_MAP_SHIFT;
1369 1379
1370 do { 1380 offset = index & RADIX_TREE_MAP_MASK;
1371 if (slot == NULL)
1372 goto out;
1373
1374 shift -= RADIX_TREE_MAP_SHIFT;
1375 offset = (index >> shift) & RADIX_TREE_MAP_MASK;
1376 node = slot;
1377 slot = slot->slots[offset];
1378 } while (shift);
1379
1380 if (slot == NULL)
1381 goto out;
1382 1381
1383 /* 1382 /*
1384 * Clear all tags associated with the item to be deleted. 1383 * Clear all tags associated with the item to be deleted.
@@ -1389,40 +1388,27 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
1389 radix_tree_tag_clear(root, index, tag); 1388 radix_tree_tag_clear(root, index, tag);
1390 } 1389 }
1391 1390
1392 to_free = NULL; 1391 node->slots[offset] = NULL;
1393 /* Now free the nodes we do not need anymore */ 1392 node->count--;
1394 while (node) {
1395 node->slots[offset] = NULL;
1396 node->count--;
1397 /*
1398 * Queue the node for deferred freeing after the
1399 * last reference to it disappears (set NULL, above).
1400 */
1401 if (to_free)
1402 radix_tree_node_free(to_free);
1403
1404 if (node->count) {
1405 if (node == indirect_to_ptr(root->rnode))
1406 radix_tree_shrink(root);
1407 goto out;
1408 }
1409 1393
1410 /* Node with zero slots in use so free it */ 1394 __radix_tree_delete_node(root, node);
1411 to_free = node;
1412 1395
1413 index >>= RADIX_TREE_MAP_SHIFT; 1396 return entry;
1414 offset = index & RADIX_TREE_MAP_MASK; 1397}
1415 node = node->parent; 1398EXPORT_SYMBOL(radix_tree_delete_item);
1416 }
1417
1418 root_tag_clear_all(root);
1419 root->height = 0;
1420 root->rnode = NULL;
1421 if (to_free)
1422 radix_tree_node_free(to_free);
1423 1399
1424out: 1400/**
1425 return slot; 1401 * radix_tree_delete - delete an item from a radix tree
1402 * @root: radix tree root
1403 * @index: index key
1404 *
1405 * Remove the item at @index from the radix tree rooted at @root.
1406 *
1407 * Returns the address of the deleted item, or NULL if it was not present.
1408 */
1409void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
1410{
1411 return radix_tree_delete_item(root, index, NULL);
1426} 1412}
1427EXPORT_SYMBOL(radix_tree_delete); 1413EXPORT_SYMBOL(radix_tree_delete);
1428 1414
@@ -1438,9 +1424,12 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
1438EXPORT_SYMBOL(radix_tree_tagged); 1424EXPORT_SYMBOL(radix_tree_tagged);
1439 1425
1440static void 1426static void
1441radix_tree_node_ctor(void *node) 1427radix_tree_node_ctor(void *arg)
1442{ 1428{
1443 memset(node, 0, sizeof(struct radix_tree_node)); 1429 struct radix_tree_node *node = arg;
1430
1431 memset(node, 0, sizeof(*node));
1432 INIT_LIST_HEAD(&node->private_list);
1444} 1433}
1445 1434
1446static __init unsigned long __maxindex(unsigned int height) 1435static __init unsigned long __maxindex(unsigned int height)
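Beyond factoring lookup and slot creation into __radix_tree_lookup()/__radix_tree_create(), the patch introduces radix_tree_delete_item(), which removes a slot only if it still holds the expected item, and turns radix_tree_delete() into a wrapper that passes NULL. A minimal sketch (illustrative names):

    #include <linux/kernel.h>
    #include <linux/radix-tree.h>

    static RADIX_TREE(demo_tree, GFP_KERNEL);

    static void demo_radix_usage(void *item)
    {
            void *old;

            if (radix_tree_insert(&demo_tree, 42, item))
                    return;         /* -ENOMEM or -EEXIST */

            /* Only deletes if index 42 still points at @item. */
            old = radix_tree_delete_item(&demo_tree, 42, item);
            WARN_ON(old != item);
    }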
diff --git a/lib/random32.c b/lib/random32.c
index 614896778700..fa5da61ce7ad 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -1,37 +1,35 @@
1/* 1/*
2 This is a maximally equidistributed combined Tausworthe generator 2 * This is a maximally equidistributed combined Tausworthe generator
3 based on code from GNU Scientific Library 1.5 (30 Jun 2004) 3 * based on code from GNU Scientific Library 1.5 (30 Jun 2004)
4 4 *
5 lfsr113 version: 5 * lfsr113 version:
6 6 *
7 x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n) 7 * x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
8 8 *
9 s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13)) 9 * s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13))
10 s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27)) 10 * s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27))
11 s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21)) 11 * s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21))
12 s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12)) 12 * s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12))
13 13 *
14 The period of this generator is about 2^113 (see erratum paper). 14 * The period of this generator is about 2^113 (see erratum paper).
15 15 *
16 From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe 16 * From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
17 Generators", Mathematics of Computation, 65, 213 (1996), 203--213: 17 * Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
18 http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps 18 * http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
19 ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps 19 * ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
20 20 *
21 There is an erratum in the paper "Tables of Maximally 21 * There is an erratum in the paper "Tables of Maximally Equidistributed
22 Equidistributed Combined LFSR Generators", Mathematics of 22 * Combined LFSR Generators", Mathematics of Computation, 68, 225 (1999),
23 Computation, 68, 225 (1999), 261--269: 23 * 261--269: http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
24 http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps 24 *
25 25 * ... the k_j most significant bits of z_j must be non-zero,
26 ... the k_j most significant bits of z_j must be non- 26 * for each j. (Note: this restriction also applies to the
27 zero, for each j. (Note: this restriction also applies to the 27 * computer code given in [4], but was mistakenly not mentioned
28 computer code given in [4], but was mistakenly not mentioned in 28 * in that paper.)
29 that paper.) 29 *
30 30 * This affects the seeding procedure by imposing the requirement
31 This affects the seeding procedure by imposing the requirement 31 * s1 > 1, s2 > 7, s3 > 15, s4 > 127.
32 s1 > 1, s2 > 7, s3 > 15, s4 > 127. 32 */
33
34*/
35 33
36#include <linux/types.h> 34#include <linux/types.h>
37#include <linux/percpu.h> 35#include <linux/percpu.h>
@@ -75,15 +73,17 @@ EXPORT_SYMBOL(prandom_u32_state);
75 */ 73 */
76u32 prandom_u32(void) 74u32 prandom_u32(void)
77{ 75{
78 unsigned long r;
79 struct rnd_state *state = &get_cpu_var(net_rand_state); 76 struct rnd_state *state = &get_cpu_var(net_rand_state);
80 r = prandom_u32_state(state); 77 u32 res;
78
79 res = prandom_u32_state(state);
81 put_cpu_var(state); 80 put_cpu_var(state);
82 return r; 81
82 return res;
83} 83}
84EXPORT_SYMBOL(prandom_u32); 84EXPORT_SYMBOL(prandom_u32);
85 85
86/* 86/**
87 * prandom_bytes_state - get the requested number of pseudo-random bytes 87 * prandom_bytes_state - get the requested number of pseudo-random bytes
88 * 88 *
89 * @state: pointer to state structure holding seeded state. 89 * @state: pointer to state structure holding seeded state.
@@ -204,6 +204,7 @@ static int __init prandom_init(void)
204 prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy()); 204 prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy());
205 prandom_warmup(state); 205 prandom_warmup(state);
206 } 206 }
207
207 return 0; 208 return 0;
208} 209}
209core_initcall(prandom_init); 210core_initcall(prandom_init);
@@ -259,6 +260,7 @@ static void __prandom_reseed(bool late)
259 260
260 if (latch && !late) 261 if (latch && !late)
261 goto out; 262 goto out;
263
262 latch = true; 264 latch = true;
263 265
264 for_each_possible_cpu(i) { 266 for_each_possible_cpu(i) {
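The random32.c hunks are cosmetic (comment reflow, a u32 local instead of unsigned long, blank lines); behaviour is unchanged and prandom_u32() remains a non-cryptographic per-CPU Tausworthe generator. A usage sketch of the shared and the caller-owned entry points:

    #include <linux/kernel.h>
    #include <linux/random.h>

    static void demo_prandom(void)
    {
            struct rnd_state st;
            u32 a, b;

            a = prandom_u32();              /* per-CPU shared state */

            prandom_seed_state(&st, 42);    /* private, reproducible stream */
            b = prandom_u32_state(&st);

            pr_debug("prandom: %u %u\n", a, b);
    }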
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 04abe53f12a1..1afec32de6f2 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -7,7 +7,8 @@
7#include <linux/kallsyms.h> 7#include <linux/kallsyms.h>
8#include <linux/sched.h> 8#include <linux/sched.h>
9 9
10notrace unsigned int debug_smp_processor_id(void) 10notrace static unsigned int check_preemption_disabled(const char *what1,
11 const char *what2)
11{ 12{
12 int this_cpu = raw_smp_processor_id(); 13 int this_cpu = raw_smp_processor_id();
13 14
@@ -38,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void)
38 if (!printk_ratelimit()) 39 if (!printk_ratelimit())
39 goto out_enable; 40 goto out_enable;
40 41
41 printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] " 42 printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
42 "code: %s/%d\n", 43 what1, what2, preempt_count() - 1, current->comm, current->pid);
43 preempt_count() - 1, current->comm, current->pid); 44
44 print_symbol("caller is %s\n", (long)__builtin_return_address(0)); 45 print_symbol("caller is %s\n", (long)__builtin_return_address(0));
45 dump_stack(); 46 dump_stack();
46 47
@@ -50,5 +51,14 @@ out:
50 return this_cpu; 51 return this_cpu;
51} 52}
52 53
54notrace unsigned int debug_smp_processor_id(void)
55{
56 return check_preemption_disabled("smp_processor_id", "");
57}
53EXPORT_SYMBOL(debug_smp_processor_id); 58EXPORT_SYMBOL(debug_smp_processor_id);
54 59
60notrace void __this_cpu_preempt_check(const char *op)
61{
62 check_preemption_disabled("__this_cpu_", op);
63}
64EXPORT_SYMBOL(__this_cpu_preempt_check);
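check_preemption_disabled() now backs both debug_smp_processor_id() and the new __this_cpu_preempt_check(), so misuse of __this_cpu_*() operations gets the same CONFIG_DEBUG_PREEMPT diagnostics as smp_processor_id(). A sketch of code that stays quiet versus code that would warn (illustrative per-CPU variable):

    #include <linux/percpu.h>
    #include <linux/preempt.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(unsigned long, demo_hits);

    static void demo_percpu(void)
    {
            preempt_disable();
            __this_cpu_inc(demo_hits);      /* fine: preemption is off */
            preempt_enable();

            /* In preemptible context the same operation would now go through
             * check_preemption_disabled() and print "BUG: using __this_cpu_..."
             * on debug builds:
             *
             * __this_cpu_inc(demo_hits);
             */
    }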
diff --git a/lib/syscall.c b/lib/syscall.c
index 58710eefeac8..e30e03932480 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -72,4 +72,3 @@ int task_current_syscall(struct task_struct *target, long *callno,
72 72
73 return 0; 73 return 0;
74} 74}
75EXPORT_SYMBOL_GPL(task_current_syscall);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 185b6d300ebc..0648291cdafe 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -364,7 +364,6 @@ enum format_type {
364 FORMAT_TYPE_SHORT, 364 FORMAT_TYPE_SHORT,
365 FORMAT_TYPE_UINT, 365 FORMAT_TYPE_UINT,
366 FORMAT_TYPE_INT, 366 FORMAT_TYPE_INT,
367 FORMAT_TYPE_NRCHARS,
368 FORMAT_TYPE_SIZE_T, 367 FORMAT_TYPE_SIZE_T,
369 FORMAT_TYPE_PTRDIFF 368 FORMAT_TYPE_PTRDIFF
370}; 369};
@@ -719,10 +718,15 @@ char *resource_string(char *buf, char *end, struct resource *res,
719 specp = &mem_spec; 718 specp = &mem_spec;
720 decode = 0; 719 decode = 0;
721 } 720 }
722 p = number(p, pend, res->start, *specp); 721 if (decode && res->flags & IORESOURCE_UNSET) {
723 if (res->start != res->end) { 722 p = string(p, pend, "size ", str_spec);
724 *p++ = '-'; 723 p = number(p, pend, resource_size(res), *specp);
725 p = number(p, pend, res->end, *specp); 724 } else {
725 p = number(p, pend, res->start, *specp);
726 if (res->start != res->end) {
727 *p++ = '-';
728 p = number(p, pend, res->end, *specp);
729 }
726 } 730 }
727 if (decode) { 731 if (decode) {
728 if (res->flags & IORESOURCE_MEM_64) 732 if (res->flags & IORESOURCE_MEM_64)
@@ -1533,10 +1537,6 @@ qualifier:
1533 return fmt - start; 1537 return fmt - start;
1534 /* skip alnum */ 1538 /* skip alnum */
1535 1539
1536 case 'n':
1537 spec->type = FORMAT_TYPE_NRCHARS;
1538 return ++fmt - start;
1539
1540 case '%': 1540 case '%':
1541 spec->type = FORMAT_TYPE_PERCENT_CHAR; 1541 spec->type = FORMAT_TYPE_PERCENT_CHAR;
1542 return ++fmt - start; 1542 return ++fmt - start;
@@ -1559,6 +1559,15 @@ qualifier:
1559 case 'u': 1559 case 'u':
1560 break; 1560 break;
1561 1561
1562 case 'n':
1563 /*
1564 * Since %n poses a greater security risk than utility, treat
1565 * it as an invalid format specifier. Warn about its use so
1566 * that new instances don't get added.
1567 */
1568 WARN_ONCE(1, "Please remove ignored %%n in '%s'\n", fmt);
1569 /* Fall-through */
1570
1562 default: 1571 default:
1563 spec->type = FORMAT_TYPE_INVALID; 1572 spec->type = FORMAT_TYPE_INVALID;
1564 return fmt - start; 1573 return fmt - start;
@@ -1732,20 +1741,6 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1732 ++str; 1741 ++str;
1733 break; 1742 break;
1734 1743
1735 case FORMAT_TYPE_NRCHARS: {
1736 /*
1737 * Since %n poses a greater security risk than
1738 * utility, ignore %n and skip its argument.
1739 */
1740 void *skip_arg;
1741
1742 WARN_ONCE(1, "Please remove ignored %%n in '%s'\n",
1743 old_fmt);
1744
1745 skip_arg = va_arg(args, void *);
1746 break;
1747 }
1748
1749 default: 1744 default:
1750 switch (spec.type) { 1745 switch (spec.type) {
1751 case FORMAT_TYPE_LONG_LONG: 1746 case FORMAT_TYPE_LONG_LONG:
@@ -2020,19 +2015,6 @@ do { \
2020 fmt++; 2015 fmt++;
2021 break; 2016 break;
2022 2017
2023 case FORMAT_TYPE_NRCHARS: {
2024 /* skip %n 's argument */
2025 u8 qualifier = spec.qualifier;
2026 void *skip_arg;
2027 if (qualifier == 'l')
2028 skip_arg = va_arg(args, long *);
2029 else if (_tolower(qualifier) == 'z')
2030 skip_arg = va_arg(args, size_t *);
2031 else
2032 skip_arg = va_arg(args, int *);
2033 break;
2034 }
2035
2036 default: 2018 default:
2037 switch (spec.type) { 2019 switch (spec.type) {
2038 2020
@@ -2191,10 +2173,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
2191 ++str; 2173 ++str;
2192 break; 2174 break;
2193 2175
2194 case FORMAT_TYPE_NRCHARS:
2195 /* skip */
2196 break;
2197
2198 default: { 2176 default: {
2199 unsigned long long num; 2177 unsigned long long num;
2200 2178
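With FORMAT_TYPE_NRCHARS removed, '%n' is rejected while the format is parsed: it falls through to FORMAT_TYPE_INVALID, fires the WARN_ONCE() above, and nothing is ever written through a pointer argument. Illustrative consequence for a caller (sketch):

    #include <linux/kernel.h>

    static void demo_no_percent_n(void)
    {
            int chars = -1;

            /* Warns once ("Please remove ignored %n in ...") and leaves
             * 'chars' untouched; %n no longer stores a character count. */
            pr_info("hello%n\n", &chars);
    }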