author     Frederic Weisbecker <fweisbec@gmail.com>  2013-05-02 11:37:49 -0400
committer  Frederic Weisbecker <fweisbec@gmail.com>  2013-05-02 11:54:19 -0400
commit     c032862fba51a3ca504752d3a25186b324c5ce83 (patch)
tree       955dc2ba4ab3df76ecc2bb780ee84aca04967e8d /lib
parent     fda76e074c7737fc57855dd17c762e50ed526052 (diff)
parent     8700c95adb033843fc163d112b9d21d4fda78018 (diff)
Merge commit '8700c95adb03' into timers/nohz
The full dynticks tree needs the latest RCU and sched upstream updates
in order to fix some dependencies.

Merge a common upstream merge point that has these updates.

Conflicts:
	include/linux/perf_event.h
	kernel/rcutree.h
	kernel/rcutree_plugin.h

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig          |   3
-rw-r--r--  lib/Makefile         |   2
-rw-r--r--  lib/argv_split.c     |  87
-rw-r--r--  lib/bust_spinlocks.c |   3
-rw-r--r--  lib/dma-debug.c      |  45
-rw-r--r--  lib/dynamic_debug.c  |   1
-rw-r--r--  lib/fault-inject.c   |   2
-rw-r--r--  lib/genalloc.c       |  81
-rw-r--r--  lib/idr.c            | 123
-rw-r--r--  lib/int_sqrt.c       |  32
-rw-r--r--  lib/kobject.c        |   9
-rw-r--r--  lib/list_sort.c      |   2
-rw-r--r--  lib/show_mem.c       |   3
-rw-r--r--  lib/swiotlb.c        |  19
-rw-r--r--  lib/ucs2_string.c    |  51
-rw-r--r--  lib/uuid.c           |   8
-rw-r--r--  lib/xz/Kconfig       |   2
17 files changed, 318 insertions, 155 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 3958dc4389f9..fe01d418b09a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -404,4 +404,7 @@ config OID_REGISTRY
 	help
 	  Enable fast lookup object identifier registry.
 
+config UCS2_STRING
+        tristate
+
 endmenu
diff --git a/lib/Makefile b/lib/Makefile
index d7946ff75b2e..6e2cc561f761 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -174,3 +174,5 @@ quiet_cmd_build_OID_registry = GEN $@
       cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@
 
 clean-files	+= oid_registry_data.c
+
+obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 1e9a6cbc3689..e927ed0e18a8 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -8,23 +8,17 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 
-static const char *skip_arg(const char *cp)
-{
-	while (*cp && !isspace(*cp))
-		cp++;
-
-	return cp;
-}
-
 static int count_argc(const char *str)
 {
 	int count = 0;
+	bool was_space;
 
-	while (*str) {
-		str = skip_spaces(str);
-		if (*str) {
+	for (was_space = true; *str; str++) {
+		if (isspace(*str)) {
+			was_space = true;
+		} else if (was_space) {
+			was_space = false;
 			count++;
-			str = skip_arg(str);
 		}
 	}
 
@@ -39,10 +33,8 @@ static int count_argc(const char *str)
  */
 void argv_free(char **argv)
 {
-	char **p;
-	for (p = argv; *p; p++)
-		kfree(*p);
-
+	argv--;
+	kfree(argv[0]);
 	kfree(argv);
 }
 EXPORT_SYMBOL(argv_free);
@@ -59,43 +51,44 @@ EXPORT_SYMBOL(argv_free);
  * considered to be a single argument separator. The returned array
  * is always NULL-terminated. Returns NULL on memory allocation
  * failure.
+ *
+ * The source string at `str' may be undergoing concurrent alteration via
+ * userspace sysctl activity (at least). The argv_split() implementation
+ * attempts to handle this gracefully by taking a local copy to work on.
  */
 char **argv_split(gfp_t gfp, const char *str, int *argcp)
 {
-	int argc = count_argc(str);
-	char **argv = kzalloc(sizeof(*argv) * (argc+1), gfp);
-	char **argvp;
-
-	if (argv == NULL)
-		goto out;
-
-	if (argcp)
-		*argcp = argc;
-
-	argvp = argv;
-
-	while (*str) {
-		str = skip_spaces(str);
-
-		if (*str) {
-			const char *p = str;
-			char *t;
-
-			str = skip_arg(str);
+	char *argv_str;
+	bool was_space;
+	char **argv, **argv_ret;
+	int argc;
+
+	argv_str = kstrndup(str, KMALLOC_MAX_SIZE - 1, gfp);
+	if (!argv_str)
+		return NULL;
+
+	argc = count_argc(argv_str);
+	argv = kmalloc(sizeof(*argv) * (argc + 2), gfp);
+	if (!argv) {
+		kfree(argv_str);
+		return NULL;
+	}
 
-			t = kstrndup(p, str-p, gfp);
-			if (t == NULL)
-				goto fail;
-			*argvp++ = t;
+	*argv = argv_str;
+	argv_ret = ++argv;
+	for (was_space = true; *argv_str; argv_str++) {
+		if (isspace(*argv_str)) {
+			was_space = true;
+			*argv_str = 0;
+		} else if (was_space) {
+			was_space = false;
+			*argv++ = argv_str;
 		}
 	}
-	*argvp = NULL;
-
- out:
-	return argv;
+	*argv = NULL;
 
- fail:
-	argv_free(argv);
-	return NULL;
+	if (argcp)
+		*argcp = argc;
+	return argv_ret;
 }
 EXPORT_SYMBOL(argv_split);
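
For illustration, a minimal sketch of how a caller might use the rewritten interface; the helper name and command string below are hypothetical and not part of this commit:

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/string.h>	/* argv_split(), argv_free() */

	static int run_split(const char *cmdline)
	{
		int argc;
		char **argv;

		/* argv_split() now copies cmdline up front, so concurrent
		 * modification of the source string cannot corrupt the split */
		argv = argv_split(GFP_KERNEL, cmdline, &argc);
		if (!argv)
			return -ENOMEM;

		pr_info("parsed %d arguments\n", argc);

		/* argv_free() expects the copied string at argv[-1], so only
		 * arrays returned by argv_split() may be passed to it */
		argv_free(argv);
		return 0;
	}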
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 9681d54b95d1..f8e0e5367398 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/printk.h>
 #include <linux/spinlock.h>
 #include <linux/tty.h>
 #include <linux/wait.h>
@@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes)
 			wake_up_klogd();
 	}
 }
-
-
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 5e396accd3d0..d87a17a819d0 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref)
 	entry = bucket_find_exact(bucket, ref);
 
 	if (!entry) {
+		/* must drop lock before calling dma_mapping_error */
+		put_hash_bucket(bucket, &flags);
+
 		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
 			err_printk(ref->dev, NULL,
-				   "DMA-API: device driver tries "
-				   "to free an invalid DMA memory address\n");
-			return;
+				   "DMA-API: device driver tries to free an "
+				   "invalid DMA memory address\n");
+		} else {
+			err_printk(ref->dev, NULL,
+				   "DMA-API: device driver tries to free DMA "
+				   "memory it has not allocated [device "
+				   "address=0x%016llx] [size=%llu bytes]\n",
+				   ref->dev_addr, ref->size);
 		}
-		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
-			   "to free DMA memory it has not allocated "
-			   "[device address=0x%016llx] [size=%llu bytes]\n",
-			   ref->dev_addr, ref->size);
-		goto out;
+		return;
 	}
 
 	if (ref->size != entry->size) {
@@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref)
 	hash_bucket_del(entry);
 	dma_entry_free(entry);
 
-out:
 	put_hash_bucket(bucket, &flags);
 }
 
@@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	ref.dev = dev;
 	ref.dev_addr = dma_addr;
 	bucket = get_hash_bucket(&ref, &flags);
-	entry = bucket_find_exact(bucket, &ref);
 
-	if (!entry)
-		goto out;
+	list_for_each_entry(entry, &bucket->list, list) {
+		if (!exact_match(&ref, entry))
+			continue;
+
+		/*
+		 * The same physical address can be mapped multiple
+		 * times. Without a hardware IOMMU this results in the
+		 * same device addresses being put into the dma-debug
+		 * hash multiple times too. This can result in false
+		 * positives being reported. Therefore we implement a
+		 * best-fit algorithm here which updates the first entry
+		 * from the hash which fits the reference value and is
+		 * not currently listed as being checked.
+		 */
+		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+			entry->map_err_type = MAP_ERR_CHECKED;
+			break;
+		}
+	}
 
-	entry->map_err_type = MAP_ERR_CHECKED;
-out:
 	put_hash_bucket(bucket, &flags);
 }
 EXPORT_SYMBOL(debug_dma_mapping_error);
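
The tracking above only pays off when drivers actually check their mappings. A minimal sketch of that driver-side pattern (the function and parameter names here are illustrative, not from this commit):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int foo_map_buffer(struct device *dev, void *buf, size_t len,
				  dma_addr_t *handle)
	{
		*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		/* this check is what flips the dma-debug entry from
		 * MAP_ERR_NOT_CHECKED to MAP_ERR_CHECKED */
		if (dma_mapping_error(dev, *handle))
			return -ENOMEM;
		return 0;
	}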
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 5276b99ca650..46032453abd5 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -281,7 +281,6 @@ static inline int parse_lineno(const char *str, unsigned int *val)
  * allow the user to express a query which matches a format
  * containing embedded spaces.
  */
-#define isodigit(c)	((c) >= '0' && (c) <= '7')
 static char *unescape(char *str)
 {
 	char *in = str;
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index f7210ad6cffd..c5c7a762b850 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -122,7 +122,7 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
 		return false;
 	}
 
-	if (attr->probability <= random32() % 100)
+	if (attr->probability <= prandom_u32() % 100)
 		return false;
 
 	if (!fail_stacktrace(attr))
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 54920433705a..b35cfa9bc3d4 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -34,6 +34,8 @@
 #include <linux/rculist.h>
 #include <linux/interrupt.h>
 #include <linux/genalloc.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
 
 static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
 {
@@ -480,3 +482,82 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
 	return start_bit;
 }
 EXPORT_SYMBOL(gen_pool_best_fit);
+
+static void devm_gen_pool_release(struct device *dev, void *res)
+{
+	gen_pool_destroy(*(struct gen_pool **)res);
+}
+
+/**
+ * devm_gen_pool_create - managed gen_pool_create
+ * @dev: device that provides the gen_pool
+ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
+ * @nid: node id of the node the pool structure should be allocated on, or -1
+ *
+ * Create a new special memory pool that can be used to manage special purpose
+ * memory not managed by the regular kmalloc/kfree interface. The pool will be
+ * automatically destroyed by the device management code.
+ */
+struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
+		int nid)
+{
+	struct gen_pool **ptr, *pool;
+
+	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
+
+	pool = gen_pool_create(min_alloc_order, nid);
+	if (pool) {
+		*ptr = pool;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return pool;
+}
+
+/**
+ * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
+ * @dev: device to retrieve the gen_pool from
+ * @name: Optional name for the gen_pool, usually NULL
+ *
+ * Returns the gen_pool for the device if one is present, or NULL.
+ */
+struct gen_pool *dev_get_gen_pool(struct device *dev)
+{
+	struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
+					NULL);
+
+	if (!p)
+		return NULL;
+	return *p;
+}
+EXPORT_SYMBOL_GPL(dev_get_gen_pool);
+
+#ifdef CONFIG_OF
+/**
+ * of_get_named_gen_pool - find a pool by phandle property
+ * @np: device node
+ * @propname: property name containing phandle(s)
+ * @index: index into the phandle array
+ *
+ * Returns the pool that contains the chunk starting at the physical
+ * address of the device tree node pointed at by the phandle property,
+ * or NULL if not found.
+ */
+struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+	const char *propname, int index)
+{
+	struct platform_device *pdev;
+	struct device_node *np_pool;
+
+	np_pool = of_parse_phandle(np, propname, index);
+	if (!np_pool)
+		return NULL;
+	pdev = of_find_device_by_node(np_pool);
+	if (!pdev)
+		return NULL;
+	return dev_get_gen_pool(&pdev->dev);
+}
+EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
+#endif /* CONFIG_OF */
diff --git a/lib/idr.c b/lib/idr.c
index 73f4d53c02f3..cca4b9302a71 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,8 +106,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	if (layer_idr)
 		return get_from_free_list(layer_idr);
 
-	/* try to allocate directly from kmem_cache */
-	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+	/*
+	 * Try to allocate directly from kmem_cache. We want to try this
+	 * before preload buffer; otherwise, non-preloading idr_alloc()
+	 * users will end up taking advantage of preloading ones. As the
+	 * following is allowed to fail for preloaded cases, suppress
+	 * warning this time.
+	 */
+	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
 	if (new)
 		return new;
 
@@ -115,18 +121,24 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	 * Try to fetch one from the per-cpu preload buffer if in process
 	 * context. See idr_preload() for details.
 	 */
-	if (in_interrupt())
-		return NULL;
-
-	preempt_disable();
-	new = __this_cpu_read(idr_preload_head);
-	if (new) {
-		__this_cpu_write(idr_preload_head, new->ary[0]);
-		__this_cpu_dec(idr_preload_cnt);
-		new->ary[0] = NULL;
+	if (!in_interrupt()) {
+		preempt_disable();
+		new = __this_cpu_read(idr_preload_head);
+		if (new) {
+			__this_cpu_write(idr_preload_head, new->ary[0]);
+			__this_cpu_dec(idr_preload_cnt);
+			new->ary[0] = NULL;
+		}
+		preempt_enable();
+		if (new)
+			return new;
 	}
-	preempt_enable();
-	return new;
+
+	/*
+	 * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so
+	 * that memory allocation failure warning is printed as intended.
+	 */
+	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 }
 
 static void idr_layer_rcu_free(struct rcu_head *head)
@@ -184,20 +196,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 	}
 }
 
-/**
- * idr_pre_get - reserve resources for idr allocation
- * @idp:	idr handle
- * @gfp_mask:	memory allocation flags
- *
- * This function should be called prior to calling the idr_get_new* functions.
- * It preallocates enough memory to satisfy the worst possible allocation. The
- * caller should pass in GFP_KERNEL if possible. This of course requires that
- * no spinning locks be held.
- *
- * If the system is REALLY out of memory this function returns %0,
- * otherwise %1.
- */
-int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < MAX_IDR_FREE) {
 		struct idr_layer *new;
@@ -208,13 +207,12 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 	}
 	return 1;
 }
-EXPORT_SYMBOL(idr_pre_get);
+EXPORT_SYMBOL(__idr_pre_get);
 
 /**
  * sub_alloc - try to allocate an id without growing the tree depth
  * @idp: idr handle
  * @starting_id: id to start search at
- * @id: pointer to the allocated handle
  * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
  * @gfp_mask: allocation mask for idr_layer_alloc()
  * @layer_idr: optional idr passed to idr_layer_alloc()
@@ -376,25 +374,7 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 	idr_mark_full(pa, id);
 }
 
-/**
- * idr_get_new_above - allocate new idr entry above or equal to a start id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @starting_id: id to start search at
- * @id: pointer to the allocated handle
- *
- * This is the allocate id function. It should be called with any
- * required locks.
- *
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
- *
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
- *
- * @id returns a value in the range @starting_id ... %0x7fffffff
- */
-int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
+int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
 	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 	int rv;
@@ -407,7 +387,7 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 	*id = rv;
 	return 0;
 }
-EXPORT_SYMBOL(idr_get_new_above);
+EXPORT_SYMBOL(__idr_get_new_above);
 
 /**
  * idr_preload - preload for idr_alloc()
@@ -515,6 +495,33 @@ int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(idr_alloc);
 
+/**
+ * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
+ * @idr: the (initialized) idr
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive, <= 0 for max)
+ * @gfp_mask: memory allocation flags
+ *
+ * Essentially the same as idr_alloc, but prefers to allocate progressively
+ * higher ids if it can. If the "cur" counter wraps, then it will start again
+ * at the "start" end of the range and allocate one that has already been used.
+ */
+int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
+			gfp_t gfp_mask)
+{
+	int id;
+
+	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
+	if (id == -ENOSPC)
+		id = idr_alloc(idr, ptr, start, end, gfp_mask);
+
+	if (likely(id >= 0))
+		idr->cur = id + 1;
+	return id;
+}
+EXPORT_SYMBOL(idr_alloc_cyclic);
+
 static void idr_remove_warning(int id)
 {
 	printk(KERN_WARNING
@@ -569,8 +576,7 @@ void idr_remove(struct idr *idp, int id)
 	struct idr_layer *p;
 	struct idr_layer *to_free;
 
-	/* see comment in idr_find_slowpath() */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return;
 
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
@@ -667,15 +673,7 @@ void *idr_find_slowpath(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	/*
-	 * If @id is negative, idr_find() used to ignore the sign bit and
-	 * performed lookup with the rest of bits, which is weird and can
-	 * lead to very obscure bugs. We're now returning NULL for all
-	 * negative IDs but just in case somebody was depending on the sign
-	 * bit being ignored, let's trigger WARN_ON_ONCE() so that they can
-	 * be detected and fixed. WARN_ON_ONCE() can later be removed.
-	 */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return NULL;
 
 	p = rcu_dereference_raw(idp->top);
@@ -824,8 +822,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
-	/* see comment in idr_find_slowpath() */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return ERR_PTR(-EINVAL);
 
 	p = idp->top;
@@ -918,7 +915,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
 	/* allocate idr_layers */
-	if (!idr_pre_get(&ida->idr, gfp_mask))
+	if (!__idr_pre_get(&ida->idr, gfp_mask))
 		return 0;
 
 	/* allocate free_bitmap */
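
A minimal sketch of the calling convention idr_alloc_cyclic() is meant for, under the preload pattern introduced alongside idr_alloc(); the session naming and id range below are hypothetical:

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(session_idr);
	static DEFINE_SPINLOCK(session_lock);

	static int new_session_id(void *session)
	{
		int id;

		idr_preload(GFP_KERNEL);
		spin_lock(&session_lock);
		/* ids walk upward through [1, 0x10000) and wrap, so a
		 * just-freed id is not handed out again immediately */
		id = idr_alloc_cyclic(&session_idr, session, 1, 0x10000,
				      GFP_NOWAIT);
		spin_unlock(&session_lock);
		idr_preload_end();

		return id;	/* >= 1 on success, -ENOSPC or -ENOMEM on failure */
	}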
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index fc2eeb7cb2ea..1ef4cc344977 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -1,3 +1,9 @@
+/*
+ * Copyright (C) 2013 Davidlohr Bueso <davidlohr.bueso@hp.com>
+ *
+ * Based on the shift-and-subtract algorithm for computing integer
+ * square root from Guy L. Steele.
+ */
 
 #include <linux/kernel.h>
 #include <linux/export.h>
@@ -10,23 +16,23 @@
  */
 unsigned long int_sqrt(unsigned long x)
 {
-	unsigned long op, res, one;
+	unsigned long b, m, y = 0;
 
-	op = x;
-	res = 0;
+	if (x <= 1)
+		return x;
 
-	one = 1UL << (BITS_PER_LONG - 2);
-	while (one > op)
-		one >>= 2;
+	m = 1UL << (BITS_PER_LONG - 2);
+	while (m != 0) {
+		b = y + m;
+		y >>= 1;
 
-	while (one != 0) {
-		if (op >= res + one) {
-			op = op - (res + one);
-			res = res + 2 * one;
+		if (x >= b) {
+			x -= b;
+			y += m;
 		}
-		res /= 2;
-		one /= 4;
+		m >>= 2;
 	}
-	return res;
+
+	return y;
 }
 EXPORT_SYMBOL(int_sqrt);
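
The new loop is the shift-and-subtract method: m walks down the even bit positions, b = y + m is the candidate to subtract from the remainder, and y accumulates the root one bit per iteration. A standalone user-space copy for checking the logic (assumes an unsigned long of the same width as the kernel build):

	#include <stdio.h>

	static unsigned long int_sqrt(unsigned long x)
	{
		unsigned long b, m, y = 0;

		if (x <= 1)
			return x;

		/* same as the kernel's BITS_PER_LONG - 2 */
		m = 1UL << (8 * sizeof(unsigned long) - 2);
		while (m != 0) {
			b = y + m;
			y >>= 1;

			if (x >= b) {
				x -= b;
				y += m;
			}
			m >>= 2;
		}
		return y;
	}

	int main(void)
	{
		printf("%lu\n", int_sqrt(1000000UL));	/* 1000 */
		printf("%lu\n", int_sqrt(999999UL));	/* 999  */
		return 0;
	}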
diff --git a/lib/kobject.c b/lib/kobject.c
index e07ee1fcd6f1..a65486613d79 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobject *kobj)
 	return kobj;
 }
 
+static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
+{
+	if (!kref_get_unless_zero(&kobj->kref))
+		kobj = NULL;
+	return kobj;
+}
+
 /*
  * kobject_cleanup - free kobject resources.
  * @kobj: object to cleanup
@@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
 
 	list_for_each_entry(k, &kset->list, entry) {
 		if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
-			ret = kobject_get(k);
+			ret = kobject_get_unless_zero(k);
 			break;
 		}
 	}
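
The race this closes, sketched with a bare kref (the structure and helper below are illustrative): a lookup walking kset->list can find a kobject whose refcount has already hit zero and whose release is pending, and an unconditional kobject_get() would resurrect it.

	#include <linux/kref.h>

	struct obj {
		struct kref ref;
	};

	static struct obj *obj_lookup_get(struct obj *o)
	{
		/* fails instead of reviving an object that is being
		 * freed, unlike an unconditional kref_get() */
		if (o && !kref_get_unless_zero(&o->ref))
			o = NULL;
		return o;
	}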
diff --git a/lib/list_sort.c b/lib/list_sort.c
index d7325c6b103f..1183fa70a44d 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -229,7 +229,7 @@ static int __init list_sort_test(void)
 			goto exit;
 		}
 		/* force some equivalencies */
-		el->value = random32() % (TEST_LIST_LEN/3);
+		el->value = prandom_u32() % (TEST_LIST_LEN / 3);
 		el->serial = i;
 		el->poison1 = TEST_POISON1;
 		el->poison2 = TEST_POISON2;
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 4407f8c9b1f7..b7c72311ad0c 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -18,6 +18,9 @@ void show_mem(unsigned int filter)
 	printk("Mem-Info:\n");
 	show_free_areas(filter);
 
+	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
+		return;
+
 	for_each_online_pgdat(pgdat) {
 		unsigned long i, flags;
 
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bfe02b8fc55b..d23762e6652c 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -105,9 +105,9 @@ setup_io_tlb_npages(char *str)
 	if (!strcmp(str, "force"))
 		swiotlb_force = 1;
 
-	return 1;
+	return 0;
 }
-__setup("swiotlb=", setup_io_tlb_npages);
+early_param("swiotlb", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
 unsigned long swiotlb_nr_tbl(void)
@@ -115,6 +115,18 @@ unsigned long swiotlb_nr_tbl(void)
 	return io_tlb_nslabs;
 }
 EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
+
+/* default to 64MB */
+#define IO_TLB_DEFAULT_SIZE (64UL<<20)
+unsigned long swiotlb_size_or_default(void)
+{
+	unsigned long size;
+
+	size = io_tlb_nslabs << IO_TLB_SHIFT;
+
+	return size ? size : (IO_TLB_DEFAULT_SIZE);
+}
+
 /* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 				      volatile void *address)
@@ -188,8 +200,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 void __init
 swiotlb_init(int verbose)
 {
-	/* default to 64MB */
-	size_t default_size = 64UL<<20;
+	size_t default_size = IO_TLB_DEFAULT_SIZE;
 	unsigned char *vstart;
 	unsigned long bytes;
 
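
A minimal sketch of the kind of early caller the new helper serves; the initcall below is hypothetical. Code running before swiotlb_init() can now ask how much memory the bounce buffer will want, honoring a "swiotlb=" parameter already parsed by the early_param hook:

	#include <linux/init.h>
	#include <linux/printk.h>
	#include <linux/swiotlb.h>

	static int __init report_swiotlb_size(void)
	{
		/* nslabs from "swiotlb=" if given, else the 64MB default */
		pr_info("swiotlb wants %lu bytes\n", swiotlb_size_or_default());
		return 0;
	}
	early_initcall(report_swiotlb_size);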
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
new file mode 100644
index 000000000000..6f500ef2301d
--- /dev/null
+++ b/lib/ucs2_string.c
@@ -0,0 +1,51 @@
+#include <linux/ucs2_string.h>
+#include <linux/module.h>
+
+/* Return the number of unicode characters in data */
+unsigned long
+ucs2_strnlen(const ucs2_char_t *s, size_t maxlength)
+{
+        unsigned long length = 0;
+
+        while (*s++ != 0 && length < maxlength)
+                length++;
+        return length;
+}
+EXPORT_SYMBOL(ucs2_strnlen);
+
+unsigned long
+ucs2_strlen(const ucs2_char_t *s)
+{
+        return ucs2_strnlen(s, ~0UL);
+}
+EXPORT_SYMBOL(ucs2_strlen);
+
+/*
+ * Return the number of bytes is the length of this string
+ * Note: this is NOT the same as the number of unicode characters
+ */
+unsigned long
+ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength)
+{
+        return ucs2_strnlen(data, maxlength/sizeof(ucs2_char_t)) * sizeof(ucs2_char_t);
+}
+EXPORT_SYMBOL(ucs2_strsize);
+
+int
+ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
+{
+        while (1) {
+                if (len == 0)
+                        return 0;
+                if (*a < *b)
+                        return -1;
+                if (*a > *b)
+                        return 1;
+                if (*a == 0) /* implies *b == 0 */
+                        return 0;
+                a++;
+                b++;
+                len--;
+        }
+}
+EXPORT_SYMBOL(ucs2_strncmp);
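
For context, a sketch of how callers such as the EFI variable code might use these helpers; the wrappers below are hypothetical. The key distinction is that ucs2_strsize() counts bytes while ucs2_strnlen() counts 16-bit characters:

	#include <linux/types.h>
	#include <linux/ucs2_string.h>

	static bool var_name_equal(const ucs2_char_t *a, const ucs2_char_t *b,
				   size_t max_chars)
	{
		/* ucs2_strncmp() returns <0/0/>0 like strncmp(), but over
		 * 16-bit code units rather than bytes */
		return ucs2_strncmp(a, b, max_chars) == 0;
	}

	static unsigned long var_name_bytes(const ucs2_char_t *name,
					    unsigned long max_bytes)
	{
		/* buffer sizing must use the byte count, not the
		 * character count */
		return ucs2_strsize(name, max_bytes);
	}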
diff --git a/lib/uuid.c b/lib/uuid.c
index 52a6fe6387de..398821e4dce1 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -25,13 +25,7 @@
 
 static void __uuid_gen_common(__u8 b[16])
 {
-	int i;
-	u32 r;
-
-	for (i = 0; i < 4; i++) {
-		r = random32();
-		memcpy(b + i * 4, &r, 4);
-	}
+	prandom_bytes(b, 16);
 	/* reversion 0b10 */
 	b[8] = (b[8] & 0x3F) | 0x80;
 }
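
Whatever fills the buffer, the variant bits forced afterwards must survive; a quick check one could write against __uuid_gen_common() output (the helper is illustrative only):

	#include <linux/types.h>

	/* RFC 4122 variant field: top two bits of byte 8 must read 0b10 */
	static bool uuid_variant_ok(const __u8 b[16])
	{
		return (b[8] & 0xC0) == 0x80;
	}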
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 82a04d7ba99e..08837db52d94 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -15,7 +15,7 @@ config XZ_DEC_X86
 
 config XZ_DEC_POWERPC
 	bool "PowerPC BCJ filter decoder"
-	default y if POWERPC
+	default y if PPC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_IA64