summaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/generic-radix-tree.c32
-rw-r--r--lib/string.c21
-rw-r--r--lib/test_meminit.c27
-rw-r--r--lib/vdso/Kconfig9
4 files changed, 53 insertions(+), 36 deletions(-)
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index ae25e2fa2187..f25eb111c051 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -2,6 +2,7 @@
 #include <linux/export.h>
 #include <linux/generic-radix-tree.h>
 #include <linux/gfp.h>
+#include <linux/kmemleak.h>
 
 #define GENRADIX_ARY		(PAGE_SIZE / sizeof(struct genradix_node *))
 #define GENRADIX_ARY_SHIFT	ilog2(GENRADIX_ARY)
@@ -75,6 +76,27 @@ void *__genradix_ptr(struct __genradix *radix, size_t offset)
 }
 EXPORT_SYMBOL(__genradix_ptr);
 
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+	struct genradix_node *node;
+
+	node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
+
+	/*
+	 * We're using pages (not slab allocations) directly for kernel data
+	 * structures, so we need to explicitly inform kmemleak of them in order
+	 * to avoid false positive memory leak reports.
+	 */
+	kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
+	return node;
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+	kmemleak_free(node);
+	free_page((unsigned long)node);
+}
+
 /*
  * Returns pointer to the specified byte @offset within @radix, allocating it if
  * necessary - newly allocated slots are always zeroed out:
@@ -97,8 +119,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
 			break;
 
 		if (!new_node) {
-			new_node = (void *)
-				__get_free_page(gfp_mask|__GFP_ZERO);
+			new_node = genradix_alloc_node(gfp_mask);
 			if (!new_node)
 				return NULL;
 		}
@@ -121,8 +142,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
 		n = READ_ONCE(*p);
 		if (!n) {
 			if (!new_node) {
-				new_node = (void *)
-					__get_free_page(gfp_mask|__GFP_ZERO);
+				new_node = genradix_alloc_node(gfp_mask);
 				if (!new_node)
 					return NULL;
 			}
@@ -133,7 +153,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
 	}
 
 	if (new_node)
-		free_page((unsigned long) new_node);
+		genradix_free_node(new_node);
 
 	return &n->data[offset];
 }
@@ -191,7 +211,7 @@ static void genradix_free_recurse(struct genradix_node *n, unsigned level)
 			genradix_free_recurse(n->children[i], level - 1);
 	}
 
-	free_page((unsigned long) n);
+	genradix_free_node(n);
 }
 
 int __genradix_prealloc(struct __genradix *radix, size_t size,
diff --git a/lib/string.c b/lib/string.c
index cd7a10c19210..08ec58cc673b 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -748,27 +748,6 @@ void *memset(void *s, int c, size_t count)
 EXPORT_SYMBOL(memset);
 #endif
 
-/**
- * memzero_explicit - Fill a region of memory (e.g. sensitive
- * keying data) with 0s.
- * @s: Pointer to the start of the area.
- * @count: The size of the area.
- *
- * Note: usually using memset() is just fine (!), but in cases
- * where clearing out _local_ data at the end of a scope is
- * necessary, memzero_explicit() should be used instead in
- * order to prevent the compiler from optimising away zeroing.
- *
- * memzero_explicit() doesn't need an arch-specific version as
- * it just invokes the one of memset() implicitly.
- */
-void memzero_explicit(void *s, size_t count)
-{
-	memset(s, 0, count);
-	barrier_data(s);
-}
-EXPORT_SYMBOL(memzero_explicit);
-
 #ifndef __HAVE_ARCH_MEMSET16
 /**
  * memset16() - Fill a memory area with a uint16_t
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 9729f271d150..9742e5cb853a 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -297,6 +297,32 @@ out:
 	return 1;
 }
 
+static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
+{
+	struct kmem_cache *c;
+	int i, iter, maxiter = 1024;
+	int num, bytes;
+	bool fail = false;
+	void *objects[10];
+
+	c = kmem_cache_create("test_cache", size, size, 0, NULL);
+	for (iter = 0; (iter < maxiter) && !fail; iter++) {
+		num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects),
+					    objects);
+		for (i = 0; i < num; i++) {
+			bytes = count_nonzero_bytes(objects[i], size);
+			if (bytes)
+				fail = true;
+			fill_with_garbage(objects[i], size);
+		}
+
+		if (num)
+			kmem_cache_free_bulk(c, num, objects);
+	}
+	*total_failures += fail;
+	return 1;
+}
+
 /*
  * Test kmem_cache allocation by creating caches of different sizes, with and
  * without constructors, with and without SLAB_TYPESAFE_BY_RCU.
@@ -318,6 +344,7 @@ static int __init test_kmemcache(int *total_failures)
 			num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
 							&failures);
 		}
+		num_tests += do_kmem_cache_size_bulk(size, &failures);
 	}
 	REPORT_FAILURES_IN_FN();
 	*total_failures += failures;
diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig
index cc00364bd2c2..9fe698ff62ec 100644
--- a/lib/vdso/Kconfig
+++ b/lib/vdso/Kconfig
@@ -24,13 +24,4 @@ config GENERIC_COMPAT_VDSO
 	help
 	  This config option enables the compat VDSO layer.
 
-config CROSS_COMPILE_COMPAT_VDSO
-	string "32 bit Toolchain prefix for compat vDSO"
-	default ""
-	depends on GENERIC_COMPAT_VDSO
-	help
-	  Defines the cross-compiler prefix for compiling compat vDSO.
-	  If a 64 bit compiler (i.e. x86_64) can compile the VDSO for
-	  32 bit, it does not need to define this parameter.
-
 endif