Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig            6
-rw-r--r--  lib/Kconfig.debug      6
-rw-r--r--  lib/Kconfig.kmemcheck  91
-rw-r--r--  lib/Makefile           2
-rw-r--r--  lib/atomic64.c         175
-rw-r--r--  lib/kobject.c          7
6 files changed, 284 insertions, 3 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 9960be04cbbe..bb1326d3839c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -194,4 +194,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
 config NLATTR
 	bool
 
+#
+# Generic 64-bit atomic support is selected if needed
+#
+config GENERIC_ATOMIC64
+	bool
+
 endmenu
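
The new GENERIC_ATOMIC64 symbol is a hidden bool: an architecture without native 64-bit atomic instructions selects it and is given the spinlocked fallback implementation added in lib/atomic64.c below, behind the same atomic64_t API. A minimal caller-side sketch in C (not part of this patch; the counter and function names are made up, and the ATOMIC64_INIT initializer is assumed from the matching asm-generic header):

#include <asm/atomic.h>

/* 64-bit statistics counter; works with native or generic atomics */
static atomic64_t total_bytes = ATOMIC64_INIT(0);

static void account_bytes(long long n)
{
	/* single instruction on capable CPUs, spinlocked otherwise */
	atomic64_add(n, &total_bytes);
}

static long long read_total(void)
{
	return atomic64_read(&total_bytes);
}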
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 116a35051be6..6b0c2d8a2129 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -300,7 +300,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
 
 config DEBUG_SLAB
 	bool "Debug slab memory allocations"
-	depends on DEBUG_KERNEL && SLAB
+	depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
@@ -312,7 +312,7 @@ config DEBUG_SLAB_LEAK
 
 config SLUB_DEBUG_ON
 	bool "SLUB debugging on by default"
-	depends on SLUB && SLUB_DEBUG
+	depends on SLUB && SLUB_DEBUG && !KMEMCHECK
 	default n
 	help
 	  Boot with debugging on by default. SLUB boots by default with
@@ -996,3 +996,5 @@ config DMA_API_DEBUG
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
+
+source "lib/Kconfig.kmemcheck"
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
new file mode 100644
index 000000000000..603c81b66549
--- /dev/null
+++ b/lib/Kconfig.kmemcheck
@@ -0,0 +1,91 @@
+config HAVE_ARCH_KMEMCHECK
+	bool
+
+menuconfig KMEMCHECK
+	bool "kmemcheck: trap use of uninitialized memory"
+	depends on DEBUG_KERNEL
+	depends on !X86_USE_3DNOW
+	depends on SLUB || SLAB
+	depends on !CC_OPTIMIZE_FOR_SIZE
+	depends on !FUNCTION_TRACER
+	select FRAME_POINTER
+	select STACKTRACE
+	default n
+	help
+	  This option enables tracing of dynamically allocated kernel memory
+	  to see if memory is used before it has been given an initial value.
+	  Be aware that this requires half of your memory for bookkeeping and
+	  will insert extra code at *every* read and write to tracked memory,
+	  thus slowing down the kernel code (but user code is unaffected).
+
+	  The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
+	  or enable kmemcheck at boot-time. If the kernel is started with
+	  kmemcheck=0, the large memory and CPU overhead is not incurred.
+
+choice
+	prompt "kmemcheck: default mode at boot"
+	depends on KMEMCHECK
+	default KMEMCHECK_ONESHOT_BY_DEFAULT
+	help
+	  This option controls the default behaviour of kmemcheck when the
+	  kernel boots and no kmemcheck= parameter is given.
+
+config KMEMCHECK_DISABLED_BY_DEFAULT
+	bool "disabled"
+	depends on KMEMCHECK
+
+config KMEMCHECK_ENABLED_BY_DEFAULT
+	bool "enabled"
+	depends on KMEMCHECK
+
+config KMEMCHECK_ONESHOT_BY_DEFAULT
+	bool "one-shot"
+	depends on KMEMCHECK
+	help
+	  In one-shot mode, only the first error detected is reported before
+	  kmemcheck is disabled.
+
+endchoice
+
+config KMEMCHECK_QUEUE_SIZE
+	int "kmemcheck: error queue size"
+	depends on KMEMCHECK
+	default 64
+	help
+	  Select the maximum number of errors to store in the queue. Since
+	  errors can occur virtually anywhere and in any context, we need a
+	  temporary storage area which is guaranteed not to generate any
+	  other faults. The queue will be emptied as soon as a tasklet may
+	  be scheduled. If the queue is full, new error reports will be
+	  lost.
+
+config KMEMCHECK_SHADOW_COPY_SHIFT
+	int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
+	depends on KMEMCHECK
+	range 2 8
+	default 5
+	help
+	  Select the number of shadow bytes to save along with each entry of
+	  the queue. These bytes indicate what parts of an allocation are
+	  initialized, uninitialized, etc. and will be displayed when an
+	  error is detected to help the debugging of a particular problem.
+
+config KMEMCHECK_PARTIAL_OK
+	bool "kmemcheck: allow partially uninitialized memory"
+	depends on KMEMCHECK
+	default y
+	help
+	  This option works around certain GCC optimizations that produce
+	  32-bit reads from 16-bit variables where the upper 16 bits are
+	  thrown away afterwards. This may of course also hide some real
+	  bugs.
+
+config KMEMCHECK_BITOPS_OK
+	bool "kmemcheck: allow bit-field manipulation"
+	depends on KMEMCHECK
+	default n
+	help
+	  This option silences warnings that would be generated for bit-field
+	  accesses where not all the bits are initialized at the same time.
+	  This may also hide some real bugs.
+
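
To illustrate what the tracker looks for, here is a sketch (not part of this patch) of the class of bug kmemcheck traps: a load from kmalloc()ed memory that has never been stored to. The struct and function names are hypothetical:

#include <linux/kernel.h>
#include <linux/slab.h>

struct foo {
	int initialized;
	int uninitialized;
};

static int example(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	f->initialized = 1;
	/* kmemcheck flags this load: the field was never written */
	if (f->uninitialized)
		pr_info("read garbage\n");
	kfree(f);
	return 0;
}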
diff --git a/lib/Makefile b/lib/Makefile
index 34c5c0e6222e..8e9bcf9d3261 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -95,6 +95,8 @@ obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
 
 obj-$(CONFIG_GENERIC_CSUM) += checksum.o
 
+obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
+
 hostprogs-y := gen_crc32table
 clean-files := crc32table.h
 
diff --git a/lib/atomic64.c b/lib/atomic64.c
new file mode 100644
index 000000000000..c5e725562416
--- /dev/null
+++ b/lib/atomic64.c
@@ -0,0 +1,175 @@
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+
+/*
+ * We use a hashed array of spinlocks to provide exclusive access
+ * to each atomic64_t variable. Since this is expected to be used on
+ * systems with small numbers of CPUs (<= 4 or so), we use a
+ * relatively small array of 16 spinlocks to avoid wasting too much
+ * memory on the spinlock array.
+ */
+#define NR_LOCKS	16
+
+/*
+ * Ensure each lock is in a separate cacheline.
+ */
+static union {
+	spinlock_t lock;
+	char pad[L1_CACHE_BYTES];
+} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
+
+static inline spinlock_t *lock_addr(const atomic64_t *v)
+{
+	unsigned long addr = (unsigned long) v;
+
+	addr >>= L1_CACHE_SHIFT;
+	addr ^= (addr >> 8) ^ (addr >> 16);
+	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
+}
+
+long long atomic64_read(const atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+void atomic64_set(atomic64_t *v, long long i)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter = i;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+void atomic64_add(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter += a;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+long long atomic64_add_return(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter += a;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+void atomic64_sub(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+
+	spin_lock_irqsave(lock, flags);
+	v->counter -= a;
+	spin_unlock_irqrestore(lock, flags);
+}
+
+long long atomic64_sub_return(long long a, atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter -= a;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_dec_if_positive(atomic64_t *v)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter - 1;
+	if (val >= 0)
+		v->counter = val;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	if (val == o)
+		v->counter = n;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+long long atomic64_xchg(atomic64_t *v, long long new)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	long long val;
+
+	spin_lock_irqsave(lock, flags);
+	val = v->counter;
+	v->counter = new;
+	spin_unlock_irqrestore(lock, flags);
+	return val;
+}
+
+int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+	unsigned long flags;
+	spinlock_t *lock = lock_addr(v);
+	int ret = 1;
+
+	spin_lock_irqsave(lock, flags);
+	if (v->counter != u) {
+		v->counter += a;
+		ret = 0;
+	}
+	spin_unlock_irqrestore(lock, flags);
+	return ret;
+}
+
+static int init_atomic64_lock(void)
+{
+	int i;
+
+	for (i = 0; i < NR_LOCKS; ++i)
+		spin_lock_init(&atomic64_lock[i].lock);
+	return 0;
+}
+
+pure_initcall(init_atomic64_lock);
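
Atomicity here comes from the fact that every operation on a given atomic64_t hashes to the same spinlock via lock_addr(), while unrelated variables tend to spread across the 16 locks. Callers can compose further read-modify-write operations from atomic64_read() and atomic64_cmpxchg() with the usual compare-and-swap retry loop; a sketch (not part of this patch, with an invented name and capping rule):

/* add "a" to *v, but never let the counter exceed "limit" */
static long long atomic64_add_capped(atomic64_t *v, long long a,
				     long long limit)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + a;
		if (new > limit)
			new = limit;
	} while (atomic64_cmpxchg(v, old, new) != old);

	return new;
}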
diff --git a/lib/kobject.c b/lib/kobject.c
index bacf6fe4f7a0..b512b746d2af 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -793,11 +793,16 @@ static struct kset *kset_create(const char *name,
 					    struct kobject *parent_kobj)
 {
 	struct kset *kset;
+	int retval;
 
 	kset = kzalloc(sizeof(*kset), GFP_KERNEL);
 	if (!kset)
 		return NULL;
-	kobject_set_name(&kset->kobj, name);
+	retval = kobject_set_name(&kset->kobj, name);
+	if (retval) {
+		kfree(kset);
+		return NULL;
+	}
 	kset->uevent_ops = uevent_ops;
 	kset->kobj.parent = parent_kobj;
 
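
kset_create() is static to lib/kobject.c; callers reach it through kset_create_and_add(), so the new failure path surfaces simply as a NULL return. A caller-side sketch (not part of this patch), assuming the exported kset_create_and_add() and kernel_kobj symbols; the "example" kset name is made up:

#include <linux/kobject.h>

static struct kset *example_kset;

static int __init example_init(void)
{
	/* NULL now also covers a failed kobject_set_name() inside */
	example_kset = kset_create_and_add("example", NULL, kernel_kobj);
	if (!example_kset)
		return -ENOMEM;
	return 0;
}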