author	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 16:09:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 16:09:51 -0400
commit	b3fec0fe35a4ff048484f1408385a27695d4273b (patch)
tree	088c23f098421ea681d9976a83aad73d15be1027 /include/linux
parent	e1f5b94fd0c93c3e27ede88b7ab652d086dc960f (diff)
parent	722f2a6c87f34ee0fd0130a8cf45f81e0705594a (diff)
Merge branch 'for-linus2' of git://git.kernel.org/pub/scm/linux/kernel/git/vegard/kmemcheck
* 'for-linus2' of git://git.kernel.org/pub/scm/linux/kernel/git/vegard/kmemcheck: (39 commits)
  signal: fix __send_signal() false positive kmemcheck warning
  fs: fix do_mount_root() false positive kmemcheck warning
  fs: introduce __getname_gfp()
  trace: annotate bitfields in struct ring_buffer_event
  net: annotate struct sock bitfield
  c2port: annotate bitfield for kmemcheck
  net: annotate inet_timewait_sock bitfields
  ieee1394/csr1212: fix false positive kmemcheck report
  ieee1394: annotate bitfield
  net: annotate bitfields in struct inet_sock
  net: use kmemcheck bitfields API for skbuff
  kmemcheck: introduce bitfield API
  kmemcheck: add opcode self-testing at boot
  x86: unify pte_hidden
  x86: make _PAGE_HIDDEN conditional
  kmemcheck: make kconfig accessible for other architectures
  kmemcheck: enable in the x86 Kconfig
  kmemcheck: add hooks for the page allocator
  kmemcheck: add hooks for page- and sg-dma-mappings
  kmemcheck: don't track page tables
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/c2port.h       |   3
-rw-r--r--  include/linux/fs.h           |   5
-rw-r--r--  include/linux/gfp.h          |  14
-rw-r--r--  include/linux/interrupt.h    |  14
-rw-r--r--  include/linux/kmemcheck.h    | 153
-rw-r--r--  include/linux/mm_types.h     |   8
-rw-r--r--  include/linux/ring_buffer.h  |   4
-rw-r--r--  include/linux/skbuff.h       |   7
-rw-r--r--  include/linux/slab.h         |   7
-rw-r--r--  include/linux/slab_def.h     |  81
-rw-r--r--  include/linux/stacktrace.h   |   3
11 files changed, 296 insertions(+), 3 deletions(-)
diff --git a/include/linux/c2port.h b/include/linux/c2port.h
index 7b5a2388ba6..2a5cd867c36 100644
--- a/include/linux/c2port.h
+++ b/include/linux/c2port.h
@@ -10,6 +10,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/kmemcheck.h>
 
 #define C2PORT_NAME_LEN	32
 
@@ -20,8 +21,10 @@
 /* Main struct */
 struct c2port_ops;
 struct c2port_device {
+	kmemcheck_bitfield_begin(flags);
 	unsigned int access:1;
 	unsigned int flash_access:1;
+	kmemcheck_bitfield_end(flags);
 
 	int id;
 	char name[C2PORT_NAME_LEN];
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ede84fa7da5..6d12174fbe1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1919,8 +1919,9 @@ extern void __init vfs_caches_init(unsigned long);
 
 extern struct kmem_cache *names_cachep;
 
-#define __getname()		kmem_cache_alloc(names_cachep, GFP_KERNEL)
+#define __getname_gfp(gfp)	kmem_cache_alloc(names_cachep, (gfp))
+#define __getname()		__getname_gfp(GFP_KERNEL)
 #define __putname(name)		kmem_cache_free(names_cachep, (void *)(name))
 #ifndef CONFIG_AUDITSYSCALL
 #define putname(name)		__putname(name)
 #else
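
The new __getname_gfp() exists so callers can thread kmemcheck-related gfp bits into the name-buffer allocation (see "fs: introduce __getname_gfp()" and the do_mount_root() fix in the shortlog above). A minimal caller sketch, assuming the __GFP_NOTRACK_FALSE_POSITIVE flag added to gfp.h below (hypothetical code, not taken verbatim from this merge):

	/* The buffer is deliberately only partially initialized before use,
	 * which kmemcheck would flag; annotate the allocation instead. */
	char *name = __getname_gfp(GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);
	if (!name)
		return -ENOMEM;
	/* ... fill and use the buffer ... */
	__putname(name);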
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 3760e7c5de0..80e14b8c2e7 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -52,7 +52,19 @@ struct vm_area_struct;
 #define __GFP_RECLAIMABLE	((__force gfp_t)0x80000u)  /* Page is reclaimable */
 #define __GFP_MOVABLE	((__force gfp_t)0x100000u)  /* Page is movable */
 
-#define __GFP_BITS_SHIFT 21	/* Room for 21 __GFP_FOO bits */
+#ifdef CONFIG_KMEMCHECK
+#define __GFP_NOTRACK	((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
+#else
+#define __GFP_NOTRACK	((__force gfp_t)0)
+#endif
+
+/*
+ * This may seem redundant, but it's a way of annotating false positives vs.
+ * allocations that simply cannot be supported (e.g. page tables).
+ */
+#define __GFP_NOTRACK_FALSE_POSITIVE	(__GFP_NOTRACK)
+
+#define __GFP_BITS_SHIFT 22	/* Room for 22 __GFP_FOO bits */
 #define __GFP_BITS_MASK	((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
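
To make the distinction in that comment concrete, a hedged sketch of the two call-site idioms (hypothetical allocations, not from this merge):

	/* Memory kmemcheck fundamentally cannot track, e.g. page tables: */
	struct page *pg = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);

	/* Memory that trips a known false-positive report: */
	void *obj = kmalloc(64, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);

Since __GFP_NOTRACK is defined as 0 without CONFIG_KMEMCHECK, both idioms compile down to plain GFP_KERNEL allocations on untracked kernels.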
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c41e812e9d5..2721f07e935 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -472,6 +472,20 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t)
 		__tasklet_hi_schedule(t);
 }
 
+extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
+
+/*
+ * This version avoids touching any other tasklets. Needed for kmemcheck
+ * in order not to take any page faults while enqueueing this tasklet;
+ * consider VERY carefully whether you really need this or
+ * tasklet_hi_schedule()...
+ */
+static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+		__tasklet_hi_schedule_first(t);
+}
+
 
 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
 {
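
For illustration only, a caller sketch (hypothetical names; in this merge the intended user is kmemcheck itself, which must not fault while enqueueing, and the comment above warns against casual use):

	static void my_handler(unsigned long data)
	{
		/* deferred work runs here */
	}
	static DECLARE_TASKLET(my_tasklet, my_handler, 0);

	static void kick_from_nofault_context(void)
	{
		/* Enqueue at the head of the queue without touching
		 * any other pending tasklets. */
		tasklet_hi_schedule_first(&my_tasklet);
	}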
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
new file mode 100644
index 00000000000..47b39b7c7e8
--- /dev/null
+++ b/include/linux/kmemcheck.h
@@ -0,0 +1,153 @@
+#ifndef LINUX_KMEMCHECK_H
+#define LINUX_KMEMCHECK_H
+
+#include <linux/mm_types.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KMEMCHECK
+extern int kmemcheck_enabled;
+
+/* The slab-related functions. */
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
+void kmemcheck_free_shadow(struct page *page, int order);
+void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+			  size_t size);
+void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
+
+void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
+			       gfp_t gfpflags);
+
+void kmemcheck_show_pages(struct page *p, unsigned int n);
+void kmemcheck_hide_pages(struct page *p, unsigned int n);
+
+bool kmemcheck_page_is_tracked(struct page *p);
+
+void kmemcheck_mark_unallocated(void *address, unsigned int n);
+void kmemcheck_mark_uninitialized(void *address, unsigned int n);
+void kmemcheck_mark_initialized(void *address, unsigned int n);
+void kmemcheck_mark_freed(void *address, unsigned int n);
+
+void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
+
+int kmemcheck_show_addr(unsigned long address);
+int kmemcheck_hide_addr(unsigned long address);
+
+#else
+#define kmemcheck_enabled 0
+
+static inline void
+kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
+{
+}
+
+static inline void
+kmemcheck_free_shadow(struct page *page, int order)
+{
+}
+
+static inline void
+kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+		     size_t size)
+{
+}
+
+static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
+				       size_t size)
+{
+}
+
+static inline void kmemcheck_pagealloc_alloc(struct page *p,
+	unsigned int order, gfp_t gfpflags)
+{
+}
+
+static inline bool kmemcheck_page_is_tracked(struct page *p)
+{
+	return false;
+}
+
+static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_freed(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_unallocated_pages(struct page *p,
+						    unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
+						      unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized_pages(struct page *p,
+						    unsigned int n)
+{
+}
+
+#endif /* CONFIG_KMEMCHECK */
+
+/*
+ * Bitfield annotations
+ *
+ * How to use: If you have a struct using bitfields, for example
+ *
+ *	struct a {
+ *		int x:8, y:8;
+ *	};
+ *
+ * then this should be rewritten as
+ *
+ *	struct a {
+ *		kmemcheck_bitfield_begin(flags);
+ *		int x:8, y:8;
+ *		kmemcheck_bitfield_end(flags);
+ *	};
+ *
+ * Now the "flags_begin" and "flags_end" members may be used to refer to the
+ * beginning and end, respectively, of the bitfield (and things like
+ * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
+ * fields should be annotated:
+ *
+ *	struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
+ *	kmemcheck_annotate_bitfield(a, flags);
+ *
+ * Note: We provide the same definitions for both kmemcheck and non-
+ * kmemcheck kernels. This makes it harder to introduce accidental errors. It
+ * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
+ */
+#define kmemcheck_bitfield_begin(name)	\
+	int name##_begin[0];
+
+#define kmemcheck_bitfield_end(name)	\
+	int name##_end[0];
+
+#define kmemcheck_annotate_bitfield(ptr, name)				\
+	do if (ptr) {							\
+		int _n = (long) &((ptr)->name##_end)			\
+			- (long) &((ptr)->name##_begin);		\
+		BUILD_BUG_ON(_n < 0);					\
+									\
+		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
+	} while (0)
+
+#define kmemcheck_annotate_variable(var)				\
+	do {								\
+		kmemcheck_mark_initialized(&(var), sizeof(var));	\
+	} while (0)							\
+
+#endif /* LINUX_KMEMCHECK_H */
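
Pulling the header's pieces together, a self-contained sketch of the bitfield API applied to a hypothetical struct (the names are illustrative; the pattern is exactly the one documented in the comment above):

	struct flow_entry {
		kmemcheck_bitfield_begin(flags);
		unsigned int active:1,
			     offloaded:1,
			     dir:2;
		kmemcheck_bitfield_end(flags);
		u32 hash;
	};

	struct flow_entry *flow_entry_alloc(gfp_t gfp)
	{
		struct flow_entry *e = kmalloc(sizeof(*e), gfp);

		/* Padding inside the bitfield word is never explicitly
		 * written, so mark the whole begin..end range initialized;
		 * a NULL pointer is tolerated by the macro. */
		kmemcheck_annotate_bitfield(e, flags);
		return e;
	}

The zero-length name##_begin/name##_end arrays occupy no storage, so the annotation costs nothing in struct layout on both kmemcheck and non-kmemcheck builds.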
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0e80e26ecf2..0042090a4d7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -98,6 +98,14 @@ struct page {
 #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
 	unsigned long debug_flags;	/* Use atomic bitops on this */
 #endif
+
+#ifdef CONFIG_KMEMCHECK
+	/*
+	 * kmemcheck wants to track the status of each byte in a page; this
+	 * is a pointer to such a status block. NULL if not tracked.
+	 */
+	void *shadow;
+#endif
 };
 
 /*
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 8670f1575fe..29f8599e6be 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -1,6 +1,7 @@
 #ifndef _LINUX_RING_BUFFER_H
 #define _LINUX_RING_BUFFER_H
 
+#include <linux/kmemcheck.h>
 #include <linux/mm.h>
 #include <linux/seq_file.h>
 
@@ -11,7 +12,10 @@ struct ring_buffer_iter;
  * Don't refer to this struct directly, use functions below.
  */
 struct ring_buffer_event {
+	kmemcheck_bitfield_begin(bitfield);
 	u32		type_len:5, time_delta:27;
+	kmemcheck_bitfield_end(bitfield);
+
 	u32		array[];
 };
 
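
Per the shortlog entry "trace: annotate bitfields in struct ring_buffer_event", the ring-buffer code is then expected to annotate each event when it is carved out of raw buffer-page memory; roughly like this (a sketch of the idea, not the verbatim kernel code):

	static void rb_event_init_sketch(struct ring_buffer_event *event,
					 u32 type_len, u32 delta)
	{
		/* The event lies in freshly reserved page memory, so the
		 * bitfield word holds stale bytes; annotate it before the
		 * first read-modify-write of the fields. */
		kmemcheck_annotate_bitfield(event, bitfield);
		event->type_len = type_len;
		event->time_delta = delta;
	}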
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fa51293f270..63ef24bc01d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -15,6 +15,7 @@
 #define _LINUX_SKBUFF_H
 
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/compiler.h>
 #include <linux/time.h>
 #include <linux/cache.h>
@@ -343,6 +344,7 @@ struct sk_buff {
 		};
 	};
 	__u32			priority;
+	kmemcheck_bitfield_begin(flags1);
 	__u8			local_df:1,
 				cloned:1,
 				ip_summed:2,
@@ -353,6 +355,7 @@ struct sk_buff {
 				ipvs_property:1,
 				peeked:1,
 				nf_trace:1;
+	kmemcheck_bitfield_end(flags1);
 	__be16			protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
@@ -372,12 +375,16 @@ struct sk_buff {
 	__u16			tc_verd;	/* traffic control verdict */
 #endif
 #endif
+
+	kmemcheck_bitfield_begin(flags2);
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
 	__u8			ndisc_nodetype:2;
 #endif
 #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
 	__u8			do_not_encrypt:1;
 #endif
+	kmemcheck_bitfield_end(flags2);
+
 	/* 0/13/14 bit hole */
 
 #ifdef CONFIG_NET_DMA
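
Matching the shortlog entry "net: use kmemcheck bitfields API for skbuff", the allocation path then has to annotate both ranges; approximately (a sketch of the idea, not the verbatim __alloc_skb() code):

	struct sk_buff *skb = kmem_cache_alloc_node(cache, gfp_mask, node);

	if (skb) {
		/* Both bitfield ranges start out as raw slab memory; mark
		 * them initialized so reads of individual flags don't warn. */
		kmemcheck_annotate_bitfield(skb, flags1);
		kmemcheck_annotate_bitfield(skb, flags2);
	}

Note that flags2 spans members that exist only under certain config options; the begin/end markers keep the annotated range correct regardless of which of them are compiled in.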
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 219b8fb4651..2da8372519f 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -64,6 +64,13 @@
 
 #define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
 
+/* Don't track use of uninitialized memory */
+#ifdef CONFIG_KMEMCHECK
+# define SLAB_NOTRACK		0x01000000UL
+#else
+# define SLAB_NOTRACK		0x00000000UL
+#endif
+
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
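
A short usage sketch (hypothetical cache; because SLAB_NOTRACK is 0 without CONFIG_KMEMCHECK, it is always safe to pass):

	static struct kmem_cache *pgd_cache;

	/* Page-table pages cannot usefully be tracked by kmemcheck
	 * (cf. "kmemcheck: don't track page tables" in the shortlog). */
	pgd_cache = kmem_cache_create("pgd_cache", PAGE_SIZE, PAGE_SIZE,
				      SLAB_PANIC | SLAB_NOTRACK, NULL);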
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 713f841ecaa..850d057500d 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -16,6 +16,87 @@
 #include <linux/compiler.h>
 #include <linux/kmemtrace.h>
 
+/*
+ * struct kmem_cache
+ *
+ * manages a cache.
+ */
+
+struct kmem_cache {
+/* 1) per-cpu data, touched during every alloc/free */
+	struct array_cache *array[NR_CPUS];
+/* 2) Cache tunables. Protected by cache_chain_mutex */
+	unsigned int batchcount;
+	unsigned int limit;
+	unsigned int shared;
+
+	unsigned int buffer_size;
+	u32 reciprocal_buffer_size;
+/* 3) touched by every alloc & free from the backend */
+
+	unsigned int flags;		/* constant flags */
+	unsigned int num;		/* # of objs per slab */
+
+/* 4) cache_grow/shrink */
+	/* order of pgs per slab (2^n) */
+	unsigned int gfporder;
+
+	/* force GFP flags, e.g. GFP_DMA */
+	gfp_t gfpflags;
+
+	size_t colour;			/* cache colouring range */
+	unsigned int colour_off;	/* colour offset */
+	struct kmem_cache *slabp_cache;
+	unsigned int slab_size;
+	unsigned int dflags;		/* dynamic flags */
+
+	/* constructor func */
+	void (*ctor)(void *obj);
+
+/* 5) cache creation/removal */
+	const char *name;
+	struct list_head next;
+
+/* 6) statistics */
+#ifdef CONFIG_DEBUG_SLAB
+	unsigned long num_active;
+	unsigned long num_allocations;
+	unsigned long high_mark;
+	unsigned long grown;
+	unsigned long reaped;
+	unsigned long errors;
+	unsigned long max_freeable;
+	unsigned long node_allocs;
+	unsigned long node_frees;
+	unsigned long node_overflow;
+	atomic_t allochit;
+	atomic_t allocmiss;
+	atomic_t freehit;
+	atomic_t freemiss;
+
+	/*
+	 * If debugging is enabled, then the allocator can add additional
+	 * fields and/or padding to every object. buffer_size contains the total
+	 * object size including these internal fields, the following two
+	 * variables contain the offset to the user object and its size.
+	 */
+	int obj_offset;
+	int obj_size;
+#endif /* CONFIG_DEBUG_SLAB */
+
+	/*
+	 * We put nodelists[] at the end of kmem_cache, because we want to size
+	 * this array to nr_node_ids slots instead of MAX_NUMNODES
+	 * (see kmem_cache_init())
+	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
+	 * is statically defined, so we reserve the max number of nodes.
+	 */
+	struct kmem_list3 *nodelists[MAX_NUMNODES];
+	/*
+	 * Do not add fields after nodelists[]
+	 */
+};
+
 /* Size description struct for general caches. */
 struct cache_sizes {
 	size_t cs_size;
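
The nodelists[] comment describes a sizing trick worth spelling out: the array is declared [MAX_NUMNODES] so that the statically defined cache_cache is safe, but dynamically created caches only pay for nr_node_ids slots. A hedged sketch of the size computation kmem_cache_init() is said to perform:

	/* Effective allocation size of a dynamic kmem_cache: everything up
	 * to nodelists[], plus one list pointer per node actually present. */
	size_t cache_size = offsetof(struct kmem_cache, nodelists) +
			    nr_node_ids * sizeof(struct kmem_list3 *);

This is also why the comment insists that no fields be added after nodelists[]: anything placed there would fall outside the truncated allocation.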
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 1a8cecc4f38..51efbef38fb 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -4,6 +4,8 @@
 struct task_struct;
 
 #ifdef CONFIG_STACKTRACE
+struct task_struct;
+
 struct stack_trace {
 	unsigned int nr_entries, max_entries;
 	unsigned long *entries;
@@ -11,6 +13,7 @@ struct stack_trace {
 };
 
 extern void save_stack_trace(struct stack_trace *trace);
+extern void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp);
 extern void save_stack_trace_tsk(struct task_struct *tsk,
 				struct stack_trace *trace);
 
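
A brief sketch of what the new entry point enables (hypothetical caller; kmemcheck wants to record a stack starting from a saved frame pointer rather than from the current frame):

	unsigned long entries[16];
	struct stack_trace trace = {
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
	};

	/* bp: a frame pointer captured earlier. Walk the stack from there
	 * instead of from save_stack_trace()'s own call site. */
	save_stack_trace_bp(&trace, bp);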