diff options
Diffstat (limited to 'include/linux')
-rw-r--r-- | include/linux/gfp.h | 9 | ||||
-rw-r--r-- | include/linux/kmemcheck.h | 47 | ||||
-rw-r--r-- | include/linux/slab.h | 7 |
3 files changed, 62 insertions, 1 deletion
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0bbc15f54536..daeaa8fe1bbd 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -51,8 +51,15 @@ struct vm_area_struct; | |||
51 | #define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */ | 51 | #define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */ |
52 | #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */ | 52 | #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */ |
53 | #define __GFP_MOVABLE ((__force gfp_t)0x100000u) /* Page is movable */ | 53 | #define __GFP_MOVABLE ((__force gfp_t)0x100000u) /* Page is movable */ |
54 | #define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */ | ||
54 | 55 | ||
55 | #define __GFP_BITS_SHIFT 21 /* Room for 21 __GFP_FOO bits */ | 56 | /* |
57 | * This may seem redundant, but it's a way of annotating false positives vs. | ||
58 | * allocations that simply cannot be supported (e.g. page tables). | ||
59 | */ | ||
60 | #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) | ||
61 | |||
62 | #define __GFP_BITS_SHIFT 22 /* Room for 22 __GFP_FOO bits */ | ||
56 | #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) | 63 | #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) |
57 | 64 | ||
58 | /* This equals 0, but use constants in case they ever change */ | 65 | /* This equals 0, but use constants in case they ever change */ |
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h index 39480c91b2f9..5b65f4ebeadf 100644 --- a/include/linux/kmemcheck.h +++ b/include/linux/kmemcheck.h | |||
@@ -7,11 +7,58 @@ | |||
7 | #ifdef CONFIG_KMEMCHECK | 7 | #ifdef CONFIG_KMEMCHECK |
8 | extern int kmemcheck_enabled; | 8 | extern int kmemcheck_enabled; |
9 | 9 | ||
10 | /* The slab-related functions. */ | ||
11 | void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node, | ||
12 | struct page *page, int order); | ||
13 | void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order); | ||
14 | void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, | ||
15 | size_t size); | ||
16 | void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size); | ||
17 | |||
18 | void kmemcheck_show_pages(struct page *p, unsigned int n); | ||
19 | void kmemcheck_hide_pages(struct page *p, unsigned int n); | ||
20 | |||
21 | bool kmemcheck_page_is_tracked(struct page *p); | ||
22 | |||
23 | void kmemcheck_mark_unallocated(void *address, unsigned int n); | ||
24 | void kmemcheck_mark_uninitialized(void *address, unsigned int n); | ||
25 | void kmemcheck_mark_initialized(void *address, unsigned int n); | ||
26 | void kmemcheck_mark_freed(void *address, unsigned int n); | ||
27 | |||
28 | void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n); | ||
29 | void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n); | ||
30 | |||
10 | int kmemcheck_show_addr(unsigned long address); | 31 | int kmemcheck_show_addr(unsigned long address); |
11 | int kmemcheck_hide_addr(unsigned long address); | 32 | int kmemcheck_hide_addr(unsigned long address); |
12 | #else | 33 | #else |
13 | #define kmemcheck_enabled 0 | 34 | #define kmemcheck_enabled 0 |
14 | 35 | ||
36 | static inline void | ||
37 | kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node, | ||
38 | struct page *page, int order) | ||
39 | { | ||
40 | } | ||
41 | |||
42 | static inline void | ||
43 | kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order) | ||
44 | { | ||
45 | } | ||
46 | |||
47 | static inline void | ||
48 | kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, | ||
49 | size_t size) | ||
50 | { | ||
51 | } | ||
52 | |||
53 | static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object, | ||
54 | size_t size) | ||
55 | { | ||
56 | } | ||
57 | |||
58 | static inline bool kmemcheck_page_is_tracked(struct page *p) | ||
59 | { | ||
60 | return false; | ||
61 | } | ||
15 | #endif /* CONFIG_KMEMCHECK */ | 62 | #endif /* CONFIG_KMEMCHECK */ |
16 | 63 | ||
17 | #endif /* LINUX_KMEMCHECK_H */ | 64 | #endif /* LINUX_KMEMCHECK_H */ |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 48803064cedf..e339fcf17cd3 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -64,6 +64,13 @@ | |||
64 | 64 | ||
65 | #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ | 65 | #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ |
66 | 66 | ||
67 | /* Don't track use of uninitialized memory */ | ||
68 | #ifdef CONFIG_KMEMCHECK | ||
69 | # define SLAB_NOTRACK 0x01000000UL | ||
70 | #else | ||
71 | # define SLAB_NOTRACK 0x00000000UL | ||
72 | #endif | ||
73 | |||
67 | /* The following flags affect the page allocator grouping pages by mobility */ | 74 | /* The following flags affect the page allocator grouping pages by mobility */ |
68 | #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ | 75 | #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ |
69 | #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ | 76 | #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ |