about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
authorAlexander Potapenko <glider@google.com>2016-03-25 17:21:59 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-03-25 19:37:42 -0400
commit7ed2f9e663854db313f177a511145630e398b402 (patch)
treef9dfba81a688864a4d78689470f624b0a482f545 /include
parente6e8379c876de16c6b78f83b15d5ac32c79cb440 (diff)
mm, kasan: SLAB support
Add KASAN hooks to SLAB allocator. This patch is based on the "mm: kasan: unified support for SLUB and SLAB allocators" patch originally prepared by Dmitry Chernenkov. Signed-off-by: Alexander Potapenko <glider@google.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Andrey Konovalov <adech.fo@gmail.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Konstantin Serebryany <kcc@google.com> Cc: Dmitry Chernenkov <dmitryc@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r--include/linux/kasan.h12
-rw-r--r--include/linux/slab.h6
-rw-r--r--include/linux/slab_def.h14
-rw-r--r--include/linux/slub_def.h11
4 files changed, 43 insertions, 0 deletions
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 0fdc798e3ff7..839f2007a0f9 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -48,6 +48,9 @@ void kasan_unpoison_task_stack(struct task_struct *task);
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
+void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+			unsigned long *flags);
+
 void kasan_poison_slab(struct page *page);
 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
 void kasan_poison_object_data(struct kmem_cache *cache, void *object);
@@ -61,6 +64,11 @@ void kasan_krealloc(const void *object, size_t new_size);
 void kasan_slab_alloc(struct kmem_cache *s, void *object);
 void kasan_slab_free(struct kmem_cache *s, void *object);
 
+struct kasan_cache {
+	int alloc_meta_offset;
+	int free_meta_offset;
+};
+
 int kasan_module_alloc(void *addr, size_t size);
 void kasan_free_shadow(const struct vm_struct *vm);
 
@@ -76,6 +84,10 @@ static inline void kasan_disable_current(void) {}
 static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
+static inline void kasan_cache_create(struct kmem_cache *cache,
+				      size_t *size,
+				      unsigned long *flags) {}
+
 static inline void kasan_poison_slab(struct page *page) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 					void *object) {}
diff --git a/include/linux/slab.h b/include/linux/slab.h
index e4b568738ca3..aa61595a1482 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -92,6 +92,12 @@
 # define SLAB_ACCOUNT		0x00000000UL
 #endif
 
+#ifdef CONFIG_KASAN
+#define SLAB_KASAN		0x08000000UL
+#else
+#define SLAB_KASAN		0x00000000UL
+#endif
+
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
 #define SLAB_TEMPORARY	SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index e878ba35ae91..9edbbf352340 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -76,8 +76,22 @@ struct kmem_cache {
 #ifdef CONFIG_MEMCG
 	struct memcg_cache_params memcg_params;
 #endif
+#ifdef CONFIG_KASAN
+	struct kasan_cache kasan_info;
+#endif
 
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
+static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+				void *x) {
+	void *object = x - (x - page->s_mem) % cache->size;
+	void *last_object = page->s_mem + (cache->num - 1) * cache->size;
+
+	if (unlikely(object > last_object))
+		return last_object;
+	else
+		return object;
+}
+
 #endif	/* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index ac5143f95ee6..665cd0cd18b8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -130,4 +130,15 @@ static inline void *virt_to_obj(struct kmem_cache *s,
 void object_err(struct kmem_cache *s, struct page *page,
 		u8 *object, char *reason);
 
+static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+				void *x) {
+	void *object = x - (x - page_address(page)) % cache->size;
+	void *last_object = page_address(page) +
+				(page->objects - 1) * cache->size;
+	if (unlikely(object > last_object))
+		return last_object;
+	else
+		return object;
+}
+
 #endif /* _LINUX_SLUB_DEF_H */