 arch/x86/Makefile                    |   5 +
 arch/x86/include/asm/kmemcheck.h     |  42 ++
 arch/x86/include/asm/pgtable.h       |   9 +
 arch/x86/include/asm/pgtable_types.h |   4 ++--
 arch/x86/mm/Makefile                 |   2 +
 arch/x86/mm/kmemcheck/Makefile       |   1 +
 arch/x86/mm/kmemcheck/error.c        | 229 +++++++++
 arch/x86/mm/kmemcheck/error.h        |  15 +
 arch/x86/mm/kmemcheck/kmemcheck.c    | 650 ++++++++++++++++
 arch/x86/mm/kmemcheck/opcode.c       | 101 ++++
 arch/x86/mm/kmemcheck/opcode.h       |   9 +
 arch/x86/mm/kmemcheck/pte.c          |  22 ++
 arch/x86/mm/kmemcheck/pte.h          |  10 +
 arch/x86/mm/kmemcheck/shadow.c       | 153 +++++
 arch/x86/mm/kmemcheck/shadow.h       |  16 +
 include/linux/kmemcheck.h            |  17 +
 include/linux/mm_types.h             |   8 +
 init/main.c                          |   1 +
 kernel/sysctl.c                      |  12 +
 19 files changed, 1304 insertions(+), 2 deletions(-)
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index edbd0ca62067..1b68659c41b4 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -81,6 +81,11 @@ ifdef CONFIG_CC_STACKPROTECTOR
         endif
 endif
 
+# Don't unroll struct assignments with kmemcheck enabled
+ifeq ($(CONFIG_KMEMCHECK),y)
+	KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
+endif
+
 # Stackpointer is addressed different for 32 bit and 64 bit x86
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp
diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h
new file mode 100644
index 000000000000..ed01518f297e
--- /dev/null
+++ b/arch/x86/include/asm/kmemcheck.h
@@ -0,0 +1,42 @@
+#ifndef ASM_X86_KMEMCHECK_H
+#define ASM_X86_KMEMCHECK_H
+
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_KMEMCHECK
+bool kmemcheck_active(struct pt_regs *regs);
+
+void kmemcheck_show(struct pt_regs *regs);
+void kmemcheck_hide(struct pt_regs *regs);
+
+bool kmemcheck_fault(struct pt_regs *regs,
+	unsigned long address, unsigned long error_code);
+bool kmemcheck_trap(struct pt_regs *regs);
+#else
+static inline bool kmemcheck_active(struct pt_regs *regs)
+{
+	return false;
+}
+
+static inline void kmemcheck_show(struct pt_regs *regs)
+{
+}
+
+static inline void kmemcheck_hide(struct pt_regs *regs)
+{
+}
+
+static inline bool kmemcheck_fault(struct pt_regs *regs,
+	unsigned long address, unsigned long error_code)
+{
+	return false;
+}
+
+static inline bool kmemcheck_trap(struct pt_regs *regs)
+{
+	return false;
+}
+#endif /* CONFIG_KMEMCHECK */
+
+#endif
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 18ef7ebf2631..c5a08079ad5e 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -317,6 +317,15 @@ static inline int pte_present(pte_t a)
 	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
 }
 
+static inline int pte_hidden(pte_t x)
+{
+#ifdef CONFIG_KMEMCHECK
+	return pte_flags(x) & _PAGE_HIDDEN;
+#else
+	return 0;
+#endif
+}
+
 static inline int pmd_present(pmd_t pmd)
 {
 	return pmd_flags(pmd) & _PAGE_PRESENT;
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 4d258ad76a0f..9b5c92140aab 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -18,7 +18,7 @@
 #define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
 #define _PAGE_BIT_UNUSED1	9	/* available for programmer */
 #define _PAGE_BIT_IOMAP	10	/* flag used to indicate IO mapping */
-#define _PAGE_BIT_UNUSED3	11
+#define _PAGE_BIT_HIDDEN	11	/* hidden by kmemcheck */
 #define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
 #define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
 #define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
@@ -41,7 +41,7 @@
 #define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
 #define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
 #define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
-#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
+#define _PAGE_HIDDEN	(_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
 #define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
 #define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index fdd30d08ab52..eefdeee8a871 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o
 
 obj-$(CONFIG_HIGHMEM)		+= highmem_32.o
 
+obj-$(CONFIG_KMEMCHECK)	+= kmemcheck/
+
 obj-$(CONFIG_MMIOTRACE)	+= mmiotrace.o
 mmiotrace-y			:= kmmio.o pf_in.o mmio-mod.o
 obj-$(CONFIG_MMIOTRACE_TEST)	+= testmmiotrace.o
diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile
new file mode 100644
index 000000000000..4666b7a778be
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/Makefile
@@ -0,0 +1 @@
+obj-y := error.o kmemcheck.o opcode.o pte.o shadow.o
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
new file mode 100644
index 000000000000..5ec9f5a93f47
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/error.c
@@ -0,0 +1,229 @@
+#include <linux/interrupt.h>
+#include <linux/kdebug.h>
+#include <linux/kmemcheck.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/stacktrace.h>
+#include <linux/string.h>
+
+#include "error.h"
+#include "shadow.h"
+
+enum kmemcheck_error_type {
+	KMEMCHECK_ERROR_INVALID_ACCESS,
+	KMEMCHECK_ERROR_BUG,
+};
+
+#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT)
+
+struct kmemcheck_error {
+	enum kmemcheck_error_type type;
+
+	union {
+		/* KMEMCHECK_ERROR_INVALID_ACCESS */
+		struct {
+			/* Kind of access that caused the error */
+			enum kmemcheck_shadow state;
+			/* Address and size of the erroneous read */
+			unsigned long address;
+			unsigned int size;
+		};
+	};
+
+	struct pt_regs regs;
+	struct stack_trace trace;
+	unsigned long trace_entries[32];
+
+	/* We compress it to a char. */
+	unsigned char shadow_copy[SHADOW_COPY_SIZE];
+	unsigned char memory_copy[SHADOW_COPY_SIZE];
+};
+
+/*
+ * Create a ring queue of errors to output. We can't call printk() directly
+ * from the kmemcheck traps, since this may call the console drivers and
+ * result in a recursive fault.
+ */
+static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE];
+static unsigned int error_count;
+static unsigned int error_rd;
+static unsigned int error_wr;
+static unsigned int error_missed_count;
+
+static struct kmemcheck_error *error_next_wr(void)
+{
+	struct kmemcheck_error *e;
+
+	if (error_count == ARRAY_SIZE(error_fifo)) {
+		++error_missed_count;
+		return NULL;
+	}
+
+	e = &error_fifo[error_wr];
+	if (++error_wr == ARRAY_SIZE(error_fifo))
+		error_wr = 0;
+	++error_count;
+	return e;
+}
+
+static struct kmemcheck_error *error_next_rd(void)
+{
+	struct kmemcheck_error *e;
+
+	if (error_count == 0)
+		return NULL;
+
+	e = &error_fifo[error_rd];
+	if (++error_rd == ARRAY_SIZE(error_fifo))
+		error_rd = 0;
+	--error_count;
+	return e;
+}
+
+static void do_wakeup(unsigned long);
+static DECLARE_TASKLET(kmemcheck_tasklet, &do_wakeup, 0);
+
+/*
+ * Save the context of an error report.
+ */
+void kmemcheck_error_save(enum kmemcheck_shadow state,
+	unsigned long address, unsigned int size, struct pt_regs *regs)
+{
+	static unsigned long prev_ip;
+
+	struct kmemcheck_error *e;
+	void *shadow_copy;
+	void *memory_copy;
+
+	/* Don't report several adjacent errors from the same EIP. */
+	if (regs->ip == prev_ip)
+		return;
+	prev_ip = regs->ip;
+
+	e = error_next_wr();
+	if (!e)
+		return;
+
+	e->type = KMEMCHECK_ERROR_INVALID_ACCESS;
+
+	e->state = state;
+	e->address = address;
+	e->size = size;
+
+	/* Save regs */
+	memcpy(&e->regs, regs, sizeof(*regs));
+
+	/* Save stack trace */
+	e->trace.nr_entries = 0;
+	e->trace.entries = e->trace_entries;
+	e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
+	e->trace.skip = 0;
+	save_stack_trace_bp(&e->trace, regs->bp);
+
+	/* Round address down to the nearest SHADOW_COPY_SIZE bytes */
+	shadow_copy = kmemcheck_shadow_lookup(address
+		& ~(SHADOW_COPY_SIZE - 1));
+	BUG_ON(!shadow_copy);
+
+	memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE);
+
+	kmemcheck_show_addr(address);
+	memory_copy = (void *) (address & ~(SHADOW_COPY_SIZE - 1));
+	memcpy(e->memory_copy, memory_copy, SHADOW_COPY_SIZE);
+	kmemcheck_hide_addr(address);
+
+	tasklet_hi_schedule_first(&kmemcheck_tasklet);
+}
+
+/*
+ * Save the context of a kmemcheck bug.
+ */
+void kmemcheck_error_save_bug(struct pt_regs *regs)
+{
+	struct kmemcheck_error *e;
+
+	e = error_next_wr();
+	if (!e)
+		return;
+
+	e->type = KMEMCHECK_ERROR_BUG;
+
+	memcpy(&e->regs, regs, sizeof(*regs));
+
+	e->trace.nr_entries = 0;
+	e->trace.entries = e->trace_entries;
+	e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
+	e->trace.skip = 1;
+	save_stack_trace(&e->trace);
+
+	tasklet_hi_schedule_first(&kmemcheck_tasklet);
+}
+
+void kmemcheck_error_recall(void)
+{
+	static const char *desc[] = {
+		[KMEMCHECK_SHADOW_UNALLOCATED]		= "unallocated",
+		[KMEMCHECK_SHADOW_UNINITIALIZED]	= "uninitialized",
+		[KMEMCHECK_SHADOW_INITIALIZED]		= "initialized",
+		[KMEMCHECK_SHADOW_FREED]		= "freed",
+	};
+
+	static const char short_desc[] = {
+		[KMEMCHECK_SHADOW_UNALLOCATED]		= 'a',
+		[KMEMCHECK_SHADOW_UNINITIALIZED]	= 'u',
+		[KMEMCHECK_SHADOW_INITIALIZED]		= 'i',
+		[KMEMCHECK_SHADOW_FREED]		= 'f',
+	};
+
+	struct kmemcheck_error *e;
+	unsigned int i;
+
+	e = error_next_rd();
+	if (!e)
+		return;
+
+	switch (e->type) {
+	case KMEMCHECK_ERROR_INVALID_ACCESS:
+		printk(KERN_ERR  "WARNING: kmemcheck: Caught %d-bit read "
+			"from %s memory (%p)\n",
+			8 * e->size, e->state < ARRAY_SIZE(desc) ?
+				desc[e->state] : "(invalid shadow state)",
+			(void *) e->address);
+
+		printk(KERN_INFO);
+		for (i = 0; i < SHADOW_COPY_SIZE; ++i)
+			printk("%02x", e->memory_copy[i]);
+		printk("\n");
+
+		printk(KERN_INFO);
+		for (i = 0; i < SHADOW_COPY_SIZE; ++i) {
+			if (e->shadow_copy[i] < ARRAY_SIZE(short_desc))
+				printk(" %c", short_desc[e->shadow_copy[i]]);
+			else
+				printk(" ?");
+		}
+		printk("\n");
+		printk(KERN_INFO "%*c\n", 2 + 2
+			* (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^');
+		break;
+	case KMEMCHECK_ERROR_BUG:
+		printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n");
+		break;
+	}
+
+	__show_regs(&e->regs, 1);
+	print_stack_trace(&e->trace, 0);
+}
+
+static void do_wakeup(unsigned long data)
+{
+	while (error_count > 0)
+		kmemcheck_error_recall();
+
+	if (error_missed_count > 0) {
+		printk(KERN_WARNING "kmemcheck: Lost %d error reports because "
+			"the queue was too small\n", error_missed_count);
+		error_missed_count = 0;
+	}
+}
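
For orientation, this is roughly what an invalid-access report looks like once kmemcheck_error_recall() drains the queue (illustrative values only, assuming a 32-byte SHADOW_COPY_SIZE; the layout follows the format strings above): the warning line, the raw memory bytes, one shadow-state letter per byte ('a'/'u'/'i'/'f'), and a caret under the offending byte:

    WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (f6f6e1a4)
    8067342ac0c0c0c0000000000000000000000000000000000000000000000000
     i i i i u u u u u u u u u u u u u u u u u u u u u u u u u u u u
             ^
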
diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h
new file mode 100644
index 000000000000..0efc2e8d0a20
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/error.h
@@ -0,0 +1,15 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__ERROR_H
+#define ARCH__X86__MM__KMEMCHECK__ERROR_H
+
+#include <linux/ptrace.h>
+
+#include "shadow.h"
+
+void kmemcheck_error_save(enum kmemcheck_shadow state,
+	unsigned long address, unsigned int size, struct pt_regs *regs);
+
+void kmemcheck_error_save_bug(struct pt_regs *regs);
+
+void kmemcheck_error_recall(void);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
new file mode 100644
index 000000000000..9de7d8f6b6e1
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -0,0 +1,650 @@
+/**
+ * kmemcheck - a heavyweight memory checker for the linux kernel
+ * Copyright (C) 2007, 2008  Vegard Nossum <vegardno@ifi.uio.no>
+ * (With a lot of help from Ingo Molnar and Pekka Enberg.)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/page-flags.h>
+#include <linux/percpu.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+#include <asm/kmemcheck.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+#include "error.h"
+#include "opcode.h"
+#include "pte.h"
+#include "shadow.h"
+
+#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT
+# define KMEMCHECK_ENABLED 0
+#endif
+
+#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT
+# define KMEMCHECK_ENABLED 1
+#endif
+
+#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT
+# define KMEMCHECK_ENABLED 2
+#endif
+
+int kmemcheck_enabled = KMEMCHECK_ENABLED;
+
+int __init kmemcheck_init(void)
+{
+	printk(KERN_INFO "kmemcheck: \"Bugs, beware!\"\n");
+
+#ifdef CONFIG_SMP
+	/*
+	 * Limit SMP to use a single CPU. We rely on the fact that this code
+	 * runs before SMP is set up.
+	 */
+	if (setup_max_cpus > 1) {
+		printk(KERN_INFO
+			"kmemcheck: Limiting number of CPUs to 1.\n");
+		setup_max_cpus = 1;
+	}
+#endif
+
+	return 0;
+}
+
+early_initcall(kmemcheck_init);
+
+/*
+ * We need to parse the kmemcheck= option before any memory is allocated.
+ */
+static int __init param_kmemcheck(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	sscanf(str, "%d", &kmemcheck_enabled);
+	return 0;
+}
+
+early_param("kmemcheck", param_kmemcheck);
+
+int kmemcheck_show_addr(unsigned long address)
+{
+	pte_t *pte;
+
+	pte = kmemcheck_pte_lookup(address);
+	if (!pte)
+		return 0;
+
+	set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+	__flush_tlb_one(address);
+	return 1;
+}
+
+int kmemcheck_hide_addr(unsigned long address)
+{
+	pte_t *pte;
+
+	pte = kmemcheck_pte_lookup(address);
+	if (!pte)
+		return 0;
+
+	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+	__flush_tlb_one(address);
+	return 1;
+}
+
+struct kmemcheck_context {
+	bool busy;
+	int balance;
+
+	/*
+	 * There can be at most two memory operands to an instruction, but
+	 * each address can cross a page boundary -- so we may need up to
+	 * four addresses that must be hidden/revealed for each fault.
+	 */
+	unsigned long addr[4];
+	unsigned long n_addrs;
+	unsigned long flags;
+
+	/* Data size of the instruction that caused a fault. */
+	unsigned int size;
+};
+
+static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context);
+
+bool kmemcheck_active(struct pt_regs *regs)
+{
+	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+	return data->balance > 0;
+}
+
+/* Save an address that needs to be shown/hidden */
+static void kmemcheck_save_addr(unsigned long addr)
+{
+	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+	BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr));
+	data->addr[data->n_addrs++] = addr;
+}
+
+static unsigned int kmemcheck_show_all(void)
+{
+	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+	unsigned int i;
+	unsigned int n;
+
+	n = 0;
+	for (i = 0; i < data->n_addrs; ++i)
+		n += kmemcheck_show_addr(data->addr[i]);
+
+	return n;
+}
+
+static unsigned int kmemcheck_hide_all(void)
+{
+	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+	unsigned int i;
+	unsigned int n;
+
+	n = 0;
+	for (i = 0; i < data->n_addrs; ++i)
+		n += kmemcheck_hide_addr(data->addr[i]);
+
+	return n;
+}
+
+/*
+ * Called from the #PF handler.
+ */
+void kmemcheck_show(struct pt_regs *regs)
+{
+	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+	BUG_ON(!irqs_disabled());
+
+	if (unlikely(data->balance != 0)) {
+		kmemcheck_show_all();
+		kmemcheck_error_save_bug(regs);
+		data->balance = 0;
+		return;
+	}
+
+	/*
+	 * None of the addresses actually belonged to kmemcheck. Note that
+	 * this is not an error.
+	 */
+	if (kmemcheck_show_all() == 0)
+		return;
+
+	++data->balance;
+
+	/*
+	 * The IF needs to be cleared as well, so that the faulting
+	 * instruction can run "uninterrupted". Otherwise, we might take
+	 * an interrupt and start executing that before we've had a chance
+	 * to hide the page again.
+	 *
+	 * NOTE: In the rare case of multiple faults, we must not override
+	 * the original flags:
+	 */
+	if (!(regs->flags & X86_EFLAGS_TF))
+		data->flags = regs->flags;
+
+	regs->flags |= X86_EFLAGS_TF;
+	regs->flags &= ~X86_EFLAGS_IF;
+}
+
+/*
+ * Called from the #DB handler.
+ */
+void kmemcheck_hide(struct pt_regs *regs)
+{
+	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+	int n;
+
+	BUG_ON(!irqs_disabled());
+
+	if (data->balance == 0)
+		return;
+
+	if (unlikely(data->balance != 1)) {
+		kmemcheck_show_all();
+		kmemcheck_error_save_bug(regs);
+		data->n_addrs = 0;
+		data->balance = 0;
+
+		if (!(data->flags & X86_EFLAGS_TF))
+			regs->flags &= ~X86_EFLAGS_TF;
+		if (data->flags & X86_EFLAGS_IF)
+			regs->flags |= X86_EFLAGS_IF;
+		return;
+	}
+
+	if (kmemcheck_enabled)
+		n = kmemcheck_hide_all();
+	else
+		n = kmemcheck_show_all();
+
+	if (n == 0)
+		return;
+
+	--data->balance;
+
+	data->n_addrs = 0;
+
+	if (!(data->flags & X86_EFLAGS_TF))
+		regs->flags &= ~X86_EFLAGS_TF;
+	if (data->flags & X86_EFLAGS_IF)
+		regs->flags |= X86_EFLAGS_IF;
+}
+
+void kmemcheck_show_pages(struct page *p, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 0; i < n; ++i) {
+		unsigned long address;
+		pte_t *pte;
+		unsigned int level;
+
+		address = (unsigned long) page_address(&p[i]);
+		pte = lookup_address(address, &level);
+		BUG_ON(!pte);
+		BUG_ON(level != PG_LEVEL_4K);
+
+		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN));
+		__flush_tlb_one(address);
+	}
+}
+
+bool kmemcheck_page_is_tracked(struct page *p)
+{
+	/* This will also check the "hidden" flag of the PTE. */
+	return kmemcheck_pte_lookup((unsigned long) page_address(p));
+}
+
+void kmemcheck_hide_pages(struct page *p, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 0; i < n; ++i) {
+		unsigned long address;
+		pte_t *pte;
+		unsigned int level;
+
+		address = (unsigned long) page_address(&p[i]);
+		pte = lookup_address(address, &level);
+		BUG_ON(!pte);
+		BUG_ON(level != PG_LEVEL_4K);
+
+		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+		set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN));
+		__flush_tlb_one(address);
+	}
+}
+
+/* Access may NOT cross page boundary */
+static void kmemcheck_read_strict(struct pt_regs *regs,
+	unsigned long addr, unsigned int size)
+{
+	void *shadow;
+	enum kmemcheck_shadow status;
+
+	shadow = kmemcheck_shadow_lookup(addr);
+	if (!shadow)
+		return;
+
+	kmemcheck_save_addr(addr);
+	status = kmemcheck_shadow_test(shadow, size);
+	if (status == KMEMCHECK_SHADOW_INITIALIZED)
+		return;
+
+	if (kmemcheck_enabled)
+		kmemcheck_error_save(status, addr, size, regs);
+
+	if (kmemcheck_enabled == 2)
+		kmemcheck_enabled = 0;
+
+	/* Don't warn about it again. */
+	kmemcheck_shadow_set(shadow, size);
+}
+
+/* Access may cross page boundary */
+static void kmemcheck_read(struct pt_regs *regs,
+	unsigned long addr, unsigned int size)
+{
+	unsigned long page = addr & PAGE_MASK;
+	unsigned long next_addr = addr + size - 1;
+	unsigned long next_page = next_addr & PAGE_MASK;
+
+	if (likely(page == next_page)) {
+		kmemcheck_read_strict(regs, addr, size);
+		return;
+	}
+
+	/*
+	 * What we do is basically to split the access across the
+	 * two pages and handle each part separately. Yes, this means
+	 * that we may now see reads that are 3 + 5 bytes, for
+	 * example (and if both are uninitialized, there will be two
+	 * reports), but it makes the code a lot simpler.
+	 */
+	kmemcheck_read_strict(regs, addr, next_page - addr);
+	kmemcheck_read_strict(regs, next_page, next_addr - next_page);
+}
+
+static void kmemcheck_write_strict(struct pt_regs *regs,
+	unsigned long addr, unsigned int size)
+{
+	void *shadow;
+
+	shadow = kmemcheck_shadow_lookup(addr);
+	if (!shadow)
+		return;
+
+	kmemcheck_save_addr(addr);
+	kmemcheck_shadow_set(shadow, size);
+}
+
+static void kmemcheck_write(struct pt_regs *regs,
+	unsigned long addr, unsigned int size)
+{
+	unsigned long page = addr & PAGE_MASK;
+	unsigned long next_addr = addr + size - 1;
+	unsigned long next_page = next_addr & PAGE_MASK;
+
+	if (likely(page == next_page)) {
+		kmemcheck_write_strict(regs, addr, size);
+		return;
+	}
+
+	/* See comment in kmemcheck_read(). */
+	kmemcheck_write_strict(regs, addr, next_page - addr);
+	kmemcheck_write_strict(regs, next_page, next_addr - next_page);
+}
+
+/*
+ * Copying is hard. We have two addresses, each of which may be split across
+ * a page (and each page will have different shadow addresses).
+ */
+static void kmemcheck_copy(struct pt_regs *regs,
+	unsigned long src_addr, unsigned long dst_addr, unsigned int size)
+{
+	uint8_t shadow[8];
+	enum kmemcheck_shadow status;
+
+	unsigned long page;
+	unsigned long next_addr;
+	unsigned long next_page;
+
+	uint8_t *x;
+	unsigned int i;
+	unsigned int n;
+
+	BUG_ON(size > sizeof(shadow));
+
+	page = src_addr & PAGE_MASK;
+	next_addr = src_addr + size - 1;
+	next_page = next_addr & PAGE_MASK;
+
+	if (likely(page == next_page)) {
+		/* Same page */
+		x = kmemcheck_shadow_lookup(src_addr);
+		if (x) {
+			kmemcheck_save_addr(src_addr);
+			for (i = 0; i < size; ++i)
+				shadow[i] = x[i];
+		} else {
+			for (i = 0; i < size; ++i)
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+		}
+	} else {
+		n = next_page - src_addr;
+		BUG_ON(n > sizeof(shadow));
+
+		/* First page */
+		x = kmemcheck_shadow_lookup(src_addr);
+		if (x) {
+			kmemcheck_save_addr(src_addr);
+			for (i = 0; i < n; ++i)
+				shadow[i] = x[i];
+		} else {
+			/* Not tracked */
+			for (i = 0; i < n; ++i)
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+		}
+
+		/* Second page */
+		x = kmemcheck_shadow_lookup(next_page);
+		if (x) {
+			kmemcheck_save_addr(next_page);
+			for (i = n; i < size; ++i)
+				shadow[i] = x[i - n];
+		} else {
+			/* Not tracked */
+			for (i = n; i < size; ++i)
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+		}
+	}
+
+	page = dst_addr & PAGE_MASK;
+	next_addr = dst_addr + size - 1;
+	next_page = next_addr & PAGE_MASK;
+
+	if (likely(page == next_page)) {
+		/* Same page */
+		x = kmemcheck_shadow_lookup(dst_addr);
+		if (x) {
+			kmemcheck_save_addr(dst_addr);
+			for (i = 0; i < size; ++i) {
+				x[i] = shadow[i];
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+			}
+		}
+	} else {
+		n = next_page - dst_addr;
+		BUG_ON(n > sizeof(shadow));
+
+		/* First page */
+		x = kmemcheck_shadow_lookup(dst_addr);
+		if (x) {
+			kmemcheck_save_addr(dst_addr);
+			for (i = 0; i < n; ++i) {
+				x[i] = shadow[i];
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+			}
+		}
+
+		/* Second page */
+		x = kmemcheck_shadow_lookup(next_page);
+		if (x) {
+			kmemcheck_save_addr(next_page);
+			for (i = n; i < size; ++i) {
+				x[i - n] = shadow[i];
+				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+			}
+		}
+	}
+
+	status = kmemcheck_shadow_test(shadow, size);
+	if (status == KMEMCHECK_SHADOW_INITIALIZED)
+		return;
+
+	if (kmemcheck_enabled)
+		kmemcheck_error_save(status, src_addr, size, regs);
+
+	if (kmemcheck_enabled == 2)
+		kmemcheck_enabled = 0;
+}
+
+enum kmemcheck_method {
+	KMEMCHECK_READ,
+	KMEMCHECK_WRITE,
+};
+
+static void kmemcheck_access(struct pt_regs *regs,
+	unsigned long fallback_address, enum kmemcheck_method fallback_method)
+{
+	const uint8_t *insn;
+	const uint8_t *insn_primary;
+	unsigned int size;
+
+	struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+	/* Recursive fault -- ouch. */
+	if (data->busy) {
+		kmemcheck_show_addr(fallback_address);
+		kmemcheck_error_save_bug(regs);
+		return;
+	}
+
+	data->busy = true;
+
+	insn = (const uint8_t *) regs->ip;
+	insn_primary = kmemcheck_opcode_get_primary(insn);
+
+	kmemcheck_opcode_decode(insn, &size);
+
+	switch (insn_primary[0]) {
+#ifdef CONFIG_KMEMCHECK_BITOPS_OK
+		/* AND, OR, XOR */
+		/*
+		 * Unfortunately, these instructions have to be excluded from
+		 * our regular checking since they access only some (and not
+		 * all) bits. This clears out "bogus" bitfield-access warnings.
+		 */
+	case 0x80:
+	case 0x81:
+	case 0x82:
+	case 0x83:
+		switch ((insn_primary[1] >> 3) & 7) {
+			/* OR */
+		case 1:
+			/* AND */
+		case 4:
+			/* XOR */
+		case 6:
+			kmemcheck_write(regs, fallback_address, size);
+			goto out;
+
+			/* ADD */
+		case 0:
+			/* ADC */
+		case 2:
+			/* SBB */
+		case 3:
+			/* SUB */
+		case 5:
+			/* CMP */
+		case 7:
+			break;
+		}
+		break;
+#endif
+
+		/* MOVS, MOVSB, MOVSW, MOVSD */
+	case 0xa4:
+	case 0xa5:
+		/*
+		 * These instructions are special because they take two
+		 * addresses, but we only get one page fault.
+		 */
+		kmemcheck_copy(regs, regs->si, regs->di, size);
+		goto out;
+
+		/* CMPS, CMPSB, CMPSW, CMPSD */
+	case 0xa6:
+	case 0xa7:
+		kmemcheck_read(regs, regs->si, size);
+		kmemcheck_read(regs, regs->di, size);
+		goto out;
+	}
+
+	/*
+	 * If the opcode isn't special in any way, we use the data from the
+	 * page fault handler to determine the address and type of memory
+	 * access.
+	 */
+	switch (fallback_method) {
+	case KMEMCHECK_READ:
+		kmemcheck_read(regs, fallback_address, size);
+		goto out;
+	case KMEMCHECK_WRITE:
+		kmemcheck_write(regs, fallback_address, size);
+		goto out;
+	}
+
+out:
+	data->busy = false;
+}
+
+bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
+	unsigned long error_code)
+{
+	pte_t *pte;
+	unsigned int level;
+
+	/*
+	 * XXX: Is it safe to assume that memory accesses from virtual 86
+	 * mode or non-kernel code segments will _never_ access kernel
+	 * memory (e.g. tracked pages)? For now, we need this to avoid
+	 * invoking kmemcheck for PnP BIOS calls.
+	 */
+	if (regs->flags & X86_VM_MASK)
+		return false;
+	if (regs->cs != __KERNEL_CS)
+		return false;
+
+	pte = lookup_address(address, &level);
+	if (!pte)
+		return false;
+	if (level != PG_LEVEL_4K)
+		return false;
+	if (!pte_hidden(*pte))
+		return false;
+
+	if (error_code & 2)
+		kmemcheck_access(regs, address, KMEMCHECK_WRITE);
+	else
+		kmemcheck_access(regs, address, KMEMCHECK_READ);
+
+	kmemcheck_show(regs);
+	return true;
+}
+
+bool kmemcheck_trap(struct pt_regs *regs)
+{
+	if (!kmemcheck_active(regs))
+		return false;
+
+	/* We're done. */
+	kmemcheck_hide(regs);
+	return true;
+}
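
The hooks above are not called from anywhere yet; a later patch in the series wires them into the x86 trap handlers. Schematically, this is the intended shape of the call sites (a sketch only, not part of this diff; the guard conditions in the real arch/x86/mm/fault.c differ slightly):

	/* do_page_fault(): give kmemcheck first refusal on kernel faults. */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);	/* nested fault: re-hide pages first */
	if (!(error_code & (PF_RSVD | PF_USER | PF_PROT)) &&
	    kmemcheck_fault(regs, address, error_code))
		return;			/* handled; a single-step trap follows */

	/* do_debug(): the #DB trap after the faulting instruction retires. */
	if (kmemcheck_trap(regs))
		return;			/* pages hidden again, resume normally */
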
diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c
new file mode 100644
index 000000000000..a4100b6e783a
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/opcode.c
@@ -0,0 +1,101 @@
+#include <linux/types.h>
+
+#include "opcode.h"
+
+static bool opcode_is_prefix(uint8_t b)
+{
+	return
+		/* Group 1 */
+		b == 0xf0 || b == 0xf2 || b == 0xf3
+		/* Group 2 */
+		|| b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26
+		|| b == 0x64 || b == 0x65 || b == 0x2e || b == 0x3e
+		/* Group 3 */
+		|| b == 0x66
+		/* Group 4 */
+		|| b == 0x67;
+}
+
+static bool opcode_is_rex_prefix(uint8_t b)
+{
+	return (b & 0xf0) == 0x40;
+}
+
+#define REX_W (1 << 3)
+
+/*
+ * This is a VERY crude opcode decoder. We only need to find the size of the
+ * load/store that caused our #PF and this should work for all the opcodes
+ * that we care about. Moreover, the ones who invented this instruction set
+ * should be shot.
+ */
+void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size)
+{
+	/* Default operand size */
+	int operand_size_override = 4;
+
+	/* prefixes */
+	for (; opcode_is_prefix(*op); ++op) {
+		if (*op == 0x66)
+			operand_size_override = 2;
+	}
+
+#ifdef CONFIG_X86_64
+	/* REX prefix */
+	if (opcode_is_rex_prefix(*op)) {
+		uint8_t rex = *op;
+
+		++op;
+		if (rex & REX_W) {
+			switch (*op) {
+			case 0x63:
+				*size = 4;
+				return;
+			case 0x0f:
+				++op;
+
+				switch (*op) {
+				case 0xb6:
+				case 0xbe:
+					*size = 1;
+					return;
+				case 0xb7:
+				case 0xbf:
+					*size = 2;
+					return;
+				}
+
+				break;
+			}
+
+			*size = 8;
+			return;
+		}
+	}
+#endif
+
+	/* escape opcode */
+	if (*op == 0x0f) {
+		++op;
+
+		/*
+		 * This is move with zero-extend and sign-extend, respectively;
+		 * we don't have to think about 0xb6/0xbe, because this is
+		 * already handled in the conditional below.
+		 */
+		if (*op == 0xb7 || *op == 0xbf)
+			operand_size_override = 2;
+	}
+
+	*size = (*op & 1) ? operand_size_override : 1;
+}
+
+const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op)
+{
+	/* skip prefixes */
+	while (opcode_is_prefix(*op))
+		++op;
+	if (opcode_is_rex_prefix(*op))
+		++op;
+	return op;
+}
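
The decoder is self-contained, so it can be sanity-checked in user space by swapping <linux/types.h> for <stdint.h>/<stdbool.h> and defining CONFIG_X86_64 by hand. A minimal, hypothetical harness (not part of this patch):

	#include <stdint.h>
	#include <stdio.h>

	void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size);

	int main(void)
	{
		const uint8_t mov8[]  = { 0x88, 0x03 };		/* mov %al,(%ebx) */
		const uint8_t mov16[] = { 0x66, 0x89, 0x03 };	/* mov %ax,(%ebx) */
		const uint8_t mov64[] = { 0x48, 0x89, 0x03 };	/* mov %rax,(%rbx) */
		unsigned int size;

		kmemcheck_opcode_decode(mov8, &size);
		printf("%u\n", size);	/* expect 1: even opcode = byte op */
		kmemcheck_opcode_decode(mov16, &size);
		printf("%u\n", size);	/* expect 2: 0x66 operand-size prefix */
		kmemcheck_opcode_decode(mov64, &size);
		printf("%u\n", size);	/* expect 8 with CONFIG_X86_64 (REX.W) */
		return 0;
	}
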
diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h
new file mode 100644
index 000000000000..6956aad66b5b
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/opcode.h
@@ -0,0 +1,9 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H
+#define ARCH__X86__MM__KMEMCHECK__OPCODE_H
+
+#include <linux/types.h>
+
+void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size);
+const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c
new file mode 100644
index 000000000000..4ead26eeaf96
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/pte.c
@@ -0,0 +1,22 @@
+#include <linux/mm.h>
+
+#include <asm/pgtable.h>
+
+#include "pte.h"
+
+pte_t *kmemcheck_pte_lookup(unsigned long address)
+{
+	pte_t *pte;
+	unsigned int level;
+
+	pte = lookup_address(address, &level);
+	if (!pte)
+		return NULL;
+	if (level != PG_LEVEL_4K)
+		return NULL;
+	if (!pte_hidden(*pte))
+		return NULL;
+
+	return pte;
+}
+
diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h
new file mode 100644
index 000000000000..9f5966456492
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/pte.h
@@ -0,0 +1,10 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__PTE_H
+#define ARCH__X86__MM__KMEMCHECK__PTE_H
+
+#include <linux/mm.h>
+
+#include <asm/pgtable.h>
+
+pte_t *kmemcheck_pte_lookup(unsigned long address);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c
new file mode 100644
index 000000000000..5544d3600877
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/shadow.c
@@ -0,0 +1,153 @@
+#include <linux/kmemcheck.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include "pte.h"
+#include "shadow.h"
+
+/*
+ * Return the shadow address for the given address. Returns NULL if the
+ * address is not tracked.
+ *
+ * We need to be extremely careful not to follow any invalid pointers,
+ * because this function can be called for *any* possible address.
+ */
+void *kmemcheck_shadow_lookup(unsigned long address)
+{
+	pte_t *pte;
+	struct page *page;
+
+	if (!virt_addr_valid(address))
+		return NULL;
+
+	pte = kmemcheck_pte_lookup(address);
+	if (!pte)
+		return NULL;
+
+	page = virt_to_page(address);
+	if (!page->shadow)
+		return NULL;
+	return page->shadow + (address & (PAGE_SIZE - 1));
+}
+
+static void mark_shadow(void *address, unsigned int n,
+	enum kmemcheck_shadow status)
+{
+	unsigned long addr = (unsigned long) address;
+	unsigned long last_addr = addr + n - 1;
+	unsigned long page = addr & PAGE_MASK;
+	unsigned long last_page = last_addr & PAGE_MASK;
+	unsigned int first_n;
+	void *shadow;
+
+	/* If the memory range crosses a page boundary, stop there. */
+	if (page == last_page)
+		first_n = n;
+	else
+		first_n = page + PAGE_SIZE - addr;
+
+	shadow = kmemcheck_shadow_lookup(addr);
+	if (shadow)
+		memset(shadow, status, first_n);
+
+	addr += first_n;
+	n -= first_n;
+
+	/* Do full-page memset()s. */
+	while (n >= PAGE_SIZE) {
+		shadow = kmemcheck_shadow_lookup(addr);
+		if (shadow)
+			memset(shadow, status, PAGE_SIZE);
+
+		addr += PAGE_SIZE;
+		n -= PAGE_SIZE;
+	}
+
+	/* Do the remaining page, if any. */
+	if (n > 0) {
+		shadow = kmemcheck_shadow_lookup(addr);
+		if (shadow)
+			memset(shadow, status, n);
+	}
+}
+
+void kmemcheck_mark_unallocated(void *address, unsigned int n)
+{
+	mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED);
+}
+
+void kmemcheck_mark_uninitialized(void *address, unsigned int n)
+{
+	mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED);
+}
+
+/*
+ * Fill the shadow memory of the given address such that the memory at that
+ * address is marked as being initialized.
+ */
+void kmemcheck_mark_initialized(void *address, unsigned int n)
+{
+	mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED);
+}
+EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized);
+
+void kmemcheck_mark_freed(void *address, unsigned int n)
+{
+	mark_shadow(address, n, KMEMCHECK_SHADOW_FREED);
+}
+
+void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 0; i < n; ++i)
+		kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE);
+}
+
+void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 0; i < n; ++i)
+		kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
+}
+
+enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
+{
+	uint8_t *x;
+	unsigned int i;
+
+	x = shadow;
+
+#ifdef CONFIG_KMEMCHECK_PARTIAL_OK
+	/*
+	 * Make sure _some_ bytes are initialized. Gcc frequently generates
+	 * code to access neighboring bytes.
+	 */
+	for (i = 0; i < size; ++i) {
+		if (x[i] == KMEMCHECK_SHADOW_INITIALIZED)
+			return x[i];
+	}
+#else
+	/* All bytes must be initialized. */
+	for (i = 0; i < size; ++i) {
+		if (x[i] != KMEMCHECK_SHADOW_INITIALIZED)
+			return x[i];
+	}
+#endif
+
+	return x[0];
+}
+
+void kmemcheck_shadow_set(void *shadow, unsigned int size)
+{
+	uint8_t *x;
+	unsigned int i;
+
+	x = shadow;
+	for (i = 0; i < size; ++i)
+		x[i] = KMEMCHECK_SHADOW_INITIALIZED;
+}
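
The mark_*() helpers are driven by the allocators, which are hooked up later in the series. The intended lifecycle of a tracked object, as a sketch (the allocator call sites and the obj/size names are assumptions here, not part of this diff):

	kmemcheck_mark_unallocated(addr, PAGE_SIZE);	/* fresh tracked page */
	kmemcheck_mark_uninitialized(obj, size);	/* e.g. on kmalloc() */
	/* writes trapped by the #PF machinery flip bytes to initialized,
	 * or the allocator marks the whole object for zeroed allocations: */
	kmemcheck_mark_initialized(obj, size);
	kmemcheck_mark_freed(obj, size);		/* e.g. on kfree(): later reads warn */
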
diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h
new file mode 100644
index 000000000000..af46d9ab9d86
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/shadow.h
@@ -0,0 +1,16 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__SHADOW_H
+#define ARCH__X86__MM__KMEMCHECK__SHADOW_H
+
+enum kmemcheck_shadow {
+	KMEMCHECK_SHADOW_UNALLOCATED,
+	KMEMCHECK_SHADOW_UNINITIALIZED,
+	KMEMCHECK_SHADOW_INITIALIZED,
+	KMEMCHECK_SHADOW_FREED,
+};
+
+void *kmemcheck_shadow_lookup(unsigned long address);
+
+enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size);
+void kmemcheck_shadow_set(void *shadow, unsigned int size);
+
+#endif
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
new file mode 100644
index 000000000000..39480c91b2f9
--- /dev/null
+++ b/include/linux/kmemcheck.h
@@ -0,0 +1,17 @@
+#ifndef LINUX_KMEMCHECK_H
+#define LINUX_KMEMCHECK_H
+
+#include <linux/mm_types.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KMEMCHECK
+extern int kmemcheck_enabled;
+
+int kmemcheck_show_addr(unsigned long address);
+int kmemcheck_hide_addr(unsigned long address);
+#else
+#define kmemcheck_enabled 0
+
+#endif /* CONFIG_KMEMCHECK */
+
+#endif /* LINUX_KMEMCHECK_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0e80e26ecf21..0042090a4d70 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -98,6 +98,14 @@ struct page {
 #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
 	unsigned long debug_flags;	/* Use atomic bitops on this */
 #endif
+
+#ifdef CONFIG_KMEMCHECK
+	/*
+	 * kmemcheck wants to track the status of each byte in a page; this
+	 * is a pointer to such a status block. NULL if not tracked.
+	 */
+	void *shadow;
+#endif
 };
 
 /*
diff --git a/init/main.c b/init/main.c
index 5616661eac01..e3c335e47cd2 100644
--- a/init/main.c
+++ b/init/main.c
@@ -65,6 +65,7 @@
 #include <linux/idr.h>
 #include <linux/ftrace.h>
 #include <linux/async.h>
+#include <linux/kmemcheck.h>
 #include <linux/kmemtrace.h>
 #include <trace/boot.h>
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ce664f98e3fb..9ef80bba3509 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -27,6 +27,7 @@
 #include <linux/security.h>
 #include <linux/ctype.h>
 #include <linux/utsname.h>
+#include <linux/kmemcheck.h>
 #include <linux/smp_lock.h>
 #include <linux/fs.h>
 #include <linux/init.h>
@@ -959,6 +960,17 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= &proc_dointvec,
 	},
 #endif
+#ifdef CONFIG_KMEMCHECK
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "kmemcheck",
+		.data		= &kmemcheck_enabled,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
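
With the table entry in place, kmemcheck can be toggled at runtime through /proc/sys/kernel/kmemcheck; the values mirror the KMEMCHECK_ENABLED defaults above (0 = disabled, 1 = enabled, 2 = one-shot). A minimal sketch in C, equivalent to echoing a digit into the file:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/kmemcheck", "w");

		if (!f)
			return 1;
		fputs("0\n", f);	/* 0=off, 1=on, 2=one-shot */
		return fclose(f) != 0;
	}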