path: root/kernel/kcov.c
author		Dmitry Vyukov <dvyukov@google.com>	2016-03-22 17:27:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-22 18:36:02 -0400
commit		5c9a8750a6409c63a0f01d51a9024861022f6593 (patch)
tree		61c5cd39711d26f755a30a7f0fd52f91c1f56387 /kernel/kcov.c
parent		ade356b99a4187578609f2a91c4d2ed88e4e70dc (diff)
kernel: add kcov code coverage
kcov provides code coverage collection for coverage-guided fuzzing (randomized testing). Coverage-guided fuzzing is a testing technique that uses coverage feedback to determine new interesting inputs to a system. A notable user-space example is AFL (http://lcamtuf.coredump.cx/afl/). However, this technique is not widely used for kernel testing due to missing compiler and kernel support.

kcov does not aim to collect as much coverage as possible. It aims to collect more or less stable coverage that is a function of syscall inputs. To achieve this goal it does not collect coverage in soft/hard interrupts, and instrumentation of some inherently non-deterministic or non-interesting parts of the kernel is disabled (e.g. scheduler, locking).

Currently there is a single coverage collection mode (tracing), but the API anticipates additional collection modes. Initially I also implemented a second mode which exposes coverage in a fixed-size hash table of counters (what Quentin used in his original patch). I've dropped the second mode for simplicity.

This patch adds the necessary support on the kernel side. The complementary compiler support was added in gcc revision 231296.

We've used this support to build the syzkaller system call fuzzer, which has found 90 kernel bugs in just 2 months:
https://github.com/google/syzkaller/wiki/Found-Bugs
We've also found 30+ bugs in our internal systems with syzkaller.

Another (yet unexplored) direction where kcov coverage would greatly help is more traditional "blob mutation": for example, mounting a random blob as a filesystem, or receiving a random blob over the wire.

Why not gcov? A typical fuzzing loop looks as follows: (1) reset coverage, (2) execute a bit of code, (3) collect coverage, repeat. Typical coverage can be just a dozen basic blocks (e.g. an invalid input). In such a context gcov becomes prohibitively expensive, as the reset/collect coverage steps depend on the total number of basic blocks/edges in the program (in the case of the kernel it is about 2M). The cost of kcov depends only on the number of executed basic blocks/edges. On top of that, the kernel requires per-thread coverage because there are always background threads and unrelated processes that also produce coverage. With inlined gcov instrumentation per-thread coverage is not possible.

kcov exposes kernel PCs and control flow to user space, which is insecure. But debugfs should not be mapped as user accessible.

Based on a patch by Quentin Casasnovas.

[akpm@linux-foundation.org: make task_struct.kcov_mode have type `enum kcov_mode']
[akpm@linux-foundation.org: unbreak allmodconfig]
[akpm@linux-foundation.org: follow x86 Makefile layout standards]

Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Cc: syzkaller <syzkaller@googlegroups.com>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Tavis Ormandy <taviso@google.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Kostya Serebryany <kcc@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Kees Cook <keescook@google.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: David Drysdale <drysdale@google.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/kcov.c')
-rw-r--r--	kernel/kcov.c	273
1 file changed, 273 insertions, 0 deletions
diff --git a/kernel/kcov.c b/kernel/kcov.c
new file mode 100644
index 000000000000..3efbee0834a8
--- /dev/null
+++ b/kernel/kcov.c
@@ -0,0 +1,273 @@
#define pr_fmt(fmt) "kcov: " fmt

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time allowed)
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in longs for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!t || in_interrupt())
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts; the paired barrier()/WRITE_ONCE() are in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = _RET_IP_;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just so we don't leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * memory, and that must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have already enabled trace mode
		 * and mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily via KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	if (!debugfs_create_file("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);
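As context for __sanitizer_cov_trace_pc() above: with the complementary gcc support mentioned in the commit message (the -fsanitize-coverage=trace-pc instrumentation added in gcc revision 231296), every basic block of instrumented kernel code conceptually receives a call to that hook, which records _RET_IP_ into the shared buffer. The sketch below is an illustrative approximation, not literal compiler output, and parse_input() is a made-up example function.

/* Source as written by the developer (hypothetical example): */
static int parse_input(int x)
{
	if (x < 0)
		return -EINVAL;
	return x * 2;
}

/* Roughly what the trace-pc instrumentation turns it into: */
static int parse_input_instrumented(int x)
{
	__sanitizer_cov_trace_pc();		/* basic block: function entry */
	if (x < 0) {
		__sanitizer_cov_trace_pc();	/* basic block: error path */
		return -EINVAL;
	}
	__sanitizer_cov_trace_pc();		/* basic block: success path */
	return x * 2;
}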