author     Seth Jennings <sjenning@redhat.com>  2014-12-16 12:58:19 -0500
committer  Jiri Kosina <jkosina@suse.cz>  2014-12-22 09:40:49 -0500
commit     b700e7f03df5d92f85fa5247fe1f557528d3363d (patch)
tree       d6da8186d1bd9c42bbd5db9f23deeb1e47bb6dec /kernel/livepatch
parent     c5f4546593e9911800f0926c1090959b58bc5c93 (diff)
livepatch: kernel: add support for live patching
This commit introduces code for the live patching core. It implements an
ftrace-based mechanism and kernel interface for doing live patching of
kernel and kernel module functions.

It represents the greatest common functionality set between kpatch and
kgraft and can accept patches built using either method.

This first version does not implement any consistency mechanism that
ensures that old and new code do not run together. In practice, ~90% of
CVEs are safe to apply in this way, since they simply add a conditional
check. However, any function change that can not execute safely with the
old version of the function can _not_ be safely applied in this version.

[ jkosina@suse.cz: due to the number of contributions that got folded into
  this original patch from Seth Jennings, add SUSE's copyright as well, as
  discussed via e-mail ]

Signed-off-by: Seth Jennings <sjenning@redhat.com>
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Reviewed-by: Petr Mladek <pmladek@suse.cz>
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Miroslav Benes <mbenes@suse.cz>
Signed-off-by: Petr Mladek <pmladek@suse.cz>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'kernel/livepatch')
-rw-r--r--  kernel/livepatch/Kconfig    18
-rw-r--r--  kernel/livepatch/Makefile    3
-rw-r--r--  kernel/livepatch/core.c    930
3 files changed, 951 insertions, 0 deletions
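
For orientation, the sketch below shows what a minimal kernel "patch" module
built against this interface could look like. It is illustrative only and not
part of this commit: the patched function (cmdline_proc_show), its replacement,
and the module init function are hypothetical, and the initializers assume the
klp_func/klp_object/klp_patch layout and the klp_register_patch()/
klp_enable_patch() entry points introduced by this patch. A NULL klp_object
name means the object is vmlinux.

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/seq_file.h>
    #include <linux/livepatch.h>

    /* hypothetical replacement for the vmlinux function cmdline_proc_show() */
    static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "%s\n", "this has been live patched");
            return 0;
    }

    static struct klp_func funcs[] = {
            {
                    .old_name = "cmdline_proc_show",
                    .new_func = livepatch_cmdline_proc_show,
            }, { }
    };

    static struct klp_object objs[] = {
            {
                    /* name being NULL means the object is vmlinux */
                    .funcs = funcs,
            }, { }
    };

    static struct klp_patch patch = {
            .mod = THIS_MODULE,
            .objs = objs,
    };

    static int livepatch_init(void)
    {
            int ret;

            /* create the sysfs entries and take a reference on this module */
            ret = klp_register_patch(&patch);
            if (ret)
                    return ret;

            /* hook the listed functions into ftrace so calls get redirected */
            ret = klp_enable_patch(&patch);
            if (ret) {
                    WARN_ON(klp_unregister_patch(&patch));
                    return ret;
            }

            return 0;
    }

    module_init(livepatch_init);
    MODULE_LICENSE("GPL");

Once such a module is loaded and enabled, the patch shows up under
/sys/kernel/livepatch/<module name>/ via the sysfs interface implemented in
core.c below.
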
diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig
new file mode 100644
index 000000000000..96da00fbc120
--- /dev/null
+++ b/kernel/livepatch/Kconfig
@@ -0,0 +1,18 @@
1config ARCH_HAVE_LIVE_PATCHING
2 boolean
3 help
4 Arch supports kernel live patching
5
6config LIVE_PATCHING
7 boolean "Kernel Live Patching"
8 depends on DYNAMIC_FTRACE_WITH_REGS
9 depends on MODULES
10 depends on SYSFS
11 depends on KALLSYMS_ALL
12 depends on ARCH_HAVE_LIVE_PATCHING
13 help
14 Say Y here if you want to support kernel live patching.
15 This option has no runtime impact until a kernel "patch"
16 module uses the interface provided by this option to register
17 a patch, causing calls to patched functions to be redirected
18 to new function code contained in the patch module.
diff --git a/kernel/livepatch/Makefile b/kernel/livepatch/Makefile
new file mode 100644
index 000000000000..7c1f00861428
--- /dev/null
+++ b/kernel/livepatch/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_LIVE_PATCHING) += livepatch.o
2
3livepatch-objs := core.o
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
new file mode 100644
index 000000000000..f99fe189d596
--- /dev/null
+++ b/kernel/livepatch/core.c
@@ -0,0 +1,930 @@
1/*
2 * core.c - Kernel Live Patching Core
3 *
4 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
5 * Copyright (C) 2014 SUSE
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/mutex.h>
26#include <linux/slab.h>
27#include <linux/ftrace.h>
28#include <linux/list.h>
29#include <linux/kallsyms.h>
30#include <linux/livepatch.h>
31
32/*
33 * The klp_mutex protects the klp_patches list and state transitions of any
34 * structure reachable from the patches list. References to any structure must
35 * be obtained under mutex protection.
36 */
37
38static DEFINE_MUTEX(klp_mutex);
39static LIST_HEAD(klp_patches);
40
41static struct kobject *klp_root_kobj;
42
43static bool klp_is_module(struct klp_object *obj)
44{
45 return obj->name;
46}
47
48static bool klp_is_object_loaded(struct klp_object *obj)
49{
50 return !obj->name || obj->mod;
51}
52
53/* sets obj->mod if object is not vmlinux and module is found */
54static void klp_find_object_module(struct klp_object *obj)
55{
56 if (!klp_is_module(obj))
57 return;
58
59 mutex_lock(&module_mutex);
60 /*
61 * We don't need to take a reference on the module here because we have
62 * the klp_mutex, which is also taken by the module notifier. This
63 * prevents any module from unloading until we release the klp_mutex.
64 */
65 obj->mod = find_module(obj->name);
66 mutex_unlock(&module_mutex);
67}
68
69/* klp_mutex must be held by caller */
70static bool klp_is_patch_registered(struct klp_patch *patch)
71{
72 struct klp_patch *mypatch;
73
74 list_for_each_entry(mypatch, &klp_patches, list)
75 if (mypatch == patch)
76 return true;
77
78 return false;
79}
80
81static bool klp_initialized(void)
82{
83 return klp_root_kobj;
84}
85
86struct klp_find_arg {
87 const char *objname;
88 const char *name;
89 unsigned long addr;
90 /*
91 * If count == 0, the symbol was not found. If count == 1, a unique
92 * match was found and addr is set. If count > 1, there is
93 * unresolvable ambiguity among "count" number of symbols with the same
94 * name in the same object.
95 */
96 unsigned long count;
97};
98
99static int klp_find_callback(void *data, const char *name,
100 struct module *mod, unsigned long addr)
101{
102 struct klp_find_arg *args = data;
103
104 if ((mod && !args->objname) || (!mod && args->objname))
105 return 0;
106
107 if (strcmp(args->name, name))
108 return 0;
109
110 if (args->objname && strcmp(args->objname, mod->name))
111 return 0;
112
113 /*
114 * args->addr might be overwritten if another match is found
115 * but klp_find_object_symbol() handles this and only returns the
116 * addr if count == 1.
117 */
118 args->addr = addr;
119 args->count++;
120
121 return 0;
122}
123
124static int klp_find_object_symbol(const char *objname, const char *name,
125 unsigned long *addr)
126{
127 struct klp_find_arg args = {
128 .objname = objname,
129 .name = name,
130 .addr = 0,
131 .count = 0
132 };
133
134 kallsyms_on_each_symbol(klp_find_callback, &args);
135
136 if (args.count == 0)
137 pr_err("symbol '%s' not found in symbol table\n", name);
138 else if (args.count > 1)
139 pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
140 args.count, name, objname);
141 else {
142 *addr = args.addr;
143 return 0;
144 }
145
146 *addr = 0;
147 return -EINVAL;
148}
149
150struct klp_verify_args {
151 const char *name;
152 const unsigned long addr;
153};
154
155static int klp_verify_callback(void *data, const char *name,
156 struct module *mod, unsigned long addr)
157{
158 struct klp_verify_args *args = data;
159
160 if (!mod &&
161 !strcmp(args->name, name) &&
162 args->addr == addr)
163 return 1;
164
165 return 0;
166}
167
168static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
169{
170 struct klp_verify_args args = {
171 .name = name,
172 .addr = addr,
173 };
174
175 if (kallsyms_on_each_symbol(klp_verify_callback, &args))
176 return 0;
177
178 pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?",
179 name, addr);
180 return -EINVAL;
181}
182
183static int klp_find_verify_func_addr(struct klp_object *obj,
184 struct klp_func *func)
185{
186 int ret;
187
188#if defined(CONFIG_RANDOMIZE_BASE)
189 /* KASLR is enabled, disregard old_addr from user */
190 func->old_addr = 0;
191#endif
192
193 if (!func->old_addr || klp_is_module(obj))
194 ret = klp_find_object_symbol(obj->name, func->old_name,
195 &func->old_addr);
196 else
197 ret = klp_verify_vmlinux_symbol(func->old_name,
198 func->old_addr);
199
200 return ret;
201}
202
203/*
204 * external symbols are located outside the parent object (where the parent
205 * object is either vmlinux or the kmod being patched).
206 */
207static int klp_find_external_symbol(struct module *pmod, const char *name,
208 unsigned long *addr)
209{
210 const struct kernel_symbol *sym;
211
212 /* first, check if it's an exported symbol */
213 preempt_disable();
214 sym = find_symbol(name, NULL, NULL, true, true);
215 preempt_enable();
216 if (sym) {
217 *addr = sym->value;
218 return 0;
219 }
220
221 /* otherwise check if it's in another .o within the patch module */
222 return klp_find_object_symbol(pmod->name, name, addr);
223}
224
225static int klp_write_object_relocations(struct module *pmod,
226 struct klp_object *obj)
227{
228 int ret;
229 struct klp_reloc *reloc;
230
231 if (WARN_ON(!klp_is_object_loaded(obj)))
232 return -EINVAL;
233
234 if (WARN_ON(!obj->relocs))
235 return -EINVAL;
236
237 for (reloc = obj->relocs; reloc->name; reloc++) {
238 if (!klp_is_module(obj)) {
239 ret = klp_verify_vmlinux_symbol(reloc->name,
240 reloc->val);
241 if (ret)
242 return ret;
243 } else {
244 /* module, reloc->val needs to be discovered */
245 if (reloc->external)
246 ret = klp_find_external_symbol(pmod,
247 reloc->name,
248 &reloc->val);
249 else
250 ret = klp_find_object_symbol(obj->mod->name,
251 reloc->name,
252 &reloc->val);
253 if (ret)
254 return ret;
255 }
256 ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
257 reloc->val + reloc->addend);
258 if (ret) {
259 pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
260 reloc->name, reloc->val, ret);
261 return ret;
262 }
263 }
264
265 return 0;
266}
267
268static void notrace klp_ftrace_handler(unsigned long ip,
269 unsigned long parent_ip,
270 struct ftrace_ops *ops,
271 struct pt_regs *regs)
272{
273 struct klp_func *func = ops->private;
274
275 regs->ip = (unsigned long)func->new_func;
276}
277
278static int klp_disable_func(struct klp_func *func)
279{
280 int ret;
281
282 if (WARN_ON(func->state != KLP_ENABLED))
283 return -EINVAL;
284
285 if (WARN_ON(!func->old_addr))
286 return -EINVAL;
287
288 ret = unregister_ftrace_function(func->fops);
289 if (ret) {
290 pr_err("failed to unregister ftrace handler for function '%s' (%d)\n",
291 func->old_name, ret);
292 return ret;
293 }
294
295 ret = ftrace_set_filter_ip(func->fops, func->old_addr, 1, 0);
296 if (ret)
297 pr_warn("function unregister succeeded but failed to clear the filter\n");
298
299 func->state = KLP_DISABLED;
300
301 return 0;
302}
303
304static int klp_enable_func(struct klp_func *func)
305{
306 int ret;
307
308 if (WARN_ON(!func->old_addr))
309 return -EINVAL;
310
311 if (WARN_ON(func->state != KLP_DISABLED))
312 return -EINVAL;
313
314 ret = ftrace_set_filter_ip(func->fops, func->old_addr, 0, 0);
315 if (ret) {
316 pr_err("failed to set ftrace filter for function '%s' (%d)\n",
317 func->old_name, ret);
318 return ret;
319 }
320
321 ret = register_ftrace_function(func->fops);
322 if (ret) {
323 pr_err("failed to register ftrace handler for function '%s' (%d)\n",
324 func->old_name, ret);
325 ftrace_set_filter_ip(func->fops, func->old_addr, 1, 0);
326 } else {
327 func->state = KLP_ENABLED;
328 }
329
330 return ret;
331}
332
333static int klp_disable_object(struct klp_object *obj)
334{
335 struct klp_func *func;
336 int ret;
337
338 for (func = obj->funcs; func->old_name; func++) {
339 if (func->state != KLP_ENABLED)
340 continue;
341
342 ret = klp_disable_func(func);
343 if (ret)
344 return ret;
345 }
346
347 obj->state = KLP_DISABLED;
348
349 return 0;
350}
351
352static int klp_enable_object(struct klp_object *obj)
353{
354 struct klp_func *func;
355 int ret;
356
357 if (WARN_ON(obj->state != KLP_DISABLED))
358 return -EINVAL;
359
360 if (WARN_ON(!klp_is_object_loaded(obj)))
361 return -EINVAL;
362
363 for (func = obj->funcs; func->old_name; func++) {
364 ret = klp_enable_func(func);
365 if (ret)
366 goto unregister;
367 }
368 obj->state = KLP_ENABLED;
369
370 return 0;
371
372unregister:
373 WARN_ON(klp_disable_object(obj));
374 return ret;
375}
376
377static int __klp_disable_patch(struct klp_patch *patch)
378{
379 struct klp_object *obj;
380 int ret;
381
382 pr_notice("disabling patch '%s'\n", patch->mod->name);
383
384 for (obj = patch->objs; obj->funcs; obj++) {
385 if (obj->state != KLP_ENABLED)
386 continue;
387
388 ret = klp_disable_object(obj);
389 if (ret)
390 return ret;
391 }
392
393 patch->state = KLP_DISABLED;
394
395 return 0;
396}
397
398/**
399 * klp_disable_patch() - disables a registered patch
400 * @patch: The registered, enabled patch to be disabled
401 *
402 * Unregisters the patched functions from ftrace.
403 *
404 * Return: 0 on success, otherwise error
405 */
406int klp_disable_patch(struct klp_patch *patch)
407{
408 int ret;
409
410 mutex_lock(&klp_mutex);
411
412 if (!klp_is_patch_registered(patch)) {
413 ret = -EINVAL;
414 goto err;
415 }
416
417 if (patch->state == KLP_DISABLED) {
418 ret = -EINVAL;
419 goto err;
420 }
421
422 ret = __klp_disable_patch(patch);
423
424err:
425 mutex_unlock(&klp_mutex);
426 return ret;
427}
428EXPORT_SYMBOL_GPL(klp_disable_patch);
429
430static int __klp_enable_patch(struct klp_patch *patch)
431{
432 struct klp_object *obj;
433 int ret;
434
435 if (WARN_ON(patch->state != KLP_DISABLED))
436 return -EINVAL;
437
438 pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
439 add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
440
441 pr_notice("enabling patch '%s'\n", patch->mod->name);
442
443 for (obj = patch->objs; obj->funcs; obj++) {
444 klp_find_object_module(obj);
445
446 if (!klp_is_object_loaded(obj))
447 continue;
448
449 ret = klp_enable_object(obj);
450 if (ret)
451 goto unregister;
452 }
453
454 patch->state = KLP_ENABLED;
455
456 return 0;
457
458unregister:
459 WARN_ON(__klp_disable_patch(patch));
460 return ret;
461}
462
463/**
464 * klp_enable_patch() - enables a registered patch
465 * @patch: The registered, disabled patch to be enabled
466 *
467 * Performs the needed symbol lookups and code relocations,
468 * then registers the patched functions with ftrace.
469 *
470 * Return: 0 on success, otherwise error
471 */
472int klp_enable_patch(struct klp_patch *patch)
473{
474 int ret;
475
476 mutex_lock(&klp_mutex);
477
478 if (!klp_is_patch_registered(patch)) {
479 ret = -EINVAL;
480 goto err;
481 }
482
483 ret = __klp_enable_patch(patch);
484
485err:
486 mutex_unlock(&klp_mutex);
487 return ret;
488}
489EXPORT_SYMBOL_GPL(klp_enable_patch);
490
491/*
492 * Sysfs Interface
493 *
494 * /sys/kernel/livepatch
495 * /sys/kernel/livepatch/<patch>
496 * /sys/kernel/livepatch/<patch>/enabled
497 * /sys/kernel/livepatch/<patch>/<object>
498 * /sys/kernel/livepatch/<patch>/<object>/<func>
499 */
500
501static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
502 const char *buf, size_t count)
503{
504 struct klp_patch *patch;
505 int ret;
506 unsigned long val;
507
508 ret = kstrtoul(buf, 10, &val);
509 if (ret)
510 return -EINVAL;
511
512 if (val != KLP_DISABLED && val != KLP_ENABLED)
513 return -EINVAL;
514
515 patch = container_of(kobj, struct klp_patch, kobj);
516
517 mutex_lock(&klp_mutex);
518
519 if (val == patch->state) {
520 /* already in requested state */
521 ret = -EINVAL;
522 goto err;
523 }
524
525 if (val == KLP_ENABLED) {
526 ret = __klp_enable_patch(patch);
527 if (ret)
528 goto err;
529 } else {
530 ret = __klp_disable_patch(patch);
531 if (ret)
532 goto err;
533 }
534
535 mutex_unlock(&klp_mutex);
536
537 return count;
538
539err:
540 mutex_unlock(&klp_mutex);
541 return ret;
542}
543
544static ssize_t enabled_show(struct kobject *kobj,
545 struct kobj_attribute *attr, char *buf)
546{
547 struct klp_patch *patch;
548
549 patch = container_of(kobj, struct klp_patch, kobj);
550 return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
551}
552
553static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
554static struct attribute *klp_patch_attrs[] = {
555 &enabled_kobj_attr.attr,
556 NULL
557};
558
559static void klp_kobj_release_patch(struct kobject *kobj)
560{
561 /*
562 * Once we have a consistency model we'll need to module_put() the
563 * patch module here. See klp_register_patch() for more details.
564 */
565}
566
567static struct kobj_type klp_ktype_patch = {
568 .release = klp_kobj_release_patch,
569 .sysfs_ops = &kobj_sysfs_ops,
570 .default_attrs = klp_patch_attrs,
571};
572
573static void klp_kobj_release_func(struct kobject *kobj)
574{
575 struct klp_func *func;
576
577 func = container_of(kobj, struct klp_func, kobj);
578 kfree(func->fops);
579}
580
581static struct kobj_type klp_ktype_func = {
582 .release = klp_kobj_release_func,
583 .sysfs_ops = &kobj_sysfs_ops,
584};
585
586/*
587 * Free all functions' kobjects in the array up to some limit. When limit is
588 * NULL, all kobjects are freed.
589 */
590static void klp_free_funcs_limited(struct klp_object *obj,
591 struct klp_func *limit)
592{
593 struct klp_func *func;
594
595 for (func = obj->funcs; func->old_name && func != limit; func++)
596 kobject_put(&func->kobj);
597}
598
599/* Clean up when a patched object is unloaded */
600static void klp_free_object_loaded(struct klp_object *obj)
601{
602 struct klp_func *func;
603
604 obj->mod = NULL;
605
606 for (func = obj->funcs; func->old_name; func++)
607 func->old_addr = 0;
608}
609
610/*
611 * Free all objects' kobjects in the array up to some limit. When limit is
612 * NULL, all kobjects are freed.
613 */
614static void klp_free_objects_limited(struct klp_patch *patch,
615 struct klp_object *limit)
616{
617 struct klp_object *obj;
618
619 for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
620 klp_free_funcs_limited(obj, NULL);
621 kobject_put(obj->kobj);
622 }
623}
624
625static void klp_free_patch(struct klp_patch *patch)
626{
627 klp_free_objects_limited(patch, NULL);
628 if (!list_empty(&patch->list))
629 list_del(&patch->list);
630 kobject_put(&patch->kobj);
631}
632
633static int klp_init_func(struct klp_object *obj, struct klp_func *func)
634{
635 struct ftrace_ops *ops;
636 int ret;
637
638 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
639 if (!ops)
640 return -ENOMEM;
641
642 ops->private = func;
643 ops->func = klp_ftrace_handler;
644 ops->flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_DYNAMIC;
645 func->fops = ops;
646 func->state = KLP_DISABLED;
647
648 ret = kobject_init_and_add(&func->kobj, &klp_ktype_func,
649 obj->kobj, func->old_name);
650 if (ret) {
651 kfree(func->fops);
652 return ret;
653 }
654
655 return 0;
656}
657
658/* parts of the initialization that is done only when the object is loaded */
659static int klp_init_object_loaded(struct klp_patch *patch,
660 struct klp_object *obj)
661{
662 struct klp_func *func;
663 int ret;
664
665 if (obj->relocs) {
666 ret = klp_write_object_relocations(patch->mod, obj);
667 if (ret)
668 return ret;
669 }
670
671 for (func = obj->funcs; func->old_name; func++) {
672 ret = klp_find_verify_func_addr(obj, func);
673 if (ret)
674 return ret;
675 }
676
677 return 0;
678}
679
680static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
681{
682 struct klp_func *func;
683 int ret;
684 const char *name;
685
686 if (!obj->funcs)
687 return -EINVAL;
688
689 obj->state = KLP_DISABLED;
690
691 klp_find_object_module(obj);
692
693 name = klp_is_module(obj) ? obj->name : "vmlinux";
694 obj->kobj = kobject_create_and_add(name, &patch->kobj);
695 if (!obj->kobj)
696 return -ENOMEM;
697
698 for (func = obj->funcs; func->old_name; func++) {
699 ret = klp_init_func(obj, func);
700 if (ret)
701 goto free;
702 }
703
704 if (klp_is_object_loaded(obj)) {
705 ret = klp_init_object_loaded(patch, obj);
706 if (ret)
707 goto free;
708 }
709
710 return 0;
711
712free:
713 klp_free_funcs_limited(obj, func);
714 kobject_put(obj->kobj);
715 return ret;
716}
717
718static int klp_init_patch(struct klp_patch *patch)
719{
720 struct klp_object *obj;
721 int ret;
722
723 if (!patch->objs)
724 return -EINVAL;
725
726 mutex_lock(&klp_mutex);
727
728 patch->state = KLP_DISABLED;
729
730 ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
731 klp_root_kobj, patch->mod->name);
732 if (ret)
733 goto unlock;
734
735 for (obj = patch->objs; obj->funcs; obj++) {
736 ret = klp_init_object(patch, obj);
737 if (ret)
738 goto free;
739 }
740
741 list_add(&patch->list, &klp_patches);
742
743 mutex_unlock(&klp_mutex);
744
745 return 0;
746
747free:
748 klp_free_objects_limited(patch, obj);
749 kobject_put(&patch->kobj);
750unlock:
751 mutex_unlock(&klp_mutex);
752 return ret;
753}
754
755/**
756 * klp_unregister_patch() - unregisters a patch
757 * @patch: Disabled patch to be unregistered
758 *
759 * Frees the data structures and removes the sysfs interface.
760 *
761 * Return: 0 on success, otherwise error
762 */
763int klp_unregister_patch(struct klp_patch *patch)
764{
765 int ret = 0;
766
767 mutex_lock(&klp_mutex);
768
769 if (!klp_is_patch_registered(patch)) {
770 ret = -EINVAL;
771 goto out;
772 }
773
774 if (patch->state == KLP_ENABLED) {
775 ret = -EBUSY;
776 goto out;
777 }
778
779 klp_free_patch(patch);
780
781out:
782 mutex_unlock(&klp_mutex);
783 return ret;
784}
785EXPORT_SYMBOL_GPL(klp_unregister_patch);
786
787/**
788 * klp_register_patch() - registers a patch
789 * @patch: Patch to be registered
790 *
791 * Initializes the data structure associated with the patch and
792 * creates the sysfs interface.
793 *
794 * Return: 0 on success, otherwise error
795 */
796int klp_register_patch(struct klp_patch *patch)
797{
798 int ret;
799
800 if (!klp_initialized())
801 return -ENODEV;
802
803 if (!patch || !patch->mod)
804 return -EINVAL;
805
806 /*
807 * A reference is taken on the patch module to prevent it from being
808 * unloaded. Right now, we don't allow patch modules to unload since
809 * there is currently no method to determine if a thread is still
810 * running in the patched code contained in the patch module once
811 * the ftrace registration is successful.
812 */
813 if (!try_module_get(patch->mod))
814 return -ENODEV;
815
816 ret = klp_init_patch(patch);
817 if (ret)
818 module_put(patch->mod);
819
820 return ret;
821}
822EXPORT_SYMBOL_GPL(klp_register_patch);
823
824static void klp_module_notify_coming(struct klp_patch *patch,
825 struct klp_object *obj)
826{
827 struct module *pmod = patch->mod;
828 struct module *mod = obj->mod;
829 int ret;
830
831 ret = klp_init_object_loaded(patch, obj);
832 if (ret)
833 goto err;
834
835 if (patch->state == KLP_DISABLED)
836 return;
837
838 pr_notice("applying patch '%s' to loading module '%s'\n",
839 pmod->name, mod->name);
840
841 ret = klp_enable_object(obj);
842 if (!ret)
843 return;
844
845err:
846 pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
847 pmod->name, mod->name, ret);
848}
849
850static void klp_module_notify_going(struct klp_patch *patch,
851 struct klp_object *obj)
852{
853 struct module *pmod = patch->mod;
854 struct module *mod = obj->mod;
855 int ret;
856
857 if (patch->state == KLP_DISABLED)
858 goto disabled;
859
860 pr_notice("reverting patch '%s' on unloading module '%s'\n",
861 pmod->name, mod->name);
862
863 ret = klp_disable_object(obj);
864 if (ret)
865 pr_warn("failed to revert patch '%s' on module '%s' (%d)\n",
866 pmod->name, mod->name, ret);
867
868disabled:
869 klp_free_object_loaded(obj);
870}
871
872static int klp_module_notify(struct notifier_block *nb, unsigned long action,
873 void *data)
874{
875 struct module *mod = data;
876 struct klp_patch *patch;
877 struct klp_object *obj;
878
879 if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
880 return 0;
881
882 mutex_lock(&klp_mutex);
883
884 list_for_each_entry(patch, &klp_patches, list) {
885 for (obj = patch->objs; obj->funcs; obj++) {
886 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
887 continue;
888
889 if (action == MODULE_STATE_COMING) {
890 obj->mod = mod;
891 klp_module_notify_coming(patch, obj);
892 } else /* MODULE_STATE_GOING */
893 klp_module_notify_going(patch, obj);
894
895 break;
896 }
897 }
898
899 mutex_unlock(&klp_mutex);
900
901 return 0;
902}
903
904static struct notifier_block klp_module_nb = {
905 .notifier_call = klp_module_notify,
906 .priority = INT_MIN+1, /* called late but before ftrace notifier */
907};
908
909static int klp_init(void)
910{
911 int ret;
912
913 ret = register_module_notifier(&klp_module_nb);
914 if (ret)
915 return ret;
916
917 klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
918 if (!klp_root_kobj) {
919 ret = -ENOMEM;
920 goto unregister;
921 }
922
923 return 0;
924
925unregister:
926 unregister_module_notifier(&klp_module_nb);
927 return ret;
928}
929
930module_init(klp_init);
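
To complete the hypothetical example sketched after the diffstat above:
teardown would disable the patch before unregistering it, since
klp_unregister_patch() returns -EBUSY for an enabled patch. Note that
klp_register_patch() takes a reference on the patch module and never drops it
in this version, so the module cannot actually be unloaded; in practice the
patch is switched off by writing 0 to /sys/kernel/livepatch/<patch>/enabled
rather than by removing the module.

    /* hypothetical exit path for the sample patch module sketched earlier */
    static void livepatch_exit(void)
    {
            /* order matters: disable (unhook from ftrace) before unregistering */
            WARN_ON(klp_disable_patch(&patch));
            WARN_ON(klp_unregister_patch(&patch));
    }

    module_exit(livepatch_exit);
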