author		Jiri Kosina <jkosina@suse.cz>	2017-05-01 15:49:28 -0400
committer	Jiri Kosina <jkosina@suse.cz>	2017-05-01 15:49:28 -0400
commit		a0841609f658c77f066af9c61a2e13143564fcb4 (patch)
tree		0f0df468b6f852501cd4ed1570701e695b9f5d56 /kernel/livepatch/core.c
parent		77f8f39a2e463eca89a19b916189d0e4e38f75d8 (diff)
parent		e679af627fe875a51d40b9a2b17f08fbde36e0e2 (diff)
Merge branches 'for-4.12/upstream' and 'for-4.12/klp-hybrid-consistency-model' into for-linus
Diffstat (limited to 'kernel/livepatch/core.c')
-rw-r--r--	kernel/livepatch/core.c	437
1 file changed, 164 insertions(+), 273 deletions(-)
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 8739e9e0bdb8..b9628e43c78f 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -24,61 +24,31 @@
 #include <linux/kernel.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/ftrace.h>
 #include <linux/list.h>
 #include <linux/kallsyms.h>
 #include <linux/livepatch.h>
 #include <linux/elf.h>
 #include <linux/moduleloader.h>
+#include <linux/completion.h>
 #include <asm/cacheflush.h>
-
-/**
- * struct klp_ops - structure for tracking registered ftrace ops structs
- *
- * A single ftrace_ops is shared between all enabled replacement functions
- * (klp_func structs) which have the same old_addr. This allows the switch
- * between function versions to happen instantaneously by updating the klp_ops
- * struct's func_stack list. The winner is the klp_func at the top of the
- * func_stack (front of the list).
- *
- * @node:	node for the global klp_ops list
- * @func_stack:	list head for the stack of klp_func's (active func is on top)
- * @fops:	registered ftrace ops struct
- */
-struct klp_ops {
-	struct list_head node;
-	struct list_head func_stack;
-	struct ftrace_ops fops;
-};
+#include "core.h"
+#include "patch.h"
+#include "transition.h"
 
 /*
- * The klp_mutex protects the global lists and state transitions of any
- * structure reachable from them. References to any structure must be obtained
- * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
- * ensure it gets consistent data).
+ * klp_mutex is a coarse lock which serializes access to klp data. All
+ * accesses to klp-related variables and structures must have mutex protection,
+ * except within the following functions which carefully avoid the need for it:
+ *
+ * - klp_ftrace_handler()
+ * - klp_update_patch_state()
 */
-static DEFINE_MUTEX(klp_mutex);
+DEFINE_MUTEX(klp_mutex);
 
 static LIST_HEAD(klp_patches);
-static LIST_HEAD(klp_ops);
 
 static struct kobject *klp_root_kobj;
 
-static struct klp_ops *klp_find_ops(unsigned long old_addr)
-{
-	struct klp_ops *ops;
-	struct klp_func *func;
-
-	list_for_each_entry(ops, &klp_ops, node) {
-		func = list_first_entry(&ops->func_stack, struct klp_func,
-					stack_node);
-		if (func->old_addr == old_addr)
-			return ops;
-	}
-
-	return NULL;
-}
-
 static bool klp_is_module(struct klp_object *obj)
 {
 	return obj->name;
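
Note: struct klp_ops, klp_find_ops() and the ftrace handler removed above were not deleted outright; they moved to the new kernel/livepatch/patch.c (hence the "patch.h" include). As a reference point, here is a condensed sketch of the dispatch mechanism, mirroring the removed code rather than the exact patch.c version, with the new transition logic omitted:

    /*
     * One ftrace_ops is shared by every klp_func that patches the same
     * old address; the active function sits at the front of func_stack.
     */
    static void notrace klp_ftrace_handler(unsigned long ip,
                                           unsigned long parent_ip,
                                           struct ftrace_ops *fops,
                                           struct pt_regs *regs)
    {
            struct klp_ops *ops = container_of(fops, struct klp_ops, fops);
            struct klp_func *func;

            rcu_read_lock();
            func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                          stack_node);
            if (!WARN_ON_ONCE(!func))
                    /* divert execution to the replacement function */
                    klp_arch_set_pc(regs, (unsigned long)func->new_func);
            rcu_read_unlock();
    }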
@@ -117,7 +87,6 @@ static void klp_find_object_module(struct klp_object *obj)
 	mutex_unlock(&module_mutex);
 }
 
-/* klp_mutex must be held by caller */
 static bool klp_is_patch_registered(struct klp_patch *patch)
 {
 	struct klp_patch *mypatch;
@@ -314,191 +283,30 @@ static int klp_write_object_relocations(struct module *pmod,
 	return ret;
 }
 
-static void notrace klp_ftrace_handler(unsigned long ip,
-				       unsigned long parent_ip,
-				       struct ftrace_ops *fops,
-				       struct pt_regs *regs)
-{
-	struct klp_ops *ops;
-	struct klp_func *func;
-
-	ops = container_of(fops, struct klp_ops, fops);
-
-	rcu_read_lock();
-	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
-				      stack_node);
-	if (WARN_ON_ONCE(!func))
-		goto unlock;
-
-	klp_arch_set_pc(regs, (unsigned long)func->new_func);
-unlock:
-	rcu_read_unlock();
-}
-
-/*
- * Convert a function address into the appropriate ftrace location.
- *
- * Usually this is just the address of the function, but on some architectures
- * it's more complicated so allow them to provide a custom behaviour.
- */
-#ifndef klp_get_ftrace_location
-static unsigned long klp_get_ftrace_location(unsigned long faddr)
-{
-	return faddr;
-}
-#endif
-
-static void klp_disable_func(struct klp_func *func)
-{
-	struct klp_ops *ops;
-
-	if (WARN_ON(func->state != KLP_ENABLED))
-		return;
-	if (WARN_ON(!func->old_addr))
-		return;
-
-	ops = klp_find_ops(func->old_addr);
-	if (WARN_ON(!ops))
-		return;
-
-	if (list_is_singular(&ops->func_stack)) {
-		unsigned long ftrace_loc;
-
-		ftrace_loc = klp_get_ftrace_location(func->old_addr);
-		if (WARN_ON(!ftrace_loc))
-			return;
-
-		WARN_ON(unregister_ftrace_function(&ops->fops));
-		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
-
-		list_del_rcu(&func->stack_node);
-		list_del(&ops->node);
-		kfree(ops);
-	} else {
-		list_del_rcu(&func->stack_node);
-	}
-
-	func->state = KLP_DISABLED;
-}
-
-static int klp_enable_func(struct klp_func *func)
-{
-	struct klp_ops *ops;
-	int ret;
-
-	if (WARN_ON(!func->old_addr))
-		return -EINVAL;
-
-	if (WARN_ON(func->state != KLP_DISABLED))
-		return -EINVAL;
-
-	ops = klp_find_ops(func->old_addr);
-	if (!ops) {
-		unsigned long ftrace_loc;
-
-		ftrace_loc = klp_get_ftrace_location(func->old_addr);
-		if (!ftrace_loc) {
-			pr_err("failed to find location for function '%s'\n",
-			       func->old_name);
-			return -EINVAL;
-		}
-
-		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
-		if (!ops)
-			return -ENOMEM;
-
-		ops->fops.func = klp_ftrace_handler;
-		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
-				  FTRACE_OPS_FL_DYNAMIC |
-				  FTRACE_OPS_FL_IPMODIFY;
-
-		list_add(&ops->node, &klp_ops);
-
-		INIT_LIST_HEAD(&ops->func_stack);
-		list_add_rcu(&func->stack_node, &ops->func_stack);
-
-		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
-		if (ret) {
-			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
-			       func->old_name, ret);
-			goto err;
-		}
-
-		ret = register_ftrace_function(&ops->fops);
-		if (ret) {
-			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
-			       func->old_name, ret);
-			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
-			goto err;
-		}
-
-
-	} else {
-		list_add_rcu(&func->stack_node, &ops->func_stack);
-	}
-
-	func->state = KLP_ENABLED;
-
-	return 0;
-
-err:
-	list_del_rcu(&func->stack_node);
-	list_del(&ops->node);
-	kfree(ops);
-	return ret;
-}
-
-static void klp_disable_object(struct klp_object *obj)
-{
-	struct klp_func *func;
-
-	klp_for_each_func(obj, func)
-		if (func->state == KLP_ENABLED)
-			klp_disable_func(func);
-
-	obj->state = KLP_DISABLED;
-}
-
-static int klp_enable_object(struct klp_object *obj)
-{
-	struct klp_func *func;
-	int ret;
-
-	if (WARN_ON(obj->state != KLP_DISABLED))
-		return -EINVAL;
-
-	if (WARN_ON(!klp_is_object_loaded(obj)))
-		return -EINVAL;
-
-	klp_for_each_func(obj, func) {
-		ret = klp_enable_func(func);
-		if (ret) {
-			klp_disable_object(obj);
-			return ret;
-		}
-	}
-	obj->state = KLP_ENABLED;
-
-	return 0;
-}
-
 static int __klp_disable_patch(struct klp_patch *patch)
 {
-	struct klp_object *obj;
+	if (klp_transition_patch)
+		return -EBUSY;
 
 	/* enforce stacking: only the last enabled patch can be disabled */
 	if (!list_is_last(&patch->list, &klp_patches) &&
-	    list_next_entry(patch, list)->state == KLP_ENABLED)
+	    list_next_entry(patch, list)->enabled)
 		return -EBUSY;
 
-	pr_notice("disabling patch '%s'\n", patch->mod->name);
+	klp_init_transition(patch, KLP_UNPATCHED);
 
-	klp_for_each_object(patch, obj) {
-		if (obj->state == KLP_ENABLED)
-			klp_disable_object(obj);
-	}
+	/*
+	 * Enforce the order of the func->transition writes in
+	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
+	 * klp_start_transition(). In the rare case where klp_ftrace_handler()
+	 * is called shortly after klp_update_patch_state() switches the task,
+	 * this ensures the handler sees that func->transition is set.
+	 */
+	smp_wmb();
 
-	patch->state = KLP_DISABLED;
+	klp_start_transition();
+	klp_try_complete_transition();
+	patch->enabled = false;
 
 	return 0;
 }
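
Note: the smp_wmb() added above has a read-side counterpart; klp_ftrace_handler() in patch.c issues an smp_rmb() after seeing func->transition set. Schematic of the pairing, using hypothetical stand-in flags rather than the real func->transition field and TIF_PATCH_PENDING accessors:

    static bool transition_flag;	/* stands in for func->transition */
    static bool pending_flag;		/* stands in for TIF_PATCH_PENDING */

    /* writer side, i.e. __klp_disable_patch() / __klp_enable_patch() */
    static void writer(void)
    {
            WRITE_ONCE(transition_flag, true);	/* klp_init_transition()  */
            smp_wmb();				/* the barrier added here */
            WRITE_ONCE(pending_flag, true);		/* klp_start_transition() */
    }

    /* reader side, i.e. the ftrace handler path */
    static void reader(void)
    {
            if (READ_ONCE(pending_flag)) {
                    smp_rmb();	/* pairs with the writer's smp_wmb() */
                    WARN_ON(!READ_ONCE(transition_flag));
            }
    }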
@@ -522,7 +330,7 @@ int klp_disable_patch(struct klp_patch *patch)
 		goto err;
 	}
 
-	if (patch->state == KLP_DISABLED) {
+	if (!patch->enabled) {
 		ret = -EINVAL;
 		goto err;
 	}
@@ -540,32 +348,61 @@ static int __klp_enable_patch(struct klp_patch *patch)
 	struct klp_object *obj;
 	int ret;
 
-	if (WARN_ON(patch->state != KLP_DISABLED))
+	if (klp_transition_patch)
+		return -EBUSY;
+
+	if (WARN_ON(patch->enabled))
 		return -EINVAL;
 
 	/* enforce stacking: only the first disabled patch can be enabled */
 	if (patch->list.prev != &klp_patches &&
-	    list_prev_entry(patch, list)->state == KLP_DISABLED)
+	    !list_prev_entry(patch, list)->enabled)
 		return -EBUSY;
 
+	/*
+	 * A reference is taken on the patch module to prevent it from being
+	 * unloaded.
+	 *
+	 * Note: For immediate (no consistency model) patches we don't allow
+	 * patch modules to unload since there is no safe/sane method to
+	 * determine if a thread is still running in the patched code contained
+	 * in the patch module once the ftrace registration is successful.
+	 */
+	if (!try_module_get(patch->mod))
+		return -ENODEV;
+
 	pr_notice("enabling patch '%s'\n", patch->mod->name);
 
+	klp_init_transition(patch, KLP_PATCHED);
+
+	/*
+	 * Enforce the order of the func->transition writes in
+	 * klp_init_transition() and the ops->func_stack writes in
+	 * klp_patch_object(), so that klp_ftrace_handler() will see the
+	 * func->transition updates before the handler is registered and the
+	 * new funcs become visible to the handler.
+	 */
+	smp_wmb();
+
 	klp_for_each_object(patch, obj) {
 		if (!klp_is_object_loaded(obj))
 			continue;
 
-		ret = klp_enable_object(obj);
-		if (ret)
-			goto unregister;
+		ret = klp_patch_object(obj);
+		if (ret) {
+			pr_warn("failed to enable patch '%s'\n",
+				patch->mod->name);
+
+			klp_cancel_transition();
+			return ret;
+		}
 	}
 
-	patch->state = KLP_ENABLED;
+	klp_start_transition();
+	klp_try_complete_transition();
+	patch->enabled = true;
 
 	return 0;
-
-unregister:
-	WARN_ON(__klp_disable_patch(patch));
-	return ret;
 }
 
 /**
@@ -602,6 +439,7 @@ EXPORT_SYMBOL_GPL(klp_enable_patch);
  * /sys/kernel/livepatch
  * /sys/kernel/livepatch/<patch>
  * /sys/kernel/livepatch/<patch>/enabled
+ * /sys/kernel/livepatch/<patch>/transition
  * /sys/kernel/livepatch/<patch>/<object>
  * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
  */
@@ -611,26 +449,34 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
 {
 	struct klp_patch *patch;
 	int ret;
-	unsigned long val;
+	bool enabled;
 
-	ret = kstrtoul(buf, 10, &val);
+	ret = kstrtobool(buf, &enabled);
 	if (ret)
-		return -EINVAL;
-
-	if (val != KLP_DISABLED && val != KLP_ENABLED)
-		return -EINVAL;
+		return ret;
 
 	patch = container_of(kobj, struct klp_patch, kobj);
 
 	mutex_lock(&klp_mutex);
 
-	if (val == patch->state) {
+	if (!klp_is_patch_registered(patch)) {
+		/*
+		 * Module with the patch could either disappear meanwhile or is
+		 * not properly initialized yet.
+		 */
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (patch->enabled == enabled) {
 		/* already in requested state */
 		ret = -EINVAL;
 		goto err;
 	}
 
-	if (val == KLP_ENABLED) {
+	if (patch == klp_transition_patch) {
+		klp_reverse_transition();
+	} else if (enabled) {
 		ret = __klp_enable_patch(patch);
 		if (ret)
 			goto err;
@@ -655,21 +501,33 @@ static ssize_t enabled_show(struct kobject *kobj,
 	struct klp_patch *patch;
 
 	patch = container_of(kobj, struct klp_patch, kobj);
-	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
+	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
+}
+
+static ssize_t transition_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buf)
+{
+	struct klp_patch *patch;
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+	return snprintf(buf, PAGE_SIZE-1, "%d\n",
+			patch == klp_transition_patch);
 }
 
 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
+static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
 static struct attribute *klp_patch_attrs[] = {
 	&enabled_kobj_attr.attr,
+	&transition_kobj_attr.attr,
 	NULL
 };
 
 static void klp_kobj_release_patch(struct kobject *kobj)
 {
-	/*
-	 * Once we have a consistency model we'll need to module_put() the
-	 * patch module here. See klp_register_patch() for more details.
-	 */
+	struct klp_patch *patch;
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+	complete(&patch->finish);
 }
 
 static struct kobj_type klp_ktype_patch = {
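
Note: the release callback no longer frees anything, since struct klp_patch lives inside the patch module itself; instead it signals a completion so that teardown can wait until the kobject core has truly finished with the object. A minimal, self-contained sketch of the pattern, with a hypothetical my_obj type rather than the klp structures:

    #include <linux/kobject.h>
    #include <linux/completion.h>

    struct my_obj {
            struct kobject kobj;
            struct completion finish;	/* init_completion() before add */
    };

    static void my_obj_release(struct kobject *kobj)
    {
            struct my_obj *obj = container_of(kobj, struct my_obj, kobj);

            /* tell the waiter the kobject core is done with the object */
            complete(&obj->finish);
    }

    static struct kobj_type my_ktype = {
            .release = my_obj_release,
    };

    static void my_obj_teardown(struct my_obj *obj)
    {
            kobject_put(&obj->kobj);		/* drop our reference */
            wait_for_completion(&obj->finish);	/* wait for release() */
    }

The wait matters because sysfs may still hold transient references; returning from module exit before the release callback runs would leave a callback pointing into unloaded code.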
@@ -740,7 +598,6 @@ static void klp_free_patch(struct klp_patch *patch)
 	klp_free_objects_limited(patch, NULL);
 	if (!list_empty(&patch->list))
 		list_del(&patch->list);
-	kobject_put(&patch->kobj);
 }
 
 static int klp_init_func(struct klp_object *obj, struct klp_func *func)
@@ -749,7 +606,8 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 		return -EINVAL;
 
 	INIT_LIST_HEAD(&func->stack_node);
-	func->state = KLP_DISABLED;
+	func->patched = false;
+	func->transition = false;
 
 	/* The format for the sysfs directory is <function,sympos> where sympos
 	 * is the nth occurrence of this symbol in kallsyms for the patched
@@ -790,6 +648,22 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 					     &func->old_addr);
 		if (ret)
 			return ret;
+
+		ret = kallsyms_lookup_size_offset(func->old_addr,
+						  &func->old_size, NULL);
+		if (!ret) {
+			pr_err("kallsyms size lookup failed for '%s'\n",
+			       func->old_name);
+			return -ENOENT;
+		}
+
+		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
+						  &func->new_size, NULL);
+		if (!ret) {
+			pr_err("kallsyms size lookup failed for '%s' replacement\n",
+			       func->old_name);
+			return -ENOENT;
+		}
 	}
 
 	return 0;
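
Note: kallsyms_lookup_size_offset() returns nonzero on success, which is why the error branch is `if (!ret)`. The old_size/new_size recorded here feed the consistency model's stack checking: a task can only be switched over if no saved return address on its stack lands inside a function being replaced or reverted. Hedged sketch of that range test; the real check is klp_check_stack_func() in kernel/livepatch/transition.c:

    /* would 'address' execute inside the to-be-replaced copy of func? */
    static bool klp_addr_in_func(struct klp_func *func, unsigned long address,
                                 bool check_new_copy)
    {
            unsigned long start, size;

            if (check_new_copy) {
                    start = (unsigned long)func->new_func;
                    size = func->new_size;
            } else {
                    start = func->old_addr;
                    size = func->old_size;
            }

            return address >= start && address < start + size;
    }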
@@ -804,7 +678,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 	if (!obj->funcs)
 		return -EINVAL;
 
-	obj->state = KLP_DISABLED;
+	obj->patched = false;
 	obj->mod = NULL;
 
 	klp_find_object_module(obj);
@@ -845,12 +719,15 @@ static int klp_init_patch(struct klp_patch *patch)
 
 	mutex_lock(&klp_mutex);
 
-	patch->state = KLP_DISABLED;
+	patch->enabled = false;
+	init_completion(&patch->finish);
 
 	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
 				   klp_root_kobj, "%s", patch->mod->name);
-	if (ret)
-		goto unlock;
+	if (ret) {
+		mutex_unlock(&klp_mutex);
+		return ret;
+	}
 
 	klp_for_each_object(patch, obj) {
 		ret = klp_init_object(patch, obj);
@@ -866,9 +743,12 @@ static int klp_init_patch(struct klp_patch *patch)
 
 free:
 	klp_free_objects_limited(patch, obj);
-	kobject_put(&patch->kobj);
-unlock:
+
 	mutex_unlock(&klp_mutex);
+
+	kobject_put(&patch->kobj);
+	wait_for_completion(&patch->finish);
+
 	return ret;
 }
 
@@ -882,23 +762,29 @@ unlock:
  */
 int klp_unregister_patch(struct klp_patch *patch)
 {
-	int ret = 0;
+	int ret;
 
 	mutex_lock(&klp_mutex);
 
 	if (!klp_is_patch_registered(patch)) {
 		ret = -EINVAL;
-		goto out;
+		goto err;
 	}
 
-	if (patch->state == KLP_ENABLED) {
+	if (patch->enabled) {
 		ret = -EBUSY;
-		goto out;
+		goto err;
 	}
 
 	klp_free_patch(patch);
 
-out:
+	mutex_unlock(&klp_mutex);
+
+	kobject_put(&patch->kobj);
+	wait_for_completion(&patch->finish);
+
+	return 0;
+err:
 	mutex_unlock(&klp_mutex);
 	return ret;
 }
@@ -911,12 +797,13 @@ EXPORT_SYMBOL_GPL(klp_unregister_patch);
  * Initializes the data structure associated with the patch and
  * creates the sysfs interface.
  *
+ * There is no need to take the reference on the patch module here. It is done
+ * later when the patch is enabled.
+ *
  * Return: 0 on success, otherwise error
  */
 int klp_register_patch(struct klp_patch *patch)
 {
-	int ret;
-
 	if (!patch || !patch->mod)
 		return -EINVAL;
 
@@ -930,20 +817,16 @@ int klp_register_patch(struct klp_patch *patch)
 		return -ENODEV;
 
 	/*
-	 * A reference is taken on the patch module to prevent it from being
-	 * unloaded. Right now, we don't allow patch modules to unload since
-	 * there is currently no method to determine if a thread is still
-	 * running in the patched code contained in the patch module once
-	 * the ftrace registration is successful.
+	 * Architectures without reliable stack traces have to set
+	 * patch->immediate because there's currently no way to patch kthreads
+	 * with the consistency model.
 	 */
-	if (!try_module_get(patch->mod))
-		return -ENODEV;
-
-	ret = klp_init_patch(patch);
-	if (ret)
-		module_put(patch->mod);
+	if (!klp_have_reliable_stack() && !patch->immediate) {
+		pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
+		return -ENOSYS;
+	}
 
-	return ret;
+	return klp_init_patch(patch);
 }
 EXPORT_SYMBOL_GPL(klp_register_patch);
 
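
Note: with the module reference now taken in __klp_enable_patch() rather than at registration, a patch module's init keeps the familiar register-then-enable shape. A sketch in the spirit of samples/livepatch/livepatch-sample.c; the klp_func/klp_object contents are elided:

    static struct klp_func funcs[] = {
            { /* .old_name and .new_func for each patched function */ },
            { }
    };

    static struct klp_object objs[] = {
            { /* .name = NULL patches vmlinux */ .funcs = funcs, },
            { }
    };

    static struct klp_patch patch = {
            .mod = THIS_MODULE,
            .objs = objs,
    };

    static int livepatch_init(void)
    {
            int ret;

            ret = klp_register_patch(&patch);
            if (ret)
                    return ret;

            ret = klp_enable_patch(&patch);
            if (ret) {
                    WARN_ON(klp_unregister_patch(&patch));
                    return ret;
            }

            return 0;
    }

    static void livepatch_exit(void)
    {
            /* fails with -EBUSY until the patch is disabled via sysfs */
            WARN_ON(klp_unregister_patch(&patch));
    }

    module_init(livepatch_init);
    module_exit(livepatch_exit);
    MODULE_LICENSE("GPL");
    MODULE_INFO(livepatch, "Y");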
@@ -978,13 +861,17 @@ int klp_module_coming(struct module *mod)
 				goto err;
 			}
 
-			if (patch->state == KLP_DISABLED)
+			/*
+			 * Only patch the module if the patch is enabled or is
+			 * in transition.
+			 */
+			if (!patch->enabled && patch != klp_transition_patch)
 				break;
 
 			pr_notice("applying patch '%s' to loading module '%s'\n",
 				  patch->mod->name, obj->mod->name);
 
-			ret = klp_enable_object(obj);
+			ret = klp_patch_object(obj);
 			if (ret) {
 				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
 					patch->mod->name, obj->mod->name, ret);
@@ -1035,10 +922,14 @@ void klp_module_going(struct module *mod)
 			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
 				continue;
 
-			if (patch->state != KLP_DISABLED) {
+			/*
+			 * Only unpatch the module if the patch is enabled or
+			 * is in transition.
+			 */
+			if (patch->enabled || patch == klp_transition_patch) {
 				pr_notice("reverting patch '%s' on unloading module '%s'\n",
 					  patch->mod->name, obj->mod->name);
-				klp_disable_object(obj);
+				klp_unpatch_object(obj);
 			}
 
 			klp_free_object_loaded(obj);