Diffstat (limited to 'kernel/livepatch/patch.c')
-rw-r--r--   kernel/livepatch/patch.c | 59 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 59 insertions(+), 0 deletions(-)
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 5efa2620851a..f8269036bf0b 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -29,6 +29,7 @@
 #include <linux/bug.h>
 #include <linux/printk.h>
 #include "patch.h"
+#include "transition.h"
 
 static LIST_HEAD(klp_ops);
 
@@ -54,15 +55,64 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 {
 	struct klp_ops *ops;
 	struct klp_func *func;
+	int patch_state;
 
 	ops = container_of(fops, struct klp_ops, fops);
 
 	rcu_read_lock();
+
 	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
 				      stack_node);
+
+	/*
+	 * func should never be NULL because preemption should be disabled here
+	 * and unregister_ftrace_function() does the equivalent of a
+	 * synchronize_sched() before the func_stack removal.
+	 */
 	if (WARN_ON_ONCE(!func))
 		goto unlock;
 
+	/*
+	 * In the enable path, enforce the order of the ops->func_stack and
+	 * func->transition reads. The corresponding write barrier is in
+	 * __klp_enable_patch().
+	 *
+	 * (Note that this barrier technically isn't needed in the disable
+	 * path. In the rare case where klp_update_patch_state() runs before
+	 * this handler, its TIF_PATCH_PENDING read and this func->transition
+	 * read need to be ordered. But klp_update_patch_state() already
+	 * enforces that.)
+	 */
+	smp_rmb();
+
+	if (unlikely(func->transition)) {
+
+		/*
+		 * Enforce the order of the func->transition and
+		 * current->patch_state reads. Otherwise we could read an
+		 * out-of-date task state and pick the wrong function. The
+		 * corresponding write barrier is in klp_init_transition().
+		 */
+		smp_rmb();
+
+		patch_state = current->patch_state;
+
+		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);
+
+		if (patch_state == KLP_UNPATCHED) {
+			/*
+			 * Use the previously patched version of the function.
+			 * If no previous patches exist, continue with the
+			 * original function.
+			 */
+			func = list_entry_rcu(func->stack_node.next,
+					      struct klp_func, stack_node);
+
+			if (&func->stack_node == &ops->func_stack)
+				goto unlock;
+		}
+	}
+
 	klp_arch_set_pc(regs, (unsigned long)func->new_func);
 unlock:
 	rcu_read_unlock();
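
A note on the barrier pairing in the hunk above: the first smp_rmb() pairs with a write barrier on the enable path, so any func observed on ops->func_stack is guaranteed to be seen with its transition flag already set; the second pairs with klp_init_transition() so the task's patch_state is not read stale. The same publish/observe discipline can be sketched in portable user-space C, with C11 fences standing in for smp_wmb()/smp_rmb() (a minimal sketch under that analogy; the two-thread setup and all names are illustrative, not the kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct func {
	int transition;			/* stand-in for func->transition */
};

static struct func f;
static _Atomic(struct func *) stack_top;	/* stand-in for ops->func_stack */

/* enable path: set the transition flag, then publish the func */
static void *enable_patch(void *arg)
{
	f.transition = 1;
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&stack_top, &f, memory_order_relaxed);
	return NULL;
}

/* handler path: observe the func, then read its transition flag */
static void *ftrace_handler(void *arg)
{
	struct func *func = atomic_load_explicit(&stack_top,
						 memory_order_relaxed);
	if (!func)
		return NULL;			/* nothing published yet */
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	if (func->transition)			/* guaranteed to be seen as 1 */
		printf("transition in progress: consult the task's patch state\n");
	return NULL;
}

int main(void)
{
	pthread_t w, r;
	pthread_create(&w, NULL, enable_patch, NULL);
	pthread_create(&r, NULL, ftrace_handler, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

On the kernel side the reader additionally runs under rcu_read_lock(), which this sketch does not model.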
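A note on the KLP_UNPATCHED branch: ops->func_stack holds the patched versions of one function, newest first, so stepping one node back from the current func lands either on an older patch or on the list head, which means "fall through to the original function". A stand-alone sketch of that walk, using a simplified singly linked list and a hand-rolled container_of to mimic the kernel idiom (illustrative only):

#include <stddef.h>
#include <stdio.h>

struct list_node { struct list_node *next; };	/* simplified list_head */

struct klp_func {
	const char *patch_name;
	struct list_node stack_node;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct list_node func_stack = { &func_stack };	/* empty: head points at itself */
	struct klp_func v1 = { .patch_name = "patch-v1" };
	struct klp_func v2 = { .patch_name = "patch-v2" };

	/* push v1, then v2: newest first, like klp_ops->func_stack */
	v1.stack_node.next = func_stack.next;
	func_stack.next = &v1.stack_node;
	v2.stack_node.next = func_stack.next;
	func_stack.next = &v2.stack_node;

	/* the handler normally runs the newest func on the stack */
	struct klp_func *func = container_of(func_stack.next,
					     struct klp_func, stack_node);
	printf("running %s\n", func->patch_name);

	/* KLP_UNPATCHED while v2 is in transition: step back one entry */
	func = container_of(func->stack_node.next, struct klp_func, stack_node);
	if (&func->stack_node == &func_stack)
		printf("no previous patch, run the original function\n");
	else
		printf("falling back to %s\n", func->patch_name);
	return 0;
}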
@@ -211,3 +261,12 @@ int klp_patch_object(struct klp_object *obj)
 
 	return 0;
 }
+
+void klp_unpatch_objects(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+
+	klp_for_each_object(patch, obj)
+		if (obj->patched)
+			klp_unpatch_object(obj);
+}
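
The new klp_unpatch_objects() is the bulk counterpart of klp_patch_object(): a single pass over the patch's objects that reverts only the ones currently patched (the wider series calls it from the transition code when a transition completes in the unpatched state). A user-space mock of that walk, with stand-in types rather than the kernel structures:

#include <stdbool.h>
#include <stdio.h>

struct klp_object { const char *name; bool patched; };
struct klp_patch { struct klp_object objs[3]; int nr; };

static void klp_unpatch_object(struct klp_object *obj)
{
	obj->patched = false;
	printf("unpatched %s\n", obj->name);
}

static void klp_unpatch_objects(struct klp_patch *patch)
{
	/* revert only objects that were actually patched */
	for (int i = 0; i < patch->nr; i++)
		if (patch->objs[i].patched)
			klp_unpatch_object(&patch->objs[i]);
}

int main(void)
{
	struct klp_patch p = {
		.objs = { { "vmlinux", true }, { "ext4", false }, { "nfs", true } },
		.nr = 3,
	};
	klp_unpatch_objects(&p);	/* skips ext4, which was never patched */
	return 0;
}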