aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/livepatch
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-11-15 13:21:58 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-11-15 13:21:58 -0500
commit0ef76878cfcf4d6b64972b283021f576a95d9216 (patch)
tree387f5bf5bff34738550686c0306e97528dc69711 /kernel/livepatch
parent9682b3dea22190a6fd449d157e3175b0e748684d (diff)
parentfc41efc1843009ebcdb4850b21f1c371ad203f4e (diff)
Merge branch 'for-linus' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching
Pull livepatching updates from Jiri Kosina:

 - shadow variables support, allowing livepatches to associate new
   "shadow" fields to existing data structures, from Joe Lawrence

 - pre/post patch callbacks API, allowing livepatch writers to register
   callbacks to be called before and after patch application, from Joe
   Lawrence

* 'for-linus' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
  livepatch: __klp_disable_patch() should never be called for disabled patches
  livepatch: Correctly call klp_post_unpatch_callback() in error paths
  livepatch: add transition notices
  livepatch: move transition "complete" notice into klp_complete_transition()
  livepatch: add (un)patch callbacks
  livepatch: Small shadow variable documentation fixes
  livepatch: __klp_shadow_get_or_alloc() is local to shadow.c
  livepatch: introduce shadow variable API
Diffstat (limited to 'kernel/livepatch')
-rw-r--r--kernel/livepatch/Makefile2
-rw-r--r--kernel/livepatch/core.c52
-rw-r--r--kernel/livepatch/core.h40
-rw-r--r--kernel/livepatch/patch.c1
-rw-r--r--kernel/livepatch/shadow.c277
-rw-r--r--kernel/livepatch/transition.c45
6 files changed, 399 insertions, 18 deletions
diff --git a/kernel/livepatch/Makefile b/kernel/livepatch/Makefile
index 2b8bdb1925da..b36ceda6488e 100644
--- a/kernel/livepatch/Makefile
+++ b/kernel/livepatch/Makefile
@@ -1,3 +1,3 @@
1obj-$(CONFIG_LIVEPATCH) += livepatch.o 1obj-$(CONFIG_LIVEPATCH) += livepatch.o
2 2
3livepatch-objs := core.o patch.o transition.o 3livepatch-objs := core.o patch.o shadow.o transition.o
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index bf8c8fd72589..de9e45dca70f 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -54,11 +54,6 @@ static bool klp_is_module(struct klp_object *obj)
54 return obj->name; 54 return obj->name;
55} 55}
56 56
57static bool klp_is_object_loaded(struct klp_object *obj)
58{
59 return !obj->name || obj->mod;
60}
61
62/* sets obj->mod if object is not vmlinux and module is found */ 57/* sets obj->mod if object is not vmlinux and module is found */
63static void klp_find_object_module(struct klp_object *obj) 58static void klp_find_object_module(struct klp_object *obj)
64{ 59{
@@ -285,6 +280,11 @@ static int klp_write_object_relocations(struct module *pmod,
285 280
286static int __klp_disable_patch(struct klp_patch *patch) 281static int __klp_disable_patch(struct klp_patch *patch)
287{ 282{
283 struct klp_object *obj;
284
285 if (WARN_ON(!patch->enabled))
286 return -EINVAL;
287
288 if (klp_transition_patch) 288 if (klp_transition_patch)
289 return -EBUSY; 289 return -EBUSY;
290 290
@@ -295,6 +295,10 @@ static int __klp_disable_patch(struct klp_patch *patch)
295 295
296 klp_init_transition(patch, KLP_UNPATCHED); 296 klp_init_transition(patch, KLP_UNPATCHED);
297 297
298 klp_for_each_object(patch, obj)
299 if (obj->patched)
300 klp_pre_unpatch_callback(obj);
301
298 /* 302 /*
299 * Enforce the order of the func->transition writes in 303 * Enforce the order of the func->transition writes in
300 * klp_init_transition() and the TIF_PATCH_PENDING writes in 304 * klp_init_transition() and the TIF_PATCH_PENDING writes in
@@ -388,13 +392,18 @@ static int __klp_enable_patch(struct klp_patch *patch)
388 if (!klp_is_object_loaded(obj)) 392 if (!klp_is_object_loaded(obj))
389 continue; 393 continue;
390 394
391 ret = klp_patch_object(obj); 395 ret = klp_pre_patch_callback(obj);
392 if (ret) { 396 if (ret) {
393 pr_warn("failed to enable patch '%s'\n", 397 pr_warn("pre-patch callback failed for object '%s'\n",
394 patch->mod->name); 398 klp_is_module(obj) ? obj->name : "vmlinux");
399 goto err;
400 }
395 401
396 klp_cancel_transition(); 402 ret = klp_patch_object(obj);
397 return ret; 403 if (ret) {
404 pr_warn("failed to patch object '%s'\n",
405 klp_is_module(obj) ? obj->name : "vmlinux");
406 goto err;
398 } 407 }
399 } 408 }
400 409
@@ -403,6 +412,11 @@ static int __klp_enable_patch(struct klp_patch *patch)
403 patch->enabled = true; 412 patch->enabled = true;
404 413
405 return 0; 414 return 0;
415err:
416 pr_warn("failed to enable patch '%s'\n", patch->mod->name);
417
418 klp_cancel_transition();
419 return ret;
406} 420}
407 421
408/** 422/**
@@ -854,9 +868,15 @@ static void klp_cleanup_module_patches_limited(struct module *mod,
854 * is in transition. 868 * is in transition.
855 */ 869 */
856 if (patch->enabled || patch == klp_transition_patch) { 870 if (patch->enabled || patch == klp_transition_patch) {
871
872 if (patch != klp_transition_patch)
873 klp_pre_unpatch_callback(obj);
874
857 pr_notice("reverting patch '%s' on unloading module '%s'\n", 875 pr_notice("reverting patch '%s' on unloading module '%s'\n",
858 patch->mod->name, obj->mod->name); 876 patch->mod->name, obj->mod->name);
859 klp_unpatch_object(obj); 877 klp_unpatch_object(obj);
878
879 klp_post_unpatch_callback(obj);
860 } 880 }
861 881
862 klp_free_object_loaded(obj); 882 klp_free_object_loaded(obj);
@@ -906,13 +926,25 @@ int klp_module_coming(struct module *mod)
906 pr_notice("applying patch '%s' to loading module '%s'\n", 926 pr_notice("applying patch '%s' to loading module '%s'\n",
907 patch->mod->name, obj->mod->name); 927 patch->mod->name, obj->mod->name);
908 928
929 ret = klp_pre_patch_callback(obj);
930 if (ret) {
931 pr_warn("pre-patch callback failed for object '%s'\n",
932 obj->name);
933 goto err;
934 }
935
909 ret = klp_patch_object(obj); 936 ret = klp_patch_object(obj);
910 if (ret) { 937 if (ret) {
911 pr_warn("failed to apply patch '%s' to module '%s' (%d)\n", 938 pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
912 patch->mod->name, obj->mod->name, ret); 939 patch->mod->name, obj->mod->name, ret);
940
941 klp_post_unpatch_callback(obj);
913 goto err; 942 goto err;
914 } 943 }
915 944
945 if (patch != klp_transition_patch)
946 klp_post_patch_callback(obj);
947
916 break; 948 break;
917 } 949 }
918 } 950 }
diff --git a/kernel/livepatch/core.h b/kernel/livepatch/core.h
index a351601d7f76..48a83d4364cf 100644
--- a/kernel/livepatch/core.h
+++ b/kernel/livepatch/core.h
@@ -2,6 +2,46 @@
2#ifndef _LIVEPATCH_CORE_H 2#ifndef _LIVEPATCH_CORE_H
3#define _LIVEPATCH_CORE_H 3#define _LIVEPATCH_CORE_H
4 4
5#include <linux/livepatch.h>
6
5extern struct mutex klp_mutex; 7extern struct mutex klp_mutex;
6 8
9static inline bool klp_is_object_loaded(struct klp_object *obj)
10{
11 return !obj->name || obj->mod;
12}
13
14static inline int klp_pre_patch_callback(struct klp_object *obj)
15{
16 int ret = 0;
17
18 if (obj->callbacks.pre_patch)
19 ret = (*obj->callbacks.pre_patch)(obj);
20
21 obj->callbacks.post_unpatch_enabled = !ret;
22
23 return ret;
24}
25
26static inline void klp_post_patch_callback(struct klp_object *obj)
27{
28 if (obj->callbacks.post_patch)
29 (*obj->callbacks.post_patch)(obj);
30}
31
32static inline void klp_pre_unpatch_callback(struct klp_object *obj)
33{
34 if (obj->callbacks.pre_unpatch)
35 (*obj->callbacks.pre_unpatch)(obj);
36}
37
38static inline void klp_post_unpatch_callback(struct klp_object *obj)
39{
40 if (obj->callbacks.post_unpatch_enabled &&
41 obj->callbacks.post_unpatch)
42 (*obj->callbacks.post_unpatch)(obj);
43
44 obj->callbacks.post_unpatch_enabled = false;
45}
46
7#endif /* _LIVEPATCH_CORE_H */ 47#endif /* _LIVEPATCH_CORE_H */
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 52c4e907c14b..82d584225dc6 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -28,6 +28,7 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/bug.h> 29#include <linux/bug.h>
30#include <linux/printk.h> 30#include <linux/printk.h>
31#include "core.h"
31#include "patch.h" 32#include "patch.h"
32#include "transition.h" 33#include "transition.h"
33 34
diff --git a/kernel/livepatch/shadow.c b/kernel/livepatch/shadow.c
new file mode 100644
index 000000000000..fdac27588d60
--- /dev/null
+++ b/kernel/livepatch/shadow.c
@@ -0,0 +1,277 @@
1/*
2 * shadow.c - Shadow Variables
3 *
4 * Copyright (C) 2014 Josh Poimboeuf <jpoimboe@redhat.com>
5 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
6 * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
20 */
21
22/**
23 * DOC: Shadow variable API concurrency notes:
24 *
25 * The shadow variable API provides a simple relationship between an
26 * <obj, id> pair and a pointer value. It is the responsibility of the
27 * caller to provide any mutual exclusion required of the shadow data.
28 *
29 * Once a shadow variable is attached to its parent object via the
30 * klp_shadow_*alloc() API calls, it is considered live: any subsequent
31 * call to klp_shadow_get() may then return the shadow variable's data
32 * pointer. Callers of klp_shadow_*alloc() should prepare shadow data
33 * accordingly.
34 *
35 * The klp_shadow_*alloc() API calls may allocate memory for new shadow
36 * variable structures. Their implementation does not call kmalloc
37 * inside any spinlocks, but API callers should pass GFP flags according
38 * to their specific needs.
39 *
40 * The klp_shadow_hash is an RCU-enabled hashtable and is safe against
41 * concurrent klp_shadow_free() and klp_shadow_get() operations.
42 */
43
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
46#include <linux/hashtable.h>
47#include <linux/slab.h>
48#include <linux/livepatch.h>
49
50static DEFINE_HASHTABLE(klp_shadow_hash, 12);
51
52/*
53 * klp_shadow_lock provides exclusive access to the klp_shadow_hash and
54 * the shadow variables it references.
55 */
56static DEFINE_SPINLOCK(klp_shadow_lock);
57
58/**
59 * struct klp_shadow - shadow variable structure
60 * @node: klp_shadow_hash hash table node
61 * @rcu_head: RCU is used to safely free this structure
62 * @obj: pointer to parent object
63 * @id: data identifier
64 * @data: data area
65 */
66struct klp_shadow {
67 struct hlist_node node;
68 struct rcu_head rcu_head;
69 void *obj;
70 unsigned long id;
71 char data[];
72};
73
74/**
75 * klp_shadow_match() - verify a shadow variable matches given <obj, id>
76 * @shadow: shadow variable to match
77 * @obj: pointer to parent object
78 * @id: data identifier
79 *
80 * Return: true if the shadow variable matches.
81 */
82static inline bool klp_shadow_match(struct klp_shadow *shadow, void *obj,
83 unsigned long id)
84{
85 return shadow->obj == obj && shadow->id == id;
86}
87
88/**
89 * klp_shadow_get() - retrieve a shadow variable data pointer
90 * @obj: pointer to parent object
91 * @id: data identifier
92 *
93 * Return: the shadow variable data element, NULL on failure.
94 */
95void *klp_shadow_get(void *obj, unsigned long id)
96{
97 struct klp_shadow *shadow;
98
99 rcu_read_lock();
100
101 hash_for_each_possible_rcu(klp_shadow_hash, shadow, node,
102 (unsigned long)obj) {
103
104 if (klp_shadow_match(shadow, obj, id)) {
105 rcu_read_unlock();
106 return shadow->data;
107 }
108 }
109
110 rcu_read_unlock();
111
112 return NULL;
113}
114EXPORT_SYMBOL_GPL(klp_shadow_get);
115
116static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
117 size_t size, gfp_t gfp_flags, bool warn_on_exist)
118{
119 struct klp_shadow *new_shadow;
120 void *shadow_data;
121 unsigned long flags;
122
123 /* Check if the shadow variable already exists */
124 shadow_data = klp_shadow_get(obj, id);
125 if (shadow_data)
126 goto exists;
127
128 /* Allocate a new shadow variable for use inside the lock below */
129 new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags);
130 if (!new_shadow)
131 return NULL;
132
133 new_shadow->obj = obj;
134 new_shadow->id = id;
135
136 /* Initialize the shadow variable if data provided */
137 if (data)
138 memcpy(new_shadow->data, data, size);
139
140 /* Look for <obj, id> again under the lock */
141 spin_lock_irqsave(&klp_shadow_lock, flags);
142 shadow_data = klp_shadow_get(obj, id);
143 if (unlikely(shadow_data)) {
144 /*
145 * Shadow variable was found, throw away speculative
146 * allocation.
147 */
148 spin_unlock_irqrestore(&klp_shadow_lock, flags);
149 kfree(new_shadow);
150 goto exists;
151 }
152
153 /* No <obj, id> found, so attach the newly allocated one */
154 hash_add_rcu(klp_shadow_hash, &new_shadow->node,
155 (unsigned long)new_shadow->obj);
156 spin_unlock_irqrestore(&klp_shadow_lock, flags);
157
158 return new_shadow->data;
159
160exists:
161 if (warn_on_exist) {
162 WARN(1, "Duplicate shadow variable <%p, %lx>\n", obj, id);
163 return NULL;
164 }
165
166 return shadow_data;
167}
168
169/**
170 * klp_shadow_alloc() - allocate and add a new shadow variable
171 * @obj: pointer to parent object
172 * @id: data identifier
173 * @data: pointer to data to attach to parent
174 * @size: size of attached data
175 * @gfp_flags: GFP mask for allocation
176 *
177 * Allocates @size bytes for new shadow variable data using @gfp_flags
178 * and copies @size bytes from @data into the new shadow variable's own
179 * data space. If @data is NULL, @size bytes are still allocated, but
180 * no copy is performed. The new shadow variable is then added to the
181 * global hashtable.
182 *
183 * If an existing <obj, id> shadow variable can be found, this routine
184 * will issue a WARN, exit early and return NULL.
185 *
186 * Return: the shadow variable data element, NULL on duplicate or
187 * failure.
188 */
189void *klp_shadow_alloc(void *obj, unsigned long id, void *data,
190 size_t size, gfp_t gfp_flags)
191{
192 return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, true);
193}
194EXPORT_SYMBOL_GPL(klp_shadow_alloc);
195
196/**
197 * klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable
198 * @obj: pointer to parent object
199 * @id: data identifier
200 * @data: pointer to data to attach to parent
201 * @size: size of attached data
202 * @gfp_flags: GFP mask for allocation
203 *
204 * Returns a pointer to existing shadow data if an <obj, id> shadow
205 * variable is already present. Otherwise, it creates a new shadow
206 * variable like klp_shadow_alloc().
207 *
208 * This function guarantees that only one shadow variable exists with
209 * the given @id for the given @obj. It also guarantees that the shadow
210 * variable will be initialized by the given @data only when it did not
211 * exist before.
212 *
213 * Return: the shadow variable data element, NULL on failure.
214 */
215void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
216 size_t size, gfp_t gfp_flags)
217{
218 return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, false);
219}
220EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc);
221
222/**
223 * klp_shadow_free() - detach and free a <obj, id> shadow variable
224 * @obj: pointer to parent object
225 * @id: data identifier
226 *
227 * This function releases the memory for this <obj, id> shadow variable
228 * instance, callers should stop referencing it accordingly.
229 */
230void klp_shadow_free(void *obj, unsigned long id)
231{
232 struct klp_shadow *shadow;
233 unsigned long flags;
234
235 spin_lock_irqsave(&klp_shadow_lock, flags);
236
237 /* Delete <obj, id> from hash */
238 hash_for_each_possible(klp_shadow_hash, shadow, node,
239 (unsigned long)obj) {
240
241 if (klp_shadow_match(shadow, obj, id)) {
242 hash_del_rcu(&shadow->node);
243 kfree_rcu(shadow, rcu_head);
244 break;
245 }
246 }
247
248 spin_unlock_irqrestore(&klp_shadow_lock, flags);
249}
250EXPORT_SYMBOL_GPL(klp_shadow_free);
251
252/**
253 * klp_shadow_free_all() - detach and free all <*, id> shadow variables
254 * @id: data identifier
255 *
256 * This function releases the memory for all <*, id> shadow variable
257 * instances, callers should stop referencing them accordingly.
258 */
259void klp_shadow_free_all(unsigned long id)
260{
261 struct klp_shadow *shadow;
262 unsigned long flags;
263 int i;
264
265 spin_lock_irqsave(&klp_shadow_lock, flags);
266
267 /* Delete all <*, id> from hash */
268 hash_for_each(klp_shadow_hash, i, shadow, node) {
269 if (klp_shadow_match(shadow, shadow->obj, id)) {
270 hash_del_rcu(&shadow->node);
271 kfree_rcu(shadow, rcu_head);
272 }
273 }
274
275 spin_unlock_irqrestore(&klp_shadow_lock, flags);
276}
277EXPORT_SYMBOL_GPL(klp_shadow_free_all);
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index b004a1fb6032..56add6327736 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -82,6 +82,10 @@ static void klp_complete_transition(void)
82 unsigned int cpu; 82 unsigned int cpu;
83 bool immediate_func = false; 83 bool immediate_func = false;
84 84
85 pr_debug("'%s': completing %s transition\n",
86 klp_transition_patch->mod->name,
87 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
88
85 if (klp_target_state == KLP_UNPATCHED) { 89 if (klp_target_state == KLP_UNPATCHED) {
86 /* 90 /*
87 * All tasks have transitioned to KLP_UNPATCHED so we can now 91 * All tasks have transitioned to KLP_UNPATCHED so we can now
@@ -109,9 +113,6 @@ static void klp_complete_transition(void)
109 } 113 }
110 } 114 }
111 115
112 if (klp_target_state == KLP_UNPATCHED && !immediate_func)
113 module_put(klp_transition_patch->mod);
114
115 /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ 116 /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
116 if (klp_target_state == KLP_PATCHED) 117 if (klp_target_state == KLP_PATCHED)
117 klp_synchronize_transition(); 118 klp_synchronize_transition();
@@ -130,6 +131,27 @@ static void klp_complete_transition(void)
130 } 131 }
131 132
132done: 133done:
134 klp_for_each_object(klp_transition_patch, obj) {
135 if (!klp_is_object_loaded(obj))
136 continue;
137 if (klp_target_state == KLP_PATCHED)
138 klp_post_patch_callback(obj);
139 else if (klp_target_state == KLP_UNPATCHED)
140 klp_post_unpatch_callback(obj);
141 }
142
143 pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
144 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
145
146 /*
147 * See complementary comment in __klp_enable_patch() for why we
148 * keep the module reference for immediate patches.
149 */
150 if (!klp_transition_patch->immediate && !immediate_func &&
151 klp_target_state == KLP_UNPATCHED) {
152 module_put(klp_transition_patch->mod);
153 }
154
133 klp_target_state = KLP_UNDEFINED; 155 klp_target_state = KLP_UNDEFINED;
134 klp_transition_patch = NULL; 156 klp_transition_patch = NULL;
135} 157}
@@ -145,6 +167,9 @@ void klp_cancel_transition(void)
145 if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED)) 167 if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
146 return; 168 return;
147 169
170 pr_debug("'%s': canceling patching transition, going to unpatch\n",
171 klp_transition_patch->mod->name);
172
148 klp_target_state = KLP_UNPATCHED; 173 klp_target_state = KLP_UNPATCHED;
149 klp_complete_transition(); 174 klp_complete_transition();
150} 175}
@@ -408,9 +433,6 @@ void klp_try_complete_transition(void)
408 } 433 }
409 434
410success: 435success:
411 pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
412 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
413
414 /* we're done, now cleanup the data structures */ 436 /* we're done, now cleanup the data structures */
415 klp_complete_transition(); 437 klp_complete_transition();
416} 438}
@@ -426,7 +448,8 @@ void klp_start_transition(void)
426 448
427 WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED); 449 WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
428 450
429 pr_notice("'%s': %s...\n", klp_transition_patch->mod->name, 451 pr_notice("'%s': starting %s transition\n",
452 klp_transition_patch->mod->name,
430 klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); 453 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
431 454
432 /* 455 /*
@@ -482,6 +505,9 @@ void klp_init_transition(struct klp_patch *patch, int state)
482 */ 505 */
483 klp_target_state = state; 506 klp_target_state = state;
484 507
508 pr_debug("'%s': initializing %s transition\n", patch->mod->name,
509 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
510
485 /* 511 /*
486 * If the patch can be applied or reverted immediately, skip the 512 * If the patch can be applied or reverted immediately, skip the
487 * per-task transitions. 513 * per-task transitions.
@@ -547,6 +573,11 @@ void klp_reverse_transition(void)
547 unsigned int cpu; 573 unsigned int cpu;
548 struct task_struct *g, *task; 574 struct task_struct *g, *task;
549 575
576 pr_debug("'%s': reversing transition from %s\n",
577 klp_transition_patch->mod->name,
578 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
579 "unpatching to patching");
580
550 klp_transition_patch->enabled = !klp_transition_patch->enabled; 581 klp_transition_patch->enabled = !klp_transition_patch->enabled;
551 582
552 klp_target_state = !klp_target_state; 583 klp_target_state = !klp_target_state;