 Documentation/ABI/testing/sysfs-kernel-livepatch |  26
 Documentation/livepatch/livepatch.txt            | 114
 arch/powerpc/kernel/signal.c                     |   6
 arch/x86/entry/common.c                          |   6
 include/linux/livepatch.h                        |   4
 kernel/livepatch/core.c                          |  76
 kernel/livepatch/transition.c                    | 116
 kernel/livepatch/transition.h                    |   2
 kernel/signal.c                                  |   4
 samples/livepatch/livepatch-callbacks-demo.c     |  15
 samples/livepatch/livepatch-sample.c             |  15
 samples/livepatch/livepatch-shadow-fix1.c        |  15
 samples/livepatch/livepatch-shadow-fix2.c        |  15
 13 files changed, 227 insertions(+), 187 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-kernel-livepatch b/Documentation/ABI/testing/sysfs-kernel-livepatch
index d5d39748382f..dac7e1e62a8b 100644
--- a/Documentation/ABI/testing/sysfs-kernel-livepatch
+++ b/Documentation/ABI/testing/sysfs-kernel-livepatch
@@ -33,6 +33,32 @@ Description:
 		An attribute which indicates whether the patch is currently in
 		transition.
 
+What:		/sys/kernel/livepatch/<patch>/signal
+Date:		Nov 2017
+KernelVersion:	4.15.0
+Contact:	live-patching@vger.kernel.org
+Description:
+		A writable attribute that allows an administrator to affect
+		the course of an existing transition. Writing 1 sends a fake
+		signal to all remaining blocking tasks. The fake signal means
+		that no proper signal is delivered (there is no data in the
+		signal pending structures). Tasks are interrupted or woken up,
+		and forced to change their patched state.
+
+What:		/sys/kernel/livepatch/<patch>/force
+Date:		Nov 2017
+KernelVersion:	4.15.0
+Contact:	live-patching@vger.kernel.org
+Description:
+		A writable attribute that allows an administrator to affect
+		the course of an existing transition. Writing 1 clears the
+		TIF_PATCH_PENDING flag of all tasks and thus forces them to
+		the patched or unpatched state. An administrator should not
+		use this feature without clearance from the patch
+		distributor. Removal (rmmod) of patch modules is permanently
+		disabled when the feature is used. See
+		Documentation/livepatch/livepatch.txt for more information.
+
 What:		/sys/kernel/livepatch/<patch>/<object>
 Date:		Nov 2014
 KernelVersion:	3.19.0
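
For illustration, a minimal usage sketch of the two attributes documented
above (the patch name "livepatch_sample" is a hypothetical example; both
writes succeed only while the patch is in transition):

    # check whether the patch is still in transition (1 = in transition)
    $ cat /sys/kernel/livepatch/livepatch_sample/transition

    # nudge the remaining blocking tasks with the fake signal
    $ echo 1 > /sys/kernel/livepatch/livepatch_sample/signal

    # last resort: force the transition; this permanently disables rmmod
    # of the patch module, so obtain clearance from the patch distributor
    $ echo 1 > /sys/kernel/livepatch/livepatch_sample/force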
diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt
index ecdb18104ab0..1ae2de758c08 100644
--- a/Documentation/livepatch/livepatch.txt
+++ b/Documentation/livepatch/livepatch.txt
@@ -72,8 +72,7 @@ example, they add a NULL pointer or a boundary check, fix a race by adding
 a missing memory barrier, or add some locking around a critical section.
 Most of these changes are self contained and the function presents itself
 the same way to the rest of the system. In this case, the functions might
-be updated independently one by one. (This can be done by setting the
-'immediate' flag in the klp_patch struct.)
+be updated independently one by one.
 
 But there are more complex fixes. For example, a patch might change
 ordering of locking in multiple functions at the same time. Or a patch
@@ -125,12 +124,6 @@ safe to patch tasks:
    b) Patching CPU-bound user tasks. If the task is highly CPU-bound
       then it will get patched the next time it gets interrupted by an
       IRQ.
-   c) In the future it could be useful for applying patches for
-      architectures which don't yet have HAVE_RELIABLE_STACKTRACE. In
-      this case you would have to signal most of the tasks on the
-      system. However this isn't supported yet because there's
-      currently no way to patch kthreads without
-      HAVE_RELIABLE_STACKTRACE.
 
 3. For idle "swapper" tasks, since they don't ever exit the kernel, they
    instead have a klp_update_patch_state() call in the idle loop which
@@ -138,27 +131,16 @@ safe to patch tasks:
 
    (Note there's not yet such an approach for kthreads.)
 
-All the above approaches may be skipped by setting the 'immediate' flag
-in the 'klp_patch' struct, which will disable per-task consistency and
-patch all tasks immediately. This can be useful if the patch doesn't
-change any function or data semantics. Note that, even with this flag
-set, it's possible that some tasks may still be running with an old
-version of the function, until that function returns.
+Architectures which don't have HAVE_RELIABLE_STACKTRACE solely rely on
+the second approach. It's highly likely that some tasks are still
+running with an old version of the function until that function
+returns. In this case you would have to signal the tasks. This
+especially applies to kthreads: they may not be woken up and would need
+to be forced. See below for more information.
 
-There's also an 'immediate' flag in the 'klp_func' struct which allows
-you to specify that certain functions in the patch can be applied
-without per-task consistency. This might be useful if you want to patch
-a common function like schedule(), and the function change doesn't need
-consistency but the rest of the patch does.
-
-For architectures which don't have HAVE_RELIABLE_STACKTRACE, the user
-must set patch->immediate which causes all tasks to be patched
-immediately. This option should be used with care, only when the patch
-doesn't change any function or data semantics.
-
-In the future, architectures which don't have HAVE_RELIABLE_STACKTRACE
-may be allowed to use per-task consistency if we can come up with
-another way to patch kthreads.
+Unless we can come up with another way to patch kthreads, architectures
+without HAVE_RELIABLE_STACKTRACE are not considered fully supported by
+kernel livepatching.
 
 The /sys/kernel/livepatch/<patch>/transition file shows whether a patch
 is in transition. Only a single patch (the topmost patch on the stack)
@@ -176,8 +158,31 @@ If a patch is in transition, this file shows 0 to indicate the task is
 unpatched and 1 to indicate it's patched. Otherwise, if no patch is in
 transition, it shows -1. Any tasks which are blocking the transition
 can be signaled with SIGSTOP and SIGCONT to force them to change their
-patched state.
-
+patched state. This may be harmful to the system though. The
+/sys/kernel/livepatch/<patch>/signal attribute provides a better alternative.
+Writing 1 to the attribute sends a fake signal to all remaining blocking
+tasks. No proper signal is actually delivered (there is no data in signal
+pending structures). Tasks are interrupted or woken up, and forced to change
+their patched state.
+
+An administrator can also affect a transition through the
+/sys/kernel/livepatch/<patch>/force attribute. Writing 1 there clears the
+TIF_PATCH_PENDING flag of all tasks and thus forces them to the patched state.
+Important note: the force attribute is intended for cases when the transition
+gets stuck for a long time because of a blocking task. The administrator is
+expected to collect all necessary data (namely stack traces of such blocking
+tasks) and request clearance from the patch distributor before forcing the
+transition. Unauthorized use may harm the system; the risk depends on the nature
+of the patch, which functions are (un)patched, and which functions the blocking
+tasks are sleeping in (/proc/<pid>/stack may help here). Removal (rmmod) of patch
+modules is permanently disabled when the force feature is used, since it cannot be
+guaranteed that no task is sleeping in such a module; this also implies an
+unbounded module reference count if a patch module is disabled and enabled in a loop.
+
+Moreover, the use of force may also affect future applications of live patches
+and cause even more harm to the system. The administrator should first consider
+simply cancelling the transition (see above). If force is used, a reboot should
+be planned and no more live patches applied.
 
 3.1 Adding consistency model support to new architectures
 ---------------------------------------------------------
@@ -216,13 +221,6 @@ few options:
    a good backup option for those architectures which don't have
    reliable stack traces yet.
 
-   In the meantime, patches for such architectures can bypass the
-   consistency model by setting klp_patch.immediate to true. This option
-   is perfectly fine for patches which don't change the semantics of the
-   patched functions. In practice, this is usable for ~90% of security
-   fixes. Use of this option also means the patch can't be unloaded after
-   it has been disabled.
-
 
 4. Livepatch module
 ===================
@@ -278,9 +276,6 @@ into three levels:
   only for a particular object ( vmlinux or a kernel module ). Note that
   kallsyms allows for searching symbols according to the object name.
 
-  There's also an 'immediate' flag which, when set, patches the
-  function immediately, bypassing the consistency model safety checks.
-
 + struct klp_object defines an array of patched functions (struct
   klp_func) in the same object. Where the object is either vmlinux
   (NULL) or a module name.
@@ -299,9 +294,6 @@ into three levels:
   symbols are found. The only exception are symbols from objects
   (kernel modules) that have not been loaded yet.
 
-  Setting the 'immediate' flag applies the patch to all tasks
-  immediately, bypassing the consistency model safety checks.
-
   For more details on how the patch is applied on a per-task basis,
   see the "Consistency model" section.
 
@@ -316,14 +308,12 @@ section "Livepatch life-cycle" below for more details about these
 two operations.
 
 Module removal is only safe when there are no users of the underlying
-functions. The immediate consistency model is not able to detect this. The
-code just redirects the functions at the very beginning and it does not
-check if the functions are in use. In other words, it knows when the
-functions get called but it does not know when the functions return.
-Therefore it cannot be decided when the livepatch module can be safely
-removed. This is solved by a hybrid consistency model. When the system is
-transitioned to a new patch state (patched/unpatched) it is guaranteed that
-no task sleeps or runs in the old code.
+functions. This is the reason why the force feature permanently disables
+removal: forced tasks may have entered the patched functions, but we cannot
+tell whether they have returned. Therefore it cannot be decided when the
+livepatch module can be safely removed. When the system is successfully
+transitioned to a new patch state (patched/unpatched) without being
+forced, it is guaranteed that no task sleeps or runs in the old code.
 
 
 5. Livepatch life-cycle
@@ -337,19 +327,12 @@ First, the patch is applied only when all patched symbols for already
 loaded objects are found. The error handling is much easier if this
 check is done before particular functions get redirected.
 
-Second, the immediate consistency model does not guarantee that anyone is not
-sleeping in the new code after the patch is reverted. This means that the new
-code needs to stay around "forever". If the code is there, one could apply it
-again. Therefore it makes sense to separate the operations that might be done
-once and those that need to be repeated when the patch is enabled (applied)
-again.
-
-Third, it might take some time until the entire system is migrated
-when a more complex consistency model is used. The patch revert might
-block the livepatch module removal for too long. Therefore it is useful
-to revert the patch using a separate operation that might be called
-explicitly. But it does not make sense to remove all information
-until the livepatch module is really removed.
+Second, with the hybrid consistency model in use, it might take some time
+until the entire system is migrated. The patch revert might block the
+livepatch module removal for too long. Therefore it is useful to revert
+the patch using a separate operation that might be called explicitly. But
+it does not make sense to remove all information until the livepatch
+module is really removed.
 
 
 5.1. Registration
@@ -435,6 +418,9 @@ Information about the registered patches can be found under
 /sys/kernel/livepatch. The patches could be enabled and disabled
 by writing there.
 
+The /sys/kernel/livepatch/<patch>/signal and /sys/kernel/livepatch/<patch>/force
+attributes allow an administrator to affect a patching operation.
+
 See Documentation/ABI/testing/sysfs-kernel-livepatch for more details.
 
 
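
A sketch of the workflow the documentation above expects before force is
used (PID 123 stands in for a hypothetical blocking task):

    # list per-task patch states; -1 means no transition is in progress,
    # otherwise 0/1 is the task's current unpatched/patched state
    $ grep -H . /proc/[0-9]*/patch_state

    # collect the stack trace of a blocking task for the patch distributor
    $ cat /proc/123/stack

    # only with clearance, and with a reboot planned:
    $ echo 1 > /sys/kernel/livepatch/<patch>/force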
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 3d7539b90010..61db86ecd318 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -153,6 +153,9 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 	if (thread_info_flags & _TIF_UPROBE)
 		uprobe_notify_resume(regs);
 
+	if (thread_info_flags & _TIF_PATCH_PENDING)
+		klp_update_patch_state(current);
+
 	if (thread_info_flags & _TIF_SIGPENDING) {
 		BUG_ON(regs != current->thread.regs);
 		do_signal(current);
@@ -163,9 +166,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 		tracehook_notify_resume(regs);
 	}
 
-	if (thread_info_flags & _TIF_PATCH_PENDING)
-		klp_update_patch_state(current);
-
 	user_enter();
 }
 
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index d7d3cc24baf4..1e3883e45687 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -153,6 +153,9 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
 	if (cached_flags & _TIF_UPROBE)
 		uprobe_notify_resume(regs);
 
+	if (cached_flags & _TIF_PATCH_PENDING)
+		klp_update_patch_state(current);
+
 	/* deal with pending signal delivery */
 	if (cached_flags & _TIF_SIGPENDING)
 		do_signal(regs);
@@ -165,9 +168,6 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
 	if (cached_flags & _TIF_USER_RETURN_NOTIFY)
 		fire_user_return_notifiers();
 
-	if (cached_flags & _TIF_PATCH_PENDING)
-		klp_update_patch_state(current);
-
 	/* Disable IRQs and retry */
 	local_irq_disable();
 
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index fc5c1be3f6f4..4754f01c1abb 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -40,7 +40,6 @@
  * @new_func:	pointer to the patched function code
  * @old_sympos: a hint indicating which symbol position the old function
  *		can be found (optional)
- * @immediate:  patch the func immediately, bypassing safety mechanisms
  * @old_addr:	the address of the function being patched
  * @kobj:	kobject for sysfs resources
  * @stack_node:	list node for klp_ops func_stack list
@@ -76,7 +75,6 @@ struct klp_func {
 	 * in kallsyms for the given object is used.
 	 */
 	unsigned long old_sympos;
-	bool immediate;
 
 	/* internal */
 	unsigned long old_addr;
@@ -137,7 +135,6 @@ struct klp_object {
  * struct klp_patch - patch structure for live patching
  * @mod:	reference to the live patch module
  * @objs:	object entries for kernel objects to be patched
- * @immediate:  patch all funcs immediately, bypassing safety mechanisms
  * @list:	list node for global list of registered patches
  * @kobj:	kobject for sysfs resources
  * @enabled:	the patch is enabled (but operation may be incomplete)
@@ -147,7 +144,6 @@ struct klp_patch {
 	/* external */
 	struct module *mod;
 	struct klp_object *objs;
-	bool immediate;
 
 	/* internal */
 	struct list_head list;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index de9e45dca70f..3a4656fb7047 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -366,11 +366,6 @@ static int __klp_enable_patch(struct klp_patch *patch)
 	/*
 	 * A reference is taken on the patch module to prevent it from being
 	 * unloaded.
-	 *
-	 * Note: For immediate (no consistency model) patches we don't allow
-	 * patch modules to unload since there is no safe/sane method to
-	 * determine if a thread is still running in the patched code contained
-	 * in the patch module once the ftrace registration is successful.
 	 */
 	if (!try_module_get(patch->mod))
 		return -ENODEV;
@@ -454,6 +449,8 @@ EXPORT_SYMBOL_GPL(klp_enable_patch);
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
+ * /sys/kernel/livepatch/<patch>/signal
+ * /sys/kernel/livepatch/<patch>/force
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
@@ -528,11 +525,73 @@ static ssize_t transition_show(struct kobject *kobj,
 			patch == klp_transition_patch);
 }
 
+static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct klp_patch *patch;
+	int ret;
+	bool val;
+
+	ret = kstrtobool(buf, &val);
+	if (ret)
+		return ret;
+
+	if (!val)
+		return count;
+
+	mutex_lock(&klp_mutex);
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+	if (patch != klp_transition_patch) {
+		mutex_unlock(&klp_mutex);
+		return -EINVAL;
+	}
+
+	klp_send_signals();
+
+	mutex_unlock(&klp_mutex);
+
+	return count;
+}
+
+static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct klp_patch *patch;
+	int ret;
+	bool val;
+
+	ret = kstrtobool(buf, &val);
+	if (ret)
+		return ret;
+
+	if (!val)
+		return count;
+
+	mutex_lock(&klp_mutex);
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+	if (patch != klp_transition_patch) {
+		mutex_unlock(&klp_mutex);
+		return -EINVAL;
+	}
+
+	klp_force_transition();
+
+	mutex_unlock(&klp_mutex);
+
+	return count;
+}
+
 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
 static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
+static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
+static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
 static struct attribute *klp_patch_attrs[] = {
 	&enabled_kobj_attr.attr,
 	&transition_kobj_attr.attr,
+	&signal_kobj_attr.attr,
+	&force_kobj_attr.attr,
 	NULL
 };
 
@@ -830,12 +889,7 @@ int klp_register_patch(struct klp_patch *patch)
 	if (!klp_initialized())
 		return -ENODEV;
 
-	/*
-	 * Architectures without reliable stack traces have to set
-	 * patch->immediate because there's currently no way to patch kthreads
-	 * with the consistency model.
-	 */
-	if (!klp_have_reliable_stack() && !patch->immediate) {
+	if (!klp_have_reliable_stack()) {
 		pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
 		return -ENOSYS;
 	}
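
Note the klp_transition_patch check in both new store callbacks: a write of 1
is honored only while the patch is the one in transition, and a write of 0 is
accepted but ignored. From user space the behavior looks roughly like this
(a sketch with a hypothetical patch name):

    $ echo 1 > /sys/kernel/livepatch/livepatch_sample/signal
    -bash: echo: write error: Invalid argument    # -EINVAL, not in transition

    $ echo 0 > /sys/kernel/livepatch/livepatch_sample/force
    $                                # returns success without forcing anything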
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 56add6327736..7c6631e693bc 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -33,6 +33,8 @@ struct klp_patch *klp_transition_patch;
 
 static int klp_target_state = KLP_UNDEFINED;
 
+static bool klp_forced = false;
+
 /*
  * This work can be performed periodically to finish patching or unpatching any
  * "straggler" tasks which failed to transition in the first attempt.
@@ -80,7 +82,6 @@ static void klp_complete_transition(void)
 	struct klp_func *func;
 	struct task_struct *g, *task;
 	unsigned int cpu;
-	bool immediate_func = false;
 
 	pr_debug("'%s': completing %s transition\n",
 		 klp_transition_patch->mod->name,
@@ -102,16 +103,9 @@ static void klp_complete_transition(void)
 		klp_synchronize_transition();
 	}
 
-	if (klp_transition_patch->immediate)
-		goto done;
-
-	klp_for_each_object(klp_transition_patch, obj) {
-		klp_for_each_func(obj, func) {
+	klp_for_each_object(klp_transition_patch, obj)
+		klp_for_each_func(obj, func)
 			func->transition = false;
-			if (func->immediate)
-				immediate_func = true;
-		}
-	}
 
 	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
 	if (klp_target_state == KLP_PATCHED)
@@ -130,7 +124,6 @@ static void klp_complete_transition(void)
 		task->patch_state = KLP_UNDEFINED;
 	}
 
-done:
 	klp_for_each_object(klp_transition_patch, obj) {
 		if (!klp_is_object_loaded(obj))
 			continue;
@@ -144,13 +137,11 @@ done:
 		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
 	/*
-	 * See complementary comment in __klp_enable_patch() for why we
-	 * keep the module reference for immediate patches.
+	 * klp_forced set implies unbounded increase of module's ref count if
+	 * the module is disabled/enabled in a loop.
 	 */
-	if (!klp_transition_patch->immediate && !immediate_func &&
-	    klp_target_state == KLP_UNPATCHED) {
+	if (!klp_forced && klp_target_state == KLP_UNPATCHED)
 		module_put(klp_transition_patch->mod);
-	}
 
 	klp_target_state = KLP_UNDEFINED;
 	klp_transition_patch = NULL;
@@ -218,9 +209,6 @@ static int klp_check_stack_func(struct klp_func *func,
 	struct klp_ops *ops;
 	int i;
 
-	if (func->immediate)
-		return 0;
-
 	for (i = 0; i < trace->nr_entries; i++) {
 		address = trace->entries[i];
 
@@ -383,13 +371,6 @@ void klp_try_complete_transition(void)
 	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
 
 	/*
-	 * If the patch can be applied or reverted immediately, skip the
-	 * per-task transitions.
-	 */
-	if (klp_transition_patch->immediate)
-		goto success;
-
-	/*
 	 * Try to switch the tasks to the target patch state by walking their
 	 * stacks and looking for any to-be-patched or to-be-unpatched
 	 * functions. If such functions are found on a stack, or if the stack
@@ -432,7 +413,6 @@ void klp_try_complete_transition(void)
 		return;
 	}
 
-success:
 	/* we're done, now cleanup the data structures */
 	klp_complete_transition();
 }
@@ -453,13 +433,6 @@ void klp_start_transition(void)
 		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
 	/*
-	 * If the patch can be applied or reverted immediately, skip the
-	 * per-task transitions.
-	 */
-	if (klp_transition_patch->immediate)
-		return;
-
-	/*
 	 * Mark all normal tasks as needing a patch state update. They'll
 	 * switch either in klp_try_complete_transition() or as they exit the
 	 * kernel.
@@ -509,13 +482,6 @@ void klp_init_transition(struct klp_patch *patch, int state)
 		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
 	/*
-	 * If the patch can be applied or reverted immediately, skip the
-	 * per-task transitions.
-	 */
-	if (patch->immediate)
-		return;
-
-	/*
 	 * Initialize all tasks to the initial patch state to prepare them for
 	 * switching to the target state.
 	 */
@@ -608,3 +574,71 @@ void klp_copy_process(struct task_struct *child)
 
 	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
 }
+
+/*
+ * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
+ * Kthreads with TIF_PATCH_PENDING set are woken up. Only admin can request
+ * this action currently.
+ */
+void klp_send_signals(void)
+{
+	struct task_struct *g, *task;
+
+	pr_notice("signaling remaining tasks\n");
+
+	read_lock(&tasklist_lock);
+	for_each_process_thread(g, task) {
+		if (!klp_patch_pending(task))
+			continue;
+
+		/*
+		 * There is a small race here. We could see TIF_PATCH_PENDING
+		 * set and decide to wake up a kthread or send a fake signal.
+		 * Meanwhile the task could migrate itself and the action
+		 * would be meaningless. It is not serious though.
+		 */
+		if (task->flags & PF_KTHREAD) {
+			/*
+			 * Wake up a kthread which sleeps interruptibly and
+			 * still has not been migrated.
+			 */
+			wake_up_state(task, TASK_INTERRUPTIBLE);
+		} else {
+			/*
+			 * Send fake signal to all non-kthread tasks which are
+			 * still not migrated.
+			 */
+			spin_lock_irq(&task->sighand->siglock);
+			signal_wake_up(task, 0);
+			spin_unlock_irq(&task->sighand->siglock);
+		}
+	}
+	read_unlock(&tasklist_lock);
+}
+
+/*
+ * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
+ * existing transition to finish.
+ *
+ * NOTE: klp_update_patch_state(task) requires the task to be inactive or
+ * 'current'. This is not the case here and the consistency model could be
+ * broken. The administrator, who is the only one able to execute
+ * klp_force_transition(), has to be aware of this.
+ */
+void klp_force_transition(void)
+{
+	struct task_struct *g, *task;
+	unsigned int cpu;
+
+	pr_warn("forcing remaining tasks to the patched state\n");
+
+	read_lock(&tasklist_lock);
+	for_each_process_thread(g, task)
+		klp_update_patch_state(task);
+	read_unlock(&tasklist_lock);
+
+	for_each_possible_cpu(cpu)
+		klp_update_patch_state(idle_task(cpu));
+
+	klp_forced = true;
+}
diff --git a/kernel/livepatch/transition.h b/kernel/livepatch/transition.h
index 0f6e27c481f9..f9d0bc016067 100644
--- a/kernel/livepatch/transition.h
+++ b/kernel/livepatch/transition.h
@@ -11,5 +11,7 @@ void klp_cancel_transition(void);
 void klp_start_transition(void);
 void klp_try_complete_transition(void);
 void klp_reverse_transition(void);
+void klp_send_signals(void);
+void klp_force_transition(void);
 
 #endif /* _LIVEPATCH_TRANSITION_H */
diff --git a/kernel/signal.c b/kernel/signal.c
index e549174c0831..c6e4c83dc090 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -40,6 +40,7 @@
 #include <linux/cn_proc.h>
 #include <linux/compiler.h>
 #include <linux/posix-timers.h>
+#include <linux/livepatch.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/signal.h>
@@ -165,7 +166,8 @@ void recalc_sigpending_and_wake(struct task_struct *t)
 
 void recalc_sigpending(void)
 {
-	if (!recalc_sigpending_tsk(current) && !freezing(current))
+	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
+	    !klp_patch_pending(current))
 		clear_thread_flag(TIF_SIGPENDING);
 
 }
diff --git a/samples/livepatch/livepatch-callbacks-demo.c b/samples/livepatch/livepatch-callbacks-demo.c
index 3d115bd68442..72f9e6d1387b 100644
--- a/samples/livepatch/livepatch-callbacks-demo.c
+++ b/samples/livepatch/livepatch-callbacks-demo.c
@@ -197,21 +197,6 @@ static int livepatch_callbacks_demo_init(void)
 {
 	int ret;
 
-	if (!klp_have_reliable_stack() && !patch.immediate) {
-		/*
-		 * WARNING: Be very careful when using 'patch.immediate' in
-		 * your patches. It's ok to use it for simple patches like
-		 * this, but for more complex patches which change function
-		 * semantics, locking semantics, or data structures, it may not
-		 * be safe. Use of this option will also prevent removal of
-		 * the patch.
-		 *
-		 * See Documentation/livepatch/livepatch.txt for more details.
-		 */
-		patch.immediate = true;
-		pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
-	}
-
 	ret = klp_register_patch(&patch);
 	if (ret)
 		return ret;
diff --git a/samples/livepatch/livepatch-sample.c b/samples/livepatch/livepatch-sample.c
index 84795223f15f..2d554dd930e2 100644
--- a/samples/livepatch/livepatch-sample.c
+++ b/samples/livepatch/livepatch-sample.c
@@ -71,21 +71,6 @@ static int livepatch_init(void)
 {
 	int ret;
 
-	if (!klp_have_reliable_stack() && !patch.immediate) {
-		/*
-		 * WARNING: Be very careful when using 'patch.immediate' in
-		 * your patches. It's ok to use it for simple patches like
-		 * this, but for more complex patches which change function
-		 * semantics, locking semantics, or data structures, it may not
-		 * be safe. Use of this option will also prevent removal of
-		 * the patch.
-		 *
-		 * See Documentation/livepatch/livepatch.txt for more details.
-		 */
-		patch.immediate = true;
-		pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
-	}
-
 	ret = klp_register_patch(&patch);
 	if (ret)
 		return ret;
diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c
index fbe0a1f3d99b..830c55514f9f 100644
--- a/samples/livepatch/livepatch-shadow-fix1.c
+++ b/samples/livepatch/livepatch-shadow-fix1.c
@@ -133,21 +133,6 @@ static int livepatch_shadow_fix1_init(void)
 {
 	int ret;
 
-	if (!klp_have_reliable_stack() && !patch.immediate) {
-		/*
-		 * WARNING: Be very careful when using 'patch.immediate' in
-		 * your patches. It's ok to use it for simple patches like
-		 * this, but for more complex patches which change function
-		 * semantics, locking semantics, or data structures, it may not
-		 * be safe. Use of this option will also prevent removal of
-		 * the patch.
-		 *
-		 * See Documentation/livepatch/livepatch.txt for more details.
-		 */
-		patch.immediate = true;
-		pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
-	}
-
 	ret = klp_register_patch(&patch);
 	if (ret)
 		return ret;
diff --git a/samples/livepatch/livepatch-shadow-fix2.c b/samples/livepatch/livepatch-shadow-fix2.c
index 53c1794bdc5f..ff9948f0ec00 100644
--- a/samples/livepatch/livepatch-shadow-fix2.c
+++ b/samples/livepatch/livepatch-shadow-fix2.c
@@ -128,21 +128,6 @@ static int livepatch_shadow_fix2_init(void)
 {
 	int ret;
 
-	if (!klp_have_reliable_stack() && !patch.immediate) {
-		/*
-		 * WARNING: Be very careful when using 'patch.immediate' in
-		 * your patches. It's ok to use it for simple patches like
-		 * this, but for more complex patches which change function
-		 * semantics, locking semantics, or data structures, it may not
-		 * be safe. Use of this option will also prevent removal of
-		 * the patch.
-		 *
-		 * See Documentation/livepatch/livepatch.txt for more details.
-		 */
-		patch.immediate = true;
-		pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
-	}
-
 	ret = klp_register_patch(&patch);
 	if (ret)
 		return ret;
