author    Linus Torvalds <torvalds@linux-foundation.org>  2017-05-02 21:24:16 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-05-02 21:24:16 -0400
commit    76f1948a79b26d5f57a5ee9941876b745c6baaea (patch)
tree      a2c42578e31acabc61db2115d91e467cea439cd7
parent    7af4c727c7b6104f94f2ffc3d0899e75a9cc1e55 (diff)
parent    a0841609f658c77f066af9c61a2e13143564fcb4 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching
Pull livepatch updates from Jiri Kosina:

 - a per-task consistency model is being added for architectures that
   support reliable stack dumping (extending this, currently rather
   trivial set, is currently in the works).

   This extends the nature of the types of patches that can be applied
   by live patching infrastructure. The code stems from the design
   proposal made [1] back in November 2014. It's a hybrid of SUSE's
   kGraft and RH's kpatch, combining advantages of both: it uses
   kGraft's per-task consistency and syscall barrier switching combined
   with kpatch's stack trace switching. There are also a number of
   fallback options which make it quite flexible.

   Most of the heavy lifting done by Josh Poimboeuf with help from
   Miroslav Benes and Petr Mladek

   [1] https://lkml.kernel.org/r/20141107140458.GA21774@suse.cz

 - module load time patch optimization from Zhou Chengming

 - a few assorted small fixes

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
  livepatch: add missing printk newlines
  livepatch: Cancel transition a safe way for immediate patches
  livepatch: Reduce the time of finding module symbols
  livepatch: make klp_mutex proper part of API
  livepatch: allow removal of a disabled patch
  livepatch: add /proc/<pid>/patch_state
  livepatch: change to a per-task consistency model
  livepatch: store function sizes
  livepatch: use kstrtobool() in enabled_store()
  livepatch: move patching functions into patch.c
  livepatch: remove unnecessary object loaded check
  livepatch: separate enabled and patched states
  livepatch/s390: add TIF_PATCH_PENDING thread flag
  livepatch/s390: reorganize TIF thread flag bits
  livepatch/powerpc: add TIF_PATCH_PENDING thread flag
  livepatch/x86: add TIF_PATCH_PENDING thread flag
  livepatch: create temporary klp_update_patch_state() stub
  x86/entry: define _TIF_ALLWORK_MASK flags explicitly
  stacktrace/x86: add function for detecting reliable stack traces
-rw-r--r--  Documentation/ABI/testing/sysfs-kernel-livepatch  |   8
-rw-r--r--  Documentation/filesystems/proc.txt                |  18
-rw-r--r--  Documentation/livepatch/livepatch.txt             | 214
-rw-r--r--  arch/Kconfig                                      |   6
-rw-r--r--  arch/powerpc/include/asm/thread_info.h            |   4
-rw-r--r--  arch/powerpc/kernel/signal.c                      |   4
-rw-r--r--  arch/s390/include/asm/thread_info.h               |  24
-rw-r--r--  arch/s390/kernel/entry.S                          |  30
-rw-r--r--  arch/x86/Kconfig                                  |   1
-rw-r--r--  arch/x86/entry/common.c                           |   9
-rw-r--r--  arch/x86/include/asm/thread_info.h                |  13
-rw-r--r--  arch/x86/include/asm/unwind.h                     |   6
-rw-r--r--  arch/x86/kernel/stacktrace.c                      |  96
-rw-r--r--  arch/x86/kernel/unwind_frame.c                    |   2
-rw-r--r--  fs/proc/base.c                                    |  15
-rw-r--r--  include/linux/init_task.h                         |   9
-rw-r--r--  include/linux/livepatch.h                         |  68
-rw-r--r--  include/linux/sched.h                             |   3
-rw-r--r--  include/linux/stacktrace.h                        |   9
-rw-r--r--  kernel/fork.c                                     |   3
-rw-r--r--  kernel/livepatch/Makefile                         |   2
-rw-r--r--  kernel/livepatch/core.c                           | 450
-rw-r--r--  kernel/livepatch/core.h                           |   6
-rw-r--r--  kernel/livepatch/patch.c                          | 272
-rw-r--r--  kernel/livepatch/patch.h                          |  33
-rw-r--r--  kernel/livepatch/transition.c                     | 553
-rw-r--r--  kernel/livepatch/transition.h                     |  14
-rw-r--r--  kernel/sched/idle.c                               |   4
-rw-r--r--  kernel/stacktrace.c                               |  12
-rw-r--r--  samples/livepatch/livepatch-sample.c              |  18
30 files changed, 1547 insertions(+), 359 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-kernel-livepatch b/Documentation/ABI/testing/sysfs-kernel-livepatch
index da87f43aec58..d5d39748382f 100644
--- a/Documentation/ABI/testing/sysfs-kernel-livepatch
+++ b/Documentation/ABI/testing/sysfs-kernel-livepatch
@@ -25,6 +25,14 @@ Description:
25 code is currently applied. Writing 0 will disable the patch 25 code is currently applied. Writing 0 will disable the patch
26 while writing 1 will re-enable the patch. 26 while writing 1 will re-enable the patch.
27 27
28What: /sys/kernel/livepatch/<patch>/transition
29Date: Feb 2017
30KernelVersion: 4.12.0
31Contact: live-patching@vger.kernel.org
32Description:
33 An attribute which indicates whether the patch is currently in
34 transition.
35
28What: /sys/kernel/livepatch/<patch>/<object> 36What: /sys/kernel/livepatch/<patch>/<object>
29Date: Nov 2014 37Date: Nov 2014
30KernelVersion: 3.19.0 38KernelVersion: 3.19.0
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index c94b4675d021..9036dbf16156 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -44,6 +44,7 @@ Table of Contents
44 3.8 /proc/<pid>/fdinfo/<fd> - Information about opened file 44 3.8 /proc/<pid>/fdinfo/<fd> - Information about opened file
45 3.9 /proc/<pid>/map_files - Information about memory mapped files 45 3.9 /proc/<pid>/map_files - Information about memory mapped files
46 3.10 /proc/<pid>/timerslack_ns - Task timerslack value 46 3.10 /proc/<pid>/timerslack_ns - Task timerslack value
47 3.11 /proc/<pid>/patch_state - Livepatch patch operation state
47 48
48 4 Configuring procfs 49 4 Configuring procfs
49 4.1 Mount options 50 4.1 Mount options
@@ -1887,6 +1888,23 @@ Valid values are from 0 - ULLONG_MAX
1887An application setting the value must have PTRACE_MODE_ATTACH_FSCREDS level 1888An application setting the value must have PTRACE_MODE_ATTACH_FSCREDS level
1888permissions on the task specified to change its timerslack_ns value. 1889permissions on the task specified to change its timerslack_ns value.
1889 1890
18913.11 /proc/<pid>/patch_state - Livepatch patch operation state
1892-----------------------------------------------------------------
1893When CONFIG_LIVEPATCH is enabled, this file displays the value of the
1894patch state for the task.
1895
1896A value of '-1' indicates that no patch is in transition.
1897
1898A value of '0' indicates that a patch is in transition and the task is
1899unpatched. If the patch is being enabled, then the task hasn't been
1900patched yet. If the patch is being disabled, then the task has already
1901been unpatched.
1902
1903A value of '1' indicates that a patch is in transition and the task is
1904patched. If the patch is being enabled, then the task has already been
1905patched. If the patch is being disabled, then the task hasn't been
1906unpatched yet.
1907
1890 1908
1891------------------------------------------------------------------------------ 1909------------------------------------------------------------------------------
1892Configuring procfs 1910Configuring procfs
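
The -1/0/1 values documented above are easy to check from user space. A minimal sketch follows (the helper below is illustrative and not part of this series; it only assumes CONFIG_LIVEPATCH and read permission on the per-task file):

/* Illustrative user-space reader for /proc/<pid>/patch_state. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

static int read_patch_state(pid_t pid)
{
	char path[64];
	FILE *f;
	int state;

	snprintf(path, sizeof(path), "/proc/%d/patch_state", (int)pid);
	f = fopen(path, "r");
	if (!f || fscanf(f, "%d", &state) != 1) {
		perror("patch_state");
		exit(1);
	}
	fclose(f);
	return state;
}

int main(int argc, char **argv)
{
	int state = read_patch_state(argc > 1 ? atoi(argv[1]) : getpid());

	if (state == -1)
		printf("no patch in transition\n");
	else
		printf("patch in transition, task is %s\n",
		       state == 1 ? "patched" : "unpatched");
	return 0;
}

Tasks still reporting the pre-transition value are the ones blocking completion; as the livepatch documentation below notes, they can be signalled with SIGSTOP/SIGCONT to nudge them across.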
diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt
index 9d2096c7160d..ecdb18104ab0 100644
--- a/Documentation/livepatch/livepatch.txt
+++ b/Documentation/livepatch/livepatch.txt
@@ -72,7 +72,8 @@ example, they add a NULL pointer or a boundary check, fix a race by adding
72a missing memory barrier, or add some locking around a critical section. 72a missing memory barrier, or add some locking around a critical section.
73Most of these changes are self contained and the function presents itself 73Most of these changes are self contained and the function presents itself
74the same way to the rest of the system. In this case, the functions might 74the same way to the rest of the system. In this case, the functions might
75be updated independently one by one. 75be updated independently one by one. (This can be done by setting the
76'immediate' flag in the klp_patch struct.)
76 77
77But there are more complex fixes. For example, a patch might change 78But there are more complex fixes. For example, a patch might change
78ordering of locking in multiple functions at the same time. Or a patch 79ordering of locking in multiple functions at the same time. Or a patch
@@ -86,20 +87,141 @@ or no data are stored in the modified structures at the moment.
86The theory about how to apply functions a safe way is rather complex. 87The theory about how to apply functions a safe way is rather complex.
87The aim is to define a so-called consistency model. It attempts to define 88The aim is to define a so-called consistency model. It attempts to define
88conditions when the new implementation could be used so that the system 89conditions when the new implementation could be used so that the system
89stays consistent. The theory is not yet finished. See the discussion at 90stays consistent.
90https://lkml.kernel.org/r/20141107140458.GA21774@suse.cz 91
91 92Livepatch has a consistency model which is a hybrid of kGraft and
92The current consistency model is very simple. It guarantees that either 93kpatch: it uses kGraft's per-task consistency and syscall barrier
93the old or the new function is called. But various functions get redirected 94switching combined with kpatch's stack trace switching. There are also
94one by one without any synchronization. 95a number of fallback options which make it quite flexible.
95 96
96In other words, the current implementation _never_ modifies the behavior 97Patches are applied on a per-task basis, when the task is deemed safe to
97in the middle of the call. It is because it does _not_ rewrite the entire 98switch over. When a patch is enabled, livepatch enters into a
98function in the memory. Instead, the function gets redirected at the 99transition state where tasks are converging to the patched state.
99very beginning. But this redirection is used immediately even when 100Usually this transition state can complete in a few seconds. The same
100some other functions from the same patch have not been redirected yet. 101sequence occurs when a patch is disabled, except the tasks converge from
101 102the patched state to the unpatched state.
102See also the section "Limitations" below. 103
104An interrupt handler inherits the patched state of the task it
105interrupts. The same is true for forked tasks: the child inherits the
106patched state of the parent.
107
108Livepatch uses several complementary approaches to determine when it's
109safe to patch tasks:
110
1111. The first and most effective approach is stack checking of sleeping
112 tasks. If no affected functions are on the stack of a given task,
113 the task is patched. In most cases this will patch most or all of
114 the tasks on the first try. Otherwise it'll keep trying
115 periodically. This option is only available if the architecture has
116 reliable stacks (HAVE_RELIABLE_STACKTRACE).
117
1182. The second approach, if needed, is kernel exit switching. A
119 task is switched when it returns to user space from a system call, a
120 user space IRQ, or a signal. It's useful in the following cases:
121
122 a) Patching I/O-bound user tasks which are sleeping on an affected
123 function. In this case you have to send SIGSTOP and SIGCONT to
124 force it to exit the kernel and be patched.
125 b) Patching CPU-bound user tasks. If the task is highly CPU-bound
126 then it will get patched the next time it gets interrupted by an
127 IRQ.
128 c) In the future it could be useful for applying patches for
129 architectures which don't yet have HAVE_RELIABLE_STACKTRACE. In
130 this case you would have to signal most of the tasks on the
131 system. However this isn't supported yet because there's
132 currently no way to patch kthreads without
133 HAVE_RELIABLE_STACKTRACE.
134
1353. For idle "swapper" tasks, since they don't ever exit the kernel, they
136 instead have a klp_update_patch_state() call in the idle loop which
137 allows them to be patched before the CPU enters the idle state.
138
139 (Note there's not yet such an approach for kthreads.)
140
141All the above approaches may be skipped by setting the 'immediate' flag
142in the 'klp_patch' struct, which will disable per-task consistency and
143patch all tasks immediately. This can be useful if the patch doesn't
144change any function or data semantics. Note that, even with this flag
145set, it's possible that some tasks may still be running with an old
146version of the function, until that function returns.
147
148There's also an 'immediate' flag in the 'klp_func' struct which allows
149you to specify that certain functions in the patch can be applied
150without per-task consistency. This might be useful if you want to patch
151a common function like schedule(), and the function change doesn't need
152consistency but the rest of the patch does.
153
154For architectures which don't have HAVE_RELIABLE_STACKTRACE, the user
155must set patch->immediate which causes all tasks to be patched
156immediately. This option should be used with care, only when the patch
157doesn't change any function or data semantics.
158
159In the future, architectures which don't have HAVE_RELIABLE_STACKTRACE
160may be allowed to use per-task consistency if we can come up with
161another way to patch kthreads.
162
163The /sys/kernel/livepatch/<patch>/transition file shows whether a patch
164is in transition. Only a single patch (the topmost patch on the stack)
165can be in transition at a given time. A patch can remain in transition
166indefinitely, if any of the tasks are stuck in the initial patch state.
167
168A transition can be reversed and effectively canceled by writing the
169opposite value to the /sys/kernel/livepatch/<patch>/enabled file while
170the transition is in progress. Then all the tasks will attempt to
171converge back to the original patch state.
172
173There's also a /proc/<pid>/patch_state file which can be used to
174determine which tasks are blocking completion of a patching operation.
175If a patch is in transition, this file shows 0 to indicate the task is
176unpatched and 1 to indicate it's patched. Otherwise, if no patch is in
177transition, it shows -1. Any tasks which are blocking the transition
178can be signaled with SIGSTOP and SIGCONT to force them to change their
179patched state.
180
181
1823.1 Adding consistency model support to new architectures
183---------------------------------------------------------
184
185For adding consistency model support to new architectures, there are a
186few options:
187
1881) Add CONFIG_HAVE_RELIABLE_STACKTRACE. This means porting objtool, and
189 for non-DWARF unwinders, also making sure there's a way for the stack
190 tracing code to detect interrupts on the stack.
191
1922) Alternatively, ensure that every kthread has a call to
193 klp_update_patch_state() in a safe location. Kthreads are typically
194 in an infinite loop which does some action repeatedly. The safe
195 location to switch the kthread's patch state would be at a designated
196 point in the loop where there are no locks taken and all data
197 structures are in a well-defined state.
198
199 The location is clear when using workqueues or the kthread worker
200 API. These kthreads process independent actions in a generic loop.
201
202 It's much more complicated with kthreads which have a custom loop.
203 There the safe location must be carefully selected on a case-by-case
204 basis.
205
206 In that case, arches without HAVE_RELIABLE_STACKTRACE would still be
207 able to use the non-stack-checking parts of the consistency model:
208
209 a) patching user tasks when they cross the kernel/user space
210 boundary; and
211
212 b) patching kthreads and idle tasks at their designated patch points.
213
214 This option isn't as good as option 1 because it requires signaling
215 user tasks and waking kthreads to patch them. But it could still be
216 a good backup option for those architectures which don't have
217 reliable stack traces yet.
218
219In the meantime, patches for such architectures can bypass the
220consistency model by setting klp_patch.immediate to true. This option
221is perfectly fine for patches which don't change the semantics of the
222patched functions. In practice, this is usable for ~90% of security
223fixes. Use of this option also means the patch can't be unloaded after
224it has been disabled.
103 225
104 226
1054. Livepatch module 2274. Livepatch module
@@ -134,7 +256,7 @@ Documentation/livepatch/module-elf-format.txt for more details.
134 256
135 257
1364.2. Metadata 2584.2. Metadata
137------------ 259-------------
138 260
139The patch is described by several structures that split the information 261The patch is described by several structures that split the information
140into three levels: 262into three levels:
@@ -156,6 +278,9 @@ into three levels:
156 only for a particular object ( vmlinux or a kernel module ). Note that 278 only for a particular object ( vmlinux or a kernel module ). Note that
157 kallsyms allows for searching symbols according to the object name. 279 kallsyms allows for searching symbols according to the object name.
158 280
281 There's also an 'immediate' flag which, when set, patches the
282 function immediately, bypassing the consistency model safety checks.
283
159 + struct klp_object defines an array of patched functions (struct 284 + struct klp_object defines an array of patched functions (struct
160 klp_func) in the same object. Where the object is either vmlinux 285 klp_func) in the same object. Where the object is either vmlinux
161 (NULL) or a module name. 286 (NULL) or a module name.
@@ -172,10 +297,13 @@ into three levels:
172 This structure handles all patched functions consistently and eventually, 297 This structure handles all patched functions consistently and eventually,
173 synchronously. The whole patch is applied only when all patched 298 synchronously. The whole patch is applied only when all patched
174 symbols are found. The only exception are symbols from objects 299 symbols are found. The only exception are symbols from objects
175 (kernel modules) that have not been loaded yet. Also if a more complex 300 (kernel modules) that have not been loaded yet.
176 consistency model is supported then a selected unit (thread, 301
177 kernel as a whole) will see the new code from the entire patch 302 Setting the 'immediate' flag applies the patch to all tasks
178 only when it is in a safe state. 303 immediately, bypassing the consistency model safety checks.
304
305 For more details on how the patch is applied on a per-task basis,
306 see the "Consistency model" section.
179 307
180 308
1814.3. Livepatch module handling 3094.3. Livepatch module handling
@@ -188,8 +316,15 @@ section "Livepatch life-cycle" below for more details about these
188two operations. 316two operations.
189 317
190Module removal is only safe when there are no users of the underlying 318Module removal is only safe when there are no users of the underlying
191functions. The immediate consistency model is not able to detect this; 319functions. The immediate consistency model is not able to detect this. The
192therefore livepatch modules cannot be removed. See "Limitations" below. 320code just redirects the functions at the very beginning and it does not
321check if the functions are in use. In other words, it knows when the
322functions get called but it does not know when the functions return.
323Therefore it cannot be decided when the livepatch module can be safely
324removed. This is solved by a hybrid consistency model. When the system is
325transitioned to a new patch state (patched/unpatched) it is guaranteed that
326no task sleeps or runs in the old code.
327
193 328
1945. Livepatch life-cycle 3295. Livepatch life-cycle
195======================= 330=======================
@@ -239,9 +374,15 @@ Registered patches might be enabled either by calling klp_enable_patch() or
239by writing '1' to /sys/kernel/livepatch/<name>/enabled. The system will 374by writing '1' to /sys/kernel/livepatch/<name>/enabled. The system will
240start using the new implementation of the patched functions at this stage. 375start using the new implementation of the patched functions at this stage.
241 376
242In particular, if an original function is patched for the first time, a 377When a patch is enabled, livepatch enters into a transition state where
243function specific struct klp_ops is created and an universal ftrace handler 378tasks are converging to the patched state. This is indicated by a value
244is registered. 379of '1' in /sys/kernel/livepatch/<name>/transition. Once all tasks have
380been patched, the 'transition' value changes to '0'. For more
381information about this process, see the "Consistency model" section.
382
383If an original function is patched for the first time, a function
384specific struct klp_ops is created and an universal ftrace handler is
385registered.
245 386
246Functions might be patched multiple times. The ftrace handler is registered 387Functions might be patched multiple times. The ftrace handler is registered
247only once for the given function. Further patches just add an entry to the 388only once for the given function. Further patches just add an entry to the
@@ -261,6 +402,12 @@ by writing '0' to /sys/kernel/livepatch/<name>/enabled. At this stage
261either the code from the previously enabled patch or even the original 402either the code from the previously enabled patch or even the original
262code gets used. 403code gets used.
263 404
405When a patch is disabled, livepatch enters into a transition state where
406tasks are converging to the unpatched state. This is indicated by a
407value of '1' in /sys/kernel/livepatch/<name>/transition. Once all tasks
408have been unpatched, the 'transition' value changes to '0'. For more
409information about this process, see the "Consistency model" section.
410
264Here all the functions (struct klp_func) associated with the to-be-disabled 411Here all the functions (struct klp_func) associated with the to-be-disabled
265patch are removed from the corresponding struct klp_ops. The ftrace handler 412patch are removed from the corresponding struct klp_ops. The ftrace handler
266is unregistered and the struct klp_ops is freed when the func_stack list 413is unregistered and the struct klp_ops is freed when the func_stack list
@@ -329,23 +476,6 @@ The current Livepatch implementation has several limitations:
329 by "notrace". 476 by "notrace".
330 477
331 478
332 + Livepatch modules can not be removed.
333
334 The current implementation just redirects the functions at the very
335 beginning. It does not check if the functions are in use. In other
336 words, it knows when the functions get called but it does not
337 know when the functions return. Therefore it can not decide when
338 the livepatch module can be safely removed.
339
340 This will get most likely solved once a more complex consistency model
341 is supported. The idea is that a safe state for patching should also
342 mean a safe state for removing the patch.
343
344 Note that the patch itself might get disabled by writing zero
345 to /sys/kernel/livepatch/<patch>/enabled. It causes that the new
346 code will not longer get called. But it does not guarantee
347 that anyone is not sleeping anywhere in the new code.
348
349 479
350 + Livepatch works reliably only when the dynamic ftrace is located at 480 + Livepatch works reliably only when the dynamic ftrace is located at
351 the very beginning of the function. 481 the very beginning of the function.
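
Section 3.1, option 2 above talks about giving each kthread a klp_update_patch_state() call at a designated safe point in its loop. The kernel does not actually wire kthreads up this way yet, so the following is only a sketch of the idea; the work function and sleep interval are hypothetical:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/livepatch.h>

static int my_kthread_fn(void *data)
{
	while (!kthread_should_stop()) {
		do_one_unit_of_work(data);	/* hypothetical work item */

		/*
		 * Designated patch point: no locks held and all data
		 * structures are in a well-defined state, so it is safe
		 * to switch this task's patch state here.
		 */
		klp_update_patch_state(current);

		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

With workqueues or the kthread worker API the equivalent safe point falls out naturally between work items, which is why the documentation singles out custom loops as the case needing care.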
diff --git a/arch/Kconfig b/arch/Kconfig
index c4d6833aacd9..640999412d11 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -720,6 +720,12 @@ config HAVE_STACK_VALIDATION
720 Architecture supports the 'objtool check' host tool command, which 720 Architecture supports the 'objtool check' host tool command, which
721 performs compile-time stack metadata validation. 721 performs compile-time stack metadata validation.
722 722
723config HAVE_RELIABLE_STACKTRACE
724 bool
725 help
726 Architecture has a save_stack_trace_tsk_reliable() function which
727 only returns a stack trace if it can guarantee the trace is reliable.
728
723config HAVE_ARCH_HASH 729config HAVE_ARCH_HASH
724 bool 730 bool
725 default n 731 default n
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 87e4b2d8dcd4..6fc6464f7421 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -92,6 +92,7 @@ static inline struct thread_info *current_thread_info(void)
92 TIF_NEED_RESCHED */ 92 TIF_NEED_RESCHED */
93#define TIF_32BIT 4 /* 32 bit binary */ 93#define TIF_32BIT 4 /* 32 bit binary */
94#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ 94#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
95#define TIF_PATCH_PENDING 6 /* pending live patching update */
95#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 96#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
96#define TIF_SINGLESTEP 8 /* singlestepping active */ 97#define TIF_SINGLESTEP 8 /* singlestepping active */
97#define TIF_NOHZ 9 /* in adaptive nohz mode */ 98#define TIF_NOHZ 9 /* in adaptive nohz mode */
@@ -115,6 +116,7 @@ static inline struct thread_info *current_thread_info(void)
115#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 116#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
116#define _TIF_32BIT (1<<TIF_32BIT) 117#define _TIF_32BIT (1<<TIF_32BIT)
117#define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM) 118#define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM)
119#define _TIF_PATCH_PENDING (1<<TIF_PATCH_PENDING)
118#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 120#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
119#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) 121#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
120#define _TIF_SECCOMP (1<<TIF_SECCOMP) 122#define _TIF_SECCOMP (1<<TIF_SECCOMP)
@@ -131,7 +133,7 @@ static inline struct thread_info *current_thread_info(void)
131 133
132#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ 134#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
133 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ 135 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
134 _TIF_RESTORE_TM) 136 _TIF_RESTORE_TM | _TIF_PATCH_PENDING)
135#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) 137#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
136 138
137/* Bits in local_flags */ 139/* Bits in local_flags */
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 3a3671172436..e9436c5e1e09 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -14,6 +14,7 @@
14#include <linux/uprobes.h> 14#include <linux/uprobes.h>
15#include <linux/key.h> 15#include <linux/key.h>
16#include <linux/context_tracking.h> 16#include <linux/context_tracking.h>
17#include <linux/livepatch.h>
17#include <asm/hw_breakpoint.h> 18#include <asm/hw_breakpoint.h>
18#include <linux/uaccess.h> 19#include <linux/uaccess.h>
19#include <asm/unistd.h> 20#include <asm/unistd.h>
@@ -162,6 +163,9 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
162 tracehook_notify_resume(regs); 163 tracehook_notify_resume(regs);
163 } 164 }
164 165
166 if (thread_info_flags & _TIF_PATCH_PENDING)
167 klp_update_patch_state(current);
168
165 user_enter(); 169 user_enter();
166} 170}
167 171
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index f36e6e2b73f0..0b3ee083a665 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -51,15 +51,14 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
51/* 51/*
52 * thread information flags bit numbers 52 * thread information flags bit numbers
53 */ 53 */
54/* _TIF_WORK bits */
54#define TIF_NOTIFY_RESUME 0 /* callback before returning to user */ 55#define TIF_NOTIFY_RESUME 0 /* callback before returning to user */
55#define TIF_SIGPENDING 1 /* signal pending */ 56#define TIF_SIGPENDING 1 /* signal pending */
56#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ 57#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
57#define TIF_UPROBE 3 /* breakpointed or single-stepping */ 58#define TIF_UPROBE 3 /* breakpointed or single-stepping */
58#define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */ 59#define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */
59#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ 60#define TIF_PATCH_PENDING 5 /* pending live patching update */
60#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ 61
61#define TIF_SECCOMP 10 /* secure computing */
62#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
63#define TIF_31BIT 16 /* 32bit process */ 62#define TIF_31BIT 16 /* 32bit process */
64#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ 63#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
65#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */ 64#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
@@ -67,16 +66,25 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
67#define TIF_BLOCK_STEP 20 /* This task is block stepped */ 66#define TIF_BLOCK_STEP 20 /* This task is block stepped */
68#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */ 67#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */
69 68
69/* _TIF_TRACE bits */
70#define TIF_SYSCALL_TRACE 24 /* syscall trace active */
71#define TIF_SYSCALL_AUDIT 25 /* syscall auditing active */
72#define TIF_SECCOMP 26 /* secure computing */
73#define TIF_SYSCALL_TRACEPOINT 27 /* syscall tracepoint instrumentation */
74
70#define _TIF_NOTIFY_RESUME _BITUL(TIF_NOTIFY_RESUME) 75#define _TIF_NOTIFY_RESUME _BITUL(TIF_NOTIFY_RESUME)
71#define _TIF_SIGPENDING _BITUL(TIF_SIGPENDING) 76#define _TIF_SIGPENDING _BITUL(TIF_SIGPENDING)
72#define _TIF_NEED_RESCHED _BITUL(TIF_NEED_RESCHED) 77#define _TIF_NEED_RESCHED _BITUL(TIF_NEED_RESCHED)
78#define _TIF_UPROBE _BITUL(TIF_UPROBE)
79#define _TIF_GUARDED_STORAGE _BITUL(TIF_GUARDED_STORAGE)
80#define _TIF_PATCH_PENDING _BITUL(TIF_PATCH_PENDING)
81
82#define _TIF_31BIT _BITUL(TIF_31BIT)
83#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
84
73#define _TIF_SYSCALL_TRACE _BITUL(TIF_SYSCALL_TRACE) 85#define _TIF_SYSCALL_TRACE _BITUL(TIF_SYSCALL_TRACE)
74#define _TIF_SYSCALL_AUDIT _BITUL(TIF_SYSCALL_AUDIT) 86#define _TIF_SYSCALL_AUDIT _BITUL(TIF_SYSCALL_AUDIT)
75#define _TIF_SECCOMP _BITUL(TIF_SECCOMP) 87#define _TIF_SECCOMP _BITUL(TIF_SECCOMP)
76#define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT) 88#define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT)
77#define _TIF_UPROBE _BITUL(TIF_UPROBE)
78#define _TIF_31BIT _BITUL(TIF_31BIT)
79#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
80#define _TIF_GUARDED_STORAGE _BITUL(TIF_GUARDED_STORAGE)
81 89
82#endif /* _ASM_THREAD_INFO_H */ 90#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index c6cf338c9327..a5f5d3bb3dbc 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -47,7 +47,7 @@ STACK_SIZE = 1 << STACK_SHIFT
47STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE 47STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
48 48
49_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 49_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
50 _TIF_UPROBE | _TIF_GUARDED_STORAGE) 50 _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
51_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 51_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
52 _TIF_SYSCALL_TRACEPOINT) 52 _TIF_SYSCALL_TRACEPOINT)
53_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \ 53_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
@@ -334,6 +334,11 @@ ENTRY(system_call)
334 jo .Lsysc_guarded_storage 334 jo .Lsysc_guarded_storage
335 TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP 335 TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP
336 jo .Lsysc_singlestep 336 jo .Lsysc_singlestep
337#ifdef CONFIG_LIVEPATCH
338 TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING
339 jo .Lsysc_patch_pending # handle live patching just before
340 # signals and possible syscall restart
341#endif
337 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING 342 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
338 jo .Lsysc_sigpending 343 jo .Lsysc_sigpending
339 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME 344 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
@@ -415,6 +420,15 @@ ENTRY(system_call)
415 lgr %r2,%r11 # pass pointer to pt_regs 420 lgr %r2,%r11 # pass pointer to pt_regs
416 larl %r14,.Lsysc_return 421 larl %r14,.Lsysc_return
417 jg gs_load_bc_cb 422 jg gs_load_bc_cb
423#
424# _TIF_PATCH_PENDING is set, call klp_update_patch_state
425#
426#ifdef CONFIG_LIVEPATCH
427.Lsysc_patch_pending:
428 lg %r2,__LC_CURRENT # pass pointer to task struct
429 larl %r14,.Lsysc_return
430 jg klp_update_patch_state
431#endif
418 432
419# 433#
420# _PIF_PER_TRAP is set, call do_per_trap 434# _PIF_PER_TRAP is set, call do_per_trap
@@ -667,6 +681,10 @@ ENTRY(io_int_handler)
667 jo .Lio_mcck_pending 681 jo .Lio_mcck_pending
668 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 682 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
669 jo .Lio_reschedule 683 jo .Lio_reschedule
684#ifdef CONFIG_LIVEPATCH
685 TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING
686 jo .Lio_patch_pending
687#endif
670 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING 688 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
671 jo .Lio_sigpending 689 jo .Lio_sigpending
672 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME 690 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
@@ -730,6 +748,16 @@ ENTRY(io_int_handler)
730 j .Lio_return 748 j .Lio_return
731 749
732# 750#
751# _TIF_PATCH_PENDING is set, call klp_update_patch_state
752#
753#ifdef CONFIG_LIVEPATCH
754.Lio_patch_pending:
755 lg %r2,__LC_CURRENT # pass pointer to task struct
756 larl %r14,.Lio_return
757 jg klp_update_patch_state
758#endif
759
760#
733# _TIF_SIGPENDING or is set, call do_signal 761# _TIF_SIGPENDING or is set, call do_signal
734# 762#
735.Lio_sigpending: 763.Lio_sigpending:
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8d4f87e5bba3..cd18994a9555 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -160,6 +160,7 @@ config X86
160 select HAVE_PERF_REGS 160 select HAVE_PERF_REGS
161 select HAVE_PERF_USER_STACK_DUMP 161 select HAVE_PERF_USER_STACK_DUMP
162 select HAVE_REGS_AND_STACK_ACCESS_API 162 select HAVE_REGS_AND_STACK_ACCESS_API
163 select HAVE_RELIABLE_STACKTRACE if X86_64 && FRAME_POINTER && STACK_VALIDATION
163 select HAVE_STACK_VALIDATION if X86_64 164 select HAVE_STACK_VALIDATION if X86_64
164 select HAVE_SYSCALL_TRACEPOINTS 165 select HAVE_SYSCALL_TRACEPOINTS
165 select HAVE_UNSTABLE_SCHED_CLOCK 166 select HAVE_UNSTABLE_SCHED_CLOCK
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 370c42c7f046..cdefcfdd9e63 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -22,6 +22,7 @@
22#include <linux/context_tracking.h> 22#include <linux/context_tracking.h>
23#include <linux/user-return-notifier.h> 23#include <linux/user-return-notifier.h>
24#include <linux/uprobes.h> 24#include <linux/uprobes.h>
25#include <linux/livepatch.h>
25 26
26#include <asm/desc.h> 27#include <asm/desc.h>
27#include <asm/traps.h> 28#include <asm/traps.h>
@@ -130,14 +131,13 @@ static long syscall_trace_enter(struct pt_regs *regs)
130 131
131#define EXIT_TO_USERMODE_LOOP_FLAGS \ 132#define EXIT_TO_USERMODE_LOOP_FLAGS \
132 (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ 133 (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
133 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY) 134 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
134 135
135static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) 136static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
136{ 137{
137 /* 138 /*
138 * In order to return to user mode, we need to have IRQs off with 139 * In order to return to user mode, we need to have IRQs off with
139 * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY, 140 * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
140 * _TIF_UPROBE, or _TIF_NEED_RESCHED set. Several of these flags
141 * can be set at any time on preemptable kernels if we have IRQs on, 141 * can be set at any time on preemptable kernels if we have IRQs on,
142 * so we need to loop. Disabling preemption wouldn't help: doing the 142 * so we need to loop. Disabling preemption wouldn't help: doing the
143 * work to clear some of the flags can sleep. 143 * work to clear some of the flags can sleep.
@@ -164,6 +164,9 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
164 if (cached_flags & _TIF_USER_RETURN_NOTIFY) 164 if (cached_flags & _TIF_USER_RETURN_NOTIFY)
165 fire_user_return_notifiers(); 165 fire_user_return_notifiers();
166 166
167 if (cached_flags & _TIF_PATCH_PENDING)
168 klp_update_patch_state(current);
169
167 /* Disable IRQs and retry */ 170 /* Disable IRQs and retry */
168 local_irq_disable(); 171 local_irq_disable();
169 172
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index f765a49103fb..e00e1bd6e7b3 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -73,9 +73,6 @@ struct thread_info {
73 * thread information flags 73 * thread information flags
74 * - these are process state flags that various assembly files 74 * - these are process state flags that various assembly files
75 * may need to access 75 * may need to access
76 * - pending work-to-be-done flags are in LSW
77 * - other flags in MSW
78 * Warning: layout of LSW is hardcoded in entry.S
79 */ 76 */
80#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ 77#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
81#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ 78#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
@@ -87,6 +84,7 @@ struct thread_info {
87#define TIF_SECCOMP 8 /* secure computing */ 84#define TIF_SECCOMP 8 /* secure computing */
88#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ 85#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
89#define TIF_UPROBE 12 /* breakpointed or singlestepping */ 86#define TIF_UPROBE 12 /* breakpointed or singlestepping */
87#define TIF_PATCH_PENDING 13 /* pending live patching update */
90#define TIF_NOCPUID 15 /* CPUID is not accessible in userland */ 88#define TIF_NOCPUID 15 /* CPUID is not accessible in userland */
91#define TIF_NOTSC 16 /* TSC is not accessible in userland */ 89#define TIF_NOTSC 16 /* TSC is not accessible in userland */
92#define TIF_IA32 17 /* IA32 compatibility process */ 90#define TIF_IA32 17 /* IA32 compatibility process */
@@ -104,13 +102,14 @@ struct thread_info {
104#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 102#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
105#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 103#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
106#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 104#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
107#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
108#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 105#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
106#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
109#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) 107#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
110#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 108#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
111#define _TIF_SECCOMP (1 << TIF_SECCOMP) 109#define _TIF_SECCOMP (1 << TIF_SECCOMP)
112#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) 110#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
113#define _TIF_UPROBE (1 << TIF_UPROBE) 111#define _TIF_UPROBE (1 << TIF_UPROBE)
112#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
114#define _TIF_NOCPUID (1 << TIF_NOCPUID) 113#define _TIF_NOCPUID (1 << TIF_NOCPUID)
115#define _TIF_NOTSC (1 << TIF_NOTSC) 114#define _TIF_NOTSC (1 << TIF_NOTSC)
116#define _TIF_IA32 (1 << TIF_IA32) 115#define _TIF_IA32 (1 << TIF_IA32)
@@ -135,8 +134,10 @@ struct thread_info {
135 134
136/* work to do on any return to user space */ 135/* work to do on any return to user space */
137#define _TIF_ALLWORK_MASK \ 136#define _TIF_ALLWORK_MASK \
138 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \ 137 (_TIF_SYSCALL_TRACE | _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
139 _TIF_NOHZ) 138 _TIF_NEED_RESCHED | _TIF_SINGLESTEP | _TIF_SYSCALL_EMU | \
139 _TIF_SYSCALL_AUDIT | _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE | \
140 _TIF_PATCH_PENDING | _TIF_NOHZ | _TIF_SYSCALL_TRACEPOINT)
140 141
141/* flags to check in __switch_to() */ 142/* flags to check in __switch_to() */
142#define _TIF_WORK_CTXSW \ 143#define _TIF_WORK_CTXSW \
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index 9b10dcd51716..e6676495b125 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -11,6 +11,7 @@ struct unwind_state {
11 unsigned long stack_mask; 11 unsigned long stack_mask;
12 struct task_struct *task; 12 struct task_struct *task;
13 int graph_idx; 13 int graph_idx;
14 bool error;
14#ifdef CONFIG_FRAME_POINTER 15#ifdef CONFIG_FRAME_POINTER
15 bool got_irq; 16 bool got_irq;
16 unsigned long *bp, *orig_sp; 17 unsigned long *bp, *orig_sp;
@@ -42,6 +43,11 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
42 __unwind_start(state, task, regs, first_frame); 43 __unwind_start(state, task, regs, first_frame);
43} 44}
44 45
46static inline bool unwind_error(struct unwind_state *state)
47{
48 return state->error;
49}
50
45#ifdef CONFIG_FRAME_POINTER 51#ifdef CONFIG_FRAME_POINTER
46 52
47static inline 53static inline
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 8e2b79b88e51..8dabd7bf1673 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -76,6 +76,101 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
76} 76}
77EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 77EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
78 78
79#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
80
81#define STACKTRACE_DUMP_ONCE(task) ({ \
82 static bool __section(.data.unlikely) __dumped; \
83 \
84 if (!__dumped) { \
85 __dumped = true; \
86 WARN_ON(1); \
87 show_stack(task, NULL); \
88 } \
89})
90
91static int __save_stack_trace_reliable(struct stack_trace *trace,
92 struct task_struct *task)
93{
94 struct unwind_state state;
95 struct pt_regs *regs;
96 unsigned long addr;
97
98 for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
99 unwind_next_frame(&state)) {
100
101 regs = unwind_get_entry_regs(&state);
102 if (regs) {
103 /*
104 * Kernel mode registers on the stack indicate an
105 * in-kernel interrupt or exception (e.g., preemption
106 * or a page fault), which can make frame pointers
107 * unreliable.
108 */
109 if (!user_mode(regs))
110 return -EINVAL;
111
112 /*
113 * The last frame contains the user mode syscall
114 * pt_regs. Skip it and finish the unwind.
115 */
116 unwind_next_frame(&state);
117 if (!unwind_done(&state)) {
118 STACKTRACE_DUMP_ONCE(task);
119 return -EINVAL;
120 }
121 break;
122 }
123
124 addr = unwind_get_return_address(&state);
125
126 /*
127 * A NULL or invalid return address probably means there's some
128 * generated code which __kernel_text_address() doesn't know
129 * about.
130 */
131 if (!addr) {
132 STACKTRACE_DUMP_ONCE(task);
133 return -EINVAL;
134 }
135
136 if (save_stack_address(trace, addr, false))
137 return -EINVAL;
138 }
139
140 /* Check for stack corruption */
141 if (unwind_error(&state)) {
142 STACKTRACE_DUMP_ONCE(task);
143 return -EINVAL;
144 }
145
146 if (trace->nr_entries < trace->max_entries)
147 trace->entries[trace->nr_entries++] = ULONG_MAX;
148
149 return 0;
150}
151
152/*
153 * This function returns an error if it detects any unreliable features of the
154 * stack. Otherwise it guarantees that the stack trace is reliable.
155 *
156 * If the task is not 'current', the caller *must* ensure the task is inactive.
157 */
158int save_stack_trace_tsk_reliable(struct task_struct *tsk,
159 struct stack_trace *trace)
160{
161 int ret;
162
163 if (!try_get_task_stack(tsk))
164 return -EINVAL;
165
166 ret = __save_stack_trace_reliable(trace, tsk);
167
168 put_task_stack(tsk);
169
170 return ret;
171}
172#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
173
79/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */ 174/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
80 175
81struct stack_frame_user { 176struct stack_frame_user {
@@ -138,4 +233,3 @@ void save_stack_trace_user(struct stack_trace *trace)
138 if (trace->nr_entries < trace->max_entries) 233 if (trace->nr_entries < trace->max_entries)
139 trace->entries[trace->nr_entries++] = ULONG_MAX; 234 trace->entries[trace->nr_entries++] = ULONG_MAX;
140} 235}
141
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index fec70fe3b1ec..82c6d7f1fd73 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -283,6 +283,8 @@ bool unwind_next_frame(struct unwind_state *state)
283 return true; 283 return true;
284 284
285bad_address: 285bad_address:
286 state->error = true;
287
286 /* 288 /*
287 * When unwinding a non-current task, the task might actually be 289 * When unwinding a non-current task, the task might actually be
288 * running on another CPU, in which case it could be modifying its 290 * running on another CPU, in which case it could be modifying its
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c87b6b9a8a76..9e3ac5c11780 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2834,6 +2834,15 @@ static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
2834 return err; 2834 return err;
2835} 2835}
2836 2836
2837#ifdef CONFIG_LIVEPATCH
2838static int proc_pid_patch_state(struct seq_file *m, struct pid_namespace *ns,
2839 struct pid *pid, struct task_struct *task)
2840{
2841 seq_printf(m, "%d\n", task->patch_state);
2842 return 0;
2843}
2844#endif /* CONFIG_LIVEPATCH */
2845
2837/* 2846/*
2838 * Thread groups 2847 * Thread groups
2839 */ 2848 */
@@ -2933,6 +2942,9 @@ static const struct pid_entry tgid_base_stuff[] = {
2933 REG("timers", S_IRUGO, proc_timers_operations), 2942 REG("timers", S_IRUGO, proc_timers_operations),
2934#endif 2943#endif
2935 REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations), 2944 REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations),
2945#ifdef CONFIG_LIVEPATCH
2946 ONE("patch_state", S_IRUSR, proc_pid_patch_state),
2947#endif
2936}; 2948};
2937 2949
2938static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) 2950static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
@@ -3315,6 +3327,9 @@ static const struct pid_entry tid_base_stuff[] = {
3315 REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), 3327 REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
3316 REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations), 3328 REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations),
3317#endif 3329#endif
3330#ifdef CONFIG_LIVEPATCH
3331 ONE("patch_state", S_IRUSR, proc_pid_patch_state),
3332#endif
3318}; 3333};
3319 3334
3320static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx) 3335static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2c487e0879d5..82be96564266 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -15,6 +15,7 @@
15#include <linux/sched/autogroup.h> 15#include <linux/sched/autogroup.h>
16#include <net/net_namespace.h> 16#include <net/net_namespace.h>
17#include <linux/sched/rt.h> 17#include <linux/sched/rt.h>
18#include <linux/livepatch.h>
18#include <linux/mm_types.h> 19#include <linux/mm_types.h>
19 20
20#include <asm/thread_info.h> 21#include <asm/thread_info.h>
@@ -203,6 +204,13 @@ extern struct cred init_cred;
203# define INIT_KASAN(tsk) 204# define INIT_KASAN(tsk)
204#endif 205#endif
205 206
207#ifdef CONFIG_LIVEPATCH
208# define INIT_LIVEPATCH(tsk) \
209 .patch_state = KLP_UNDEFINED,
210#else
211# define INIT_LIVEPATCH(tsk)
212#endif
213
206#ifdef CONFIG_THREAD_INFO_IN_TASK 214#ifdef CONFIG_THREAD_INFO_IN_TASK
207# define INIT_TASK_TI(tsk) \ 215# define INIT_TASK_TI(tsk) \
208 .thread_info = INIT_THREAD_INFO(tsk), \ 216 .thread_info = INIT_THREAD_INFO(tsk), \
@@ -289,6 +297,7 @@ extern struct cred init_cred;
289 INIT_VTIME(tsk) \ 297 INIT_VTIME(tsk) \
290 INIT_NUMA_BALANCING(tsk) \ 298 INIT_NUMA_BALANCING(tsk) \
291 INIT_KASAN(tsk) \ 299 INIT_KASAN(tsk) \
300 INIT_LIVEPATCH(tsk) \
292} 301}
293 302
294 303
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 9072f04db616..194991ef9347 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -23,15 +23,16 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/ftrace.h> 25#include <linux/ftrace.h>
26#include <linux/completion.h>
26 27
27#if IS_ENABLED(CONFIG_LIVEPATCH) 28#if IS_ENABLED(CONFIG_LIVEPATCH)
28 29
29#include <asm/livepatch.h> 30#include <asm/livepatch.h>
30 31
31enum klp_state { 32/* task patch states */
32 KLP_DISABLED, 33#define KLP_UNDEFINED -1
33 KLP_ENABLED 34#define KLP_UNPATCHED 0
34}; 35#define KLP_PATCHED 1
35 36
36/** 37/**
37 * struct klp_func - function structure for live patching 38 * struct klp_func - function structure for live patching
@@ -39,10 +40,29 @@ enum klp_state {
39 * @new_func: pointer to the patched function code 40 * @new_func: pointer to the patched function code
40 * @old_sympos: a hint indicating which symbol position the old function 41 * @old_sympos: a hint indicating which symbol position the old function
41 * can be found (optional) 42 * can be found (optional)
43 * @immediate: patch the func immediately, bypassing safety mechanisms
42 * @old_addr: the address of the function being patched 44 * @old_addr: the address of the function being patched
43 * @kobj: kobject for sysfs resources 45 * @kobj: kobject for sysfs resources
44 * @state: tracks function-level patch application state
45 * @stack_node: list node for klp_ops func_stack list 46 * @stack_node: list node for klp_ops func_stack list
47 * @old_size: size of the old function
48 * @new_size: size of the new function
49 * @patched: the func has been added to the klp_ops list
50 * @transition: the func is currently being applied or reverted
51 *
52 * The patched and transition variables define the func's patching state. When
53 * patching, a func is always in one of the following states:
54 *
55 * patched=0 transition=0: unpatched
56 * patched=0 transition=1: unpatched, temporary starting state
57 * patched=1 transition=1: patched, may be visible to some tasks
58 * patched=1 transition=0: patched, visible to all tasks
59 *
60 * And when unpatching, it goes in the reverse order:
61 *
62 * patched=1 transition=0: patched, visible to all tasks
63 * patched=1 transition=1: patched, may be visible to some tasks
64 * patched=0 transition=1: unpatched, temporary ending state
65 * patched=0 transition=0: unpatched
46 */ 66 */
47struct klp_func { 67struct klp_func {
48 /* external */ 68 /* external */
@@ -56,12 +76,15 @@ struct klp_func {
56 * in kallsyms for the given object is used. 76 * in kallsyms for the given object is used.
57 */ 77 */
58 unsigned long old_sympos; 78 unsigned long old_sympos;
79 bool immediate;
59 80
60 /* internal */ 81 /* internal */
61 unsigned long old_addr; 82 unsigned long old_addr;
62 struct kobject kobj; 83 struct kobject kobj;
63 enum klp_state state;
64 struct list_head stack_node; 84 struct list_head stack_node;
85 unsigned long old_size, new_size;
86 bool patched;
87 bool transition;
65}; 88};
66 89
67/** 90/**
@@ -70,8 +93,8 @@ struct klp_func {
70 * @funcs: function entries for functions to be patched in the object 93 * @funcs: function entries for functions to be patched in the object
71 * @kobj: kobject for sysfs resources 94 * @kobj: kobject for sysfs resources
72 * @mod: kernel module associated with the patched object 95 * @mod: kernel module associated with the patched object
73 * (NULL for vmlinux) 96 * (NULL for vmlinux)
74 * @state: tracks object-level patch application state 97 * @patched: the object's funcs have been added to the klp_ops list
75 */ 98 */
76struct klp_object { 99struct klp_object {
77 /* external */ 100 /* external */
@@ -81,26 +104,30 @@ struct klp_object {
81 /* internal */ 104 /* internal */
82 struct kobject kobj; 105 struct kobject kobj;
83 struct module *mod; 106 struct module *mod;
84 enum klp_state state; 107 bool patched;
85}; 108};
86 109
87/** 110/**
88 * struct klp_patch - patch structure for live patching 111 * struct klp_patch - patch structure for live patching
89 * @mod: reference to the live patch module 112 * @mod: reference to the live patch module
90 * @objs: object entries for kernel objects to be patched 113 * @objs: object entries for kernel objects to be patched
114 * @immediate: patch all funcs immediately, bypassing safety mechanisms
91 * @list: list node for global list of registered patches 115 * @list: list node for global list of registered patches
92 * @kobj: kobject for sysfs resources 116 * @kobj: kobject for sysfs resources
93 * @state: tracks patch-level application state 117 * @enabled: the patch is enabled (but operation may be incomplete)
118 * @finish: for waiting till it is safe to remove the patch module
94 */ 119 */
95struct klp_patch { 120struct klp_patch {
96 /* external */ 121 /* external */
97 struct module *mod; 122 struct module *mod;
98 struct klp_object *objs; 123 struct klp_object *objs;
124 bool immediate;
99 125
100 /* internal */ 126 /* internal */
101 struct list_head list; 127 struct list_head list;
102 struct kobject kobj; 128 struct kobject kobj;
103 enum klp_state state; 129 bool enabled;
130 struct completion finish;
104}; 131};
105 132
106#define klp_for_each_object(patch, obj) \ 133#define klp_for_each_object(patch, obj) \
@@ -123,10 +150,27 @@ void arch_klp_init_object_loaded(struct klp_patch *patch,
123int klp_module_coming(struct module *mod); 150int klp_module_coming(struct module *mod);
124void klp_module_going(struct module *mod); 151void klp_module_going(struct module *mod);
125 152
153void klp_copy_process(struct task_struct *child);
154void klp_update_patch_state(struct task_struct *task);
155
156static inline bool klp_patch_pending(struct task_struct *task)
157{
158 return test_tsk_thread_flag(task, TIF_PATCH_PENDING);
159}
160
161static inline bool klp_have_reliable_stack(void)
162{
163 return IS_ENABLED(CONFIG_STACKTRACE) &&
164 IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
165}
166
126#else /* !CONFIG_LIVEPATCH */ 167#else /* !CONFIG_LIVEPATCH */
127 168
128static inline int klp_module_coming(struct module *mod) { return 0; } 169static inline int klp_module_coming(struct module *mod) { return 0; }
129static inline void klp_module_going(struct module *mod) { } 170static inline void klp_module_going(struct module *mod) {}
171static inline bool klp_patch_pending(struct task_struct *task) { return false; }
172static inline void klp_update_patch_state(struct task_struct *task) {}
173static inline void klp_copy_process(struct task_struct *child) {}
130 174
131#endif /* CONFIG_LIVEPATCH */ 175#endif /* CONFIG_LIVEPATCH */
132 176
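
The split above between 'external' fields (filled in by the patch author) and 'internal' state reflects what a patch module actually supplies. A minimal sketch in the spirit of samples/livepatch/livepatch-sample.c (touched by this series, see the diffstat), using the new 'immediate' flag to opt out of the consistency model; the replacement function and the patched symbol are only illustrative:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/livepatch.h>

/* Illustrative replacement for the vmlinux symbol named below. */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* name == NULL means the object is vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
	/* patch all tasks at once, bypassing the consistency model */
	.immediate = true,
};

static int livepatch_init(void)
{
	int ret;

	ret = klp_register_patch(&patch);
	if (ret)
		return ret;

	ret = klp_enable_patch(&patch);
	if (ret) {
		WARN_ON(klp_unregister_patch(&patch));
		return ret;
	}

	return 0;
}

static void livepatch_exit(void)
{
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");

Note the caveat from the documentation above: with klp_patch.immediate set, the module cannot be removed again once the patch has been disabled.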
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ba080e586dae..186dd6eae958 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1044,6 +1044,9 @@ struct task_struct {
1044 /* A live task holds one reference: */ 1044 /* A live task holds one reference: */
1045 atomic_t stack_refcount; 1045 atomic_t stack_refcount;
1046#endif 1046#endif
1047#ifdef CONFIG_LIVEPATCH
1048 int patch_state;
1049#endif
1047 /* CPU-specific state of this task: */ 1050 /* CPU-specific state of this task: */
1048 struct thread_struct thread; 1051 struct thread_struct thread;
1049 1052
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 0a34489a46b6..4205f71a5f0e 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -18,6 +18,8 @@ extern void save_stack_trace_regs(struct pt_regs *regs,
18 struct stack_trace *trace); 18 struct stack_trace *trace);
19extern void save_stack_trace_tsk(struct task_struct *tsk, 19extern void save_stack_trace_tsk(struct task_struct *tsk,
20 struct stack_trace *trace); 20 struct stack_trace *trace);
21extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
22 struct stack_trace *trace);
21 23
22extern void print_stack_trace(struct stack_trace *trace, int spaces); 24extern void print_stack_trace(struct stack_trace *trace, int spaces);
23extern int snprint_stack_trace(char *buf, size_t size, 25extern int snprint_stack_trace(char *buf, size_t size,
@@ -29,12 +31,13 @@ extern void save_stack_trace_user(struct stack_trace *trace);
29# define save_stack_trace_user(trace) do { } while (0) 31# define save_stack_trace_user(trace) do { } while (0)
30#endif 32#endif
31 33
32#else 34#else /* !CONFIG_STACKTRACE */
33# define save_stack_trace(trace) do { } while (0) 35# define save_stack_trace(trace) do { } while (0)
34# define save_stack_trace_tsk(tsk, trace) do { } while (0) 36# define save_stack_trace_tsk(tsk, trace) do { } while (0)
35# define save_stack_trace_user(trace) do { } while (0) 37# define save_stack_trace_user(trace) do { } while (0)
36# define print_stack_trace(trace, spaces) do { } while (0) 38# define print_stack_trace(trace, spaces) do { } while (0)
37# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0) 39# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
38#endif 40# define save_stack_trace_tsk_reliable(tsk, trace) ({ -ENOSYS; })
41#endif /* CONFIG_STACKTRACE */
39 42
40#endif 43#endif /* __LINUX_STACKTRACE_H */
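
save_stack_trace_tsk_reliable() is the hook the consistency model depends on: the weak fallback in kernel/stacktrace.c (further down) and the !CONFIG_STACKTRACE stub both return -ENOSYS, so only architectures selecting HAVE_RELIABLE_STACKTRACE supply a real body. A rough skeleton of the contract such an implementation must honour; this is a hypothetical sketch (arch_unwind_is_ambiguous() does not exist), not the x86 version added elsewhere in this series:

    int save_stack_trace_tsk_reliable(struct task_struct *tsk,
                                      struct stack_trace *trace)
    {
            /*
             * Walk tsk's stack with the arch unwinder.  Every frame must be
             * accounted for; on any frame the unwinder is not certain about
             * (exception frames, missing end marker, ...) give up so the
             * caller can fall back to other switching methods:
             */
            if (arch_unwind_is_ambiguous(tsk))      /* hypothetical helper */
                    return -EINVAL;

            /* otherwise fill trace->entries[] / trace->nr_entries and ... */
            return 0;
    }
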
diff --git a/kernel/fork.c b/kernel/fork.c
index 3a4343cdfe90..56d85fd81411 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -87,6 +87,7 @@
87#include <linux/compiler.h> 87#include <linux/compiler.h>
88#include <linux/sysctl.h> 88#include <linux/sysctl.h>
89#include <linux/kcov.h> 89#include <linux/kcov.h>
90#include <linux/livepatch.h>
90 91
91#include <asm/pgtable.h> 92#include <asm/pgtable.h>
92#include <asm/pgalloc.h> 93#include <asm/pgalloc.h>
@@ -1798,6 +1799,8 @@ static __latent_entropy struct task_struct *copy_process(
1798 p->parent_exec_id = current->self_exec_id; 1799 p->parent_exec_id = current->self_exec_id;
1799 } 1800 }
1800 1801
1802 klp_copy_process(p);
1803
1801 spin_lock(&current->sighand->siglock); 1804 spin_lock(&current->sighand->siglock);
1802 1805
1803 /* 1806 /*
diff --git a/kernel/livepatch/Makefile b/kernel/livepatch/Makefile
index e8780c0901d9..2b8bdb1925da 100644
--- a/kernel/livepatch/Makefile
+++ b/kernel/livepatch/Makefile
@@ -1,3 +1,3 @@
1obj-$(CONFIG_LIVEPATCH) += livepatch.o 1obj-$(CONFIG_LIVEPATCH) += livepatch.o
2 2
3livepatch-objs := core.o 3livepatch-objs := core.o patch.o transition.o
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index af4643873e71..b9628e43c78f 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -24,61 +24,31 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/ftrace.h>
28#include <linux/list.h> 27#include <linux/list.h>
29#include <linux/kallsyms.h> 28#include <linux/kallsyms.h>
30#include <linux/livepatch.h> 29#include <linux/livepatch.h>
31#include <linux/elf.h> 30#include <linux/elf.h>
32#include <linux/moduleloader.h> 31#include <linux/moduleloader.h>
32#include <linux/completion.h>
33#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
34 34#include "core.h"
35/** 35#include "patch.h"
36 * struct klp_ops - structure for tracking registered ftrace ops structs 36#include "transition.h"
37 *
38 * A single ftrace_ops is shared between all enabled replacement functions
39 * (klp_func structs) which have the same old_addr. This allows the switch
40 * between function versions to happen instantaneously by updating the klp_ops
41 * struct's func_stack list. The winner is the klp_func at the top of the
42 * func_stack (front of the list).
43 *
44 * @node: node for the global klp_ops list
45 * @func_stack: list head for the stack of klp_func's (active func is on top)
46 * @fops: registered ftrace ops struct
47 */
48struct klp_ops {
49 struct list_head node;
50 struct list_head func_stack;
51 struct ftrace_ops fops;
52};
53 37
54/* 38/*
55 * The klp_mutex protects the global lists and state transitions of any 39 * klp_mutex is a coarse lock which serializes access to klp data. All
56 * structure reachable from them. References to any structure must be obtained 40 * accesses to klp-related variables and structures must have mutex protection,
57 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to 41 * except within the following functions which carefully avoid the need for it:
58 * ensure it gets consistent data). 42 *
43 * - klp_ftrace_handler()
44 * - klp_update_patch_state()
59 */ 45 */
60static DEFINE_MUTEX(klp_mutex); 46DEFINE_MUTEX(klp_mutex);
61 47
62static LIST_HEAD(klp_patches); 48static LIST_HEAD(klp_patches);
63static LIST_HEAD(klp_ops);
64 49
65static struct kobject *klp_root_kobj; 50static struct kobject *klp_root_kobj;
66 51
67static struct klp_ops *klp_find_ops(unsigned long old_addr)
68{
69 struct klp_ops *ops;
70 struct klp_func *func;
71
72 list_for_each_entry(ops, &klp_ops, node) {
73 func = list_first_entry(&ops->func_stack, struct klp_func,
74 stack_node);
75 if (func->old_addr == old_addr)
76 return ops;
77 }
78
79 return NULL;
80}
81
82static bool klp_is_module(struct klp_object *obj) 52static bool klp_is_module(struct klp_object *obj)
83{ 53{
84 return obj->name; 54 return obj->name;
@@ -117,7 +87,6 @@ static void klp_find_object_module(struct klp_object *obj)
117 mutex_unlock(&module_mutex); 87 mutex_unlock(&module_mutex);
118} 88}
119 89
120/* klp_mutex must be held by caller */
121static bool klp_is_patch_registered(struct klp_patch *patch) 90static bool klp_is_patch_registered(struct klp_patch *patch)
122{ 91{
123 struct klp_patch *mypatch; 92 struct klp_patch *mypatch;
@@ -182,7 +151,10 @@ static int klp_find_object_symbol(const char *objname, const char *name,
182 }; 151 };
183 152
184 mutex_lock(&module_mutex); 153 mutex_lock(&module_mutex);
185 kallsyms_on_each_symbol(klp_find_callback, &args); 154 if (objname)
155 module_kallsyms_on_each_symbol(klp_find_callback, &args);
156 else
157 kallsyms_on_each_symbol(klp_find_callback, &args);
186 mutex_unlock(&module_mutex); 158 mutex_unlock(&module_mutex);
187 159
188 /* 160 /*
@@ -233,7 +205,7 @@ static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
233 for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) { 205 for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
234 sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info); 206 sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
235 if (sym->st_shndx != SHN_LIVEPATCH) { 207 if (sym->st_shndx != SHN_LIVEPATCH) {
236 pr_err("symbol %s is not marked as a livepatch symbol", 208 pr_err("symbol %s is not marked as a livepatch symbol\n",
237 strtab + sym->st_name); 209 strtab + sym->st_name);
238 return -EINVAL; 210 return -EINVAL;
239 } 211 }
@@ -243,7 +215,7 @@ static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
243 ".klp.sym.%55[^.].%127[^,],%lu", 215 ".klp.sym.%55[^.].%127[^,],%lu",
244 objname, symname, &sympos); 216 objname, symname, &sympos);
245 if (cnt != 3) { 217 if (cnt != 3) {
246 pr_err("symbol %s has an incorrectly formatted name", 218 pr_err("symbol %s has an incorrectly formatted name\n",
247 strtab + sym->st_name); 219 strtab + sym->st_name);
248 return -EINVAL; 220 return -EINVAL;
249 } 221 }
@@ -288,7 +260,7 @@ static int klp_write_object_relocations(struct module *pmod,
288 */ 260 */
289 cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname); 261 cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
290 if (cnt != 1) { 262 if (cnt != 1) {
291 pr_err("section %s has an incorrectly formatted name", 263 pr_err("section %s has an incorrectly formatted name\n",
292 secname); 264 secname);
293 ret = -EINVAL; 265 ret = -EINVAL;
294 break; 266 break;
@@ -311,191 +283,30 @@ static int klp_write_object_relocations(struct module *pmod,
311 return ret; 283 return ret;
312} 284}
313 285
314static void notrace klp_ftrace_handler(unsigned long ip,
315 unsigned long parent_ip,
316 struct ftrace_ops *fops,
317 struct pt_regs *regs)
318{
319 struct klp_ops *ops;
320 struct klp_func *func;
321
322 ops = container_of(fops, struct klp_ops, fops);
323
324 rcu_read_lock();
325 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
326 stack_node);
327 if (WARN_ON_ONCE(!func))
328 goto unlock;
329
330 klp_arch_set_pc(regs, (unsigned long)func->new_func);
331unlock:
332 rcu_read_unlock();
333}
334
335/*
336 * Convert a function address into the appropriate ftrace location.
337 *
338 * Usually this is just the address of the function, but on some architectures
339 * it's more complicated so allow them to provide a custom behaviour.
340 */
341#ifndef klp_get_ftrace_location
342static unsigned long klp_get_ftrace_location(unsigned long faddr)
343{
344 return faddr;
345}
346#endif
347
348static void klp_disable_func(struct klp_func *func)
349{
350 struct klp_ops *ops;
351
352 if (WARN_ON(func->state != KLP_ENABLED))
353 return;
354 if (WARN_ON(!func->old_addr))
355 return;
356
357 ops = klp_find_ops(func->old_addr);
358 if (WARN_ON(!ops))
359 return;
360
361 if (list_is_singular(&ops->func_stack)) {
362 unsigned long ftrace_loc;
363
364 ftrace_loc = klp_get_ftrace_location(func->old_addr);
365 if (WARN_ON(!ftrace_loc))
366 return;
367
368 WARN_ON(unregister_ftrace_function(&ops->fops));
369 WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
370
371 list_del_rcu(&func->stack_node);
372 list_del(&ops->node);
373 kfree(ops);
374 } else {
375 list_del_rcu(&func->stack_node);
376 }
377
378 func->state = KLP_DISABLED;
379}
380
381static int klp_enable_func(struct klp_func *func)
382{
383 struct klp_ops *ops;
384 int ret;
385
386 if (WARN_ON(!func->old_addr))
387 return -EINVAL;
388
389 if (WARN_ON(func->state != KLP_DISABLED))
390 return -EINVAL;
391
392 ops = klp_find_ops(func->old_addr);
393 if (!ops) {
394 unsigned long ftrace_loc;
395
396 ftrace_loc = klp_get_ftrace_location(func->old_addr);
397 if (!ftrace_loc) {
398 pr_err("failed to find location for function '%s'\n",
399 func->old_name);
400 return -EINVAL;
401 }
402
403 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
404 if (!ops)
405 return -ENOMEM;
406
407 ops->fops.func = klp_ftrace_handler;
408 ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
409 FTRACE_OPS_FL_DYNAMIC |
410 FTRACE_OPS_FL_IPMODIFY;
411
412 list_add(&ops->node, &klp_ops);
413
414 INIT_LIST_HEAD(&ops->func_stack);
415 list_add_rcu(&func->stack_node, &ops->func_stack);
416
417 ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
418 if (ret) {
419 pr_err("failed to set ftrace filter for function '%s' (%d)\n",
420 func->old_name, ret);
421 goto err;
422 }
423
424 ret = register_ftrace_function(&ops->fops);
425 if (ret) {
426 pr_err("failed to register ftrace handler for function '%s' (%d)\n",
427 func->old_name, ret);
428 ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
429 goto err;
430 }
431
432
433 } else {
434 list_add_rcu(&func->stack_node, &ops->func_stack);
435 }
436
437 func->state = KLP_ENABLED;
438
439 return 0;
440
441err:
442 list_del_rcu(&func->stack_node);
443 list_del(&ops->node);
444 kfree(ops);
445 return ret;
446}
447
448static void klp_disable_object(struct klp_object *obj)
449{
450 struct klp_func *func;
451
452 klp_for_each_func(obj, func)
453 if (func->state == KLP_ENABLED)
454 klp_disable_func(func);
455
456 obj->state = KLP_DISABLED;
457}
458
459static int klp_enable_object(struct klp_object *obj)
460{
461 struct klp_func *func;
462 int ret;
463
464 if (WARN_ON(obj->state != KLP_DISABLED))
465 return -EINVAL;
466
467 if (WARN_ON(!klp_is_object_loaded(obj)))
468 return -EINVAL;
469
470 klp_for_each_func(obj, func) {
471 ret = klp_enable_func(func);
472 if (ret) {
473 klp_disable_object(obj);
474 return ret;
475 }
476 }
477 obj->state = KLP_ENABLED;
478
479 return 0;
480}
481
482static int __klp_disable_patch(struct klp_patch *patch) 286static int __klp_disable_patch(struct klp_patch *patch)
483{ 287{
484 struct klp_object *obj; 288 if (klp_transition_patch)
289 return -EBUSY;
485 290
486 /* enforce stacking: only the last enabled patch can be disabled */ 291 /* enforce stacking: only the last enabled patch can be disabled */
487 if (!list_is_last(&patch->list, &klp_patches) && 292 if (!list_is_last(&patch->list, &klp_patches) &&
488 list_next_entry(patch, list)->state == KLP_ENABLED) 293 list_next_entry(patch, list)->enabled)
489 return -EBUSY; 294 return -EBUSY;
490 295
491 pr_notice("disabling patch '%s'\n", patch->mod->name); 296 klp_init_transition(patch, KLP_UNPATCHED);
492 297
493 klp_for_each_object(patch, obj) { 298 /*
494 if (obj->state == KLP_ENABLED) 299 * Enforce the order of the func->transition writes in
495 klp_disable_object(obj); 300 * klp_init_transition() and the TIF_PATCH_PENDING writes in
496 } 301 * klp_start_transition(). In the rare case where klp_ftrace_handler()
302 * is called shortly after klp_update_patch_state() switches the task,
303 * this ensures the handler sees that func->transition is set.
304 */
305 smp_wmb();
497 306
498 patch->state = KLP_DISABLED; 307 klp_start_transition();
308 klp_try_complete_transition();
309 patch->enabled = false;
499 310
500 return 0; 311 return 0;
501} 312}
@@ -519,7 +330,7 @@ int klp_disable_patch(struct klp_patch *patch)
519 goto err; 330 goto err;
520 } 331 }
521 332
522 if (patch->state == KLP_DISABLED) { 333 if (!patch->enabled) {
523 ret = -EINVAL; 334 ret = -EINVAL;
524 goto err; 335 goto err;
525 } 336 }
@@ -537,32 +348,61 @@ static int __klp_enable_patch(struct klp_patch *patch)
537 struct klp_object *obj; 348 struct klp_object *obj;
538 int ret; 349 int ret;
539 350
540 if (WARN_ON(patch->state != KLP_DISABLED)) 351 if (klp_transition_patch)
352 return -EBUSY;
353
354 if (WARN_ON(patch->enabled))
541 return -EINVAL; 355 return -EINVAL;
542 356
543 /* enforce stacking: only the first disabled patch can be enabled */ 357 /* enforce stacking: only the first disabled patch can be enabled */
544 if (patch->list.prev != &klp_patches && 358 if (patch->list.prev != &klp_patches &&
545 list_prev_entry(patch, list)->state == KLP_DISABLED) 359 !list_prev_entry(patch, list)->enabled)
546 return -EBUSY; 360 return -EBUSY;
547 361
362 /*
363 * A reference is taken on the patch module to prevent it from being
364 * unloaded.
365 *
366 * Note: For immediate (no consistency model) patches we don't allow
367 * patch modules to unload since there is no safe/sane method to
368 * determine if a thread is still running in the patched code contained
369 * in the patch module once the ftrace registration is successful.
370 */
371 if (!try_module_get(patch->mod))
372 return -ENODEV;
373
548 pr_notice("enabling patch '%s'\n", patch->mod->name); 374 pr_notice("enabling patch '%s'\n", patch->mod->name);
549 375
376 klp_init_transition(patch, KLP_PATCHED);
377
378 /*
379 * Enforce the order of the func->transition writes in
380 * klp_init_transition() and the ops->func_stack writes in
381 * klp_patch_object(), so that klp_ftrace_handler() will see the
382 * func->transition updates before the handler is registered and the
383 * new funcs become visible to the handler.
384 */
385 smp_wmb();
386
550 klp_for_each_object(patch, obj) { 387 klp_for_each_object(patch, obj) {
551 if (!klp_is_object_loaded(obj)) 388 if (!klp_is_object_loaded(obj))
552 continue; 389 continue;
553 390
554 ret = klp_enable_object(obj); 391 ret = klp_patch_object(obj);
555 if (ret) 392 if (ret) {
556 goto unregister; 393 pr_warn("failed to enable patch '%s'\n",
394 patch->mod->name);
395
396 klp_cancel_transition();
397 return ret;
398 }
557 } 399 }
558 400
559 patch->state = KLP_ENABLED; 401 klp_start_transition();
402 klp_try_complete_transition();
403 patch->enabled = true;
560 404
561 return 0; 405 return 0;
562
563unregister:
564 WARN_ON(__klp_disable_patch(patch));
565 return ret;
566} 406}
567 407
568/** 408/**
@@ -599,6 +439,7 @@ EXPORT_SYMBOL_GPL(klp_enable_patch);
599 * /sys/kernel/livepatch 439 * /sys/kernel/livepatch
600 * /sys/kernel/livepatch/<patch> 440 * /sys/kernel/livepatch/<patch>
601 * /sys/kernel/livepatch/<patch>/enabled 441 * /sys/kernel/livepatch/<patch>/enabled
442 * /sys/kernel/livepatch/<patch>/transition
602 * /sys/kernel/livepatch/<patch>/<object> 443 * /sys/kernel/livepatch/<patch>/<object>
603 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos> 444 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
604 */ 445 */
@@ -608,26 +449,34 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
608{ 449{
609 struct klp_patch *patch; 450 struct klp_patch *patch;
610 int ret; 451 int ret;
611 unsigned long val; 452 bool enabled;
612 453
613 ret = kstrtoul(buf, 10, &val); 454 ret = kstrtobool(buf, &enabled);
614 if (ret) 455 if (ret)
615 return -EINVAL; 456 return ret;
616
617 if (val != KLP_DISABLED && val != KLP_ENABLED)
618 return -EINVAL;
619 457
620 patch = container_of(kobj, struct klp_patch, kobj); 458 patch = container_of(kobj, struct klp_patch, kobj);
621 459
622 mutex_lock(&klp_mutex); 460 mutex_lock(&klp_mutex);
623 461
624 if (val == patch->state) { 462 if (!klp_is_patch_registered(patch)) {
463 /*
464 * Module with the patch could either disappear meanwhile or is
465 * not properly initialized yet.
466 */
467 ret = -EINVAL;
468 goto err;
469 }
470
471 if (patch->enabled == enabled) {
625 /* already in requested state */ 472 /* already in requested state */
626 ret = -EINVAL; 473 ret = -EINVAL;
627 goto err; 474 goto err;
628 } 475 }
629 476
630 if (val == KLP_ENABLED) { 477 if (patch == klp_transition_patch) {
478 klp_reverse_transition();
479 } else if (enabled) {
631 ret = __klp_enable_patch(patch); 480 ret = __klp_enable_patch(patch);
632 if (ret) 481 if (ret)
633 goto err; 482 goto err;
@@ -652,21 +501,33 @@ static ssize_t enabled_show(struct kobject *kobj,
652 struct klp_patch *patch; 501 struct klp_patch *patch;
653 502
654 patch = container_of(kobj, struct klp_patch, kobj); 503 patch = container_of(kobj, struct klp_patch, kobj);
655 return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state); 504 return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
505}
506
507static ssize_t transition_show(struct kobject *kobj,
508 struct kobj_attribute *attr, char *buf)
509{
510 struct klp_patch *patch;
511
512 patch = container_of(kobj, struct klp_patch, kobj);
513 return snprintf(buf, PAGE_SIZE-1, "%d\n",
514 patch == klp_transition_patch);
656} 515}
657 516
658static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled); 517static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
518static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
659static struct attribute *klp_patch_attrs[] = { 519static struct attribute *klp_patch_attrs[] = {
660 &enabled_kobj_attr.attr, 520 &enabled_kobj_attr.attr,
521 &transition_kobj_attr.attr,
661 NULL 522 NULL
662}; 523};
663 524
664static void klp_kobj_release_patch(struct kobject *kobj) 525static void klp_kobj_release_patch(struct kobject *kobj)
665{ 526{
666 /* 527 struct klp_patch *patch;
667 * Once we have a consistency model we'll need to module_put() the 528
668 * patch module here. See klp_register_patch() for more details. 529 patch = container_of(kobj, struct klp_patch, kobj);
669 */ 530 complete(&patch->finish);
670} 531}
671 532
672static struct kobj_type klp_ktype_patch = { 533static struct kobj_type klp_ktype_patch = {
@@ -737,7 +598,6 @@ static void klp_free_patch(struct klp_patch *patch)
737 klp_free_objects_limited(patch, NULL); 598 klp_free_objects_limited(patch, NULL);
738 if (!list_empty(&patch->list)) 599 if (!list_empty(&patch->list))
739 list_del(&patch->list); 600 list_del(&patch->list);
740 kobject_put(&patch->kobj);
741} 601}
742 602
743static int klp_init_func(struct klp_object *obj, struct klp_func *func) 603static int klp_init_func(struct klp_object *obj, struct klp_func *func)
@@ -746,7 +606,8 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
746 return -EINVAL; 606 return -EINVAL;
747 607
748 INIT_LIST_HEAD(&func->stack_node); 608 INIT_LIST_HEAD(&func->stack_node);
749 func->state = KLP_DISABLED; 609 func->patched = false;
610 func->transition = false;
750 611
751 /* The format for the sysfs directory is <function,sympos> where sympos 612 /* The format for the sysfs directory is <function,sympos> where sympos
752 * is the nth occurrence of this symbol in kallsyms for the patched 613 * is the nth occurrence of this symbol in kallsyms for the patched
@@ -787,6 +648,22 @@ static int klp_init_object_loaded(struct klp_patch *patch,
787 &func->old_addr); 648 &func->old_addr);
788 if (ret) 649 if (ret)
789 return ret; 650 return ret;
651
652 ret = kallsyms_lookup_size_offset(func->old_addr,
653 &func->old_size, NULL);
654 if (!ret) {
655 pr_err("kallsyms size lookup failed for '%s'\n",
656 func->old_name);
657 return -ENOENT;
658 }
659
660 ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
661 &func->new_size, NULL);
662 if (!ret) {
663 pr_err("kallsyms size lookup failed for '%s' replacement\n",
664 func->old_name);
665 return -ENOENT;
666 }
790 } 667 }
791 668
792 return 0; 669 return 0;
@@ -801,7 +678,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
801 if (!obj->funcs) 678 if (!obj->funcs)
802 return -EINVAL; 679 return -EINVAL;
803 680
804 obj->state = KLP_DISABLED; 681 obj->patched = false;
805 obj->mod = NULL; 682 obj->mod = NULL;
806 683
807 klp_find_object_module(obj); 684 klp_find_object_module(obj);
@@ -842,12 +719,15 @@ static int klp_init_patch(struct klp_patch *patch)
842 719
843 mutex_lock(&klp_mutex); 720 mutex_lock(&klp_mutex);
844 721
845 patch->state = KLP_DISABLED; 722 patch->enabled = false;
723 init_completion(&patch->finish);
846 724
847 ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch, 725 ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
848 klp_root_kobj, "%s", patch->mod->name); 726 klp_root_kobj, "%s", patch->mod->name);
849 if (ret) 727 if (ret) {
850 goto unlock; 728 mutex_unlock(&klp_mutex);
729 return ret;
730 }
851 731
852 klp_for_each_object(patch, obj) { 732 klp_for_each_object(patch, obj) {
853 ret = klp_init_object(patch, obj); 733 ret = klp_init_object(patch, obj);
@@ -863,9 +743,12 @@ static int klp_init_patch(struct klp_patch *patch)
863 743
864free: 744free:
865 klp_free_objects_limited(patch, obj); 745 klp_free_objects_limited(patch, obj);
866 kobject_put(&patch->kobj); 746
867unlock:
868 mutex_unlock(&klp_mutex); 747 mutex_unlock(&klp_mutex);
748
749 kobject_put(&patch->kobj);
750 wait_for_completion(&patch->finish);
751
869 return ret; 752 return ret;
870} 753}
871 754
@@ -879,23 +762,29 @@ unlock:
879 */ 762 */
880int klp_unregister_patch(struct klp_patch *patch) 763int klp_unregister_patch(struct klp_patch *patch)
881{ 764{
882 int ret = 0; 765 int ret;
883 766
884 mutex_lock(&klp_mutex); 767 mutex_lock(&klp_mutex);
885 768
886 if (!klp_is_patch_registered(patch)) { 769 if (!klp_is_patch_registered(patch)) {
887 ret = -EINVAL; 770 ret = -EINVAL;
888 goto out; 771 goto err;
889 } 772 }
890 773
891 if (patch->state == KLP_ENABLED) { 774 if (patch->enabled) {
892 ret = -EBUSY; 775 ret = -EBUSY;
893 goto out; 776 goto err;
894 } 777 }
895 778
896 klp_free_patch(patch); 779 klp_free_patch(patch);
897 780
898out: 781 mutex_unlock(&klp_mutex);
782
783 kobject_put(&patch->kobj);
784 wait_for_completion(&patch->finish);
785
786 return 0;
787err:
899 mutex_unlock(&klp_mutex); 788 mutex_unlock(&klp_mutex);
900 return ret; 789 return ret;
901} 790}
@@ -908,17 +797,18 @@ EXPORT_SYMBOL_GPL(klp_unregister_patch);
908 * Initializes the data structure associated with the patch and 797 * Initializes the data structure associated with the patch and
909 * creates the sysfs interface. 798 * creates the sysfs interface.
910 * 799 *
800 * There is no need to take the reference on the patch module here. It is done
801 * later when the patch is enabled.
802 *
911 * Return: 0 on success, otherwise error 803 * Return: 0 on success, otherwise error
912 */ 804 */
913int klp_register_patch(struct klp_patch *patch) 805int klp_register_patch(struct klp_patch *patch)
914{ 806{
915 int ret;
916
917 if (!patch || !patch->mod) 807 if (!patch || !patch->mod)
918 return -EINVAL; 808 return -EINVAL;
919 809
920 if (!is_livepatch_module(patch->mod)) { 810 if (!is_livepatch_module(patch->mod)) {
921 pr_err("module %s is not marked as a livepatch module", 811 pr_err("module %s is not marked as a livepatch module\n",
922 patch->mod->name); 812 patch->mod->name);
923 return -EINVAL; 813 return -EINVAL;
924 } 814 }
@@ -927,20 +817,16 @@ int klp_register_patch(struct klp_patch *patch)
927 return -ENODEV; 817 return -ENODEV;
928 818
929 /* 819 /*
930 * A reference is taken on the patch module to prevent it from being 820 * Architectures without reliable stack traces have to set
931 * unloaded. Right now, we don't allow patch modules to unload since 821 * patch->immediate because there's currently no way to patch kthreads
932 * there is currently no method to determine if a thread is still 822 * with the consistency model.
933 * running in the patched code contained in the patch module once
934 * the ftrace registration is successful.
935 */ 823 */
936 if (!try_module_get(patch->mod)) 824 if (!klp_have_reliable_stack() && !patch->immediate) {
937 return -ENODEV; 825 pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
938 826 return -ENOSYS;
939 ret = klp_init_patch(patch); 827 }
940 if (ret)
941 module_put(patch->mod);
942 828
943 return ret; 829 return klp_init_patch(patch);
944} 830}
945EXPORT_SYMBOL_GPL(klp_register_patch); 831EXPORT_SYMBOL_GPL(klp_register_patch);
946 832
@@ -975,13 +861,17 @@ int klp_module_coming(struct module *mod)
975 goto err; 861 goto err;
976 } 862 }
977 863
978 if (patch->state == KLP_DISABLED) 864 /*
865 * Only patch the module if the patch is enabled or is
866 * in transition.
867 */
868 if (!patch->enabled && patch != klp_transition_patch)
979 break; 869 break;
980 870
981 pr_notice("applying patch '%s' to loading module '%s'\n", 871 pr_notice("applying patch '%s' to loading module '%s'\n",
982 patch->mod->name, obj->mod->name); 872 patch->mod->name, obj->mod->name);
983 873
984 ret = klp_enable_object(obj); 874 ret = klp_patch_object(obj);
985 if (ret) { 875 if (ret) {
986 pr_warn("failed to apply patch '%s' to module '%s' (%d)\n", 876 pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
987 patch->mod->name, obj->mod->name, ret); 877 patch->mod->name, obj->mod->name, ret);
@@ -1032,10 +922,14 @@ void klp_module_going(struct module *mod)
1032 if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) 922 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
1033 continue; 923 continue;
1034 924
1035 if (patch->state != KLP_DISABLED) { 925 /*
926 * Only unpatch the module if the patch is enabled or
927 * is in transition.
928 */
929 if (patch->enabled || patch == klp_transition_patch) {
1036 pr_notice("reverting patch '%s' on unloading module '%s'\n", 930 pr_notice("reverting patch '%s' on unloading module '%s'\n",
1037 patch->mod->name, obj->mod->name); 931 patch->mod->name, obj->mod->name);
1038 klp_disable_object(obj); 932 klp_unpatch_object(obj);
1039 } 933 }
1040 934
1041 klp_free_object_loaded(obj); 935 klp_free_object_loaded(obj);
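
Taken together, the core.c changes move the module reference from registration time to enable time and allow a disabled patch to be unregistered and removed (the kobject_put() plus wait_for_completion() on patch->finish). From a patch author's point of view the resulting lifecycle is the one the updated sample at the end of this diff follows; a minimal sketch, assuming a struct klp_patch named 'patch' defined as usual (my_livepatch_init/exit are hypothetical names):

    static int my_livepatch_init(void)
    {
            int ret;

            ret = klp_register_patch(&patch);       /* no module ref taken yet */
            if (ret)
                    return ret;

            ret = klp_enable_patch(&patch);         /* ref taken, transition starts */
            if (ret) {
                    WARN_ON(klp_unregister_patch(&patch));
                    return ret;
            }
            return 0;
    }

    static void my_livepatch_exit(void)
    {
            /*
             * No klp_disable_patch() here any more: the module can only be
             * removed once the patch has been disabled via sysfs, and
             * unregistering a disabled patch is now allowed.
             */
            WARN_ON(klp_unregister_patch(&patch));
    }
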
diff --git a/kernel/livepatch/core.h b/kernel/livepatch/core.h
new file mode 100644
index 000000000000..c74f24c47837
--- /dev/null
+++ b/kernel/livepatch/core.h
@@ -0,0 +1,6 @@
1#ifndef _LIVEPATCH_CORE_H
2#define _LIVEPATCH_CORE_H
3
4extern struct mutex klp_mutex;
5
6#endif /* _LIVEPATCH_CORE_H */
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
new file mode 100644
index 000000000000..f8269036bf0b
--- /dev/null
+++ b/kernel/livepatch/patch.c
@@ -0,0 +1,272 @@
1/*
2 * patch.c - livepatch patching functions
3 *
4 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
5 * Copyright (C) 2014 SUSE
6 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
20 */
21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/livepatch.h>
25#include <linux/list.h>
26#include <linux/ftrace.h>
27#include <linux/rculist.h>
28#include <linux/slab.h>
29#include <linux/bug.h>
30#include <linux/printk.h>
31#include "patch.h"
32#include "transition.h"
33
34static LIST_HEAD(klp_ops);
35
36struct klp_ops *klp_find_ops(unsigned long old_addr)
37{
38 struct klp_ops *ops;
39 struct klp_func *func;
40
41 list_for_each_entry(ops, &klp_ops, node) {
42 func = list_first_entry(&ops->func_stack, struct klp_func,
43 stack_node);
44 if (func->old_addr == old_addr)
45 return ops;
46 }
47
48 return NULL;
49}
50
51static void notrace klp_ftrace_handler(unsigned long ip,
52 unsigned long parent_ip,
53 struct ftrace_ops *fops,
54 struct pt_regs *regs)
55{
56 struct klp_ops *ops;
57 struct klp_func *func;
58 int patch_state;
59
60 ops = container_of(fops, struct klp_ops, fops);
61
62 rcu_read_lock();
63
64 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
65 stack_node);
66
67 /*
68 * func should never be NULL because preemption should be disabled here
69 * and unregister_ftrace_function() does the equivalent of a
70 * synchronize_sched() before the func_stack removal.
71 */
72 if (WARN_ON_ONCE(!func))
73 goto unlock;
74
75 /*
76 * In the enable path, enforce the order of the ops->func_stack and
77 * func->transition reads. The corresponding write barrier is in
78 * __klp_enable_patch().
79 *
80 * (Note that this barrier technically isn't needed in the disable
81 * path. In the rare case where klp_update_patch_state() runs before
82 * this handler, its TIF_PATCH_PENDING read and this func->transition
83 * read need to be ordered. But klp_update_patch_state() already
84 * enforces that.)
85 */
86 smp_rmb();
87
88 if (unlikely(func->transition)) {
89
90 /*
91 * Enforce the order of the func->transition and
92 * current->patch_state reads. Otherwise we could read an
93 * out-of-date task state and pick the wrong function. The
94 * corresponding write barrier is in klp_init_transition().
95 */
96 smp_rmb();
97
98 patch_state = current->patch_state;
99
100 WARN_ON_ONCE(patch_state == KLP_UNDEFINED);
101
102 if (patch_state == KLP_UNPATCHED) {
103 /*
104 * Use the previously patched version of the function.
105 * If no previous patches exist, continue with the
106 * original function.
107 */
108 func = list_entry_rcu(func->stack_node.next,
109 struct klp_func, stack_node);
110
111 if (&func->stack_node == &ops->func_stack)
112 goto unlock;
113 }
114 }
115
116 klp_arch_set_pc(regs, (unsigned long)func->new_func);
117unlock:
118 rcu_read_unlock();
119}
120
121/*
122 * Convert a function address into the appropriate ftrace location.
123 *
124 * Usually this is just the address of the function, but on some architectures
125 * it's more complicated so allow them to provide a custom behaviour.
126 */
127#ifndef klp_get_ftrace_location
128static unsigned long klp_get_ftrace_location(unsigned long faddr)
129{
130 return faddr;
131}
132#endif
133
134static void klp_unpatch_func(struct klp_func *func)
135{
136 struct klp_ops *ops;
137
138 if (WARN_ON(!func->patched))
139 return;
140 if (WARN_ON(!func->old_addr))
141 return;
142
143 ops = klp_find_ops(func->old_addr);
144 if (WARN_ON(!ops))
145 return;
146
147 if (list_is_singular(&ops->func_stack)) {
148 unsigned long ftrace_loc;
149
150 ftrace_loc = klp_get_ftrace_location(func->old_addr);
151 if (WARN_ON(!ftrace_loc))
152 return;
153
154 WARN_ON(unregister_ftrace_function(&ops->fops));
155 WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
156
157 list_del_rcu(&func->stack_node);
158 list_del(&ops->node);
159 kfree(ops);
160 } else {
161 list_del_rcu(&func->stack_node);
162 }
163
164 func->patched = false;
165}
166
167static int klp_patch_func(struct klp_func *func)
168{
169 struct klp_ops *ops;
170 int ret;
171
172 if (WARN_ON(!func->old_addr))
173 return -EINVAL;
174
175 if (WARN_ON(func->patched))
176 return -EINVAL;
177
178 ops = klp_find_ops(func->old_addr);
179 if (!ops) {
180 unsigned long ftrace_loc;
181
182 ftrace_loc = klp_get_ftrace_location(func->old_addr);
183 if (!ftrace_loc) {
184 pr_err("failed to find location for function '%s'\n",
185 func->old_name);
186 return -EINVAL;
187 }
188
189 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
190 if (!ops)
191 return -ENOMEM;
192
193 ops->fops.func = klp_ftrace_handler;
194 ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
195 FTRACE_OPS_FL_DYNAMIC |
196 FTRACE_OPS_FL_IPMODIFY;
197
198 list_add(&ops->node, &klp_ops);
199
200 INIT_LIST_HEAD(&ops->func_stack);
201 list_add_rcu(&func->stack_node, &ops->func_stack);
202
203 ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
204 if (ret) {
205 pr_err("failed to set ftrace filter for function '%s' (%d)\n",
206 func->old_name, ret);
207 goto err;
208 }
209
210 ret = register_ftrace_function(&ops->fops);
211 if (ret) {
212 pr_err("failed to register ftrace handler for function '%s' (%d)\n",
213 func->old_name, ret);
214 ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
215 goto err;
216 }
217
218
219 } else {
220 list_add_rcu(&func->stack_node, &ops->func_stack);
221 }
222
223 func->patched = true;
224
225 return 0;
226
227err:
228 list_del_rcu(&func->stack_node);
229 list_del(&ops->node);
230 kfree(ops);
231 return ret;
232}
233
234void klp_unpatch_object(struct klp_object *obj)
235{
236 struct klp_func *func;
237
238 klp_for_each_func(obj, func)
239 if (func->patched)
240 klp_unpatch_func(func);
241
242 obj->patched = false;
243}
244
245int klp_patch_object(struct klp_object *obj)
246{
247 struct klp_func *func;
248 int ret;
249
250 if (WARN_ON(obj->patched))
251 return -EINVAL;
252
253 klp_for_each_func(obj, func) {
254 ret = klp_patch_func(func);
255 if (ret) {
256 klp_unpatch_object(obj);
257 return ret;
258 }
259 }
260 obj->patched = true;
261
262 return 0;
263}
264
265void klp_unpatch_objects(struct klp_patch *patch)
266{
267 struct klp_object *obj;
268
269 klp_for_each_object(patch, obj)
270 if (obj->patched)
271 klp_unpatch_object(obj);
272}
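
The func_stack machinery above is what lets patches stack on the same function. A short worked example of how klp_ftrace_handler() picks a destination when two patches replace the same symbol; the function name and ordering are purely illustrative:

    /*
     * Suppose patch1 and patch2 both replace meminfo_proc_show().  They
     * share one klp_ops (matched by old_addr); their klp_funcs stack up:
     *
     *     ops->func_stack:  patch2's func -> patch1's func
     *
     * Normally the handler redirects to the list head (patch2's func).
     * If patch2 is mid-transition and the current task is still in the
     * KLP_UNPATCHED state, the handler steps to the next entry instead
     * (patch1's func); if that walk would leave the list, it falls
     * through to the original function.
     */
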
diff --git a/kernel/livepatch/patch.h b/kernel/livepatch/patch.h
new file mode 100644
index 000000000000..0db227170c36
--- /dev/null
+++ b/kernel/livepatch/patch.h
@@ -0,0 +1,33 @@
1#ifndef _LIVEPATCH_PATCH_H
2#define _LIVEPATCH_PATCH_H
3
4#include <linux/livepatch.h>
5#include <linux/list.h>
6#include <linux/ftrace.h>
7
8/**
9 * struct klp_ops - structure for tracking registered ftrace ops structs
10 *
11 * A single ftrace_ops is shared between all enabled replacement functions
12 * (klp_func structs) which have the same old_addr. This allows the switch
13 * between function versions to happen instantaneously by updating the klp_ops
14 * struct's func_stack list. The winner is the klp_func at the top of the
15 * func_stack (front of the list).
16 *
17 * @node: node for the global klp_ops list
18 * @func_stack: list head for the stack of klp_func's (active func is on top)
19 * @fops: registered ftrace ops struct
20 */
21struct klp_ops {
22 struct list_head node;
23 struct list_head func_stack;
24 struct ftrace_ops fops;
25};
26
27struct klp_ops *klp_find_ops(unsigned long old_addr);
28
29int klp_patch_object(struct klp_object *obj);
30void klp_unpatch_object(struct klp_object *obj);
31void klp_unpatch_objects(struct klp_patch *patch);
32
33#endif /* _LIVEPATCH_PATCH_H */
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
new file mode 100644
index 000000000000..adc0cc64aa4b
--- /dev/null
+++ b/kernel/livepatch/transition.c
@@ -0,0 +1,553 @@
1/*
2 * transition.c - Kernel Live Patching transition functions
3 *
4 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/cpu.h>
23#include <linux/stacktrace.h>
24#include "core.h"
25#include "patch.h"
26#include "transition.h"
27#include "../sched/sched.h"
28
29#define MAX_STACK_ENTRIES 100
30#define STACK_ERR_BUF_SIZE 128
31
32struct klp_patch *klp_transition_patch;
33
34static int klp_target_state = KLP_UNDEFINED;
35
36/*
37 * This work can be performed periodically to finish patching or unpatching any
38 * "straggler" tasks which failed to transition in the first attempt.
39 */
40static void klp_transition_work_fn(struct work_struct *work)
41{
42 mutex_lock(&klp_mutex);
43
44 if (klp_transition_patch)
45 klp_try_complete_transition();
46
47 mutex_unlock(&klp_mutex);
48}
49static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
50
51/*
52 * The transition to the target patch state is complete. Clean up the data
53 * structures.
54 */
55static void klp_complete_transition(void)
56{
57 struct klp_object *obj;
58 struct klp_func *func;
59 struct task_struct *g, *task;
60 unsigned int cpu;
61 bool immediate_func = false;
62
63 if (klp_target_state == KLP_UNPATCHED) {
64 /*
65 * All tasks have transitioned to KLP_UNPATCHED so we can now
66 * remove the new functions from the func_stack.
67 */
68 klp_unpatch_objects(klp_transition_patch);
69
70 /*
71 * Make sure klp_ftrace_handler() can no longer see functions
72 * from this patch on the ops->func_stack. Otherwise, after
73 * func->transition gets cleared, the handler may choose a
74 * removed function.
75 */
76 synchronize_rcu();
77 }
78
79 if (klp_transition_patch->immediate)
80 goto done;
81
82 klp_for_each_object(klp_transition_patch, obj) {
83 klp_for_each_func(obj, func) {
84 func->transition = false;
85 if (func->immediate)
86 immediate_func = true;
87 }
88 }
89
90 if (klp_target_state == KLP_UNPATCHED && !immediate_func)
91 module_put(klp_transition_patch->mod);
92
93 /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
94 if (klp_target_state == KLP_PATCHED)
95 synchronize_rcu();
96
97 read_lock(&tasklist_lock);
98 for_each_process_thread(g, task) {
99 WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
100 task->patch_state = KLP_UNDEFINED;
101 }
102 read_unlock(&tasklist_lock);
103
104 for_each_possible_cpu(cpu) {
105 task = idle_task(cpu);
106 WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
107 task->patch_state = KLP_UNDEFINED;
108 }
109
110done:
111 klp_target_state = KLP_UNDEFINED;
112 klp_transition_patch = NULL;
113}
114
115/*
116 * This is called in the error path, to cancel a transition before it has
117 * started, i.e. klp_init_transition() has been called but
118 * klp_start_transition() hasn't. If the transition *has* been started,
119 * klp_reverse_transition() should be used instead.
120 */
121void klp_cancel_transition(void)
122{
123 if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
124 return;
125
126 klp_target_state = KLP_UNPATCHED;
127 klp_complete_transition();
128}
129
130/*
131 * Switch the patched state of the task to the set of functions in the target
132 * patch state.
133 *
134 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
135 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
136 */
137void klp_update_patch_state(struct task_struct *task)
138{
139 rcu_read_lock();
140
141 /*
142 * This test_and_clear_tsk_thread_flag() call also serves as a read
143 * barrier (smp_rmb) for two cases:
144 *
145 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
146 * klp_target_state read. The corresponding write barrier is in
147 * klp_init_transition().
148 *
149 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
150 * of func->transition, if klp_ftrace_handler() is called later on
151 * the same CPU. See __klp_disable_patch().
152 */
153 if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
154 task->patch_state = READ_ONCE(klp_target_state);
155
156 rcu_read_unlock();
157}
158
159/*
160 * Determine whether the given stack trace includes any references to a
161 * to-be-patched or to-be-unpatched function.
162 */
163static int klp_check_stack_func(struct klp_func *func,
164 struct stack_trace *trace)
165{
166 unsigned long func_addr, func_size, address;
167 struct klp_ops *ops;
168 int i;
169
170 if (func->immediate)
171 return 0;
172
173 for (i = 0; i < trace->nr_entries; i++) {
174 address = trace->entries[i];
175
176 if (klp_target_state == KLP_UNPATCHED) {
177 /*
178 * Check for the to-be-unpatched function
179 * (the func itself).
180 */
181 func_addr = (unsigned long)func->new_func;
182 func_size = func->new_size;
183 } else {
184 /*
185 * Check for the to-be-patched function
186 * (the previous func).
187 */
188 ops = klp_find_ops(func->old_addr);
189
190 if (list_is_singular(&ops->func_stack)) {
191 /* original function */
192 func_addr = func->old_addr;
193 func_size = func->old_size;
194 } else {
195 /* previously patched function */
196 struct klp_func *prev;
197
198 prev = list_next_entry(func, stack_node);
199 func_addr = (unsigned long)prev->new_func;
200 func_size = prev->new_size;
201 }
202 }
203
204 if (address >= func_addr && address < func_addr + func_size)
205 return -EAGAIN;
206 }
207
208 return 0;
209}
210
211/*
212 * Determine whether it's safe to transition the task to the target patch state
213 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
214 */
215static int klp_check_stack(struct task_struct *task, char *err_buf)
216{
217 static unsigned long entries[MAX_STACK_ENTRIES];
218 struct stack_trace trace;
219 struct klp_object *obj;
220 struct klp_func *func;
221 int ret;
222
223 trace.skip = 0;
224 trace.nr_entries = 0;
225 trace.max_entries = MAX_STACK_ENTRIES;
226 trace.entries = entries;
227 ret = save_stack_trace_tsk_reliable(task, &trace);
228 WARN_ON_ONCE(ret == -ENOSYS);
229 if (ret) {
230 snprintf(err_buf, STACK_ERR_BUF_SIZE,
231 "%s: %s:%d has an unreliable stack\n",
232 __func__, task->comm, task->pid);
233 return ret;
234 }
235
236 klp_for_each_object(klp_transition_patch, obj) {
237 if (!obj->patched)
238 continue;
239 klp_for_each_func(obj, func) {
240 ret = klp_check_stack_func(func, &trace);
241 if (ret) {
242 snprintf(err_buf, STACK_ERR_BUF_SIZE,
243 "%s: %s:%d is sleeping on function %s\n",
244 __func__, task->comm, task->pid,
245 func->old_name);
246 return ret;
247 }
248 }
249 }
250
251 return 0;
252}
253
254/*
255 * Try to safely switch a task to the target patch state. If it's currently
256 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
257 * if the stack is unreliable, return false.
258 */
259static bool klp_try_switch_task(struct task_struct *task)
260{
261 struct rq *rq;
262 struct rq_flags flags;
263 int ret;
264 bool success = false;
265 char err_buf[STACK_ERR_BUF_SIZE];
266
267 err_buf[0] = '\0';
268
269 /* check if this task has already switched over */
270 if (task->patch_state == klp_target_state)
271 return true;
272
273 /*
274 * For arches which don't have reliable stack traces, we have to rely
275 * on other methods (e.g., switching tasks at kernel exit).
276 */
277 if (!klp_have_reliable_stack())
278 return false;
279
280 /*
281 * Now try to check the stack for any to-be-patched or to-be-unpatched
282 * functions. If all goes well, switch the task to the target patch
283 * state.
284 */
285 rq = task_rq_lock(task, &flags);
286
287 if (task_running(rq, task) && task != current) {
288 snprintf(err_buf, STACK_ERR_BUF_SIZE,
289 "%s: %s:%d is running\n", __func__, task->comm,
290 task->pid);
291 goto done;
292 }
293
294 ret = klp_check_stack(task, err_buf);
295 if (ret)
296 goto done;
297
298 success = true;
299
300 clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
301 task->patch_state = klp_target_state;
302
303done:
304 task_rq_unlock(rq, task, &flags);
305
306 /*
307 * Due to console deadlock issues, pr_debug() can't be used while
308 * holding the task rq lock. Instead we have to use a temporary buffer
309 * and print the debug message after releasing the lock.
310 */
311 if (err_buf[0] != '\0')
312 pr_debug("%s", err_buf);
313
314 return success;
315
316}
317
318/*
319 * Try to switch all remaining tasks to the target patch state by walking the
320 * stacks of sleeping tasks and looking for any to-be-patched or
321 * to-be-unpatched functions. If such functions are found, the task can't be
322 * switched yet.
323 *
324 * If any tasks are still stuck in the initial patch state, schedule a retry.
325 */
326void klp_try_complete_transition(void)
327{
328 unsigned int cpu;
329 struct task_struct *g, *task;
330 bool complete = true;
331
332 WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
333
334 /*
335 * If the patch can be applied or reverted immediately, skip the
336 * per-task transitions.
337 */
338 if (klp_transition_patch->immediate)
339 goto success;
340
341 /*
342 * Try to switch the tasks to the target patch state by walking their
343 * stacks and looking for any to-be-patched or to-be-unpatched
344 * functions. If such functions are found on a stack, or if the stack
345 * is deemed unreliable, the task can't be switched yet.
346 *
347 * Usually this will transition most (or all) of the tasks on a system
348 * unless the patch includes changes to a very common function.
349 */
350 read_lock(&tasklist_lock);
351 for_each_process_thread(g, task)
352 if (!klp_try_switch_task(task))
353 complete = false;
354 read_unlock(&tasklist_lock);
355
356 /*
357 * Ditto for the idle "swapper" tasks.
358 */
359 get_online_cpus();
360 for_each_possible_cpu(cpu) {
361 task = idle_task(cpu);
362 if (cpu_online(cpu)) {
363 if (!klp_try_switch_task(task))
364 complete = false;
365 } else if (task->patch_state != klp_target_state) {
366 /* offline idle tasks can be switched immediately */
367 clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
368 task->patch_state = klp_target_state;
369 }
370 }
371 put_online_cpus();
372
373 if (!complete) {
374 /*
375 * Some tasks weren't able to be switched over. Try again
376 * later and/or wait for other methods like kernel exit
377 * switching.
378 */
379 schedule_delayed_work(&klp_transition_work,
380 round_jiffies_relative(HZ));
381 return;
382 }
383
384success:
385 pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
386 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
387
388 /* we're done, now cleanup the data structures */
389 klp_complete_transition();
390}
391
392/*
393 * Start the transition to the specified target patch state so tasks can begin
394 * switching to it.
395 */
396void klp_start_transition(void)
397{
398 struct task_struct *g, *task;
399 unsigned int cpu;
400
401 WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
402
403 pr_notice("'%s': %s...\n", klp_transition_patch->mod->name,
404 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
405
406 /*
407 * If the patch can be applied or reverted immediately, skip the
408 * per-task transitions.
409 */
410 if (klp_transition_patch->immediate)
411 return;
412
413 /*
414 * Mark all normal tasks as needing a patch state update. They'll
415 * switch either in klp_try_complete_transition() or as they exit the
416 * kernel.
417 */
418 read_lock(&tasklist_lock);
419 for_each_process_thread(g, task)
420 if (task->patch_state != klp_target_state)
421 set_tsk_thread_flag(task, TIF_PATCH_PENDING);
422 read_unlock(&tasklist_lock);
423
424 /*
425 * Mark all idle tasks as needing a patch state update. They'll switch
426 * either in klp_try_complete_transition() or at the idle loop switch
427 * point.
428 */
429 for_each_possible_cpu(cpu) {
430 task = idle_task(cpu);
431 if (task->patch_state != klp_target_state)
432 set_tsk_thread_flag(task, TIF_PATCH_PENDING);
433 }
434}
435
436/*
437 * Initialize the global target patch state and all tasks to the initial patch
438 * state, and initialize all function transition states to true in preparation
439 * for patching or unpatching.
440 */
441void klp_init_transition(struct klp_patch *patch, int state)
442{
443 struct task_struct *g, *task;
444 unsigned int cpu;
445 struct klp_object *obj;
446 struct klp_func *func;
447 int initial_state = !state;
448
449 WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);
450
451 klp_transition_patch = patch;
452
453 /*
454 * Set the global target patch state which tasks will switch to. This
455 * has no effect until the TIF_PATCH_PENDING flags get set later.
456 */
457 klp_target_state = state;
458
459 /*
460 * If the patch can be applied or reverted immediately, skip the
461 * per-task transitions.
462 */
463 if (patch->immediate)
464 return;
465
466 /*
467 * Initialize all tasks to the initial patch state to prepare them for
468 * switching to the target state.
469 */
470 read_lock(&tasklist_lock);
471 for_each_process_thread(g, task) {
472 WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
473 task->patch_state = initial_state;
474 }
475 read_unlock(&tasklist_lock);
476
477 /*
478 * Ditto for the idle "swapper" tasks.
479 */
480 for_each_possible_cpu(cpu) {
481 task = idle_task(cpu);
482 WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
483 task->patch_state = initial_state;
484 }
485
486 /*
487 * Enforce the order of the task->patch_state initializations and the
488 * func->transition updates to ensure that klp_ftrace_handler() doesn't
489 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
490 *
491 * Also enforce the order of the klp_target_state write and future
492 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
493 * set a task->patch_state to KLP_UNDEFINED.
494 */
495 smp_wmb();
496
497 /*
498 * Set the func transition states so klp_ftrace_handler() will know to
499 * switch to the transition logic.
500 *
501 * When patching, the funcs aren't yet in the func_stack and will be
502 * made visible to the ftrace handler shortly by the calls to
503 * klp_patch_object().
504 *
505 * When unpatching, the funcs are already in the func_stack and so are
506 * already visible to the ftrace handler.
507 */
508 klp_for_each_object(patch, obj)
509 klp_for_each_func(obj, func)
510 func->transition = true;
511}
512
513/*
514 * This function can be called in the middle of an existing transition to
515 * reverse the direction of the target patch state. This can be done to
516 * effectively cancel an existing enable or disable operation if there are any
517 * tasks which are stuck in the initial patch state.
518 */
519void klp_reverse_transition(void)
520{
521 unsigned int cpu;
522 struct task_struct *g, *task;
523
524 klp_transition_patch->enabled = !klp_transition_patch->enabled;
525
526 klp_target_state = !klp_target_state;
527
528 /*
529 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
530 * klp_update_patch_state() running in parallel with
531 * klp_start_transition().
532 */
533 read_lock(&tasklist_lock);
534 for_each_process_thread(g, task)
535 clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
536 read_unlock(&tasklist_lock);
537
538 for_each_possible_cpu(cpu)
539 clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
540
541 /* Let any remaining calls to klp_update_patch_state() complete */
542 synchronize_rcu();
543
544 klp_start_transition();
545}
546
547/* Called from copy_process() during fork */
548void klp_copy_process(struct task_struct *child)
549{
550 child->patch_state = current->patch_state;
551
552 /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
553}
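
It may help to read the file above as one timeline. The following summary traces a successful enable transition for a patch without ->immediate set; the addresses in the stack-check step are made up, but the flow is the one implemented in this file and in core.c:

    /*
     *   __klp_enable_patch()
     *     klp_init_transition(patch, KLP_PATCHED)
     *         every task->patch_state = KLP_UNPATCHED, func->transition = true
     *     klp_patch_object() for each loaded object  (funcs enter func_stack)
     *     klp_start_transition()                     (TIF_PATCH_PENDING set)
     *     klp_try_complete_transition()
     *         per task: stack checked via save_stack_trace_tsk_reliable();
     *         e.g. with old_addr == 0xffff0000 and old_size == 0x80, a saved
     *         return address of 0xffff0044 blocks the switch (-EAGAIN) and
     *         the task is retried by klp_transition_work_fn() roughly once
     *         per HZ, or it switches itself at kernel exit / in the idle loop
     *     klp_complete_transition()                  (all tasks KLP_PATCHED,
     *                                                 patch_state reset)
     */
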
diff --git a/kernel/livepatch/transition.h b/kernel/livepatch/transition.h
new file mode 100644
index 000000000000..ce09b326546c
--- /dev/null
+++ b/kernel/livepatch/transition.h
@@ -0,0 +1,14 @@
1#ifndef _LIVEPATCH_TRANSITION_H
2#define _LIVEPATCH_TRANSITION_H
3
4#include <linux/livepatch.h>
5
6extern struct klp_patch *klp_transition_patch;
7
8void klp_init_transition(struct klp_patch *patch, int state);
9void klp_cancel_transition(void);
10void klp_start_transition(void);
11void klp_try_complete_transition(void);
12void klp_reverse_transition(void);
13
14#endif /* _LIVEPATCH_TRANSITION_H */
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index ac6d5176463d..2a25a9ec2c6e 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -10,6 +10,7 @@
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/stackprotector.h> 11#include <linux/stackprotector.h>
12#include <linux/suspend.h> 12#include <linux/suspend.h>
13#include <linux/livepatch.h>
13 14
14#include <asm/tlb.h> 15#include <asm/tlb.h>
15 16
@@ -265,6 +266,9 @@ static void do_idle(void)
265 266
266 sched_ttwu_pending(); 267 sched_ttwu_pending();
267 schedule_preempt_disabled(); 268 schedule_preempt_disabled();
269
270 if (unlikely(klp_patch_pending(current)))
271 klp_update_patch_state(current);
268} 272}
269 273
270bool cpu_in_idle(unsigned long pc) 274bool cpu_in_idle(unsigned long pc)
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index 9c15a9124e83..f8edee9c792d 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -54,8 +54,8 @@ int snprint_stack_trace(char *buf, size_t size,
54EXPORT_SYMBOL_GPL(snprint_stack_trace); 54EXPORT_SYMBOL_GPL(snprint_stack_trace);
55 55
56/* 56/*
57 * Architectures that do not implement save_stack_trace_tsk or 57 * Architectures that do not implement save_stack_trace_*()
58 * save_stack_trace_regs get this weak alias and a once-per-bootup warning 58 * get these weak aliases and once-per-bootup warnings
59 * (whenever this facility is utilized - for example by procfs): 59 * (whenever this facility is utilized - for example by procfs):
60 */ 60 */
61__weak void 61__weak void
@@ -69,3 +69,11 @@ save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
69{ 69{
70 WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n"); 70 WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
71} 71}
72
73__weak int
74save_stack_trace_tsk_reliable(struct task_struct *tsk,
75 struct stack_trace *trace)
76{
 77 WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk_reliable() not implemented yet.\n");
78 return -ENOSYS;
79}
diff --git a/samples/livepatch/livepatch-sample.c b/samples/livepatch/livepatch-sample.c
index e34f871e69b1..84795223f15f 100644
--- a/samples/livepatch/livepatch-sample.c
+++ b/samples/livepatch/livepatch-sample.c
@@ -17,6 +17,8 @@
17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
20#include <linux/module.h> 22#include <linux/module.h>
21#include <linux/kernel.h> 23#include <linux/kernel.h>
22#include <linux/livepatch.h> 24#include <linux/livepatch.h>
@@ -69,6 +71,21 @@ static int livepatch_init(void)
69{ 71{
70 int ret; 72 int ret;
71 73
74 if (!klp_have_reliable_stack() && !patch.immediate) {
75 /*
76 * WARNING: Be very careful when using 'patch.immediate' in
77 * your patches. It's ok to use it for simple patches like
78 * this, but for more complex patches which change function
79 * semantics, locking semantics, or data structures, it may not
80 * be safe. Use of this option will also prevent removal of
81 * the patch.
82 *
83 * See Documentation/livepatch/livepatch.txt for more details.
84 */
85 patch.immediate = true;
86 pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
87 }
88
72 ret = klp_register_patch(&patch); 89 ret = klp_register_patch(&patch);
73 if (ret) 90 if (ret)
74 return ret; 91 return ret;
@@ -82,7 +99,6 @@ static int livepatch_init(void)
82 99
83static void livepatch_exit(void) 100static void livepatch_exit(void)
84{ 101{
85 WARN_ON(klp_disable_patch(&patch));
86 WARN_ON(klp_unregister_patch(&patch)); 102 WARN_ON(klp_unregister_patch(&patch));
87} 103}
88 104
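
For context when reading the sample hunk above: the sample module's patch description is unchanged by this diff and therefore not shown, but it looks roughly like the following, and patch.immediate is the field the new init code flips when klp_have_reliable_stack() reports that the consistency model is unavailable:

    static struct klp_func funcs[] = {
            {
                    .old_name = "cmdline_proc_show",
                    .new_func = livepatch_cmdline_proc_show,
            }, { }
    };

    static struct klp_object objs[] = {
            {
                    /* name being NULL means vmlinux */
                    .funcs = funcs,
            }, { }
    };

    static struct klp_patch patch = {
            .mod = THIS_MODULE,
            .objs = objs,
    };
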