summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDominik Brodowski <linux@dominikbrodowski.net>2018-03-17 10:18:30 -0400
committerDominik Brodowski <linux@dominikbrodowski.net>2018-04-02 14:15:01 -0400
commit6b27aef09fea32b805a8c81287b1bb80362dadb0 (patch)
treec7188fcb528eeb261b62bbbfebb5466881849c0a
parentd53238cd51a80f6f2e5b9d64830c62e2086787bd (diff)
kexec: call do_kexec_load() in compat syscall directly
do_kexec_load() can be called directly by compat_sys_kexec() as long as the same parameter checks are completed which are currently handled (also) by sys_kexec(). Therefore, move those to kexec_load_check(), call that newly introduced helper function from both sys_kexec() and compat_sys_kexec(), and duplicate the remaining code from sys_kexec() in compat_sys_kexec(). This patch is part of a series which removes in-kernel calls to syscalls. On this basis, the syscall entry path can be streamlined. For details, see http://lkml.kernel.org/r/20180325162527.GA17492@light.dominikbrodowski.net Cc: Eric Biederman <ebiederm@xmission.com> Cc: kexec@lists.infradead.org Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
-rw-r--r-- kernel/kexec.c | 52
1 file changed, 39 insertions(+), 13 deletions(-)
diff --git a/kernel/kexec.c b/kernel/kexec.c
index e62ec4dc6620..aed8fb2564b3 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -192,11 +192,9 @@ out:
  * that to happen you need to do that yourself.
  */
 
-SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
-		struct kexec_segment __user *, segments, unsigned long, flags)
+static inline int kexec_load_check(unsigned long nr_segments,
+				   unsigned long flags)
 {
-	int result;
-
 	/* We only trust the superuser with rebooting the system. */
 	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
 		return -EPERM;
@@ -208,17 +206,29 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
 	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
 		return -EINVAL;
 
-	/* Verify we are on the appropriate architecture */
-	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
-		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
-		return -EINVAL;
-
 	/* Put an artificial cap on the number
 	 * of segments passed to kexec_load.
 	 */
 	if (nr_segments > KEXEC_SEGMENT_MAX)
 		return -EINVAL;
 
+	return 0;
+}
+
+SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
+		struct kexec_segment __user *, segments, unsigned long, flags)
+{
+	int result;
+
+	result = kexec_load_check(nr_segments, flags);
+	if (result)
+		return result;
+
+	/* Verify we are on the appropriate architecture */
+	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
+		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
+		return -EINVAL;
+
 	/* Because we write directly to the reserved memory
 	 * region when loading crash kernels we need a mutex here to
 	 * prevent multiple crash kernels from attempting to load
@@ -247,15 +257,16 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
 	struct kexec_segment out, __user *ksegments;
 	unsigned long i, result;
 
+	result = kexec_load_check(nr_segments, flags);
+	if (result)
+		return result;
+
 	/* Don't allow clients that don't understand the native
 	 * architecture to do anything.
 	 */
 	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
 		return -EINVAL;
 
-	if (nr_segments > KEXEC_SEGMENT_MAX)
-		return -EINVAL;
-
 	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
 	for (i = 0; i < nr_segments; i++) {
 		result = copy_from_user(&in, &segments[i], sizeof(in));
@@ -272,6 +283,21 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
 			return -EFAULT;
 	}
 
-	return sys_kexec_load(entry, nr_segments, ksegments, flags);
+	/* Because we write directly to the reserved memory
+	 * region when loading crash kernels we need a mutex here to
+	 * prevent multiple crash kernels from attempting to load
+	 * simultaneously, and to prevent a crash kernel from loading
+	 * over the top of a in use crash kernel.
+	 *
+	 * KISS: always take the mutex.
+	 */
+	if (!mutex_trylock(&kexec_mutex))
+		return -EBUSY;
+
+	result = do_kexec_load(entry, nr_segments, ksegments, flags);
+
+	mutex_unlock(&kexec_mutex);
+
+	return result;
 }
 #endif