Diffstat (limited to 'kernel')
-rw-r--r--  kernel/.gitignore             |  1
-rw-r--r--  kernel/futex.c                |  7
-rw-r--r--  kernel/kexec.c                |  4
-rw-r--r--  kernel/system_certificates.S  | 14
-rw-r--r--  kernel/system_keyring.c       |  4
-rw-r--r--  kernel/workqueue.c            | 32
6 files changed, 33 insertions, 29 deletions
diff --git a/kernel/.gitignore b/kernel/.gitignore
index b3097bde4e9c..790d83c7d160 100644
--- a/kernel/.gitignore
+++ b/kernel/.gitignore
@@ -5,3 +5,4 @@ config_data.h
 config_data.gz
 timeconst.h
 hz.bc
+x509_certificate_list
diff --git a/kernel/futex.c b/kernel/futex.c
index 80ba086f021d..f6ff0191ecf7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -251,6 +251,9 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
                 return -EINVAL;
         address -= key->both.offset;
 
+        if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
+                return -EFAULT;
+
         /*
          * PROCESS_PRIVATE futexes are fast.
          * As the mm cannot disappear under us and the 'key' only needs
@@ -259,8 +262,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
          * but access_ok() should be faster than find_vma()
          */
         if (!fshared) {
-                if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
-                        return -EFAULT;
                 key->private.mm = mm;
                 key->private.address = address;
                 get_futex_key_refs(key);
@@ -288,7 +289,7 @@ again:
                 put_page(page);
                 /* serialize against __split_huge_page_splitting() */
                 local_irq_disable();
-                if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
+                if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
                         page_head = compound_head(page);
                         /*
                          * page_head is valid pointer but we must pin
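
Taken together, the three hunks above move the user-address check into the common path of get_futex_key(), make it honour the caller's read/write intent (the rw argument) instead of hard-coding VERIFY_WRITE, and ask __get_user_pages_fast() for a writable pin only when the mapping is not read-only (!ro). A minimal userspace sketch of the case this distinction matters for, a FUTEX_WAIT on a futex word in a read-only shared mapping, follows; it is illustrative only, the backing file name is made up, and whether a given kernel returns -EFAULT or simply times out depends on the checks shown above.

/*
 * Illustration only (not part of the patch): FUTEX_WAIT on a futex word
 * that lives in a read-only MAP_SHARED mapping.  The file name, timeout
 * and error handling are arbitrary; the file must be at least 4 bytes.
 */
#include <errno.h>
#include <fcntl.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        int fd = open("futex_file", O_RDONLY);  /* hypothetical backing file */
        if (fd < 0)
                return 1;

        uint32_t *f = mmap(NULL, sizeof(*f), PROT_READ, MAP_SHARED, fd, 0);
        if (f == MAP_FAILED)
                return 1;

        struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

        /* FUTEX_WAIT only reads *f, so a read-only mapping can be enough
         * once get_futex_key() checks the address with the caller's
         * read/write intent rather than always demanding write access. */
        long ret = syscall(SYS_futex, f, FUTEX_WAIT, *f, &ts, NULL, 0);
        printf("futex(FUTEX_WAIT) -> %ld (errno %d)\n", ret, ret ? errno : 0);
        return 0;
}
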
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 490afc03627e..d0d8fca54065 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
 size_t vmcoreinfo_size;
 size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
 
+/* Flag to indicate we are going to kexec a new kernel */
+bool kexec_in_progress = false;
+
 /* Location of the reserved area for the crash kernel */
 struct resource crashk_res = {
         .name = "Crash kernel",
@@ -1675,6 +1678,7 @@ int kernel_kexec(void)
         } else
 #endif
         {
+                kexec_in_progress = true;
                 kernel_restart_prepare(NULL);
                 printk(KERN_EMERG "Starting new kernel\n");
                 machine_shutdown();
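
kexec_in_progress is a plain global flag that kernel_kexec() now raises just before tearing the machine down, so later shutdown code can tell a kexec reboot apart from an ordinary one. A hedged sketch of the intended consumer pattern is below; struct my_device and both helper functions are made-up names, and the extern declaration of the flag is assumed to live in <linux/kexec.h>, a hunk outside kernel/ and therefore not shown in this diffstat.

/*
 * Hedged sketch only: struct my_device, my_device_force_stop() and
 * my_device_drain_and_stop() are invented for illustration.
 */
#include <linux/kexec.h>        /* assumed home of: extern bool kexec_in_progress; */

struct my_device {
        int id;
};

static void my_device_force_stop(struct my_device *dev)
{
        /* immediate stop: no interrupts, no waiting */
}

static void my_device_drain_and_stop(struct my_device *dev)
{
        /* orderly teardown: drain queues, wait for completions */
}

static void quiesce_my_device(struct my_device *dev)
{
        /*
         * On a kexec reboot interrupts may already be gone and the next
         * kernel reinitialises the hardware anyway, so skip the slow,
         * interrupt-driven teardown.
         */
        if (kexec_in_progress)
                my_device_force_stop(dev);
        else
                my_device_drain_and_stop(dev);
}
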
diff --git a/kernel/system_certificates.S b/kernel/system_certificates.S
index 4aef390671cb..3e9868d47535 100644
--- a/kernel/system_certificates.S
+++ b/kernel/system_certificates.S
@@ -3,8 +3,18 @@
 
         __INITRODATA
 
+        .align 8
         .globl VMLINUX_SYMBOL(system_certificate_list)
 VMLINUX_SYMBOL(system_certificate_list):
+__cert_list_start:
         .incbin "kernel/x509_certificate_list"
-        .globl VMLINUX_SYMBOL(system_certificate_list_end)
-VMLINUX_SYMBOL(system_certificate_list_end):
+__cert_list_end:
+
+        .align 8
+        .globl VMLINUX_SYMBOL(system_certificate_list_size)
+VMLINUX_SYMBOL(system_certificate_list_size):
+#ifdef CONFIG_64BIT
+        .quad __cert_list_end - __cert_list_start
+#else
+        .long __cert_list_end - __cert_list_start
+#endif
diff --git a/kernel/system_keyring.c b/kernel/system_keyring.c
index 564dd93430a2..52ebc70263f4 100644
--- a/kernel/system_keyring.c
+++ b/kernel/system_keyring.c
@@ -22,7 +22,7 @@ struct key *system_trusted_keyring;
 EXPORT_SYMBOL_GPL(system_trusted_keyring);
 
 extern __initconst const u8 system_certificate_list[];
-extern __initconst const u8 system_certificate_list_end[];
+extern __initconst const unsigned long system_certificate_list_size;
 
 /*
  * Load the compiled-in keys
@@ -60,8 +60,8 @@ static __init int load_system_certificate_list(void)
 
         pr_notice("Loading compiled-in X.509 certificates\n");
 
-        end = system_certificate_list_end;
         p = system_certificate_list;
+        end = p + system_certificate_list_size;
         while (p < end) {
                 /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
                  * than 256 bytes in size.
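
The system_certificates.S and system_keyring.c hunks together switch the compiled-in certificate list from a start/end symbol pair to a start symbol plus an explicit byte count emitted by the assembler. The subtlety on the C side is that system_certificate_list_size names a long-sized datum, not an address, so it is declared as a const unsigned long object and the end pointer is computed as p + size. A small hedged sketch of the same pattern with made-up names (my_blob, my_blob_size) follows; the records are length-prefixed purely for illustration, whereas the real loop above parses ASN.1 certificate headers.

/* Hedged sketch; my_blob and my_blob_size stand in for a blob and its
 * byte count exported from an assembly file, as above. */
extern const unsigned char my_blob[];
extern const unsigned long my_blob_size;        /* a value, not a pointer */

static int walk_blob(int (*handle)(const unsigned char *rec, unsigned long len))
{
        const unsigned char *p = my_blob;
        const unsigned char *end = p + my_blob_size;

        while (p < end) {
                /* 1-byte length prefix, purely illustrative */
                unsigned long reclen = p[0];

                if (reclen == 0 || reclen > (unsigned long)(end - p) - 1)
                        return -1;      /* malformed record */
                if (handle(p + 1, reclen))
                        return -1;
                p += 1 + reclen;
        }
        return 0;
}
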
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c66912be990f..b010eac595d2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2851,19 +2851,6 @@ already_gone:
         return false;
 }
 
-static bool __flush_work(struct work_struct *work)
-{
-        struct wq_barrier barr;
-
-        if (start_flush_work(work, &barr)) {
-                wait_for_completion(&barr.done);
-                destroy_work_on_stack(&barr.work);
-                return true;
-        } else {
-                return false;
-        }
-}
-
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2877,10 +2864,18 @@ static bool __flush_work(struct work_struct *work)
  */
 bool flush_work(struct work_struct *work)
 {
+        struct wq_barrier barr;
+
         lock_map_acquire(&work->lockdep_map);
         lock_map_release(&work->lockdep_map);
 
-        return __flush_work(work);
+        if (start_flush_work(work, &barr)) {
+                wait_for_completion(&barr.done);
+                destroy_work_on_stack(&barr.work);
+                return true;
+        } else {
+                return false;
+        }
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -4832,14 +4827,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 
         INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
         schedule_work_on(cpu, &wfc.work);
-
-        /*
-         * The work item is on-stack and can't lead to deadlock through
-         * flushing. Use __flush_work() to avoid spurious lockdep warnings
-         * when work_on_cpu()s are nested.
-         */
-        __flush_work(&wfc.work);
-
+        flush_work(&wfc.work);
         return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);