author		Linus Torvalds <torvalds@linux-foundation.org>	2014-08-08 18:57:47 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-08 18:57:47 -0400
commit		8065be8d032f38da25b54bf077a05a30d9ce9f2a
tree		32a7baf4b40e0240ab4b9dd6f2bbe6129929bb66	/arch
parent		27d438c56009e5ae632de36fe70985d1aab5e344
parent		ecc265fe9e09e32a3573b2ba26e79b2099eb8bbb
Merge branch 'akpm' (second patchbomb from Andrew Morton)
Merge more incoming from Andrew Morton:
 "Two new syscalls:

     memfd_create in "shm: add memfd_create() syscall"

     kexec_file_load in "kexec: implementation of new syscall kexec_file_load"

  And:

   - Most (all?) of the rest of MM
   - Lots of the usual misc bits
   - fs/autofs4
   - drivers/rtc
   - fs/nilfs
   - procfs
   - fork.c, exec.c
   - more in lib/
   - rapidio
   - Janitorial work in filesystems: fs/ufs, fs/reiserfs, fs/adfs,
     fs/cramfs, fs/romfs, fs/qnx6.
   - initrd/initramfs work
   - "file sealing" and the memfd_create() syscall, in tmpfs
   - add pci_zalloc_consistent, use it in lots of places
   - MAINTAINERS maintenance
   - kexec feature work"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (193 commits)
  MAINTAINERS: update nomadik patterns
  MAINTAINERS: update usb/gadget patterns
  MAINTAINERS: update DMA BUFFER SHARING patterns
  kexec: verify the signature of signed PE bzImage
  kexec: support kexec/kdump on EFI systems
  kexec: support for kexec on panic using new system call
  kexec-bzImage64: support for loading bzImage using 64bit entry
  kexec: load and relocate purgatory at kernel load time
  purgatory: core purgatory functionality
  purgatory/sha256: provide implementation of sha256 in purgaotory context
  kexec: implementation of new syscall kexec_file_load
  kexec: new syscall kexec_file_load() declaration
  kexec: make kexec_segment user buffer pointer a union
  resource: provide new functions to walk through resources
  kexec: use common function for kimage_normal_alloc() and kimage_crash_alloc()
  kexec: move segment verification code in a separate function
  kexec: rename unusebale_pages to unusable_pages
  kernel: build bin2c based on config option CONFIG_BUILD_BIN2C
  bin2c: move bin2c in scripts/basic
  shm: wait for pins to be released when sealing
  ...
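As an aside for orientation (not part of the merge itself): the two new syscalls called out above are reached from userspace roughly as in the sketch below. This is a minimal, hedged illustration; it assumes an x86_64 system without libc wrappers, the syscall number 319 for memfd_create from arch/x86/syscalls/syscall_64.tbl in this series, and the MFD_*/F_SEAL_* constants introduced by the memfd/sealing patches. kexec_file_load (320 on x86_64) follows the same syscall(2) pattern but needs CAP_SYS_BOOT and kernel/initrd file descriptors, so it is only referenced in a comment here.

/*
 * Minimal userspace sketch (illustrative, not part of this merge):
 * exercise memfd_create() directly via syscall(2).
 * Assumed x86_64 numbers from this series:
 *   319 = memfd_create(name, flags)
 *   320 = kexec_file_load(kernel_fd, initrd_fd, cmdline_len, cmdline, flags)
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_memfd_create
#define __NR_memfd_create 319		/* x86_64, per syscall_64.tbl below */
#endif

#define MFD_CLOEXEC		0x0001U	/* from the new <linux/memfd.h> */
#define MFD_ALLOW_SEALING	0x0002U

#ifndef F_ADD_SEALS			/* sealing fcntls added by this series */
#define F_ADD_SEALS	1033		/* F_LINUX_SPECIFIC_BASE + 9 */
#define F_SEAL_SEAL	0x0001
#define F_SEAL_SHRINK	0x0002
#define F_SEAL_GROW	0x0004
#define F_SEAL_WRITE	0x0008
#endif

int main(void)
{
	/* Anonymous, sealable shmem file; the name only shows up in /proc. */
	int fd = syscall(__NR_memfd_create, "demo", MFD_CLOEXEC | MFD_ALLOW_SEALING);
	if (fd < 0) {
		perror("memfd_create");
		return 1;
	}

	if (write(fd, "sealed payload\n", 15) != 15) {
		perror("write");
		return 1;
	}

	/* Freeze size and contents; readers can rely on the file not changing. */
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_WRITE) < 0)
		perror("F_ADD_SEALS");

	printf("memfd %d created and sealed\n", fd);
	return 0;
}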
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/Kbuild | 1
-rw-r--r--  arch/alpha/include/asm/scatterlist.h | 6
-rw-r--r--  arch/arm/Kconfig | 3
-rw-r--r--  arch/arm/include/asm/Kbuild | 1
-rw-r--r--  arch/arm/include/asm/scatterlist.h | 12
-rw-r--r--  arch/arm/mach-omap2/board-omap3touchbook.c | 2
-rw-r--r--  arch/arm/mach-omap2/mux.c | 22
-rw-r--r--  arch/arm/mach-pxa/balloon3.c | 2
-rw-r--r--  arch/arm/mach-pxa/viper.c | 2
-rw-r--r--  arch/arm/mach-s3c24xx/mach-jive.c | 2
-rw-r--r--  arch/arm/mach-w90x900/cpu.c | 3
-rw-r--r--  arch/arm64/Kconfig | 1
-rw-r--r--  arch/arm64/include/asm/page.h | 3
-rw-r--r--  arch/arm64/kernel/vdso.c | 19
-rw-r--r--  arch/cris/include/asm/Kbuild | 1
-rw-r--r--  arch/cris/include/asm/scatterlist.h | 6
-rw-r--r--  arch/frv/include/asm/Kbuild | 1
-rw-r--r--  arch/frv/include/asm/scatterlist.h | 6
-rw-r--r--  arch/ia64/Kconfig | 3
-rw-r--r--  arch/ia64/include/asm/Kbuild | 1
-rw-r--r--  arch/ia64/include/asm/page.h | 2
-rw-r--r--  arch/ia64/include/asm/scatterlist.h | 7
-rw-r--r--  arch/ia64/kernel/time.c | 15
-rw-r--r--  arch/ia64/mm/init.c | 31
-rw-r--r--  arch/m32r/include/asm/Kbuild | 1
-rw-r--r--  arch/m32r/include/asm/scatterlist.h | 6
-rw-r--r--  arch/m68k/Kconfig | 2
-rw-r--r--  arch/microblaze/include/asm/Kbuild | 1
-rw-r--r--  arch/microblaze/include/asm/scatterlist.h | 1
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mn10300/include/asm/Kbuild | 1
-rw-r--r--  arch/mn10300/include/asm/scatterlist.h | 16
-rw-r--r--  arch/powerpc/Kconfig | 3
-rw-r--r--  arch/powerpc/include/asm/Kbuild | 1
-rw-r--r--  arch/powerpc/include/asm/page.h | 3
-rw-r--r--  arch/powerpc/include/asm/scatterlist.h | 17
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 6
-rw-r--r--  arch/powerpc/kernel/vdso.c | 16
-rw-r--r--  arch/powerpc/kernel/vio.c | 2
-rw-r--r--  arch/powerpc/mm/dma-noncoherent.c | 1
-rw-r--r--  arch/powerpc/platforms/44x/warp.c | 1
-rw-r--r--  arch/powerpc/platforms/52xx/efika.c | 1
-rw-r--r--  arch/powerpc/platforms/amigaone/setup.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c | 2
-rw-r--r--  arch/s390/Kconfig | 3
-rw-r--r--  arch/s390/include/asm/Kbuild | 1
-rw-r--r--  arch/s390/include/asm/page.h | 2
-rw-r--r--  arch/s390/include/asm/scatterlist.h | 3
-rw-r--r--  arch/s390/kernel/vdso.c | 15
-rw-r--r--  arch/score/include/asm/Kbuild | 1
-rw-r--r--  arch/score/include/asm/scatterlist.h | 6
-rw-r--r--  arch/sh/Kconfig | 2
-rw-r--r--  arch/sh/include/asm/page.h | 5
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.c | 15
-rw-r--r--  arch/sparc/Kconfig | 1
-rw-r--r--  arch/sparc/include/asm/Kbuild | 1
-rw-r--r--  arch/sparc/include/asm/scatterlist.h | 8
-rw-r--r--  arch/tile/Kconfig | 2
-rw-r--r--  arch/tile/include/asm/hardwall.h | 2
-rw-r--r--  arch/tile/include/asm/page.h | 6
-rw-r--r--  arch/tile/kernel/hardwall.c | 6
-rw-r--r--  arch/tile/kernel/vdso.c | 15
-rw-r--r--  arch/um/include/asm/Kbuild | 1
-rw-r--r--  arch/um/include/asm/page.h | 5
-rw-r--r--  arch/x86/Kbuild | 4
-rw-r--r--  arch/x86/Kconfig | 26
-rw-r--r--  arch/x86/Makefile | 8
-rw-r--r--  arch/x86/include/asm/Kbuild | 3
-rw-r--r--  arch/x86/include/asm/crash.h | 9
-rw-r--r--  arch/x86/include/asm/kexec-bzimage64.h | 6
-rw-r--r--  arch/x86/include/asm/kexec.h | 45
-rw-r--r--  arch/x86/include/asm/page.h | 1
-rw-r--r--  arch/x86/include/asm/page_64.h | 2
-rw-r--r--  arch/x86/include/asm/scatterlist.h | 8
-rw-r--r--  arch/x86/kernel/Makefile | 1
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 4
-rw-r--r--  arch/x86/kernel/crash.c | 563
-rw-r--r--  arch/x86/kernel/kexec-bzimage64.c | 553
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c | 239
-rw-r--r--  arch/x86/kvm/mmu_audit.c | 2
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 2
-rw-r--r--  arch/x86/purgatory/Makefile | 30
-rw-r--r--  arch/x86/purgatory/entry64.S | 101
-rw-r--r--  arch/x86/purgatory/purgatory.c | 72
-rw-r--r--  arch/x86/purgatory/setup-x86_64.S | 58
-rw-r--r--  arch/x86/purgatory/sha256.c | 283
-rw-r--r--  arch/x86/purgatory/sha256.h | 22
-rw-r--r--  arch/x86/purgatory/stack.S | 19
-rw-r--r--  arch/x86/purgatory/string.c | 13
-rw-r--r--  arch/x86/syscalls/syscall_32.tbl | 1
-rw-r--r--  arch/x86/syscalls/syscall_64.tbl | 2
-rw-r--r--  arch/x86/um/asm/elf.h | 1
-rw-r--r--  arch/x86/um/mem_64.c | 15
-rw-r--r--  arch/x86/vdso/vdso32-setup.c | 19
97 files changed, 2169 insertions(+), 293 deletions(-)
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index 96e54bed5088..e858aa0ad8af 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -6,4 +6,5 @@ generic-y += exec.h
 generic-y += hash.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
+generic-y += scatterlist.h
 generic-y += trace_clock.h
diff --git a/arch/alpha/include/asm/scatterlist.h b/arch/alpha/include/asm/scatterlist.h
deleted file mode 100644
index 017d7471c3c4..000000000000
--- a/arch/alpha/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ALPHA_SCATTERLIST_H
-#define _ALPHA_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* !(_ALPHA_SCATTERLIST_H) */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 916cedbd7a67..c49a775937db 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -83,6 +83,7 @@ config ARM
 	  <http://www.arm.linux.org.uk/>.

 config ARM_HAS_SG_CHAIN
+	select ARCH_HAS_SG_CHAIN
 	bool

 config NEED_SG_DMA_LENGTH
@@ -1982,6 +1983,8 @@ config XIP_PHYS_ADDR
 config KEXEC
 	bool "Kexec system call (EXPERIMENTAL)"
 	depends on (!SMP || PM_SLEEP_SMP)
+	select CRYPTO
+	select CRYPTO_SHA256
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index f5a357601983..70cd84eb7fda 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -22,6 +22,7 @@ generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
 generic-y += rwsem.h
+generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += segment.h
 generic-y += sembuf.h
diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h
deleted file mode 100644
index cefdb8f898a1..000000000000
--- a/arch/arm/include/asm/scatterlist.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASMARM_SCATTERLIST_H
-#define _ASMARM_SCATTERLIST_H
-
-#ifdef CONFIG_ARM_HAS_SG_CHAIN
-#define ARCH_HAS_SG_CHAIN
-#endif
-
-#include <asm/memory.h>
-#include <asm/types.h>
-#include <asm-generic/scatterlist.h>
-
-#endif /* _ASMARM_SCATTERLIST_H */
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 7da48bc42bbf..70b904c010c6 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -336,7 +336,7 @@ static int __init early_touchbook_revision(char *p)
 	if (!p)
 		return 0;

-	return strict_strtoul(p, 10, &touchbook_revision);
+	return kstrtoul(p, 10, &touchbook_revision);
 }
 early_param("tbr", early_touchbook_revision);

diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index f62f7537d899..ac8a249779f2 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -681,29 +681,19 @@ static ssize_t omap_mux_dbg_signal_write(struct file *file,
 					 const char __user *user_buf,
 					 size_t count, loff_t *ppos)
 {
-	char buf[OMAP_MUX_MAX_ARG_CHAR];
 	struct seq_file *seqf;
 	struct omap_mux *m;
-	unsigned long val;
-	int buf_size, ret;
+	u16 val;
+	int ret;
 	struct omap_mux_partition *partition;

 	if (count > OMAP_MUX_MAX_ARG_CHAR)
 		return -EINVAL;

-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) - 1);
-
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-
-	ret = strict_strtoul(buf, 0x10, &val);
+	ret = kstrtou16_from_user(user_buf, count, 0x10, &val);
 	if (ret < 0)
 		return ret;

-	if (val > 0xffff)
-		return -EINVAL;
-
 	seqf = file->private_data;
 	m = seqf->private;

@@ -711,7 +701,7 @@ static ssize_t omap_mux_dbg_signal_write(struct file *file,
 	if (!partition)
 		return -ENODEV;

-	omap_mux_write(partition, (u16)val, m->reg_offset);
+	omap_mux_write(partition, val, m->reg_offset);
 	*ppos += count;

 	return count;
@@ -917,14 +907,14 @@ static void __init omap_mux_set_cmdline_signals(void)

 	while ((token = strsep(&next_opt, ",")) != NULL) {
 		char *keyval, *name;
-		unsigned long val;
+		u16 val;

 		keyval = token;
 		name = strsep(&keyval, "=");
 		if (name) {
 			int res;

-			res = strict_strtoul(keyval, 0x10, &val);
+			res = kstrtou16(keyval, 0x10, &val);
 			if (res < 0)
 				continue;

diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index 43596e0ed051..d897292712eb 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -90,7 +90,7 @@ int __init parse_balloon3_features(char *arg)
 	if (!arg)
 		return 0;

-	return strict_strtoul(arg, 0, &balloon3_features_present);
+	return kstrtoul(arg, 0, &balloon3_features_present);
 }
 early_param("balloon3_features", parse_balloon3_features);

diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 41f27f667ca8..de3b08073fe7 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -769,7 +769,7 @@ static unsigned long viper_tpm;

 static int __init viper_tpm_setup(char *str)
 {
-	return strict_strtoul(str, 10, &viper_tpm) >= 0;
+	return kstrtoul(str, 10, &viper_tpm) >= 0;
 }

 __setup("tpm=", viper_tpm_setup);
diff --git a/arch/arm/mach-s3c24xx/mach-jive.c b/arch/arm/mach-s3c24xx/mach-jive.c
index e647b47244a9..7804d3c6991b 100644
--- a/arch/arm/mach-s3c24xx/mach-jive.c
+++ b/arch/arm/mach-s3c24xx/mach-jive.c
@@ -242,7 +242,7 @@ static int __init jive_mtdset(char *options)
 	if (options == NULL || options[0] == '\0')
 		return 0;

-	if (strict_strtoul(options, 10, &set)) {
+	if (kstrtoul(options, 10, &set)) {
 		printk(KERN_ERR "failed to parse mtdset=%s\n", options);
 		return 0;
 	}
diff --git a/arch/arm/mach-w90x900/cpu.c b/arch/arm/mach-w90x900/cpu.c
index b1eabaad50a5..213230ee57d1 100644
--- a/arch/arm/mach-w90x900/cpu.c
+++ b/arch/arm/mach-w90x900/cpu.c
@@ -178,7 +178,8 @@ static int __init nuc900_set_cpufreq(char *str)
 	if (!*str)
 		return 0;

-	strict_strtoul(str, 0, &cpufreq);
+	if (kstrtoul(str, 0, &cpufreq))
+		return 0;

 	nuc900_clock_source(NULL, "ext");

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b0f9c9db9590..fd4e81a4e1ce 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,6 +1,7 @@
 config ARM64
 	def_bool y
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_SUPPORTS_ATOMIC_RMW
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 7a3f462133b0..22b16232bd60 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -28,9 +28,6 @@
 #define PAGE_SIZE		(_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK		(~(PAGE_SIZE-1))

-/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
-#define __HAVE_ARCH_GATE_AREA		1
-
 /*
  * The idmap and swapper page tables need some space reserved in the kernel
  * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index a81a446a5786..32aeea083d93 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -195,25 +195,6 @@ up_fail:
 }

 /*
- * We define AT_SYSINFO_EHDR, so we need these function stubs to keep
- * Linux happy.
- */
-int in_gate_area_no_mm(unsigned long addr)
-{
-	return 0;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-	return 0;
-}
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-	return NULL;
-}
-
-/*
  * Update the vDSO data page to keep in sync with kernel timekeeping.
  */
 void update_vsyscall(struct timekeeper *tk)
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index afff5105909d..31742dfadff9 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += linkage.h
 generic-y += mcs_spinlock.h
 generic-y += module.h
 generic-y += preempt.h
+generic-y += scatterlist.h
 generic-y += trace_clock.h
 generic-y += vga.h
 generic-y += xor.h
diff --git a/arch/cris/include/asm/scatterlist.h b/arch/cris/include/asm/scatterlist.h
deleted file mode 100644
index f11f8f40ec4a..000000000000
--- a/arch/cris/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_CRIS_SCATTERLIST_H
-#define __ASM_CRIS_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* !(__ASM_CRIS_SCATTERLIST_H) */
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index 87b95eb8aee5..5b73921b6e9d 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -5,4 +5,5 @@ generic-y += exec.h
 generic-y += hash.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
+generic-y += scatterlist.h
 generic-y += trace_clock.h
diff --git a/arch/frv/include/asm/scatterlist.h b/arch/frv/include/asm/scatterlist.h
deleted file mode 100644
index 0e5eb3018468..000000000000
--- a/arch/frv/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_SCATTERLIST_H
-#define _ASM_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* !_ASM_SCATTERLIST_H */
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 44a6915ab13d..64aefb76bd69 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -28,6 +28,7 @@ config IA64
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_VIRT_CPU_ACCOUNTING
+	select ARCH_HAS_SG_CHAIN
 	select VIRT_TO_BUS
 	select ARCH_DISCARD_MEMBLOCK
 	select GENERIC_IRQ_PROBE
@@ -548,6 +549,8 @@ source "drivers/sn/Kconfig"
 config KEXEC
 	bool "kexec system call"
 	depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+	select CRYPTO
+	select CRYPTO_SHA256
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 0da4aa2602ae..e8317d2d6c8d 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -5,5 +5,6 @@ generic-y += hash.h
 generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
+generic-y += scatterlist.h
 generic-y += trace_clock.h
 generic-y += vtime.h
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index f1e1b2e3cdb3..1f1bf144fe62 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -231,4 +231,6 @@ get_order (unsigned long size)
 #define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
 #define LOAD_OFFSET		(KERNEL_START - KERNEL_TR_PAGE_SIZE)

+#define __HAVE_ARCH_GATE_AREA	1
+
 #endif /* _ASM_IA64_PAGE_H */
diff --git a/arch/ia64/include/asm/scatterlist.h b/arch/ia64/include/asm/scatterlist.h
deleted file mode 100644
index 08fd93bff1db..000000000000
--- a/arch/ia64/include/asm/scatterlist.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ASM_IA64_SCATTERLIST_H
-#define _ASM_IA64_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-#define ARCH_HAS_SG_CHAIN
-
-#endif /* _ASM_IA64_SCATTERLIST_H */
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 3e71ef85e439..9a0104a38cd3 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -384,21 +384,6 @@ static struct irqaction timer_irqaction = {
 	.name =		"timer"
 };

-static struct platform_device rtc_efi_dev = {
-	.name = "rtc-efi",
-	.id = -1,
-};
-
-static int __init rtc_init(void)
-{
-	if (platform_device_register(&rtc_efi_dev) < 0)
-		printk(KERN_ERR "unable to register rtc device...\n");
-
-	/* not necessarily an error */
-	return 0;
-}
-module_init(rtc_init);
-
 void read_persistent_clock(struct timespec *ts)
 {
 	efi_gettimeofday(ts);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 892d43e32f3b..6b3345758d3e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -278,6 +278,37 @@ setup_gate (void)
 	ia64_patch_gate();
 }

+static struct vm_area_struct gate_vma;
+
+static int __init gate_vma_init(void)
+{
+	gate_vma.vm_mm = NULL;
+	gate_vma.vm_start = FIXADDR_USER_START;
+	gate_vma.vm_end = FIXADDR_USER_END;
+	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+	gate_vma.vm_page_prot = __P101;
+
+	return 0;
+}
+__initcall(gate_vma_init);
+
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+	return &gate_vma;
+}
+
+int in_gate_area_no_mm(unsigned long addr)
+{
+	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
+		return 1;
+	return 0;
+}
+
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+	return in_gate_area_no_mm(addr);
+}
+
 void ia64_mmu_init(void *my_cpu_data)
 {
 	unsigned long pta, impl_va_bits;
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index 67779a74b62d..accc10a3dc78 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -6,4 +6,5 @@ generic-y += hash.h
 generic-y += mcs_spinlock.h
 generic-y += module.h
 generic-y += preempt.h
+generic-y += scatterlist.h
 generic-y += trace_clock.h
diff --git a/arch/m32r/include/asm/scatterlist.h b/arch/m32r/include/asm/scatterlist.h
deleted file mode 100644
index 7370b8b6243e..000000000000
--- a/arch/m32r/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_M32R_SCATTERLIST_H
-#define _ASM_M32R_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* _ASM_M32R_SCATTERLIST_H */
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 87b7c7581b1d..3ff8c9a25335 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -91,6 +91,8 @@ config MMU_SUN3
 config KEXEC
 	bool "kexec system call"
 	depends on M68KCLASSIC
+	select CRYPTO
+	select CRYPTO_SHA256
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 35b3ecaf25d5..27a3acda6c19 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -7,5 +7,6 @@ generic-y += exec.h
 generic-y += hash.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
+generic-y += scatterlist.h
 generic-y += syscalls.h
 generic-y += trace_clock.h
diff --git a/arch/microblaze/include/asm/scatterlist.h b/arch/microblaze/include/asm/scatterlist.h
deleted file mode 100644
index 35d786fe93ae..000000000000
--- a/arch/microblaze/include/asm/scatterlist.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/scatterlist.h>
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 900c7e5333b6..df51e78a72cc 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2396,6 +2396,8 @@ source "kernel/Kconfig.preempt"

 config KEXEC
 	bool "Kexec system call"
+	select CRYPTO
+	select CRYPTO_SHA256
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index 654d5ba6e310..ecbd6676bd33 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -6,4 +6,5 @@ generic-y += exec.h
 generic-y += hash.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
+generic-y += scatterlist.h
 generic-y += trace_clock.h
diff --git a/arch/mn10300/include/asm/scatterlist.h b/arch/mn10300/include/asm/scatterlist.h
deleted file mode 100644
index 7baa4006008a..000000000000
--- a/arch/mn10300/include/asm/scatterlist.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* MN10300 Scatterlist definitions
- *
- * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef _ASM_SCATTERLIST_H
-#define _ASM_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* _ASM_SCATTERLIST_H */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 80b94b0add1f..a577609f8ed6 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -111,6 +111,7 @@ config PPC
 	select HAVE_DMA_API_DEBUG
 	select HAVE_OPROFILE
 	select HAVE_DEBUG_KMEMLEAK
+	select ARCH_HAS_SG_CHAIN
 	select GENERIC_ATOMIC64 if PPC32
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select HAVE_PERF_EVENTS
@@ -398,6 +399,8 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
 config KEXEC
 	bool "kexec system call"
 	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
+	select CRYPTO
+	select CRYPTO_SHA256
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 3fb1bc432f4f..7f23f162ce9c 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -4,5 +4,6 @@ generic-y += hash.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += rwsem.h
+generic-y += scatterlist.h
 generic-y += trace_clock.h
 generic-y += vtime.h
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 32e4e212b9c1..26fe1ae15212 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -48,9 +48,6 @@ extern unsigned int HPAGE_SHIFT;
 #define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
 #endif

-/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
-#define __HAVE_ARCH_GATE_AREA	1
-
 /*
  * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
  * assign PAGE_MASK to a larger type it gets extended the way we want
diff --git a/arch/powerpc/include/asm/scatterlist.h b/arch/powerpc/include/asm/scatterlist.h
deleted file mode 100644
index de1f620bd5c9..000000000000
--- a/arch/powerpc/include/asm/scatterlist.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _ASM_POWERPC_SCATTERLIST_H
-#define _ASM_POWERPC_SCATTERLIST_H
-/*
- * Copyright (C) 2001 PPC64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/dma.h>
-#include <asm-generic/scatterlist.h>
-
-#define ARCH_HAS_SG_CHAIN
-
-#endif /* _ASM_POWERPC_SCATTERLIST_H */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index d0225572faa1..75d62d63fe68 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -149,13 +149,13 @@ static void check_smt_enabled(void)
 	else if (!strcmp(smt_enabled_cmdline, "off"))
 		smt_enabled_at_boot = 0;
 	else {
-		long smt;
+		int smt;
 		int rc;

-		rc = strict_strtol(smt_enabled_cmdline, 10, &smt);
+		rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
 		if (!rc)
 			smt_enabled_at_boot =
-				min(threads_per_core, (int)smt);
+				min(threads_per_core, smt);
 		}
 	} else {
 		dn = of_find_node_by_path("/options");
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index ce74c335a6a4..f174351842cf 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -840,19 +840,3 @@ static int __init vdso_init(void)
 	return 0;
 }
 arch_initcall(vdso_init);
-
-int in_gate_area_no_mm(unsigned long addr)
-{
-	return 0;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-	return 0;
-}
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-	return NULL;
-}
-
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 904c66128fae..5bfdab9047be 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -977,7 +977,7 @@ static ssize_t viodev_cmo_desired_set(struct device *dev,
 	size_t new_desired;
 	int ret;

-	ret = strict_strtoul(buf, 10, &new_desired);
+	ret = kstrtoul(buf, 10, &new_desired);
 	if (ret)
 		return ret;

diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 7b6c10750179..d85e86aac7fb 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -33,6 +33,7 @@
 #include <linux/export.h>

 #include <asm/tlbflush.h>
+#include <asm/dma.h>

 #include "mmu_decl.h"

diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 534574a97ec9..3a104284b338 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -25,6 +25,7 @@
 #include <asm/time.h>
 #include <asm/uic.h>
 #include <asm/ppc4xx.h>
+#include <asm/dma.h>


 static __initdata struct of_device_id warp_of_bus[] = {
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 6e19b0ad5d26..3feffde9128d 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -13,6 +13,7 @@
 #include <generated/utsrelease.h>
 #include <linux/pci.h>
 #include <linux/of.h>
+#include <asm/dma.h>
 #include <asm/prom.h>
 #include <asm/time.h>
 #include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c
index 03aabc0e16ac..2fe12046279e 100644
--- a/arch/powerpc/platforms/amigaone/setup.c
+++ b/arch/powerpc/platforms/amigaone/setup.c
@@ -24,6 +24,7 @@
 #include <asm/i8259.h>
 #include <asm/time.h>
 #include <asm/udbg.h>
+#include <asm/dma.h>

 extern void __flush_disable_L1(void);

diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 2d0b4d68a40a..a2450b8a50a5 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -400,10 +400,10 @@ out:
 static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
 {
 	struct device_node *dn, *parent;
-	unsigned long drc_index;
+	u32 drc_index;
 	int rc;

-	rc = strict_strtoul(buf, 0, &drc_index);
+	rc = kstrtou32(buf, 0, &drc_index);
 	if (rc)
 		return -EINVAL;

diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index d146fef038b8..e7cb6d4a871a 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -320,7 +320,7 @@ static ssize_t migrate_store(struct class *class, struct class_attribute *attr,
 	u64 streamid;
 	int rc;

-	rc = strict_strtoull(buf, 0, &streamid);
+	rc = kstrtou64(buf, 0, &streamid);
 	if (rc)
 		return rc;

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8ca60f8d5683..ab39ceb89ecf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -48,6 +48,8 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC

 config KEXEC
 	def_bool y
+	select CRYPTO
+	select CRYPTO_SHA256

 config AUDIT_ARCH
 	def_bool y
@@ -145,6 +147,7 @@ config S390
 	select TTY
 	select VIRT_CPU_ACCOUNTING
 	select VIRT_TO_BUS
+	select ARCH_HAS_SG_CHAIN

 config SCHED_OMIT_FRAME_POINTER
 	def_bool y
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 57892a8a9055..b3fea0722ff1 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -4,4 +4,5 @@ generic-y += clkdev.h
 generic-y += hash.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
+generic-y += scatterlist.h
 generic-y += trace_clock.h
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 114258eeaacd..7b2ac6e44166 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -162,6 +162,4 @@ static inline int devmem_is_allowed(unsigned long pfn)
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>

-#define __HAVE_ARCH_GATE_AREA 1
-
 #endif /* _S390_PAGE_H */
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h
deleted file mode 100644
index 6d45ef6c12a7..000000000000
--- a/arch/s390/include/asm/scatterlist.h
+++ /dev/null
@@ -1,3 +0,0 @@
-#include <asm-generic/scatterlist.h>
-
-#define ARCH_HAS_SG_CHAIN
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 613649096783..0bbb7e027c5a 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -316,18 +316,3 @@ static int __init vdso_init(void)
 	return 0;
 }
 early_initcall(vdso_init);
-
-int in_gate_area_no_mm(unsigned long addr)
-{
-	return 0;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-	return 0;
-}
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-	return NULL;
-}
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index 2f947aba4bd4..aad209199f7e 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -8,5 +8,6 @@ generic-y += cputime.h
 generic-y += hash.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
+generic-y += scatterlist.h
 generic-y += trace_clock.h
 generic-y += xor.h
diff --git a/arch/score/include/asm/scatterlist.h b/arch/score/include/asm/scatterlist.h
deleted file mode 100644
index 9f533b8362c7..000000000000
--- a/arch/score/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_SCORE_SCATTERLIST_H
-#define _ASM_SCORE_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* _ASM_SCORE_SCATTERLIST_H */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index aa2df3eaeb29..453fa5c09550 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -595,6 +595,8 @@ source kernel/Kconfig.hz
 config KEXEC
 	bool "kexec system call (EXPERIMENTAL)"
 	depends on SUPERH32 && MMU
+	select CRYPTO
+	select CRYPTO_SHA256
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 15d970328f71..fe20d14ae051 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -186,11 +186,6 @@ typedef struct page *pgtable_t;
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>

-/* vDSO support */
-#ifdef CONFIG_VSYSCALL
-#define __HAVE_ARCH_GATE_AREA
-#endif
-
 /*
  * Some drivers need to perform DMA into kmalloc'ed buffers
  * and so we have to increase the kmalloc minalign for this.
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 5ca579720a09..ea2aa1393b87 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -92,18 +92,3 @@ const char *arch_vma_name(struct vm_area_struct *vma)

 	return NULL;
 }
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-	return NULL;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long address)
-{
-	return 0;
-}
-
-int in_gate_area_no_mm(unsigned long address)
-{
-	return 0;
-}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 4692c90936f1..a537816613f9 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -42,6 +42,7 @@ config SPARC
 	select MODULES_USE_ELF_RELA
 	select ODD_RT_SIGACTION
 	select OLD_SIGSUSPEND
+	select ARCH_HAS_SG_CHAIN

 config SPARC32
 	def_bool !64BIT
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index a45821818003..cdd1b447bb6c 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -15,6 +15,7 @@ generic-y += mcs_spinlock.h
 generic-y += module.h
 generic-y += mutex.h
 generic-y += preempt.h
+generic-y += scatterlist.h
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += types.h
diff --git a/arch/sparc/include/asm/scatterlist.h b/arch/sparc/include/asm/scatterlist.h
deleted file mode 100644
index 92bb638313f8..000000000000
--- a/arch/sparc/include/asm/scatterlist.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _SPARC_SCATTERLIST_H
-#define _SPARC_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#define ARCH_HAS_SG_CHAIN
-
-#endif /* !(_SPARC_SCATTERLIST_H) */
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 7fcd492adbfc..a3ffe2dd4832 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -191,6 +191,8 @@ source "kernel/Kconfig.hz"

 config KEXEC
 	bool "kexec system call"
+	select CRYPTO
+	select CRYPTO_SHA256
 	---help---
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/tile/include/asm/hardwall.h b/arch/tile/include/asm/hardwall.h
index 2f572b6b7bc2..44d2765bde2b 100644
--- a/arch/tile/include/asm/hardwall.h
+++ b/arch/tile/include/asm/hardwall.h
@@ -23,7 +23,7 @@
 struct proc_dir_entry;
 #ifdef CONFIG_HARDWALL
 void proc_tile_hardwall_init(struct proc_dir_entry *root);
-int proc_pid_hardwall(struct task_struct *task, char *buffer);
+int proc_pid_hardwall(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task);
 #else
 static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
 #endif
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 672768008618..a213a8d84a95 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -39,12 +39,6 @@
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1))

 /*
- * We do define AT_SYSINFO_EHDR to support vDSO,
- * but don't use the gate mechanism.
- */
-#define __HAVE_ARCH_GATE_AREA 1
-
-/*
  * If the Kconfig doesn't specify, set a maximum zone order that
  * is enough so that we can create huge pages from small pages given
  * the respective sizes of the two page types. See <linux/mmzone.h>.
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 531f4c365351..aca6000bca75 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -947,15 +947,15 @@ static void hardwall_remove_proc(struct hardwall_info *info)
 	remove_proc_entry(buf, info->type->proc_dir);
 }

-int proc_pid_hardwall(struct task_struct *task, char *buffer)
+int proc_pid_hardwall(struct seq_file *m, struct pid_namespace *ns,
+		      struct pid *pid, struct task_struct *task)
 {
 	int i;
 	int n = 0;
 	for (i = 0; i < HARDWALL_TYPES; ++i) {
 		struct hardwall_info *info = task->thread.hardwall[i].info;
 		if (info)
-			n += sprintf(&buffer[n], "%s: %d\n",
-				     info->type->name, info->id);
+			seq_printf(m, "%s: %d\n", info->type->name, info->id);
 		}
 	return n;
 }
diff --git a/arch/tile/kernel/vdso.c b/arch/tile/kernel/vdso.c
index 1533af24106e..5bc51d7dfdcb 100644
--- a/arch/tile/kernel/vdso.c
+++ b/arch/tile/kernel/vdso.c
@@ -121,21 +121,6 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 	return NULL;
 }

-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-	return NULL;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long address)
-{
-	return 0;
-}
-
-int in_gate_area_no_mm(unsigned long address)
-{
-	return 0;
-}
-
 int setup_vdso_pages(void)
 {
 	struct page **pagelist;
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index a5e4b6068213..7bd64aa2e94a 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -21,6 +21,7 @@ generic-y += param.h
 generic-y += pci.h
 generic-y += percpu.h
 generic-y += preempt.h
+generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += switch_to.h
 generic-y += topology.h
diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
index 5ff53d9185f7..71c5d132062a 100644
--- a/arch/um/include/asm/page.h
+++ b/arch/um/include/asm/page.h
@@ -119,4 +119,9 @@ extern unsigned long uml_physmem;
 #include <asm-generic/getorder.h>

 #endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_X86_32
+#define __HAVE_ARCH_GATE_AREA 1
+#endif
+
 #endif /* __UM_PAGE_H */
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index e5287d8517aa..61b6d51866f8 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -16,3 +16,7 @@ obj-$(CONFIG_IA32_EMULATION) += ia32/

 obj-y += platform/
 obj-y += net/
+
+ifeq ($(CONFIG_X86_64),y)
+obj-$(CONFIG_KEXEC) += purgatory/
+endif
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bf2405053af5..4aafd322e21e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -96,6 +96,7 @@ config X86
 	select IRQ_FORCED_THREADING
 	select HAVE_BPF_JIT if X86_64
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select ARCH_HAS_SG_CHAIN
 	select CLKEVT_I8253
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_IOMAP
@@ -1581,6 +1582,9 @@ source kernel/Kconfig.hz

 config KEXEC
 	bool "kexec system call"
+	select BUILD_BIN2C
+	select CRYPTO
+	select CRYPTO_SHA256
 	---help---
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
@@ -1595,6 +1599,28 @@ config KEXEC
 	  interface is strongly in flux, so no good recommendation can be
 	  made.

+config KEXEC_VERIFY_SIG
+	bool "Verify kernel signature during kexec_file_load() syscall"
+	depends on KEXEC
+	---help---
+	  This option makes kernel signature verification mandatory for
+	  kexec_file_load() syscall. If kernel is signature can not be
+	  verified, kexec_file_load() will fail.
+
+	  This option enforces signature verification at generic level.
+	  One needs to enable signature verification for type of kernel
+	  image being loaded to make sure it works. For example, enable
+	  bzImage signature verification option to be able to load and
+	  verify signatures of bzImage. Otherwise kernel loading will fail.
+
+config KEXEC_BZIMAGE_VERIFY_SIG
+	bool "Enable bzImage signature verification support"
+	depends on KEXEC_VERIFY_SIG
+	depends on SIGNED_PE_FILE_VERIFICATION
+	select SYSTEM_TRUSTED_KEYRING
+	---help---
+	  Enable bzImage signature verification support.
+
 config CRASH_DUMP
 	bool "kernel crash dumps"
 	depends on X86_64 || (X86_32 && HIGHMEM)
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index c65fd9650467..c1aa36887843 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -183,6 +183,14 @@ archscripts: scripts_basic
 archheaders:
 	$(Q)$(MAKE) $(build)=arch/x86/syscalls all

+archprepare:
+ifeq ($(CONFIG_KEXEC),y)
+# Build only for 64bit. No loaders for 32bit yet.
+ ifeq ($(CONFIG_X86_64),y)
+	$(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
+ endif
+endif
+
 ###
 # Kernel objects

diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 3ca9762e1649..3bf000fab0ae 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -5,6 +5,7 @@ genhdr-y += unistd_64.h
 genhdr-y += unistd_x32.h

 generic-y += clkdev.h
-generic-y += early_ioremap.h
 generic-y += cputime.h
+generic-y += early_ioremap.h
 generic-y += mcs_spinlock.h
+generic-y += scatterlist.h
diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
new file mode 100644
index 000000000000..f498411f2500
--- /dev/null
+++ b/arch/x86/include/asm/crash.h
@@ -0,0 +1,9 @@
+#ifndef _ASM_X86_CRASH_H
+#define _ASM_X86_CRASH_H
+
+int crash_load_segments(struct kimage *image);
+int crash_copy_backup_region(struct kimage *image);
+int crash_setup_memmap_entries(struct kimage *image,
+		struct boot_params *params);
+
+#endif /* _ASM_X86_CRASH_H */
diff --git a/arch/x86/include/asm/kexec-bzimage64.h b/arch/x86/include/asm/kexec-bzimage64.h
new file mode 100644
index 000000000000..d1b5d194e31d
--- /dev/null
+++ b/arch/x86/include/asm/kexec-bzimage64.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_KEXEC_BZIMAGE64_H
+#define _ASM_KEXEC_BZIMAGE64_H
+
+extern struct kexec_file_ops kexec_bzImage64_ops;
+
+#endif  /* _ASM_KEXE_BZIMAGE64_H */
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 17483a492f18..d2434c1cad05 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -23,6 +23,9 @@

 #include <asm/page.h>
 #include <asm/ptrace.h>
+#include <asm/bootparam.h>
+
+struct kimage;

 /*
  * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
@@ -61,6 +64,10 @@
 # define KEXEC_ARCH KEXEC_ARCH_X86_64
 #endif

+/* Memory to backup during crash kdump */
+#define KEXEC_BACKUP_SRC_START	(0UL)
+#define KEXEC_BACKUP_SRC_END	(640 * 1024UL)	/* 640K */
+
 /*
  * CPU does not save ss and sp on stack if execution is already
  * running in kernel mode at the time of NMI occurrence. This code
@@ -160,6 +167,44 @@ struct kimage_arch {
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
+	/* Details of backup region */
+	unsigned long backup_src_start;
+	unsigned long backup_src_sz;
+
+	/* Physical address of backup segment */
+	unsigned long backup_load_addr;
+
+	/* Core ELF header buffer */
+	void *elf_headers;
+	unsigned long elf_headers_sz;
+	unsigned long elf_load_addr;
+};
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_X86_64
+/*
+ * Number of elements and order of elements in this structure should match
+ * with the ones in arch/x86/purgatory/entry64.S. If you make a change here
+ * make an appropriate change in purgatory too.
+ */
+struct kexec_entry64_regs {
+	uint64_t rax;
+	uint64_t rcx;
+	uint64_t rdx;
+	uint64_t rbx;
+	uint64_t rsp;
+	uint64_t rbp;
+	uint64_t rsi;
+	uint64_t rdi;
+	uint64_t r8;
+	uint64_t r9;
+	uint64_t r10;
+	uint64_t r11;
+	uint64_t r12;
+	uint64_t r13;
+	uint64_t r14;
+	uint64_t r15;
+	uint64_t rip;
 };
 #endif

diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 775873d3be55..802dde30c928 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -70,7 +70,6 @@ extern bool __virt_addr_valid(unsigned long kaddr);
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>

-#define __HAVE_ARCH_GATE_AREA 1
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

 #endif	/* __KERNEL__ */
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 0f1ddee6a0ce..f408caf73430 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -39,4 +39,6 @@ void copy_page(void *to, void *from);

 #endif	/* !__ASSEMBLY__ */

+#define __HAVE_ARCH_GATE_AREA 1
+
 #endif /* _ASM_X86_PAGE_64_H */
diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h
deleted file mode 100644
index 4240878b9d76..000000000000
--- a/arch/x86/include/asm/scatterlist.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _ASM_X86_SCATTERLIST_H
-#define _ASM_X86_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#define ARCH_HAS_SG_CHAIN
-
-#endif /* _ASM_X86_SCATTERLIST_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index bde3993624f1..b5ea75c4a4b4 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -118,4 +118,5 @@ ifeq ($(CONFIG_X86_64),y)

 	obj-$(CONFIG_PCI_MMCONFIG)	+= mmconf-fam10h_64.o
 	obj-y				+= vsmp_64.o
+	obj-$(CONFIG_KEXEC)		+= kexec-bzimage64.o
 endif
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 9c8f7394c612..c7035073dfc1 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -461,7 +461,7 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,

 	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;

 	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
@@ -511,7 +511,7 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
511 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) 511 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
512 return -EINVAL; 512 return -EINVAL;
513 513
514 if (strict_strtoul(buf, 16, &val) < 0) 514 if (kstrtoul(buf, 16, &val) < 0)
515 return -EINVAL; 515 return -EINVAL;
516 516
517 if (amd_set_subcaches(cpu, val)) 517 if (amd_set_subcaches(cpu, val))
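These call sites switch from the long-deprecated strict_strtoul()/strict_strtoull() helpers to kstrtoul()/kstrtou64(). Both families share the convention the surrounding checks rely on: return 0 on success and a negative errno on failure, with the parsed value written through a result pointer, so the existing "< 0" tests stay valid. A standalone userspace sketch of that convention (kstrtoul_like() is an illustrative stand-in, not the kernel implementation):

/* Userspace sketch of the kstrtoul() calling convention these hunks rely on:
 * 0 on success, a negative errno otherwise, value returned via a pointer. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int kstrtoul_like(const char *s, unsigned int base, unsigned long *res)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(s, &end, base);
	if (errno == ERANGE)
		return -ERANGE;
	if (end == s || (*end != '\0' && *end != '\n'))
		return -EINVAL;
	*res = val;
	return 0;
}

int main(void)
{
	unsigned long val;

	/* Same shape as the converted call sites: reject on negative return */
	if (kstrtoul_like("42\n", 10, &val) < 0)
		return 1;
	printf("parsed %lu\n", val);
	return 0;
}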
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 4fc57975acc1..bd9ccda8087f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2136,7 +2136,7 @@ static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2136{ 2136{
2137 u64 new; 2137 u64 new;
2138 2138
2139 if (strict_strtoull(buf, 0, &new) < 0) 2139 if (kstrtou64(buf, 0, &new) < 0)
2140 return -EINVAL; 2140 return -EINVAL;
2141 2141
2142 attr_to_bank(attr)->ctl = new; 2142 attr_to_bank(attr)->ctl = new;
@@ -2174,7 +2174,7 @@ static ssize_t set_ignore_ce(struct device *s,
2174{ 2174{
2175 u64 new; 2175 u64 new;
2176 2176
2177 if (strict_strtoull(buf, 0, &new) < 0) 2177 if (kstrtou64(buf, 0, &new) < 0)
2178 return -EINVAL; 2178 return -EINVAL;
2179 2179
2180 if (mca_cfg.ignore_ce ^ !!new) { 2180 if (mca_cfg.ignore_ce ^ !!new) {
@@ -2198,7 +2198,7 @@ static ssize_t set_cmci_disabled(struct device *s,
2198{ 2198{
2199 u64 new; 2199 u64 new;
2200 2200
2201 if (strict_strtoull(buf, 0, &new) < 0) 2201 if (kstrtou64(buf, 0, &new) < 0)
2202 return -EINVAL; 2202 return -EINVAL;
2203 2203
2204 if (mca_cfg.cmci_disabled ^ !!new) { 2204 if (mca_cfg.cmci_disabled ^ !!new) {
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 603df4f74640..1e49f8f41276 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -353,7 +353,7 @@ store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
353 if (!b->interrupt_capable) 353 if (!b->interrupt_capable)
354 return -EINVAL; 354 return -EINVAL;
355 355
356 if (strict_strtoul(buf, 0, &new) < 0) 356 if (kstrtoul(buf, 0, &new) < 0)
357 return -EINVAL; 357 return -EINVAL;
358 358
359 b->interrupt_enable = !!new; 359 b->interrupt_enable = !!new;
@@ -372,7 +372,7 @@ store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
372 struct thresh_restart tr; 372 struct thresh_restart tr;
373 unsigned long new; 373 unsigned long new;
374 374
375 if (strict_strtoul(buf, 0, &new) < 0) 375 if (kstrtoul(buf, 0, &new) < 0)
376 return -EINVAL; 376 return -EINVAL;
377 377
378 if (new > THRESHOLD_MAX) 378 if (new > THRESHOLD_MAX)
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 507de8066594..0553a34fa0df 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -4,9 +4,14 @@
4 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) 4 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
5 * 5 *
6 * Copyright (C) IBM Corporation, 2004. All rights reserved. 6 * Copyright (C) IBM Corporation, 2004. All rights reserved.
7 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
8 * Authors:
9 * Vivek Goyal <vgoyal@redhat.com>
7 * 10 *
8 */ 11 */
9 12
13#define pr_fmt(fmt) "kexec: " fmt
14
10#include <linux/types.h> 15#include <linux/types.h>
11#include <linux/kernel.h> 16#include <linux/kernel.h>
12#include <linux/smp.h> 17#include <linux/smp.h>
@@ -16,6 +21,7 @@
16#include <linux/elf.h> 21#include <linux/elf.h>
17#include <linux/elfcore.h> 22#include <linux/elfcore.h>
18#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/slab.h>
19 25
20#include <asm/processor.h> 26#include <asm/processor.h>
21#include <asm/hardirq.h> 27#include <asm/hardirq.h>
@@ -28,6 +34,45 @@
28#include <asm/reboot.h> 34#include <asm/reboot.h>
29#include <asm/virtext.h> 35#include <asm/virtext.h>
30 36
37/* Alignment required for elf header segment */
38#define ELF_CORE_HEADER_ALIGN 4096
39
40/* This primarily represents number of split ranges due to exclusion */
41#define CRASH_MAX_RANGES 16
42
43struct crash_mem_range {
44 u64 start, end;
45};
46
47struct crash_mem {
48 unsigned int nr_ranges;
49 struct crash_mem_range ranges[CRASH_MAX_RANGES];
50};
51
52/* Misc data about ram ranges needed to prepare elf headers */
53struct crash_elf_data {
54 struct kimage *image;
55 /*
56 * Total number of ram ranges we have after various adjustments for
57 * GART, crash reserved region etc.
58 */
59 unsigned int max_nr_ranges;
60 unsigned long gart_start, gart_end;
61
62 /* Pointer to elf header */
63 void *ehdr;
64 /* Pointer to next phdr */
65 void *bufp;
66 struct crash_mem mem;
67};
68
69/* Used while preparing memory map entries for second kernel */
70struct crash_memmap_data {
71 struct boot_params *params;
72 /* Type of memory */
73 unsigned int type;
74};
75
31int in_crash_kexec; 76int in_crash_kexec;
32 77
33/* 78/*
@@ -39,6 +84,7 @@ int in_crash_kexec;
39 */ 84 */
40crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL; 85crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
41EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss); 86EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
87unsigned long crash_zero_bytes;
42 88
43static inline void cpu_crash_vmclear_loaded_vmcss(void) 89static inline void cpu_crash_vmclear_loaded_vmcss(void)
44{ 90{
@@ -135,3 +181,520 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
135#endif 181#endif
136 crash_save_cpu(regs, safe_smp_processor_id()); 182 crash_save_cpu(regs, safe_smp_processor_id());
137} 183}
184
185#ifdef CONFIG_X86_64
186
187static int get_nr_ram_ranges_callback(unsigned long start_pfn,
188 unsigned long nr_pfn, void *arg)
189{
190 int *nr_ranges = arg;
191
192 (*nr_ranges)++;
193 return 0;
194}
195
196static int get_gart_ranges_callback(u64 start, u64 end, void *arg)
197{
198 struct crash_elf_data *ced = arg;
199
200 ced->gart_start = start;
201 ced->gart_end = end;
202
203 /* Not expecting more than 1 gart aperture */
204 return 1;
205}
206
207
208/* Gather all the required information to prepare elf headers for ram regions */
209static void fill_up_crash_elf_data(struct crash_elf_data *ced,
210 struct kimage *image)
211{
212 unsigned int nr_ranges = 0;
213
214 ced->image = image;
215
216 walk_system_ram_range(0, -1, &nr_ranges,
217 get_nr_ram_ranges_callback);
218
219 ced->max_nr_ranges = nr_ranges;
220
221 /*
222 * We don't create ELF headers for GART aperture as an attempt
223 * to dump this memory in second kernel leads to hang/crash.
224 * If gart aperture is present, one needs to exclude that region
225 * and that could require an extra phdr.
226 */
227 walk_iomem_res("GART", IORESOURCE_MEM, 0, -1,
228 ced, get_gart_ranges_callback);
229
230 /*
231 * If we have gart region, excluding that could potentially split
232 * a memory range, resulting in extra header. Account for that.
233 */
234 if (ced->gart_end)
235 ced->max_nr_ranges++;
236
237 /* Exclusion of crash region could split memory ranges */
238 ced->max_nr_ranges++;
239
240 /* If crashk_low_res is not 0, another range split possible */
241 if (crashk_low_res.end != 0)
242 ced->max_nr_ranges++;
243}
244
245static int exclude_mem_range(struct crash_mem *mem,
246 unsigned long long mstart, unsigned long long mend)
247{
248 int i, j;
249 unsigned long long start, end;
250 struct crash_mem_range temp_range = {0, 0};
251
252 for (i = 0; i < mem->nr_ranges; i++) {
253 start = mem->ranges[i].start;
254 end = mem->ranges[i].end;
255
256 if (mstart > end || mend < start)
257 continue;
258
259 /* Truncate any area outside of range */
260 if (mstart < start)
261 mstart = start;
262 if (mend > end)
263 mend = end;
264
265 /* Found completely overlapping range */
266 if (mstart == start && mend == end) {
267 mem->ranges[i].start = 0;
268 mem->ranges[i].end = 0;
269 if (i < mem->nr_ranges - 1) {
270 /* Shift rest of the ranges to left */
271 for (j = i; j < mem->nr_ranges - 1; j++) {
272 mem->ranges[j].start =
273 mem->ranges[j+1].start;
274 mem->ranges[j].end =
275 mem->ranges[j+1].end;
276 }
277 }
278 mem->nr_ranges--;
279 return 0;
280 }
281
282 if (mstart > start && mend < end) {
283 /* Split original range */
284 mem->ranges[i].end = mstart - 1;
285 temp_range.start = mend + 1;
286 temp_range.end = end;
287 } else if (mstart != start)
288 mem->ranges[i].end = mstart - 1;
289 else
290 mem->ranges[i].start = mend + 1;
291 break;
292 }
293
294 /* If a split happened, add the split to the array */
295 if (!temp_range.end)
296 return 0;
297
298 /* Split happened */
299 if (i == CRASH_MAX_RANGES - 1) {
300 pr_err("Too many crash ranges after split\n");
301 return -ENOMEM;
302 }
303
304 /* Location where new range should go */
305 j = i + 1;
306 if (j < mem->nr_ranges) {
307 /* Move over all ranges one slot towards the end */
308 for (i = mem->nr_ranges - 1; i >= j; i--)
309 mem->ranges[i + 1] = mem->ranges[i];
310 }
311
312 mem->ranges[j].start = temp_range.start;
313 mem->ranges[j].end = temp_range.end;
314 mem->nr_ranges++;
315 return 0;
316}
317
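To make the splitting behaviour of exclude_mem_range() concrete, here is a small, self-contained userspace sketch (not part of the patch; the addresses are made up): removing a hole strictly inside a single range yields two ranges, which is exactly why max_nr_ranges is bumped for each possible exclusion.

/* Standalone illustration of the range-exclusion logic above. */
#include <stdio.h>

struct range { unsigned long long start, end; };

int main(void)
{
	struct range ram = { 0x100000, 0x7fffffff };	/* one RAM range      */
	unsigned long long hole_start = 0x20000000;	/* e.g. crashkernel   */
	unsigned long long hole_end   = 0x2fffffff;

	/* Excluding a hole strictly inside the range splits it in two */
	struct range lo = { ram.start, hole_start - 1 };
	struct range hi = { hole_end + 1, ram.end };

	printf("[%#llx-%#llx] minus [%#llx-%#llx] ->\n",
	       ram.start, ram.end, hole_start, hole_end);
	printf("  [%#llx-%#llx] and [%#llx-%#llx]\n",
	       lo.start, lo.end, hi.start, hi.end);
	return 0;
}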
318/*
319 * Look for any unwanted ranges between mstart, mend and remove them. This
320 * might lead to split and split ranges are put in ced->mem.ranges[] array
321 */
322static int elf_header_exclude_ranges(struct crash_elf_data *ced,
323 unsigned long long mstart, unsigned long long mend)
324{
325 struct crash_mem *cmem = &ced->mem;
326 int ret = 0;
327
328 memset(cmem->ranges, 0, sizeof(cmem->ranges));
329
330 cmem->ranges[0].start = mstart;
331 cmem->ranges[0].end = mend;
332 cmem->nr_ranges = 1;
333
334 /* Exclude crashkernel region */
335 ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
336 if (ret)
337 return ret;
338
339 ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
340 if (ret)
341 return ret;
342
343 /* Exclude GART region */
344 if (ced->gart_end) {
345 ret = exclude_mem_range(cmem, ced->gart_start, ced->gart_end);
346 if (ret)
347 return ret;
348 }
349
350 return ret;
351}
352
353static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
354{
355 struct crash_elf_data *ced = arg;
356 Elf64_Ehdr *ehdr;
357 Elf64_Phdr *phdr;
358 unsigned long mstart, mend;
359 struct kimage *image = ced->image;
360 struct crash_mem *cmem;
361 int ret, i;
362
363 ehdr = ced->ehdr;
364
365 /* Exclude unwanted mem ranges */
366 ret = elf_header_exclude_ranges(ced, start, end);
367 if (ret)
368 return ret;
369
370 /* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
371 cmem = &ced->mem;
372
373 for (i = 0; i < cmem->nr_ranges; i++) {
374 mstart = cmem->ranges[i].start;
375 mend = cmem->ranges[i].end;
376
377 phdr = ced->bufp;
378 ced->bufp += sizeof(Elf64_Phdr);
379
380 phdr->p_type = PT_LOAD;
381 phdr->p_flags = PF_R|PF_W|PF_X;
382 phdr->p_offset = mstart;
383
384 /*
385 * If a range matches backup region, adjust offset to backup
386 * segment.
387 */
388 if (mstart == image->arch.backup_src_start &&
389 (mend - mstart + 1) == image->arch.backup_src_sz)
390 phdr->p_offset = image->arch.backup_load_addr;
391
392 phdr->p_paddr = mstart;
393 phdr->p_vaddr = (unsigned long long) __va(mstart);
394 phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
395 phdr->p_align = 0;
396 ehdr->e_phnum++;
397 pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
398 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
399 ehdr->e_phnum, phdr->p_offset);
400 }
401
402 return ret;
403}
404
405static int prepare_elf64_headers(struct crash_elf_data *ced,
406 void **addr, unsigned long *sz)
407{
408 Elf64_Ehdr *ehdr;
409 Elf64_Phdr *phdr;
410 unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
411 unsigned char *buf, *bufp;
412 unsigned int cpu;
413 unsigned long long notes_addr;
414 int ret;
415
416 /* extra phdr for vmcoreinfo elf note */
417 nr_phdr = nr_cpus + 1;
418 nr_phdr += ced->max_nr_ranges;
419
420 /*
421 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
422 * area on x86_64 (ffffffff80000000 - ffffffffa0000000).
423 * I think this is required by tools like gdb. So same physical
424 * memory will be mapped in two elf headers. One will contain kernel
425 * text virtual addresses and the other will have __va(physical) addresses.
426 */
427
428 nr_phdr++;
429 elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
430 elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);
431
432 buf = vzalloc(elf_sz);
433 if (!buf)
434 return -ENOMEM;
435
436 bufp = buf;
437 ehdr = (Elf64_Ehdr *)bufp;
438 bufp += sizeof(Elf64_Ehdr);
439 memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
440 ehdr->e_ident[EI_CLASS] = ELFCLASS64;
441 ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
442 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
443 ehdr->e_ident[EI_OSABI] = ELF_OSABI;
444 memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
445 ehdr->e_type = ET_CORE;
446 ehdr->e_machine = ELF_ARCH;
447 ehdr->e_version = EV_CURRENT;
448 ehdr->e_phoff = sizeof(Elf64_Ehdr);
449 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
450 ehdr->e_phentsize = sizeof(Elf64_Phdr);
451
452 /* Prepare one phdr of type PT_NOTE for each present cpu */
453 for_each_present_cpu(cpu) {
454 phdr = (Elf64_Phdr *)bufp;
455 bufp += sizeof(Elf64_Phdr);
456 phdr->p_type = PT_NOTE;
457 notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
458 phdr->p_offset = phdr->p_paddr = notes_addr;
459 phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
460 (ehdr->e_phnum)++;
461 }
462
463 /* Prepare one PT_NOTE header for vmcoreinfo */
464 phdr = (Elf64_Phdr *)bufp;
465 bufp += sizeof(Elf64_Phdr);
466 phdr->p_type = PT_NOTE;
467 phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
468 phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
469 (ehdr->e_phnum)++;
470
471#ifdef CONFIG_X86_64
472 /* Prepare PT_LOAD type program header for kernel text region */
473 phdr = (Elf64_Phdr *)bufp;
474 bufp += sizeof(Elf64_Phdr);
475 phdr->p_type = PT_LOAD;
476 phdr->p_flags = PF_R|PF_W|PF_X;
477 phdr->p_vaddr = (Elf64_Addr)_text;
478 phdr->p_filesz = phdr->p_memsz = _end - _text;
479 phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
480 (ehdr->e_phnum)++;
481#endif
482
483 /* Prepare PT_LOAD headers for system ram chunks. */
484 ced->ehdr = ehdr;
485 ced->bufp = bufp;
486 ret = walk_system_ram_res(0, -1, ced,
487 prepare_elf64_ram_headers_callback);
488 if (ret < 0)
489 return ret;
490
491 *addr = buf;
492 *sz = elf_sz;
493 return 0;
494}
495
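For a feel of the sizes involved, the standalone sketch below (with made-up CPU and range counts) reproduces the phdr accounting used by prepare_elf64_headers(): one PT_NOTE per present CPU, one PT_NOTE for vmcoreinfo, one PT_LOAD per RAM range, plus the extra PT_LOAD for the kernel text mapping, rounded up to ELF_CORE_HEADER_ALIGN.

/* Standalone sketch of the ELF core header buffer sizing; the cpu and
 * range counts are hypothetical. */
#include <stdio.h>
#include <elf.h>

#define ELF_CORE_HEADER_ALIGN	4096UL
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long nr_cpus = 8, max_nr_ranges = 6;	/* example values  */
	unsigned long nr_phdr = nr_cpus + 1		/* vmcoreinfo note */
				+ max_nr_ranges		/* RAM PT_LOADs    */
				+ 1;			/* kernel text     */
	unsigned long elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);

	elf_sz = ALIGN_UP(elf_sz, ELF_CORE_HEADER_ALIGN);
	printf("%lu program headers -> %lu byte header segment\n", nr_phdr, elf_sz);
	return 0;
}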
496/* Prepare elf headers. Return addr and size */
497static int prepare_elf_headers(struct kimage *image, void **addr,
498 unsigned long *sz)
499{
500 struct crash_elf_data *ced;
501 int ret;
502
503 ced = kzalloc(sizeof(*ced), GFP_KERNEL);
504 if (!ced)
505 return -ENOMEM;
506
507 fill_up_crash_elf_data(ced, image);
508
509 /* By default prepare 64bit headers */
510 ret = prepare_elf64_headers(ced, addr, sz);
511 kfree(ced);
512 return ret;
513}
514
515static int add_e820_entry(struct boot_params *params, struct e820entry *entry)
516{
517 unsigned int nr_e820_entries;
518
519 nr_e820_entries = params->e820_entries;
520 if (nr_e820_entries >= E820MAX)
521 return 1;
522
523 memcpy(&params->e820_map[nr_e820_entries], entry,
524 sizeof(struct e820entry));
525 params->e820_entries++;
526 return 0;
527}
528
529static int memmap_entry_callback(u64 start, u64 end, void *arg)
530{
531 struct crash_memmap_data *cmd = arg;
532 struct boot_params *params = cmd->params;
533 struct e820entry ei;
534
535 ei.addr = start;
536 ei.size = end - start + 1;
537 ei.type = cmd->type;
538 add_e820_entry(params, &ei);
539
540 return 0;
541}
542
543static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
544 unsigned long long mstart,
545 unsigned long long mend)
546{
547 unsigned long start, end;
548 int ret = 0;
549
550 cmem->ranges[0].start = mstart;
551 cmem->ranges[0].end = mend;
552 cmem->nr_ranges = 1;
553
554 /* Exclude Backup region */
555 start = image->arch.backup_load_addr;
556 end = start + image->arch.backup_src_sz - 1;
557 ret = exclude_mem_range(cmem, start, end);
558 if (ret)
559 return ret;
560
561 /* Exclude elf header region */
562 start = image->arch.elf_load_addr;
563 end = start + image->arch.elf_headers_sz - 1;
564 return exclude_mem_range(cmem, start, end);
565}
566
567/* Prepare memory map for crash dump kernel */
568int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
569{
570 int i, ret = 0;
571 unsigned long flags;
572 struct e820entry ei;
573 struct crash_memmap_data cmd;
574 struct crash_mem *cmem;
575
576 cmem = vzalloc(sizeof(struct crash_mem));
577 if (!cmem)
578 return -ENOMEM;
579
580 memset(&cmd, 0, sizeof(struct crash_memmap_data));
581 cmd.params = params;
582
583 /* Add first 640K segment */
584 ei.addr = image->arch.backup_src_start;
585 ei.size = image->arch.backup_src_sz;
586 ei.type = E820_RAM;
587 add_e820_entry(params, &ei);
588
589 /* Add ACPI tables */
590 cmd.type = E820_ACPI;
591 flags = IORESOURCE_MEM | IORESOURCE_BUSY;
592 walk_iomem_res("ACPI Tables", flags, 0, -1, &cmd,
593 memmap_entry_callback);
594
595 /* Add ACPI Non-volatile Storage */
596 cmd.type = E820_NVS;
597 walk_iomem_res("ACPI Non-volatile Storage", flags, 0, -1, &cmd,
598 memmap_entry_callback);
599
600 /* Add crashk_low_res region */
601 if (crashk_low_res.end) {
602 ei.addr = crashk_low_res.start;
603 ei.size = crashk_low_res.end - crashk_low_res.start + 1;
604 ei.type = E820_RAM;
605 add_e820_entry(params, &ei);
606 }
607
608 /* Exclude some ranges from crashk_res and add rest to memmap */
609 ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
610 crashk_res.end);
611 if (ret)
612 goto out;
613
614 for (i = 0; i < cmem->nr_ranges; i++) {
615 ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;
616
617 /* If entry is less than a page, skip it */
618 if (ei.size < PAGE_SIZE)
619 continue;
620 ei.addr = cmem->ranges[i].start;
621 ei.type = E820_RAM;
622 add_e820_entry(params, &ei);
623 }
624
625out:
626 vfree(cmem);
627 return ret;
628}
629
630static int determine_backup_region(u64 start, u64 end, void *arg)
631{
632 struct kimage *image = arg;
633
634 image->arch.backup_src_start = start;
635 image->arch.backup_src_sz = end - start + 1;
636
637 /* Expecting only one range for backup region */
638 return 1;
639}
640
641int crash_load_segments(struct kimage *image)
642{
643 unsigned long src_start, src_sz, elf_sz;
644 void *elf_addr;
645 int ret;
646
647 /*
648 * Determine and load a segment for backup area. First 640K RAM
649 * region is backup source
650 */
651
652 ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
653 image, determine_backup_region);
654
655 /* Zero or positive return values are ok */
656 if (ret < 0)
657 return ret;
658
659 src_start = image->arch.backup_src_start;
660 src_sz = image->arch.backup_src_sz;
661
662 /* Add backup segment. */
663 if (src_sz) {
664 /*
665 * Ideally there is no source for backup segment. This is
666 * copied in purgatory after crash. Just add a zero filled
667 * segment for now to make sure checksum logic works fine.
668 */
669 ret = kexec_add_buffer(image, (char *)&crash_zero_bytes,
670 sizeof(crash_zero_bytes), src_sz,
671 PAGE_SIZE, 0, -1, 0,
672 &image->arch.backup_load_addr);
673 if (ret)
674 return ret;
675 pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
676 image->arch.backup_load_addr, src_start, src_sz);
677 }
678
679 /* Prepare elf headers and add a segment */
680 ret = prepare_elf_headers(image, &elf_addr, &elf_sz);
681 if (ret)
682 return ret;
683
684 image->arch.elf_headers = elf_addr;
685 image->arch.elf_headers_sz = elf_sz;
686
687 ret = kexec_add_buffer(image, (char *)elf_addr, elf_sz, elf_sz,
688 ELF_CORE_HEADER_ALIGN, 0, -1, 0,
689 &image->arch.elf_load_addr);
690 if (ret) {
691 vfree((void *)image->arch.elf_headers);
692 return ret;
693 }
694 pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
695 image->arch.elf_load_addr, elf_sz, elf_sz);
696
697 return ret;
698}
699
700#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
new file mode 100644
index 000000000000..9642b9b33655
--- /dev/null
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -0,0 +1,553 @@
1/*
2 * Kexec bzImage loader
3 *
4 * Copyright (C) 2014 Red Hat Inc.
5 * Authors:
6 * Vivek Goyal <vgoyal@redhat.com>
7 *
8 * This source code is licensed under the GNU General Public License,
9 * Version 2. See the file COPYING for more details.
10 */
11
12#define pr_fmt(fmt) "kexec-bzImage64: " fmt
13
14#include <linux/string.h>
15#include <linux/printk.h>
16#include <linux/errno.h>
17#include <linux/slab.h>
18#include <linux/kexec.h>
19#include <linux/kernel.h>
20#include <linux/mm.h>
21#include <linux/efi.h>
22#include <linux/verify_pefile.h>
23#include <keys/system_keyring.h>
24
25#include <asm/bootparam.h>
26#include <asm/setup.h>
27#include <asm/crash.h>
28#include <asm/efi.h>
29
30#define MAX_ELFCOREHDR_STR_LEN 30 /* elfcorehdr=0x<64bit-value> */
31
32/*
33 * Defines lowest physical address for various segments. Not sure where
34 * exactly these limits came from. Current bzimage64 loader in kexec-tools
35 * uses these so I am retaining it. It can be changed over time as we gain
36 * more insight.
37 */
38#define MIN_PURGATORY_ADDR 0x3000
39#define MIN_BOOTPARAM_ADDR 0x3000
40#define MIN_KERNEL_LOAD_ADDR 0x100000
41#define MIN_INITRD_LOAD_ADDR 0x1000000
42
43/*
44 * This is a place holder for all boot loader specific data structure which
45 * gets allocated in one call but gets freed much later during cleanup
46 * time. Right now there is only one field but it can grow as need be.
47 */
48struct bzimage64_data {
49 /*
50 * Temporary buffer to hold bootparams buffer. This should be
51 * freed once the bootparam segment has been loaded.
52 */
53 void *bootparams_buf;
54};
55
56static int setup_initrd(struct boot_params *params,
57 unsigned long initrd_load_addr, unsigned long initrd_len)
58{
59 params->hdr.ramdisk_image = initrd_load_addr & 0xffffffffUL;
60 params->hdr.ramdisk_size = initrd_len & 0xffffffffUL;
61
62 params->ext_ramdisk_image = initrd_load_addr >> 32;
63 params->ext_ramdisk_size = initrd_len >> 32;
64
65 return 0;
66}
67
68static int setup_cmdline(struct kimage *image, struct boot_params *params,
69 unsigned long bootparams_load_addr,
70 unsigned long cmdline_offset, char *cmdline,
71 unsigned long cmdline_len)
72{
73 char *cmdline_ptr = ((char *)params) + cmdline_offset;
74 unsigned long cmdline_ptr_phys, len;
75 uint32_t cmdline_low_32, cmdline_ext_32;
76
77 memcpy(cmdline_ptr, cmdline, cmdline_len);
78 if (image->type == KEXEC_TYPE_CRASH) {
79 len = sprintf(cmdline_ptr + cmdline_len - 1,
80 " elfcorehdr=0x%lx", image->arch.elf_load_addr);
81 cmdline_len += len;
82 }
83 cmdline_ptr[cmdline_len - 1] = '\0';
84
85 pr_debug("Final command line is: %s\n", cmdline_ptr);
86 cmdline_ptr_phys = bootparams_load_addr + cmdline_offset;
87 cmdline_low_32 = cmdline_ptr_phys & 0xffffffffUL;
88 cmdline_ext_32 = cmdline_ptr_phys >> 32;
89
90 params->hdr.cmd_line_ptr = cmdline_low_32;
91 if (cmdline_ext_32)
92 params->ext_cmd_line_ptr = cmdline_ext_32;
93
94 return 0;
95}
96
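MAX_ELFCOREHDR_STR_LEN (30) covers the worst case appended by setup_cmdline(): " elfcorehdr=0x" is 14 characters and a 64-bit address is at most 16 hex digits. A standalone sketch of the append (buffer size and address are made up, not taken from the patch):

/* Standalone sketch of appending elfcorehdr=<addr> to a command line. */
#include <stdio.h>
#include <string.h>

#define MAX_ELFCOREHDR_STR_LEN 30	/* " elfcorehdr=0x" + 16 hex digits */

int main(void)
{
	char cmdline[256] = "console=ttyS0 ro";	/* hypothetical cmdline */
	unsigned long elf_load_addr = 0x37000000UL;	/* hypothetical address */
	size_t len = strlen(cmdline);

	/* The appended text can never exceed MAX_ELFCOREHDR_STR_LEN bytes */
	snprintf(cmdline + len, sizeof(cmdline) - len,
		 " elfcorehdr=0x%lx", elf_load_addr);
	printf("Final command line is: %s\n", cmdline);
	return 0;
}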
97static int setup_e820_entries(struct boot_params *params)
98{
99 unsigned int nr_e820_entries;
100
101 nr_e820_entries = e820_saved.nr_map;
102
103 /* TODO: Pass entries more than E820MAX in bootparams setup data */
104 if (nr_e820_entries > E820MAX)
105 nr_e820_entries = E820MAX;
106
107 params->e820_entries = nr_e820_entries;
108 memcpy(&params->e820_map, &e820_saved.map,
109 nr_e820_entries * sizeof(struct e820entry));
110
111 return 0;
112}
113
114#ifdef CONFIG_EFI
115static int setup_efi_info_memmap(struct boot_params *params,
116 unsigned long params_load_addr,
117 unsigned int efi_map_offset,
118 unsigned int efi_map_sz)
119{
120 void *efi_map = (void *)params + efi_map_offset;
121 unsigned long efi_map_phys_addr = params_load_addr + efi_map_offset;
122 struct efi_info *ei = &params->efi_info;
123
124 if (!efi_map_sz)
125 return 0;
126
127 efi_runtime_map_copy(efi_map, efi_map_sz);
128
129 ei->efi_memmap = efi_map_phys_addr & 0xffffffff;
130 ei->efi_memmap_hi = efi_map_phys_addr >> 32;
131 ei->efi_memmap_size = efi_map_sz;
132
133 return 0;
134}
135
136static int
137prepare_add_efi_setup_data(struct boot_params *params,
138 unsigned long params_load_addr,
139 unsigned int efi_setup_data_offset)
140{
141 unsigned long setup_data_phys;
142 struct setup_data *sd = (void *)params + efi_setup_data_offset;
143 struct efi_setup_data *esd = (void *)sd + sizeof(struct setup_data);
144
145 esd->fw_vendor = efi.fw_vendor;
146 esd->runtime = efi.runtime;
147 esd->tables = efi.config_table;
148 esd->smbios = efi.smbios;
149
150 sd->type = SETUP_EFI;
151 sd->len = sizeof(struct efi_setup_data);
152
153 /* Add setup data */
154 setup_data_phys = params_load_addr + efi_setup_data_offset;
155 sd->next = params->hdr.setup_data;
156 params->hdr.setup_data = setup_data_phys;
157
158 return 0;
159}
160
161static int
162setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
163 unsigned int efi_map_offset, unsigned int efi_map_sz,
164 unsigned int efi_setup_data_offset)
165{
166 struct efi_info *current_ei = &boot_params.efi_info;
167 struct efi_info *ei = &params->efi_info;
168
169 if (!current_ei->efi_memmap_size)
170 return 0;
171
172 /*
173 * If 1:1 mapping is not enabled, second kernel cannot set up EFI
174 * and use EFI run time services. User space will have to pass
175 * acpi_rsdp=<addr> on kernel command line to make second kernel boot
176 * without efi.
177 */
178 if (efi_enabled(EFI_OLD_MEMMAP))
179 return 0;
180
181 ei->efi_loader_signature = current_ei->efi_loader_signature;
182 ei->efi_systab = current_ei->efi_systab;
183 ei->efi_systab_hi = current_ei->efi_systab_hi;
184
185 ei->efi_memdesc_version = current_ei->efi_memdesc_version;
186 ei->efi_memdesc_size = efi_get_runtime_map_desc_size();
187
188 setup_efi_info_memmap(params, params_load_addr, efi_map_offset,
189 efi_map_sz);
190 prepare_add_efi_setup_data(params, params_load_addr,
191 efi_setup_data_offset);
192 return 0;
193}
194#endif /* CONFIG_EFI */
195
196static int
197setup_boot_parameters(struct kimage *image, struct boot_params *params,
198 unsigned long params_load_addr,
199 unsigned int efi_map_offset, unsigned int efi_map_sz,
200 unsigned int efi_setup_data_offset)
201{
202 unsigned int nr_e820_entries;
203 unsigned long long mem_k, start, end;
204 int i, ret = 0;
205
206 /* Get subarch from existing bootparams */
207 params->hdr.hardware_subarch = boot_params.hdr.hardware_subarch;
208
209 /* Copying screen_info will do? */
210 memcpy(&params->screen_info, &boot_params.screen_info,
211 sizeof(struct screen_info));
212
213 /* Fill in memsize later */
214 params->screen_info.ext_mem_k = 0;
215 params->alt_mem_k = 0;
216
217 /* Default APM info */
218 memset(&params->apm_bios_info, 0, sizeof(params->apm_bios_info));
219
220 /* Default drive info */
221 memset(&params->hd0_info, 0, sizeof(params->hd0_info));
222 memset(&params->hd1_info, 0, sizeof(params->hd1_info));
223
224 /* Default sysdesc table */
225 params->sys_desc_table.length = 0;
226
227 if (image->type == KEXEC_TYPE_CRASH) {
228 ret = crash_setup_memmap_entries(image, params);
229 if (ret)
230 return ret;
231 } else
232 setup_e820_entries(params);
233
234 nr_e820_entries = params->e820_entries;
235
236 for (i = 0; i < nr_e820_entries; i++) {
237 if (params->e820_map[i].type != E820_RAM)
238 continue;
239 start = params->e820_map[i].addr;
240 end = params->e820_map[i].addr + params->e820_map[i].size - 1;
241
242 if ((start <= 0x100000) && end > 0x100000) {
243 mem_k = (end >> 10) - (0x100000 >> 10);
244 params->screen_info.ext_mem_k = mem_k;
245 params->alt_mem_k = mem_k;
246 if (mem_k > 0xfc00)
247 params->screen_info.ext_mem_k = 0xfc00; /* 64M*/
248 if (mem_k > 0xffffffff)
249 params->alt_mem_k = 0xffffffff;
250 }
251 }
252
253#ifdef CONFIG_EFI
254 /* Setup EFI state */
255 setup_efi_state(params, params_load_addr, efi_map_offset, efi_map_sz,
256 efi_setup_data_offset);
257#endif
258
259 /* Setup EDD info */
260 memcpy(params->eddbuf, boot_params.eddbuf,
261 EDDMAXNR * sizeof(struct edd_info));
262 params->eddbuf_entries = boot_params.eddbuf_entries;
263
264 memcpy(params->edd_mbr_sig_buffer, boot_params.edd_mbr_sig_buffer,
265 EDD_MBR_SIG_MAX * sizeof(unsigned int));
266
267 return ret;
268}
269
270int bzImage64_probe(const char *buf, unsigned long len)
271{
272 int ret = -ENOEXEC;
273 struct setup_header *header;
274
275 /* kernel should be at least two sectors long */
276 if (len < 2 * 512) {
277 pr_err("File is too short to be a bzImage\n");
278 return ret;
279 }
280
281 header = (struct setup_header *)(buf + offsetof(struct boot_params, hdr));
282 if (memcmp((char *)&header->header, "HdrS", 4) != 0) {
283 pr_err("Not a bzImage\n");
284 return ret;
285 }
286
287 if (header->boot_flag != 0xAA55) {
288 pr_err("No x86 boot sector present\n");
289 return ret;
290 }
291
292 if (header->version < 0x020C) {
293 pr_err("Must be at least protocol version 2.12\n");
294 return ret;
295 }
296
297 if (!(header->loadflags & LOADED_HIGH)) {
298 pr_err("zImage not a bzImage\n");
299 return ret;
300 }
301
302 if (!(header->xloadflags & XLF_KERNEL_64)) {
303 pr_err("Not a bzImage64. XLF_KERNEL_64 is not set.\n");
304 return ret;
305 }
306
307 if (!(header->xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G)) {
308 pr_err("XLF_CAN_BE_LOADED_ABOVE_4G is not set.\n");
309 return ret;
310 }
311
312 /*
313 * Can't handle 32bit EFI as it does not allow loading kernel
314 * above 4G. This should be handled by 32bit bzImage loader
315 */
316 if (efi_enabled(EFI_RUNTIME_SERVICES) && !efi_enabled(EFI_64BIT)) {
317 pr_debug("EFI is 32 bit. Can't load kernel above 4G.\n");
318 return ret;
319 }
320
321 /* I've got a bzImage */
322 pr_debug("It's a relocatable bzImage64\n");
323 ret = 0;
324
325 return ret;
326}
327
328void *bzImage64_load(struct kimage *image, char *kernel,
329 unsigned long kernel_len, char *initrd,
330 unsigned long initrd_len, char *cmdline,
331 unsigned long cmdline_len)
332{
333
334 struct setup_header *header;
335 int setup_sects, kern16_size, ret = 0;
336 unsigned long setup_header_size, params_cmdline_sz, params_misc_sz;
337 struct boot_params *params;
338 unsigned long bootparam_load_addr, kernel_load_addr, initrd_load_addr;
339 unsigned long purgatory_load_addr;
340 unsigned long kernel_bufsz, kernel_memsz, kernel_align;
341 char *kernel_buf;
342 struct bzimage64_data *ldata;
343 struct kexec_entry64_regs regs64;
344 void *stack;
345 unsigned int setup_hdr_offset = offsetof(struct boot_params, hdr);
346 unsigned int efi_map_offset, efi_map_sz, efi_setup_data_offset;
347
348 header = (struct setup_header *)(kernel + setup_hdr_offset);
349 setup_sects = header->setup_sects;
350 if (setup_sects == 0)
351 setup_sects = 4;
352
353 kern16_size = (setup_sects + 1) * 512;
354 if (kernel_len < kern16_size) {
355 pr_err("bzImage truncated\n");
356 return ERR_PTR(-ENOEXEC);
357 }
358
359 if (cmdline_len > header->cmdline_size) {
360 pr_err("Kernel command line too long\n");
361 return ERR_PTR(-EINVAL);
362 }
363
364 /*
365 * In case of crash dump, we will append elfcorehdr=<addr> to
366 * command line. Make sure it does not overflow
367 */
368 if (cmdline_len + MAX_ELFCOREHDR_STR_LEN > header->cmdline_size) {
369 pr_debug("Appending elfcorehdr=<addr> to command line exceeds maximum allowed length\n");
370 return ERR_PTR(-EINVAL);
371 }
372
373 /* Allocate and load backup region */
374 if (image->type == KEXEC_TYPE_CRASH) {
375 ret = crash_load_segments(image);
376 if (ret)
377 return ERR_PTR(ret);
378 }
379
380 /*
381 * Load purgatory. For 64bit entry point, purgatory code can be
382 * anywhere.
383 */
384 ret = kexec_load_purgatory(image, MIN_PURGATORY_ADDR, ULONG_MAX, 1,
385 &purgatory_load_addr);
386 if (ret) {
387 pr_err("Loading purgatory failed\n");
388 return ERR_PTR(ret);
389 }
390
391 pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr);
392
393
394 /*
395 * Load Bootparams and cmdline and space for efi stuff.
396 *
397 * Allocate memory together for multiple data structures so
398 * that they all can go in single area/segment and we don't
399 * have to create separate segment for each. Keeps things
400 * a little bit simpler
401 */
402 efi_map_sz = efi_get_runtime_map_size();
403 efi_map_sz = ALIGN(efi_map_sz, 16);
404 params_cmdline_sz = sizeof(struct boot_params) + cmdline_len +
405 MAX_ELFCOREHDR_STR_LEN;
406 params_cmdline_sz = ALIGN(params_cmdline_sz, 16);
407 params_misc_sz = params_cmdline_sz + efi_map_sz +
408 sizeof(struct setup_data) +
409 sizeof(struct efi_setup_data);
410
411 params = kzalloc(params_misc_sz, GFP_KERNEL);
412 if (!params)
413 return ERR_PTR(-ENOMEM);
414 efi_map_offset = params_cmdline_sz;
415 efi_setup_data_offset = efi_map_offset + efi_map_sz;
416
417 /* Copy setup header onto bootparams. Documentation/x86/boot.txt */
418 setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset;
419
420 /* Is there a limit on setup header size? */
421 memcpy(&params->hdr, (kernel + setup_hdr_offset), setup_header_size);
422
423 ret = kexec_add_buffer(image, (char *)params, params_misc_sz,
424 params_misc_sz, 16, MIN_BOOTPARAM_ADDR,
425 ULONG_MAX, 1, &bootparam_load_addr);
426 if (ret)
427 goto out_free_params;
428 pr_debug("Loaded boot_param, command line and misc at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
429 bootparam_load_addr, params_misc_sz, params_misc_sz);
430
431 /* Load kernel */
432 kernel_buf = kernel + kern16_size;
433 kernel_bufsz = kernel_len - kern16_size;
434 kernel_memsz = PAGE_ALIGN(header->init_size);
435 kernel_align = header->kernel_alignment;
436
437 ret = kexec_add_buffer(image, kernel_buf,
438 kernel_bufsz, kernel_memsz, kernel_align,
439 MIN_KERNEL_LOAD_ADDR, ULONG_MAX, 1,
440 &kernel_load_addr);
441 if (ret)
442 goto out_free_params;
443
444 pr_debug("Loaded 64bit kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
445 kernel_load_addr, kernel_memsz, kernel_memsz);
446
447 /* Load initrd high */
448 if (initrd) {
449 ret = kexec_add_buffer(image, initrd, initrd_len, initrd_len,
450 PAGE_SIZE, MIN_INITRD_LOAD_ADDR,
451 ULONG_MAX, 1, &initrd_load_addr);
452 if (ret)
453 goto out_free_params;
454
455 pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
456 initrd_load_addr, initrd_len, initrd_len);
457
458 setup_initrd(params, initrd_load_addr, initrd_len);
459 }
460
461 setup_cmdline(image, params, bootparam_load_addr,
462 sizeof(struct boot_params), cmdline, cmdline_len);
463
464 /* bootloader info. Do we need a separate ID for kexec kernel loader? */
465 params->hdr.type_of_loader = 0x0D << 4;
466 params->hdr.loadflags = 0;
467
468 /* Setup purgatory regs for entry */
469 ret = kexec_purgatory_get_set_symbol(image, "entry64_regs", &regs64,
470 sizeof(regs64), 1);
471 if (ret)
472 goto out_free_params;
473
474 regs64.rbx = 0; /* Bootstrap Processor */
475 regs64.rsi = bootparam_load_addr;
476 regs64.rip = kernel_load_addr + 0x200;
477 stack = kexec_purgatory_get_symbol_addr(image, "stack_end");
478 if (IS_ERR(stack)) {
479 pr_err("Could not find address of symbol stack_end\n");
480 ret = -EINVAL;
481 goto out_free_params;
482 }
483
484 regs64.rsp = (unsigned long)stack;
485 ret = kexec_purgatory_get_set_symbol(image, "entry64_regs", &regs64,
486 sizeof(regs64), 0);
487 if (ret)
488 goto out_free_params;
489
490 ret = setup_boot_parameters(image, params, bootparam_load_addr,
491 efi_map_offset, efi_map_sz,
492 efi_setup_data_offset);
493 if (ret)
494 goto out_free_params;
495
496 /* Allocate loader specific data */
497 ldata = kzalloc(sizeof(struct bzimage64_data), GFP_KERNEL);
498 if (!ldata) {
499 ret = -ENOMEM;
500 goto out_free_params;
501 }
502
503 /*
504 * Store pointer to params so that it can be freed after the params
505 * segment has been loaded and its contents have been copied
506 * somewhere else.
507 */
508 ldata->bootparams_buf = params;
509 return ldata;
510
511out_free_params:
512 kfree(params);
513 return ERR_PTR(ret);
514}
515
516/* This cleanup function is called after various segments have been loaded */
517int bzImage64_cleanup(void *loader_data)
518{
519 struct bzimage64_data *ldata = loader_data;
520
521 if (!ldata)
522 return 0;
523
524 kfree(ldata->bootparams_buf);
525 ldata->bootparams_buf = NULL;
526
527 return 0;
528}
529
530#ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
531int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
532{
533 bool trusted;
534 int ret;
535
536 ret = verify_pefile_signature(kernel, kernel_len,
537 system_trusted_keyring, &trusted);
538 if (ret < 0)
539 return ret;
540 if (!trusted)
541 return -EKEYREJECTED;
542 return 0;
543}
544#endif
545
546struct kexec_file_ops kexec_bzImage64_ops = {
547 .probe = bzImage64_probe,
548 .load = bzImage64_load,
549 .cleanup = bzImage64_cleanup,
550#ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
551 .verify_sig = bzImage64_verify_sig,
552#endif
553};
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 679cef0791cd..8b04018e5d1f 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -6,6 +6,8 @@
6 * Version 2. See the file COPYING for more details. 6 * Version 2. See the file COPYING for more details.
7 */ 7 */
8 8
9#define pr_fmt(fmt) "kexec: " fmt
10
9#include <linux/mm.h> 11#include <linux/mm.h>
10#include <linux/kexec.h> 12#include <linux/kexec.h>
11#include <linux/string.h> 13#include <linux/string.h>
@@ -21,6 +23,11 @@
21#include <asm/tlbflush.h> 23#include <asm/tlbflush.h>
22#include <asm/mmu_context.h> 24#include <asm/mmu_context.h>
23#include <asm/debugreg.h> 25#include <asm/debugreg.h>
26#include <asm/kexec-bzimage64.h>
27
28static struct kexec_file_ops *kexec_file_loaders[] = {
29 &kexec_bzImage64_ops,
30};
24 31
25static void free_transition_pgtable(struct kimage *image) 32static void free_transition_pgtable(struct kimage *image)
26{ 33{
@@ -171,6 +178,38 @@ static void load_segments(void)
171 ); 178 );
172} 179}
173 180
181/* Update purgatory as needed after various image segments have been prepared */
182static int arch_update_purgatory(struct kimage *image)
183{
184 int ret = 0;
185
186 if (!image->file_mode)
187 return 0;
188
189 /* Setup copying of backup region */
190 if (image->type == KEXEC_TYPE_CRASH) {
191 ret = kexec_purgatory_get_set_symbol(image, "backup_dest",
192 &image->arch.backup_load_addr,
193 sizeof(image->arch.backup_load_addr), 0);
194 if (ret)
195 return ret;
196
197 ret = kexec_purgatory_get_set_symbol(image, "backup_src",
198 &image->arch.backup_src_start,
199 sizeof(image->arch.backup_src_start), 0);
200 if (ret)
201 return ret;
202
203 ret = kexec_purgatory_get_set_symbol(image, "backup_sz",
204 &image->arch.backup_src_sz,
205 sizeof(image->arch.backup_src_sz), 0);
206 if (ret)
207 return ret;
208 }
209
210 return ret;
211}
212
174int machine_kexec_prepare(struct kimage *image) 213int machine_kexec_prepare(struct kimage *image)
175{ 214{
176 unsigned long start_pgtable; 215 unsigned long start_pgtable;
@@ -184,6 +223,11 @@ int machine_kexec_prepare(struct kimage *image)
184 if (result) 223 if (result)
185 return result; 224 return result;
186 225
226 /* update purgatory as needed */
227 result = arch_update_purgatory(image);
228 if (result)
229 return result;
230
187 return 0; 231 return 0;
188} 232}
189 233
@@ -283,3 +327,198 @@ void arch_crash_save_vmcoreinfo(void)
283 (unsigned long)&_text - __START_KERNEL); 327 (unsigned long)&_text - __START_KERNEL);
284} 328}
285 329
330/* arch-dependent functionality related to kexec file-based syscall */
331
332int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
333 unsigned long buf_len)
334{
335 int i, ret = -ENOEXEC;
336 struct kexec_file_ops *fops;
337
338 for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
339 fops = kexec_file_loaders[i];
340 if (!fops || !fops->probe)
341 continue;
342
343 ret = fops->probe(buf, buf_len);
344 if (!ret) {
345 image->fops = fops;
346 return ret;
347 }
348 }
349
350 return ret;
351}
352
353void *arch_kexec_kernel_image_load(struct kimage *image)
354{
355 vfree(image->arch.elf_headers);
356 image->arch.elf_headers = NULL;
357
358 if (!image->fops || !image->fops->load)
359 return ERR_PTR(-ENOEXEC);
360
361 return image->fops->load(image, image->kernel_buf,
362 image->kernel_buf_len, image->initrd_buf,
363 image->initrd_buf_len, image->cmdline_buf,
364 image->cmdline_buf_len);
365}
366
367int arch_kimage_file_post_load_cleanup(struct kimage *image)
368{
369 if (!image->fops || !image->fops->cleanup)
370 return 0;
371
372 return image->fops->cleanup(image->image_loader_data);
373}
374
375int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel,
376 unsigned long kernel_len)
377{
378 if (!image->fops || !image->fops->verify_sig) {
379 pr_debug("kernel loader does not support signature verification.");
380 return -EKEYREJECTED;
381 }
382
383 return image->fops->verify_sig(kernel, kernel_len);
384}
385
386/*
387 * Apply purgatory relocations.
388 *
389 * ehdr: Pointer to elf headers
390 * sechdrs: Pointer to section headers.
391 * relsec: section index of SHT_RELA section.
392 *
393 * TODO: Some of the code belongs to generic code. Move that in kexec.c.
394 */
395int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
396 Elf64_Shdr *sechdrs, unsigned int relsec)
397{
398 unsigned int i;
399 Elf64_Rela *rel;
400 Elf64_Sym *sym;
401 void *location;
402 Elf64_Shdr *section, *symtabsec;
403 unsigned long address, sec_base, value;
404 const char *strtab, *name, *shstrtab;
405
406 /*
407 * ->sh_offset has been modified to keep the pointer to section
408 * contents in memory
409 */
410 rel = (void *)sechdrs[relsec].sh_offset;
411
412 /* Section to which relocations apply */
413 section = &sechdrs[sechdrs[relsec].sh_info];
414
415 pr_debug("Applying relocate section %u to %u\n", relsec,
416 sechdrs[relsec].sh_info);
417
418 /* Associated symbol table */
419 symtabsec = &sechdrs[sechdrs[relsec].sh_link];
420
421 /* String table */
422 if (symtabsec->sh_link >= ehdr->e_shnum) {
423 /* Invalid strtab section number */
424 pr_err("Invalid string table section index %d\n",
425 symtabsec->sh_link);
426 return -ENOEXEC;
427 }
428
429 strtab = (char *)sechdrs[symtabsec->sh_link].sh_offset;
430
431 /* section header string table */
432 shstrtab = (char *)sechdrs[ehdr->e_shstrndx].sh_offset;
433
434 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
435
436 /*
437 * rel[i].r_offset contains byte offset from beginning
438 * of section to the storage unit affected.
439 *
440 * This is location to update (->sh_offset). This is temporary
441 * buffer where section is currently loaded. This will finally
442 * be loaded to a different address later, pointed to by
443 * ->sh_addr. kexec takes care of moving it
444 * (kexec_load_segment()).
445 */
446 location = (void *)(section->sh_offset + rel[i].r_offset);
447
448 /* Final address of the location */
449 address = section->sh_addr + rel[i].r_offset;
450
451 /*
452 * rel[i].r_info contains information about symbol table index
453 * w.r.t which relocation must be made and type of relocation
454 * to apply. ELF64_R_SYM() and ELF64_R_TYPE() macros get
455 * these respectively.
456 */
457 sym = (Elf64_Sym *)symtabsec->sh_offset +
458 ELF64_R_SYM(rel[i].r_info);
459
460 if (sym->st_name)
461 name = strtab + sym->st_name;
462 else
463 name = shstrtab + sechdrs[sym->st_shndx].sh_name;
464
465 pr_debug("Symbol: %s info: %02x shndx: %02x value=%llx size: %llx\n",
466 name, sym->st_info, sym->st_shndx, sym->st_value,
467 sym->st_size);
468
469 if (sym->st_shndx == SHN_UNDEF) {
470 pr_err("Undefined symbol: %s\n", name);
471 return -ENOEXEC;
472 }
473
474 if (sym->st_shndx == SHN_COMMON) {
475 pr_err("symbol '%s' in common section\n", name);
476 return -ENOEXEC;
477 }
478
479 if (sym->st_shndx == SHN_ABS)
480 sec_base = 0;
481 else if (sym->st_shndx >= ehdr->e_shnum) {
482 pr_err("Invalid section %d for symbol %s\n",
483 sym->st_shndx, name);
484 return -ENOEXEC;
485 } else
486 sec_base = sechdrs[sym->st_shndx].sh_addr;
487
488 value = sym->st_value;
489 value += sec_base;
490 value += rel[i].r_addend;
491
492 switch (ELF64_R_TYPE(rel[i].r_info)) {
493 case R_X86_64_NONE:
494 break;
495 case R_X86_64_64:
496 *(u64 *)location = value;
497 break;
498 case R_X86_64_32:
499 *(u32 *)location = value;
500 if (value != *(u32 *)location)
501 goto overflow;
502 break;
503 case R_X86_64_32S:
504 *(s32 *)location = value;
505 if ((s64)value != *(s32 *)location)
506 goto overflow;
507 break;
508 case R_X86_64_PC32:
509 value -= (u64)address;
510 *(u32 *)location = value;
511 break;
512 default:
513 pr_err("Unknown rela relocation: %llu\n",
514 ELF64_R_TYPE(rel[i].r_info));
515 return -ENOEXEC;
516 }
517 }
518 return 0;
519
520overflow:
521 pr_err("Overflow in relocation type %d value 0x%lx\n",
522 (int)ELF64_R_TYPE(rel[i].r_info), value);
523 return -ENOEXEC;
524}
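The switch statement above implements the usual x86_64 RELA semantics: with S the symbol value, A the addend and P the final address of the patched location, R_X86_64_64 stores S+A, R_X86_64_32/32S store S+A with a zero-/sign-extension overflow check, and R_X86_64_PC32 stores S+A-P. A compact standalone sketch of just that arithmetic (the values are made up):

/* Standalone sketch of x86_64 RELA arithmetic as used above. */
#include <stdint.h>
#include <stdio.h>

/* Returns the 32-bit PC-relative value S + A - P used for R_X86_64_PC32. */
static uint32_t reloc_pc32(uint64_t S, int64_t A, uint64_t P)
{
	return (uint32_t)(S + A - P);
}

int main(void)
{
	uint64_t S = 0x100000;			/* hypothetical symbol address */
	int64_t  A = -4;			/* typical call/jmp addend     */
	uint64_t P = 0x100080;			/* hypothetical patch location */
	uint64_t abs64 = S + A;			/* R_X86_64_64                 */
	uint32_t abs32 = (uint32_t)abs64;	/* R_X86_64_32                 */

	if (abs64 != abs32)			/* zero-extension overflow check */
		printf("R_X86_64_32 would overflow\n");

	printf("R_X86_64_64   = %#llx\n", (unsigned long long)abs64);
	printf("R_X86_64_PC32 = %#x\n", reloc_pc32(S, A, P));
	return 0;
}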
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 1185fe7a7f47..9ade5cfb5a4c 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -273,7 +273,7 @@ static int mmu_audit_set(const char *val, const struct kernel_param *kp)
273 int ret; 273 int ret;
274 unsigned long enable; 274 unsigned long enable;
275 275
276 ret = strict_strtoul(val, 10, &enable); 276 ret = kstrtoul(val, 10, &enable);
277 if (ret < 0) 277 if (ret < 0)
278 return -EINVAL; 278 return -EINVAL;
279 279
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index ed161c6e278b..3968d67d366b 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1479,7 +1479,7 @@ static ssize_t ptc_proc_write(struct file *file, const char __user *user,
1479 return count; 1479 return count;
1480 } 1480 }
1481 1481
1482 if (strict_strtol(optstr, 10, &input_arg) < 0) { 1482 if (kstrtol(optstr, 10, &input_arg) < 0) {
1483 printk(KERN_DEBUG "%s is invalid\n", optstr); 1483 printk(KERN_DEBUG "%s is invalid\n", optstr);
1484 return -EINVAL; 1484 return -EINVAL;
1485 } 1485 }
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
new file mode 100644
index 000000000000..7fde9ee438a4
--- /dev/null
+++ b/arch/x86/purgatory/Makefile
@@ -0,0 +1,30 @@
1purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string.o
2
3targets += $(purgatory-y)
4PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
5
6LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
7targets += purgatory.ro
8
9# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
10# in turn leaves some undefined symbols like __fentry__ in purgatory, and it
11# is not clear how to relocate those. Like kexec-tools, use custom flags.
12
13KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
14
15$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
16 $(call if_changed,ld)
17
18targets += kexec-purgatory.c
19
20quiet_cmd_bin2c = BIN2C $@
21 cmd_bin2c = cat $(obj)/purgatory.ro | $(objtree)/scripts/basic/bin2c kexec_purgatory > $(obj)/kexec-purgatory.c
22
23$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
24 $(call if_changed,bin2c)
25
26
27# No loaders for 32bits yet.
28ifeq ($(CONFIG_X86_64),y)
29 obj-$(CONFIG_KEXEC) += kexec-purgatory.o
30endif
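The kexec-purgatory.c rule above embeds the linked purgatory.ro blob into the kernel as a C array via scripts/basic/bin2c. As a rough illustration of that step (this is not the actual bin2c source, just a minimal userspace converter of the same flavour):

/* Minimal bin2c-style converter: emits a C array for the bytes on stdin.
 * Illustrative only; the real tool lives in scripts/basic/bin2c.c. */
#include <stdio.h>

int main(int argc, char *argv[])
{
	const char *name = argc > 1 ? argv[1] : "blob";
	unsigned long total = 0;
	int ch;

	printf("const char %s[] =\n\t\"", name);
	while ((ch = getchar()) != EOF) {
		printf("\\x%02x", ch & 0xff);
		if (++total % 16 == 0)
			printf("\"\n\t\"");
	}
	printf("\";\n\nconst unsigned long %s_size = %lu;\n", name, total);
	return 0;
}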
diff --git a/arch/x86/purgatory/entry64.S b/arch/x86/purgatory/entry64.S
new file mode 100644
index 000000000000..d1a4291d3568
--- /dev/null
+++ b/arch/x86/purgatory/entry64.S
@@ -0,0 +1,101 @@
1/*
2 * Copyright (C) 2003,2004 Eric Biederman (ebiederm@xmission.com)
3 * Copyright (C) 2014 Red Hat Inc.
4
5 * Author(s): Vivek Goyal <vgoyal@redhat.com>
6 *
7 * This code has been taken from kexec-tools.
8 *
9 * This source code is licensed under the GNU General Public License,
10 * Version 2. See the file COPYING for more details.
11 */
12
13 .text
14 .balign 16
15 .code64
16 .globl entry64, entry64_regs
17
18
19entry64:
20 /* Setup a gdt that should be preserved */
21 lgdt gdt(%rip)
22
23 /* load the data segments */
24 movl $0x18, %eax /* data segment */
25 movl %eax, %ds
26 movl %eax, %es
27 movl %eax, %ss
28 movl %eax, %fs
29 movl %eax, %gs
30
31 /* Setup new stack */
32 leaq stack_init(%rip), %rsp
33 pushq $0x10 /* CS */
34 leaq new_cs_exit(%rip), %rax
35 pushq %rax
36 lretq
37new_cs_exit:
38
39 /* Load the registers */
40 movq rax(%rip), %rax
41 movq rbx(%rip), %rbx
42 movq rcx(%rip), %rcx
43 movq rdx(%rip), %rdx
44 movq rsi(%rip), %rsi
45 movq rdi(%rip), %rdi
46 movq rsp(%rip), %rsp
47 movq rbp(%rip), %rbp
48 movq r8(%rip), %r8
49 movq r9(%rip), %r9
50 movq r10(%rip), %r10
51 movq r11(%rip), %r11
52 movq r12(%rip), %r12
53 movq r13(%rip), %r13
54 movq r14(%rip), %r14
55 movq r15(%rip), %r15
56
57 /* Jump to the new code... */
58 jmpq *rip(%rip)
59
60 .section ".rodata"
61 .balign 4
62entry64_regs:
63rax: .quad 0x0
64rcx: .quad 0x0
65rdx: .quad 0x0
66rbx: .quad 0x0
67rsp: .quad 0x0
68rbp: .quad 0x0
69rsi: .quad 0x0
70rdi: .quad 0x0
71r8: .quad 0x0
72r9: .quad 0x0
73r10: .quad 0x0
74r11: .quad 0x0
75r12: .quad 0x0
76r13: .quad 0x0
77r14: .quad 0x0
78r15: .quad 0x0
79rip: .quad 0x0
80 .size entry64_regs, . - entry64_regs
81
82 /* GDT */
83 .section ".rodata"
84 .balign 16
85gdt:
86 /* 0x00 unusable segment
87 * 0x08 unused
88 * so use them as gdt ptr
89 */
90 .word gdt_end - gdt - 1
91 .quad gdt
92 .word 0, 0, 0
93
94 /* 0x10 4GB flat code segment */
95 .word 0xFFFF, 0x0000, 0x9A00, 0x00AF
96
97 /* 0x18 4GB flat data segment */
98 .word 0xFFFF, 0x0000, 0x9200, 0x00CF
99gdt_end:
100stack: .quad 0, 0
101stack_init:
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
new file mode 100644
index 000000000000..25e068ba3382
--- /dev/null
+++ b/arch/x86/purgatory/purgatory.c
@@ -0,0 +1,72 @@
1/*
2 * purgatory: Runs between two kernels
3 *
4 * Copyright (C) 2014 Red Hat Inc.
5 *
6 * Author:
7 * Vivek Goyal <vgoyal@redhat.com>
8 *
9 * This source code is licensed under the GNU General Public License,
10 * Version 2. See the file COPYING for more details.
11 */
12
13#include "sha256.h"
14#include "../boot/string.h"
15
16struct sha_region {
17 unsigned long start;
18 unsigned long len;
19};
20
21unsigned long backup_dest = 0;
22unsigned long backup_src = 0;
23unsigned long backup_sz = 0;
24
25u8 sha256_digest[SHA256_DIGEST_SIZE] = { 0 };
26
27struct sha_region sha_regions[16] = {};
28
29/*
30 * On x86, second kernel requires first 640K of memory to boot. Copy
31 * first 640K to a backup region in reserved memory range so that second
32 * kernel can use first 640K.
33 */
34static int copy_backup_region(void)
35{
36 if (backup_dest)
37 memcpy((void *)backup_dest, (void *)backup_src, backup_sz);
38
39 return 0;
40}
41
42int verify_sha256_digest(void)
43{
44 struct sha_region *ptr, *end;
45 u8 digest[SHA256_DIGEST_SIZE];
46 struct sha256_state sctx;
47
48 sha256_init(&sctx);
49 end = &sha_regions[sizeof(sha_regions)/sizeof(sha_regions[0])];
50 for (ptr = sha_regions; ptr < end; ptr++)
51 sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len);
52
53 sha256_final(&sctx, digest);
54
55 if (memcmp(digest, sha256_digest, sizeof(digest)))
56 return 1;
57
58 return 0;
59}
60
61void purgatory(void)
62{
63 int ret;
64
65 ret = verify_sha256_digest();
66 if (ret) {
67 /* loop forever */
68 for (;;)
69 ;
70 }
71 copy_backup_region();
72}
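verify_sha256_digest() recomputes a digest over every loaded segment and compares it against the digest the first kernel stored in sha256_digest, so purgatory refuses to jump into a corrupted crash kernel. A userspace sketch of the same idea, assuming OpenSSL's SHA256_* API is available (the region contents and expected digest here are made up):

/* Userspace sketch of multi-region digest verification (assumes OpenSSL). */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

struct sha_region { const unsigned char *start; size_t len; };

static int verify_regions(const struct sha_region *r, size_t n,
			  const unsigned char expected[SHA256_DIGEST_LENGTH])
{
	unsigned char digest[SHA256_DIGEST_LENGTH];
	SHA256_CTX ctx;
	size_t i;

	SHA256_Init(&ctx);
	for (i = 0; i < n; i++)
		SHA256_Update(&ctx, r[i].start, r[i].len);
	SHA256_Final(digest, &ctx);

	return memcmp(digest, expected, sizeof(digest)) ? 1 : 0;
}

int main(void)
{
	static const unsigned char seg[] = "hypothetical segment contents";
	struct sha_region regions[] = { { seg, sizeof(seg) } };
	unsigned char expected[SHA256_DIGEST_LENGTH] = { 0 };	/* wrong on purpose */

	printf("digest %s\n", verify_regions(regions, 1, expected) ?
	       "mismatch (would loop forever in purgatory)" : "ok");
	return 0;
}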
diff --git a/arch/x86/purgatory/setup-x86_64.S b/arch/x86/purgatory/setup-x86_64.S
new file mode 100644
index 000000000000..fe3c91ba1bd0
--- /dev/null
+++ b/arch/x86/purgatory/setup-x86_64.S
@@ -0,0 +1,58 @@
1/*
2 * purgatory: setup code
3 *
4 * Copyright (C) 2003,2004 Eric Biederman (ebiederm@xmission.com)
5 * Copyright (C) 2014 Red Hat Inc.
6 *
7 * This code has been taken from kexec-tools.
8 *
9 * This source code is licensed under the GNU General Public License,
10 * Version 2. See the file COPYING for more details.
11 */
12
13 .text
14 .globl purgatory_start
15 .balign 16
16purgatory_start:
17 .code64
18
19 /* Load a gdt so I know what the segment registers are */
20 lgdt gdt(%rip)
21
22 /* load the data segments */
23 movl $0x18, %eax /* data segment */
24 movl %eax, %ds
25 movl %eax, %es
26 movl %eax, %ss
27 movl %eax, %fs
28 movl %eax, %gs
29
30 /* Setup a stack */
31 leaq lstack_end(%rip), %rsp
32
33 /* Call the C code */
34 call purgatory
35 jmp entry64
36
37 .section ".rodata"
38 .balign 16
39gdt: /* 0x00 unusable segment
40 * 0x08 unused
41 * so use them as the gdt ptr
42 */
43 .word gdt_end - gdt - 1
44 .quad gdt
45 .word 0, 0, 0
46
47 /* 0x10 4GB flat code segment */
48 .word 0xFFFF, 0x0000, 0x9A00, 0x00AF
49
50 /* 0x18 4GB flat data segment */
51 .word 0xFFFF, 0x0000, 0x9200, 0x00CF
52gdt_end:
53
54 .bss
55 .balign 4096
56lstack:
57 .skip 4096
58lstack_end:
diff --git a/arch/x86/purgatory/sha256.c b/arch/x86/purgatory/sha256.c
new file mode 100644
index 000000000000..548ca675a14a
--- /dev/null
+++ b/arch/x86/purgatory/sha256.c
@@ -0,0 +1,283 @@
1/*
2 * SHA-256, as specified in
3 * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
4 *
5 * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
6 *
7 * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
8 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
9 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
10 * Copyright (c) 2014 Red Hat Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the Free
14 * Software Foundation; either version 2 of the License, or (at your option)
15 * any later version.
16 */
17
18#include <linux/bitops.h>
19#include <asm/byteorder.h>
20#include "sha256.h"
21#include "../boot/string.h"
22
23static inline u32 Ch(u32 x, u32 y, u32 z)
24{
25 return z ^ (x & (y ^ z));
26}
27
28static inline u32 Maj(u32 x, u32 y, u32 z)
29{
30 return (x & y) | (z & (x | y));
31}
32
33#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
34#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
35#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
36#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))
37
38static inline void LOAD_OP(int I, u32 *W, const u8 *input)
39{
40 W[I] = __be32_to_cpu(((__be32 *)(input))[I]);
41}
42
43static inline void BLEND_OP(int I, u32 *W)
44{
45 W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
46}
47
48static void sha256_transform(u32 *state, const u8 *input)
49{
50 u32 a, b, c, d, e, f, g, h, t1, t2;
51 u32 W[64];
52 int i;
53
54 /* load the input */
55 for (i = 0; i < 16; i++)
56 LOAD_OP(i, W, input);
57
58 /* now blend */
59 for (i = 16; i < 64; i++)
60 BLEND_OP(i, W);
61
62 /* load the state into our registers */
63 a = state[0]; b = state[1]; c = state[2]; d = state[3];
64 e = state[4]; f = state[5]; g = state[6]; h = state[7];
65
66 /* now iterate */
67 t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0];
68 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
69 t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1];
70 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
71 t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2];
72 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
73 t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3];
74 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
75 t1 = d + e1(a) + Ch(a, b, c) + 0x3956c25b + W[4];
76 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
77 t1 = c + e1(h) + Ch(h, a, b) + 0x59f111f1 + W[5];
78 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
79 t1 = b + e1(g) + Ch(g, h, a) + 0x923f82a4 + W[6];
80 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
81 t1 = a + e1(f) + Ch(f, g, h) + 0xab1c5ed5 + W[7];
82 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
83
84 t1 = h + e1(e) + Ch(e, f, g) + 0xd807aa98 + W[8];
85 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
86 t1 = g + e1(d) + Ch(d, e, f) + 0x12835b01 + W[9];
87 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
88 t1 = f + e1(c) + Ch(c, d, e) + 0x243185be + W[10];
89 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
90 t1 = e + e1(b) + Ch(b, c, d) + 0x550c7dc3 + W[11];
91 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
92 t1 = d + e1(a) + Ch(a, b, c) + 0x72be5d74 + W[12];
93 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
94 t1 = c + e1(h) + Ch(h, a, b) + 0x80deb1fe + W[13];
95 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
96 t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14];
97 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
98 t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15];
99 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
100
101 t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16];
102 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
103 t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17];
104 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
105 t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18];
106 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
107 t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19];
108 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
109 t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20];
110 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
111 t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21];
112 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
113 t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22];
114 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
115 t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23];
116 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
117
118 t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24];
119 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
120 t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25];
121 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
122 t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26];
123 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
124 t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27];
125 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
126 t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28];
127 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
128 t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29];
129 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
130 t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30];
131 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
132 t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31];
133 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
134
135 t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32];
136 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
137 t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33];
138 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
139 t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34];
140 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
141 t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35];
142 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
143 t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36];
144 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
145 t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37];
146 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
147 t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38];
148 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
149 t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39];
150 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
151
152 t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40];
153 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
154 t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41];
155 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
156 t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42];
157 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
158 t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43];
159 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
160 t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44];
161 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
162 t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45];
163 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
164 t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46];
165 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
166 t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47];
167 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
168
169 t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48];
170 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
171 t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49];
172 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
173 t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50];
174 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
175 t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51];
176 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
177 t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52];
178 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
179 t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53];
180 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
181 t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54];
182 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
183 t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55];
184 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
185
186 t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56];
187 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
188 t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57];
189 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
190 t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58];
191 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
192 t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59];
193 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
194 t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60];
195 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
196 t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61];
197 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
198 t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62];
199 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
200 t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63];
201 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
202
203 state[0] += a; state[1] += b; state[2] += c; state[3] += d;
204 state[4] += e; state[5] += f; state[6] += g; state[7] += h;
205
206 /* clear any sensitive info... */
207 a = b = c = d = e = f = g = h = t1 = t2 = 0;
208 memset(W, 0, 64 * sizeof(u32));
209}
210
211int sha256_init(struct sha256_state *sctx)
212{
213 sctx->state[0] = SHA256_H0;
214 sctx->state[1] = SHA256_H1;
215 sctx->state[2] = SHA256_H2;
216 sctx->state[3] = SHA256_H3;
217 sctx->state[4] = SHA256_H4;
218 sctx->state[5] = SHA256_H5;
219 sctx->state[6] = SHA256_H6;
220 sctx->state[7] = SHA256_H7;
221 sctx->count = 0;
222
223 return 0;
224}
225
226int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
227{
228 unsigned int partial, done;
229 const u8 *src;
230
231 partial = sctx->count & 0x3f;
232 sctx->count += len;
233 done = 0;
234 src = data;
235
236 if ((partial + len) > 63) {
237 if (partial) {
238 done = -partial;
239 memcpy(sctx->buf + partial, data, done + 64);
240 src = sctx->buf;
241 }
242
243 do {
244 sha256_transform(sctx->state, src);
245 done += 64;
246 src = data + done;
247 } while (done + 63 < len);
248
249 partial = 0;
250 }
251 memcpy(sctx->buf + partial, src, len - done);
252
253 return 0;
254}
255
256int sha256_final(struct sha256_state *sctx, u8 *out)
257{
258 __be32 *dst = (__be32 *)out;
259 __be64 bits;
260 unsigned int index, pad_len;
261 int i;
262 static const u8 padding[64] = { 0x80, };
263
264 /* Save number of bits */
265 bits = cpu_to_be64(sctx->count << 3);
266
267 /* Pad out to 56 mod 64. */
268 index = sctx->count & 0x3f;
269 pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
270 sha256_update(sctx, padding, pad_len);
271
272 /* Append length (before padding) */
273 sha256_update(sctx, (const u8 *)&bits, sizeof(bits));
274
275 /* Store state in digest */
276 for (i = 0; i < 8; i++)
277 dst[i] = cpu_to_be32(sctx->state[i]);
278
279 /* Zeroize sensitive information. */
280 memset(sctx, 0, sizeof(*sctx));
281
282 return 0;
283}
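
For reference, the padding in sha256_final() above is the standard SHA-256 scheme: a single 0x80 byte, zero bytes up to 56 mod 64, then the 64-bit big-endian bit count. A quick worked example with a hypothetical 3-byte message: index = 3, so pad_len = 56 - 3 = 53, and 3 + 53 + 8 = 64, exactly one final block; with a 60-byte message, index = 60, so pad_len = (64 + 56) - 60 = 60, and 60 + 60 + 8 = 128, i.e. the data spills into a second block.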
diff --git a/arch/x86/purgatory/sha256.h b/arch/x86/purgatory/sha256.h
new file mode 100644
index 000000000000..bd15a4127735
--- /dev/null
+++ b/arch/x86/purgatory/sha256.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2014 Red Hat Inc.
3 *
4 * Author: Vivek Goyal <vgoyal@redhat.com>
5 *
6 * This source code is licensed under the GNU General Public License,
7 * Version 2. See the file COPYING for more details.
8 */
9
10#ifndef SHA256_H
11#define SHA256_H
12
13
14#include <linux/types.h>
15#include <crypto/sha.h>
16
17extern int sha256_init(struct sha256_state *sctx);
18extern int sha256_update(struct sha256_state *sctx, const u8 *input,
19 unsigned int length);
20extern int sha256_final(struct sha256_state *sctx, u8 *hash);
21
22#endif /* SHA256_H */
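
The header above is the whole interface purgatory code sees. Below is a minimal usage sketch, assuming struct sha256_state and SHA256_DIGEST_SIZE come from <crypto/sha.h> as included above; the function and parameter names are hypothetical, not part of this patch:

#include "sha256.h"

/* Hypothetical helper: digest two memory regions into one SHA-256 hash. */
static int example_digest_regions(const u8 *seg1, unsigned int len1,
                                  const u8 *seg2, unsigned int len2,
                                  u8 digest[SHA256_DIGEST_SIZE])
{
        struct sha256_state sctx;

        sha256_init(&sctx);                     /* load the SHA-256 IV */
        sha256_update(&sctx, seg1, len1);       /* data may arrive piecewise */
        sha256_update(&sctx, seg2, len2);
        return sha256_final(&sctx, digest);     /* pad, append length, write out */
}

This presumably mirrors how purgatory accumulates the loaded segments before comparing against the digest computed at kernel-load time.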
diff --git a/arch/x86/purgatory/stack.S b/arch/x86/purgatory/stack.S
new file mode 100644
index 000000000000..3cefba1fefc8
--- /dev/null
+++ b/arch/x86/purgatory/stack.S
@@ -0,0 +1,19 @@
1/*
2 * purgatory: stack
3 *
4 * Copyright (C) 2014 Red Hat Inc.
5 *
6 * This source code is licensed under the GNU General Public License,
7 * Version 2. See the file COPYING for more details.
8 */
9
10 /* A stack for the loaded kernel.
11 * Separate and in the data section so it can be prepopulated.

12 */
13 .data
14 .balign 4096
15 .globl stack, stack_end
16
17stack:
18 .skip 4096
19stack_end:
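
The directives above reserve a single page-aligned 4 KiB region (.balign 4096 followed by .skip 4096) with the stack and stack_end symbols made global; since x86 stacks grow downward, the purgatory entry code can presumably point its stack pointer at stack_end, and placing the region in .data rather than .bss lets the kexec loader prepopulate it when the purgatory blob is laid out.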
diff --git a/arch/x86/purgatory/string.c b/arch/x86/purgatory/string.c
new file mode 100644
index 000000000000..d886b1fa36f0
--- /dev/null
+++ b/arch/x86/purgatory/string.c
@@ -0,0 +1,13 @@
1/*
2 * Simple string functions.
3 *
4 * Copyright (C) 2014 Red Hat Inc.
5 *
6 * Author:
7 * Vivek Goyal <vgoyal@redhat.com>
8 *
9 * This source code is licensed under the GNU General Public License,
10 * Version 2. See the file COPYING for more details.
11 */
12
13#include "../boot/string.c"
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index d1b4a119d4a5..028b78168d85 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -362,3 +362,4 @@
 353	i386	renameat2		sys_renameat2
 354	i386	seccomp			sys_seccomp
 355	i386	getrandom		sys_getrandom
+356	i386	memfd_create		sys_memfd_create
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 252c804bb1aa..35dd922727b9 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -325,6 +325,8 @@
 316	common	renameat2		sys_renameat2
 317	common	seccomp			sys_seccomp
 318	common	getrandom		sys_getrandom
+319	common	memfd_create		sys_memfd_create
+320	common	kexec_file_load		sys_kexec_file_load
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
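
Both new entries can be exercised from userspace before libc grows wrappers by calling syscall(2) with the numbers added above (319 for memfd_create and 320 for kexec_file_load on x86_64, 356 for memfd_create on i386). A minimal sketch for memfd_create, assuming an x86_64 build; the macro name is made up here for illustration:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define NR_memfd_create_x86_64 319      /* number added in the table above */

int main(void)
{
        /* name is only a debugging label; flags = 0 means no CLOEXEC, no sealing */
        long fd = syscall(NR_memfd_create_x86_64, "demo", 0UL);

        if (fd < 0) {
                perror("memfd_create");
                return 1;
        }
        printf("memfd fd = %ld\n", fd);
        return 0;
}

kexec_file_load (320) is not demonstrated here since it needs a kernel image file descriptor (and usually an initramfs) plus CAP_SYS_BOOT.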
diff --git a/arch/x86/um/asm/elf.h b/arch/x86/um/asm/elf.h
index 0feee2fd5077..25a1022dd793 100644
--- a/arch/x86/um/asm/elf.h
+++ b/arch/x86/um/asm/elf.h
@@ -216,6 +216,5 @@ extern long elf_aux_hwcap;
 #define ELF_HWCAP		(elf_aux_hwcap)
 
 #define SET_PERSONALITY(ex) do ; while(0)
-#define __HAVE_ARCH_GATE_AREA 1
 
 #endif
diff --git a/arch/x86/um/mem_64.c b/arch/x86/um/mem_64.c
index c6492e75797b..f8fecaddcc0d 100644
--- a/arch/x86/um/mem_64.c
+++ b/arch/x86/um/mem_64.c
@@ -9,18 +9,3 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 
 	return NULL;
 }
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-	return NULL;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-	return 0;
-}
-
-int in_gate_area_no_mm(unsigned long addr)
-{
-	return 0;
-}
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index e4f7781ee162..e904c270573b 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -115,23 +115,6 @@ static __init int ia32_binfmt_init(void)
 	return 0;
 }
 __initcall(ia32_binfmt_init);
-#endif
-
-#else /* CONFIG_X86_32 */
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-	return NULL;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-	return 0;
-}
-
-int in_gate_area_no_mm(unsigned long addr)
-{
-	return 0;
-}
+#endif /* CONFIG_SYSCTL */
 
 #endif /* CONFIG_X86_64 */