 arch/x86/.gitignore                       |  1
 arch/x86/Kconfig                          | 75
 arch/x86/include/asm/cpufeature.h         | 79
 arch/x86/include/asm/page_64.h            |  4
 arch/x86/kernel/apic/x2apic_uv_x.c        | 15
 arch/x86/kernel/cpu/mcheck/mce-internal.h | 15
 arch/x86/kernel/cpu/mcheck/mce.c          | 19
 arch/x86/kernel/smpboot.c                 |  1
 arch/x86/lib/error-inject.c               |  1
 arch/x86/mm/init_64.c                     |  4
 fs/proc/kcore.c                           |  4
 include/linux/kcore.h                     |  1
 include/linux/mm_inline.h                 |  6
 mm/memory-failure.c                       |  2
 14 files changed, 146 insertions(+), 81 deletions(-)
diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore
index aff152c87cf4..5a82bac5e0bc 100644
--- a/arch/x86/.gitignore
+++ b/arch/x86/.gitignore
@@ -1,6 +1,7 @@
 boot/compressed/vmlinux
 tools/test_get_len
 tools/insn_sanity
+tools/insn_decoder_test
 purgatory/kexec-purgatory.c
 purgatory/purgatory.ro
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 63bf349b2b24..a528c14d45a5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -423,12 +423,6 @@ config X86_MPPARSE
 	  For old smp systems that do not have proper acpi support. Newer systems
 	  (esp with 64bit cpus) with acpi support, MADT and DSDT will override it
 
-config X86_BIGSMP
-	bool "Support for big SMP systems with more than 8 CPUs"
-	depends on X86_32 && SMP
-	---help---
-	  This option is needed for the systems that have more than 8 CPUs
-
 config GOLDFISH
 	def_bool y
 	depends on X86_GOLDFISH
@@ -460,6 +454,12 @@ config INTEL_RDT
 	  Say N if unsure.
 
 if X86_32
+config X86_BIGSMP
+	bool "Support for big SMP systems with more than 8 CPUs"
+	depends on SMP
+	---help---
+	  This option is needed for the systems that have more than 8 CPUs
+
 config X86_EXTENDED_PLATFORM
 	bool "Support for extended (non-PC) x86 platforms"
 	default y
@@ -949,25 +949,66 @@ config MAXSMP
 	  Enable maximum number of CPUS and NUMA Nodes for this architecture.
 	  If unsure, say N.
 
+#
+# The maximum number of CPUs supported:
+#
+# The main config value is NR_CPUS, which defaults to NR_CPUS_DEFAULT,
+# and which can be configured interactively in the
+# [NR_CPUS_RANGE_BEGIN ... NR_CPUS_RANGE_END] range.
+#
+# The ranges are different on 32-bit and 64-bit kernels, depending on
+# hardware capabilities and scalability features of the kernel.
+#
+# ( If MAXSMP is enabled we just use the highest possible value and disable
+#   interactive configuration. )
+#
+
+config NR_CPUS_RANGE_BEGIN
+	int
+	default NR_CPUS_RANGE_END if MAXSMP
+	default 1 if !SMP
+	default 2
+
+config NR_CPUS_RANGE_END
+	int
+	depends on X86_32
+	default 64 if SMP && X86_BIGSMP
+	default 8 if SMP && !X86_BIGSMP
+	default 1 if !SMP
+
+config NR_CPUS_RANGE_END
+	int
+	depends on X86_64
+	default 8192 if SMP && ( MAXSMP || CPUMASK_OFFSTACK)
+	default 512 if SMP && (!MAXSMP && !CPUMASK_OFFSTACK)
+	default 1 if !SMP
+
+config NR_CPUS_DEFAULT
+	int
+	depends on X86_32
+	default 32 if X86_BIGSMP
+	default 8 if SMP
+	default 1 if !SMP
+
+config NR_CPUS_DEFAULT
+	int
+	depends on X86_64
+	default 8192 if MAXSMP
+	default 64 if SMP
+	default 1 if !SMP
+
 config NR_CPUS
 	int "Maximum number of CPUs" if SMP && !MAXSMP
-	range 2 8 if SMP && X86_32 && !X86_BIGSMP
-	range 2 64 if SMP && X86_32 && X86_BIGSMP
-	range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64
-	range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64
-	default "1" if !SMP
-	default "8192" if MAXSMP
-	default "32" if SMP && X86_BIGSMP
-	default "8" if SMP && X86_32
-	default "64" if SMP
+	range NR_CPUS_RANGE_BEGIN NR_CPUS_RANGE_END
+	default NR_CPUS_DEFAULT
 	---help---
 	  This allows you to specify the maximum number of CPUs which this
 	  kernel will support.  If CPUMASK_OFFSTACK is enabled, the maximum
 	  supported value is 8192, otherwise the maximum value is 512.  The
 	  minimum value which makes sense is 2.
 
-	  This is purely to save memory - each supported CPU adds
-	  approximately eight kilobytes to the kernel image.
+	  This is purely to save memory: each supported CPU adds about 8KB
+	  to the kernel image.
 
 config SCHED_SMT
 	bool "SMT (Hyperthreading) scheduler support"
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 70eddb3922ff..736771c9822e 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -148,45 +148,46 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
  */
 static __always_inline __pure bool _static_cpu_has(u16 bit)
 {
 	asm_volatile_goto("1: jmp 6f\n"
		 "2:\n"
		 ".skip -(((5f-4f) - (2b-1b)) > 0) * "
			 "((5f-4f) - (2b-1b)),0x90\n"
		 "3:\n"
		 ".section .altinstructions,\"a\"\n"
		 " .long 1b - .\n"		/* src offset */
		 " .long 4f - .\n"		/* repl offset */
-		 " .word %P1\n"			/* always replace */
+		 " .word %P[always]\n"		/* always replace */
		 " .byte 3b - 1b\n"		/* src len */
		 " .byte 5f - 4f\n"		/* repl len */
		 " .byte 3b - 2b\n"		/* pad len */
		 ".previous\n"
		 ".section .altinstr_replacement,\"ax\"\n"
		 "4: jmp %l[t_no]\n"
		 "5:\n"
		 ".previous\n"
		 ".section .altinstructions,\"a\"\n"
		 " .long 1b - .\n"		/* src offset */
		 " .long 0\n"			/* no replacement */
-		 " .word %P0\n"			/* feature bit */
+		 " .word %P[feature]\n"		/* feature bit */
		 " .byte 3b - 1b\n"		/* src len */
		 " .byte 0\n"			/* repl len */
		 " .byte 0\n"			/* pad len */
		 ".previous\n"
		 ".section .altinstr_aux,\"ax\"\n"
		 "6:\n"
		 " testb %[bitnum],%[cap_byte]\n"
		 " jnz %l[t_yes]\n"
		 " jmp %l[t_no]\n"
		 ".previous\n"
-		 : : "i" (bit), "i" (X86_FEATURE_ALWAYS),
-		     [bitnum] "i" (1 << (bit & 7)),
-		     [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
-		 : : t_yes, t_no);
-t_yes:
-	return true;
-t_no:
-	return false;
+		 : : [feature]  "i" (bit),
+		     [always]   "i" (X86_FEATURE_ALWAYS),
+		     [bitnum]   "i" (1 << (bit & 7)),
+		     [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
+		 : : t_yes, t_no);
+t_yes:
+	return true;
+t_no:
+	return false;
 }
 
 #define static_cpu_has(bit)					\
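The cpufeature.h hunk above replaces positional asm operands (%P0, %P1) with named ones ([feature], [always]). For reference, here is a minimal user-space sketch of the same GCC mechanism, named input operands combined with asm goto labels; the bt-based helper and its names are invented for illustration and are not part of the patch.

#include <stdbool.h>
#include <stdio.h>

/* Named operands ([val], [bitnum]) plus goto labels, as in the patched code. */
static bool bit_is_set(unsigned long word, unsigned long bit)
{
	asm goto("bt %[bitnum], %[val]\n\t"
		 "jc %l[yes]"
		 : /* asm goto allows no outputs */
		 : [val] "r" (word), [bitnum] "r" (bit)
		 : "cc"
		 : yes);
	return false;
yes:
	return true;
}

int main(void)
{
	printf("%d %d\n", bit_is_set(0x5, 0), bit_is_set(0x5, 1));	/* prints: 1 0 */
	return 0;
}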
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 4baa6bceb232..d652a3808065 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -52,10 +52,6 @@ static inline void clear_page(void *page)
 
 void copy_page(void *to, void *from);
 
-#ifdef CONFIG_X86_MCE
-#define arch_unmap_kpfn arch_unmap_kpfn
-#endif
-
 #endif	/* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_VSYSCALL_EMULATION
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 46b675aaf20b..f11910b44638 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -1176,16 +1176,25 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
 
 	uv_gre_table = gre;
 	for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
+		unsigned long size = ((unsigned long)(gre->limit - lgre)
+					<< UV_GAM_RANGE_SHFT);
+		int order = 0;
+		char suffix[] = " KMGTPE";
+
+		while (size > 9999 && order < sizeof(suffix)) {
+			size /= 1024;
+			order++;
+		}
+
 		if (!index) {
 			pr_info("UV: GAM Range Table...\n");
 			pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
 		}
-		pr_info("UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n",
+		pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n",
 			index++,
 			(unsigned long)lgre << UV_GAM_RANGE_SHFT,
 			(unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
-			((unsigned long)(gre->limit - lgre)) >>
-			(30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */
+			size, suffix[order],
 			gre->type, gre->nasid, gre->sockid, gre->pnode);
 
 		lgre = gre->limit;
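The x2apic_uv_x.c hunk above replaces the hard-coded gigabyte size column with a scale-and-suffix loop. A standalone approximation of that loop follows; the helper name and sample values are made up for this sketch, and the bounds check is slightly tightened compared with the patch.

#include <stdio.h>

/* Divide by 1024 until the value fits in four digits, then print it
 * with the matching " KMGTPE" suffix. */
static void print_scaled(unsigned long long size)
{
	const char suffix[] = " KMGTPE";
	int order = 0;

	while (size > 9999 && order < (int)sizeof(suffix) - 1) {
		size /= 1024;
		order++;
	}
	printf("%5llu%c\n", size, suffix[order]);
}

int main(void)
{
	print_scaled(4096ULL);		/* " 4096 " - below the threshold, no suffix */
	print_scaled(64ULL << 20);	/* "   64M" */
	print_scaled(32ULL << 30);	/* "   32G" */
	return 0;
}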
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index aa0d5df9dc60..e956eb267061 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
 
 extern struct mca_config mca_cfg;
 
+#ifndef CONFIG_X86_64
+/*
+ * On 32-bit systems it would be difficult to safely unmap a poison page
+ * from the kernel 1:1 map because there are no non-canonical addresses that
+ * we can use to refer to the address without risking a speculative access.
+ * However, this isn't much of an issue because:
+ * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which
+ *    are only mapped into the kernel as needed
+ * 2) Few people would run a 32-bit kernel on a machine that supports
+ *    recoverable errors because they have too much memory to boot 32-bit.
+ */
+static inline void mce_unmap_kpfn(unsigned long pfn) {}
+#define mce_unmap_kpfn mce_unmap_kpfn
+#endif
+
 #endif /* __X86_MCE_INTERNAL_H__ */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 3a8e88a611eb..8ff94d1e2dce 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -105,6 +105,10 @@ static struct irq_work mce_irq_work;
 
 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
 
+#ifndef mce_unmap_kpfn
+static void mce_unmap_kpfn(unsigned long pfn);
+#endif
+
 /*
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
@@ -234,7 +238,7 @@ static void __print_mce(struct mce *m)
 			m->cs, m->ip);
 
 		if (m->cs == __KERNEL_CS)
-			pr_cont("{%pS}", (void *)m->ip);
+			pr_cont("{%pS}", (void *)(unsigned long)m->ip);
 		pr_cont("\n");
 	}
 
@@ -590,7 +594,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
 
 	if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
 		pfn = mce->addr >> PAGE_SHIFT;
-		memory_failure(pfn, 0);
+		if (!memory_failure(pfn, 0))
+			mce_unmap_kpfn(pfn);
 	}
 
 	return NOTIFY_OK;
@@ -1057,12 +1062,13 @@ static int do_memory_failure(struct mce *m)
 	ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
 	if (ret)
 		pr_err("Memory error not recovered");
+	else
+		mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
 	return ret;
 }
 
-#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE)
-
-void arch_unmap_kpfn(unsigned long pfn)
+#ifndef mce_unmap_kpfn
+static void mce_unmap_kpfn(unsigned long pfn)
 {
 	unsigned long decoy_addr;
 
@@ -1073,7 +1079,7 @@ void arch_unmap_kpfn(unsigned long pfn)
 	 * We would like to just call:
 	 *	set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
 	 * but doing that would radically increase the odds of a
-	 * speculative access to the posion page because we'd have
+	 * speculative access to the poison page because we'd have
 	 * the virtual address of the kernel 1:1 mapping sitting
 	 * around in registers.
 	 * Instead we get tricky. We create a non-canonical address
@@ -1098,7 +1104,6 @@ void arch_unmap_kpfn(unsigned long pfn)
 
 	if (set_memory_np(decoy_addr, 1))
 		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
-
 }
 #endif
 
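The mce_unmap_kpfn() comment above explains that the poisoned page is unmapped through a non-canonical "decoy" alias (bit 63 flipped) so its real 1:1-map virtual address never lingers in a register where it could be touched speculatively. The sketch below only illustrates how such an alias can be formed; PAGE_OFFSET_SKETCH, PAGE_SHIFT_SKETCH and the helper name are assumptions for this sketch, not values taken from the patch.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_SKETCH	12
#define PAGE_OFFSET_SKETCH	0xffff888000000000ULL	/* assumed direct-map base */

/* Form an alias of the direct-map address for a pfn with bit 63 cleared;
 * on x86-64 the result is non-canonical, so it does not translate to the
 * poisoned physical page. */
static uint64_t decoy_of_pfn(uint64_t pfn)
{
	uint64_t vaddr = PAGE_OFFSET_SKETCH + (pfn << PAGE_SHIFT_SKETCH);

	return vaddr & ~(1ULL << 63);
}

int main(void)
{
	printf("decoy for pfn 0x1234: 0x%016llx\n",
	       (unsigned long long)decoy_of_pfn(0x1234));
	return 0;
}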
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6f27facbaa9b..cfc61e1d45e2 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1430,7 +1430,6 @@ static void remove_siblinginfo(int cpu)
 	cpumask_clear(cpu_llc_shared_mask(cpu));
 	cpumask_clear(topology_sibling_cpumask(cpu));
 	cpumask_clear(topology_core_cpumask(cpu));
-	c->phys_proc_id = 0;
 	c->cpu_core_id = 0;
 	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
 	recompute_smt_state();
diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c
index 7b881d03d0dd..3cdf06128d13 100644
--- a/arch/x86/lib/error-inject.c
+++ b/arch/x86/lib/error-inject.c
@@ -7,6 +7,7 @@ asmlinkage void just_return_func(void);
 
 asm(
 	".type just_return_func, @function\n"
+	".globl just_return_func\n"
 	"just_return_func:\n"
 	"	ret\n"
 	".size just_return_func, .-just_return_func\n"
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index fecb0c0a6077..8b72923f1d35 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1193,8 +1193,8 @@ void __init mem_init(void)
 	register_page_bootmem_info();
 
 	/* Register memory areas for /proc/kcore */
-	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
-			PAGE_SIZE, KCORE_OTHER);
+	if (get_gate_vma(&init_mm))
+		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
 
 	mem_init_print_info(NULL);
 }
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e8a93bc8285d..d1e82761de81 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -510,6 +510,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 			/* we have to zero-fill user buffer even if no read */
 			if (copy_to_user(buffer, buf, tsz))
 				return -EFAULT;
+		} else if (m->type == KCORE_USER) {
+			/* User page is handled prior to normal kernel page: */
+			if (copy_to_user(buffer, (char *)start, tsz))
+				return -EFAULT;
 		} else {
 			if (kern_addr_valid(start)) {
 				/*
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index 7ff25a808fef..80db19d3a505 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -10,6 +10,7 @@ enum kcore_type {
 	KCORE_VMALLOC,
 	KCORE_RAM,
 	KCORE_VMEMMAP,
+	KCORE_USER,
 	KCORE_OTHER,
 };
 
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index c30b32e3c862..10191c28fc04 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -127,10 +127,4 @@ static __always_inline enum lru_list page_lru(struct page *page)
 
 #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
 
-#ifdef arch_unmap_kpfn
-extern void arch_unmap_kpfn(unsigned long pfn);
-#else
-static __always_inline void arch_unmap_kpfn(unsigned long pfn) { }
-#endif
-
 #endif
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4b80ccee4535..8291b75f42c8 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1139,8 +1139,6 @@ int memory_failure(unsigned long pfn, int flags)
 		return 0;
 	}
 
-	arch_unmap_kpfn(pfn);
-
 	orig_head = hpage = compound_head(p);
 	num_poisoned_pages_inc();
 
