-rw-r--r--	arch/x86/Makefile                       |  5
-rw-r--r--	arch/x86/include/asm/alternative-asm.h  |  4
-rw-r--r--	arch/x86/include/asm/alternative.h      |  4
-rw-r--r--	arch/x86/include/asm/dwarf2.h           | 12
-rw-r--r--	arch/x86/include/asm/hardirq.h          |  2
-rw-r--r--	arch/x86/include/asm/percpu.h           | 24
-rw-r--r--	arch/x86/kernel/alternative.c           | 45
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce.c        |  4
8 files changed, 71 insertions, 29 deletions
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 0a43dc515e4c..8aa1b59b9074 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -95,8 +95,9 @@ sp-$(CONFIG_X86_64) := rsp
 cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
 # is .cfi_signal_frame supported too?
 cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe)
+cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections)
+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections)
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
 
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index b97f786a48d5..a63a68be1cce 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -6,8 +6,8 @@
 .macro LOCK_PREFIX
 1:	lock
 	.section .smp_locks,"a"
-	_ASM_ALIGN
-	_ASM_PTR 1b
+	.balign 4
+	.long 1b - .
 	.previous
 .endm
 #else
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index b09ec55650b3..714bf2417284 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -30,8 +30,8 @@
 #ifdef CONFIG_SMP
 #define LOCK_PREFIX \
 		".section .smp_locks,\"a\"\n"	\
-		_ASM_ALIGN "\n"			\
-		_ASM_PTR "661f\n" /* address */	\
+		".balign 4\n"			\
+		".long 661f - .\n" /* offset */	\
 		".previous\n"			\
 		"661:\n\tlock; "
 
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index ae6253ab9029..733f7e91e7a9 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -34,6 +34,18 @@
 #define CFI_SIGNAL_FRAME
 #endif
 
+#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
+	/*
+	 * Emit CFI data in .debug_frame sections, not .eh_frame sections.
+	 * The latter we currently just discard since we don't do DWARF
+	 * unwinding at runtime.  So only the offline DWARF information is
+	 * useful to anyone.  Note we should not use this directive if this
+	 * file is used in the vDSO assembly, or if vmlinux.lds.S gets
+	 * changed so it doesn't discard .eh_frame.
+	 */
+	.cfi_sections .debug_frame
+#endif
+
 #else
 
 /*
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 0f8576427cfe..aeab29aee617 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -35,7 +35,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 #define __ARCH_IRQ_STAT
 
-#define inc_irq_stat(member)	percpu_add(irq_stat.member, 1)
+#define inc_irq_stat(member)	percpu_inc(irq_stat.member)
 
 #define local_softirq_pending()	percpu_read(irq_stat.__softirq_pending)
 
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 66a272dfd8b8..0ec6d12d84e6 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -190,6 +190,29 @@ do { \
 	pfo_ret__;				\
 })
 
+#define percpu_unary_op(op, var)		\
+({						\
+	switch (sizeof(var)) {			\
+	case 1:					\
+		asm(op "b "__percpu_arg(0)	\
+		    : "+m" (var));		\
+		break;				\
+	case 2:					\
+		asm(op "w "__percpu_arg(0)	\
+		    : "+m" (var));		\
+		break;				\
+	case 4:					\
+		asm(op "l "__percpu_arg(0)	\
+		    : "+m" (var));		\
+		break;				\
+	case 8:					\
+		asm(op "q "__percpu_arg(0)	\
+		    : "+m" (var));		\
+		break;				\
+	default: __bad_percpu_size();		\
+	}					\
+})
+
 /*
  * percpu_read() makes gcc load the percpu variable every time it is
  * accessed while percpu_read_stable() allows the value to be cached.
@@ -207,6 +230,7 @@ do { \
 #define percpu_and(var, val)		percpu_to_op("and", var, val)
 #define percpu_or(var, val)		percpu_to_op("or", var, val)
 #define percpu_xor(var, val)		percpu_to_op("xor", var, val)
+#define percpu_inc(var)			percpu_unary_op("inc", var)
 
 #define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
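Note: the new percpu_unary_op() picks the instruction-size suffix (b/w/l/q) by switching on sizeof(var), and percpu_inc() is a thin wrapper around it. The snippet below is a minimal user-space sketch of that sizeof() dispatch, assuming GCC or Clang on x86; it drops the %gs-relative __percpu_arg() addressing in favor of a plain "+m" operand, and unary_op() plus the variable names are invented for illustration, not kernel API.

/*
 * Sketch of percpu_unary_op()-style size dispatch, without per-CPU
 * (%gs-relative) addressing.  Only the executed case matters at run
 * time; the other cases are dead branches, as in the kernel macro.
 */
#include <stdio.h>
#include <stdint.h>

#define unary_op(op, var)					\
({								\
	switch (sizeof(var)) {					\
	case 1: asm(op "b %0" : "+m" (var)); break;		\
	case 2: asm(op "w %0" : "+m" (var)); break;		\
	case 4: asm(op "l %0" : "+m" (var)); break;		\
	case 8: asm(op "q %0" : "+m" (var)); break;		\
	}							\
})

int main(void)
{
	uint8_t  c8  = 0;
	uint64_t c64 = 0;

	unary_op("inc", c8);	/* emits incb on the 1-byte counter */
	unary_op("inc", c64);	/* emits incq on the 8-byte counter */

	printf("%u %llu\n", (unsigned)c8, (unsigned long long)c64);
	return 0;
}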
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 1a160d5d44d0..936738427223 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -194,7 +194,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
 }
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-extern u8 *__smp_locks[], *__smp_locks_end[];
+extern s32 __smp_locks[], __smp_locks_end[];
 static void *text_poke_early(void *addr, const void *opcode, size_t len);
 
 /* Replace instructions with better alternatives for this CPU type.
@@ -235,37 +235,39 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
 
 #ifdef CONFIG_SMP
 
-static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
+static void alternatives_smp_lock(const s32 *start, const s32 *end,
+				  u8 *text, u8 *text_end)
 {
-	u8 **ptr;
+	const s32 *poff;
 
 	mutex_lock(&text_mutex);
-	for (ptr = start; ptr < end; ptr++) {
-		if (*ptr < text)
-			continue;
-		if (*ptr > text_end)
+	for (poff = start; poff < end; poff++) {
+		u8 *ptr = (u8 *)poff + *poff;
+
+		if (!*poff || ptr < text || ptr >= text_end)
 			continue;
 		/* turn DS segment override prefix into lock prefix */
-		text_poke(*ptr, ((unsigned char []){0xf0}), 1);
+		text_poke(ptr, ((unsigned char []){0xf0}), 1);
 	};
 	mutex_unlock(&text_mutex);
 }
 
-static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
+static void alternatives_smp_unlock(const s32 *start, const s32 *end,
+				    u8 *text, u8 *text_end)
 {
-	u8 **ptr;
+	const s32 *poff;
 
 	if (noreplace_smp)
 		return;
 
 	mutex_lock(&text_mutex);
-	for (ptr = start; ptr < end; ptr++) {
-		if (*ptr < text)
-			continue;
-		if (*ptr > text_end)
+	for (poff = start; poff < end; poff++) {
+		u8 *ptr = (u8 *)poff + *poff;
+
+		if (!*poff || ptr < text || ptr >= text_end)
 			continue;
 		/* turn lock prefix into DS segment override prefix */
-		text_poke(*ptr, ((unsigned char []){0x3E}), 1);
+		text_poke(ptr, ((unsigned char []){0x3E}), 1);
 	};
 	mutex_unlock(&text_mutex);
 }
@@ -276,8 +278,8 @@ struct smp_alt_module {
 	char *name;
 
 	/* ptrs to lock prefixes */
-	u8 **locks;
-	u8 **locks_end;
+	const s32 *locks;
+	const s32 *locks_end;
 
 	/* .text segment, needed to avoid patching init code ;) */
 	u8 *text;
@@ -398,16 +400,19 @@ void alternatives_smp_switch(int smp)
 int alternatives_text_reserved(void *start, void *end)
 {
 	struct smp_alt_module *mod;
-	u8 **ptr;
+	const s32 *poff;
 	u8 *text_start = start;
 	u8 *text_end = end;
 
 	list_for_each_entry(mod, &smp_alt_modules, next) {
 		if (mod->text > text_end || mod->text_end < text_start)
 			continue;
-		for (ptr = mod->locks; ptr < mod->locks_end; ptr++)
-			if (text_start <= *ptr && text_end >= *ptr)
+		for (poff = mod->locks; poff < mod->locks_end; poff++) {
+			const u8 *ptr = (const u8 *)poff + *poff;
+
+			if (text_start <= ptr && text_end > ptr)
 				return 1;
+		}
 	}
 
 	return 0;
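Note: after this change each .smp_locks entry is a 32-bit offset relative to the entry's own location, and the patching code recovers the instruction address as (u8 *)poff + *poff. The program below is a small user-space analogue of that self-relative record scheme, assuming GCC or Clang on x86-64 and a GNU-style linker that provides __start_/__stop_ symbols for named sections; RECORD_LOCK() and the my_locks section are invented names used only for illustration, not part of the kernel change.

/*
 * Record the location of a lock-prefixed instruction as a 32-bit
 * self-relative offset in a custom section, then walk the section and
 * resolve each entry back to the instruction address, mirroring the
 * "(u8 *)poff + *poff" arithmetic in alternatives_smp_lock().
 */
#include <stdio.h>
#include <stdint.h>

#define RECORD_LOCK()						\
	asm volatile("661:\n\t"					\
		     "lock; addl $0, (%%rsp)\n\t"		\
		     ".pushsection my_locks,\"a\"\n\t"		\
		     ".balign 4\n\t"				\
		     ".long 661b - .\n\t"			\
		     ".popsection" ::: "memory", "cc")

/* Start/stop symbols supplied by the linker for the custom section. */
extern const int32_t __start_my_locks[], __stop_my_locks[];

int main(void)
{
	const int32_t *poff;

	RECORD_LOCK();

	for (poff = __start_my_locks; poff < __stop_my_locks; poff++) {
		const uint8_t *insn = (const uint8_t *)poff + *poff;

		/* First byte should be 0xf0, the lock prefix. */
		printf("entry %p -> insn %p (0x%02x)\n",
		       (const void *)poff, (const void *)insn, insn[0]);
	}
	return 0;
}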
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8a6f0afa767e..7a355ddcc64b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -539,7 +539,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 	struct mce m;
 	int i;
 
-	__get_cpu_var(mce_poll_count)++;
+	percpu_inc(mce_poll_count);
 
 	mce_setup(&m);
 
@@ -934,7 +934,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
 	atomic_inc(&mce_entry);
 
-	__get_cpu_var(mce_exception_count)++;
+	percpu_inc(mce_exception_count);
 
 	if (notify_die(DIE_NMI, "machine check", regs, error_code,
 		   18, SIGKILL) == NOTIFY_STOP)
