author		Mark Brown <broonie@opensource.wolfsonmicro.com>	2012-06-11 23:46:58 -0400
committer	Mark Brown <broonie@opensource.wolfsonmicro.com>	2012-06-11 23:46:58 -0400
commit		66e61060d7ad9fcf61475fb836fb5987db7a7ee0 (patch)
tree		a0abe3fb2fa8858261dc41df9444c0e0ca85f1a6 /arch
parent		a89c3e956ae78cec8926b92f2d61b7a5b675e787 (diff)
parent		f242e50eee1ec7692c4854d94e8cd543991cce71 (diff)
Merge branch 'asoc-ab8500' into for-3.6
Diffstat (limited to 'arch')
38 files changed, 373 insertions(+), 204 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b649c5904a4f..84449dd8f031 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,7 +7,6 @@ config ARM
 	select HAVE_IDE if PCI || ISA || PCMCIA
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
-	select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index f31383c32f9c..df33909205e2 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -186,6 +186,12 @@ config SH_TIMER_TMU
 	help
 	  This enables build of the TMU timer driver.
 
+config EM_TIMER_STI
+	bool "STI timer driver"
+	default y
+	help
+	  This enables build of the STI timer driver.
+
 endmenu
 
 config SH_CLK_CPG
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 9c74ac545849..c8a8fde777bb 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -25,6 +25,7 @@
 #include <linux/mfd/tc3589x.h>
 #include <linux/mfd/tps6105x.h>
 #include <linux/mfd/abx500/ab8500-gpio.h>
+#include <linux/mfd/abx500/ab8500-codec.h>
 #include <linux/leds-lp5521.h>
 #include <linux/input.h>
 #include <linux/smsc911x.h>
@@ -97,6 +98,18 @@ static struct ab8500_gpio_platform_data ab8500_gpio_pdata = {
 			0x7A, 0x00, 0x00},
 };
 
+/* ab8500-codec */
+static struct ab8500_codec_platform_data ab8500_codec_pdata = {
+	.amics = {
+		.mic1_type = AMIC_TYPE_DIFFERENTIAL,
+		.mic2_type = AMIC_TYPE_DIFFERENTIAL,
+		.mic1a_micbias = AMIC_MICBIAS_VAMIC1,
+		.mic1b_micbias = AMIC_MICBIAS_VAMIC1,
+		.mic2_micbias = AMIC_MICBIAS_VAMIC2
+	},
+	.ear_cmv = EAR_CMV_0_95V
+};
+
 static struct gpio_keys_button snowball_key_array[] = {
 	{
 		.gpio		= 32,
@@ -195,6 +208,7 @@ static struct ab8500_platform_data ab8500_platdata = {
 	.regulator	= ab8500_regulators,
 	.num_regulator	= ARRAY_SIZE(ab8500_regulators),
 	.gpio		= &ab8500_gpio_pdata,
+	.codec		= &ab8500_codec_pdata,
 };
 
 static struct resource ab8500_resources[] = {
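
The ab8500_codec_pdata block above is the usual board-file pattern: the machine code describes the hardware, and a driver reads the description back at probe time. A minimal sketch of the consuming side, assuming the pdata were attached directly to a platform device; dev_get_platdata() is the standard accessor, while example_codec_probe() and apply_amic_setup() are hypothetical stand-ins, not the real ab8500 codec driver (which receives this structure through the ab8500 MFD):

	#include <linux/platform_device.h>
	#include <linux/mfd/abx500/ab8500-codec.h>

	/* apply_amic_setup() is a hypothetical stand-in for the driver's own setup. */
	extern void apply_amic_setup(int mic_type, int micbias);

	static int example_codec_probe(struct platform_device *pdev)
	{
		struct ab8500_codec_platform_data *pdata =
			dev_get_platdata(&pdev->dev);

		if (!pdata)
			return -EINVAL;	/* board did not describe the codec */

		apply_amic_setup(pdata->amics.mic1_type,
				 pdata->amics.mic1a_micbias);
		return 0;
	}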
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ea6b43154090..106c4c0ebccd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -268,10 +268,8 @@ static int __init consistent_init(void)
 	unsigned long base = consistent_base;
 	unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
 
-#ifndef CONFIG_ARM_DMA_USE_IOMMU
-	if (cpu_architecture() >= CPU_ARCH_ARMv6)
+	if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
 		return 0;
-#endif
 
 	consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
 	if (!consistent_pte) {
@@ -342,7 +340,7 @@ static int __init coherent_init(void)
 	struct page *page;
 	void *ptr;
 
-	if (cpu_architecture() < CPU_ARCH_ARMv6)
+	if (!IS_ENABLED(CONFIG_CMA))
 		return 0;
 
 	ptr = __alloc_from_contiguous(NULL, size, prot, &page);
@@ -704,7 +702,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	if (arch_is_coherent() || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
-	else if (cpu_architecture() < CPU_ARCH_ARMv6)
+	else if (!IS_ENABLED(CONFIG_CMA))
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
 	else if (gfp & GFP_ATOMIC)
 		addr = __alloc_from_pool(dev, size, &page, caller);
@@ -773,7 +771,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 	if (arch_is_coherent() || nommu()) {
 		__dma_free_buffer(page, size);
-	} else if (cpu_architecture() < CPU_ARCH_ARMv6) {
+	} else if (!IS_ENABLED(CONFIG_CMA)) {
 		__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
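
These hunks lean on IS_ENABLED(), which turns a Kconfig symbol into a compile-time 0/1 constant usable in ordinary C, so the compiler drops the dead branch entirely; that is also why the unconditional `select CMA` could come out of arch/arm/Kconfig above. A simplified sketch of the preprocessor trick behind it, adapted from <linux/kconfig.h> (the real macro additionally handles =m symbols):

	#define __ARG_PLACEHOLDER_1			0,
	#define __take_second_arg(__ignored, val, ...)	val
	#define __is_defined(x)			___is_defined(x)
	#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
	#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
	#define IS_ENABLED(option)		__is_defined(option)

	#define CONFIG_CMA 1	/* pretend CMA is built in for the demo */

	int cma_active(void)
	{
		/* constant-folded: the branch compiles to "if (1)" */
		if (IS_ENABLED(CONFIG_CMA))
			return 1;
		return 0;
	}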
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index c140f9b41dce..d552a854dacc 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -300,7 +300,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
 	if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR)
 		syscall = 1;
 
-	if (ti->flags & _TIF_SIGPENDING))
+	if (ti->flags & _TIF_SIGPENDING)
 		do_signal(regs, syscall);
 
 	if (ti->flags & _TIF_NOTIFY_RESUME) {
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 2e3994b20169..62bcea7dcc6d 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -173,7 +173,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
 	unsigned long newsp;
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-	if (current->rt.nr_cpus_allowed == num_possible_cpus())
+	if (current->nr_cpus_allowed == num_possible_cpus())
 		set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
 #endif
 
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index dbc3850b1d0d..5707f1a62341 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -21,6 +21,7 @@ KBUILD_DEFCONFIG := default_defconfig
 
 NM		= sh $(srctree)/arch/parisc/nm
 CHECKFLAGS	+= -D__hppa__=1
+LIBGCC		= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
 MACHINE		:= $(shell uname -m)
 ifeq ($(MACHINE),parisc*)
@@ -79,7 +80,7 @@ kernel-y	:= mm/ kernel/ math-emu/
 kernel-$(CONFIG_HPUX)	+= hpux/
 
 core-y	+= $(addprefix arch/parisc/, $(kernel-y))
-libs-y	+= arch/parisc/lib/ `$(CC) -print-libgcc-file-name`
+libs-y	+= arch/parisc/lib/ $(LIBGCC)
 
 drivers-$(CONFIG_OPROFILE)	+= arch/parisc/oprofile/
 
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 19a434f55059..4383707d9801 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,3 +1,4 @@
 include include/asm-generic/Kbuild.asm
 
 header-y += pdc.h
+generic-y += word-at-a-time.h
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index 72cfdb0cfdd1..62a33338549c 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -1,6 +1,8 @@
 #ifndef _PARISC_BUG_H
 #define _PARISC_BUG_H
 
+#include <linux/kernel.h>	/* for BUGFLAG_TAINT */
+
 /*
  * Tell the user there is some problem.
  * The offending file and line are encoded in the __bug_table section.
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 0b6d79617d7b..2e3200ca485f 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -176,8 +176,8 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
 
 static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
 {
-	if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
-	    && entry->jump[1] == 0x396b0000 + (val & 0xffff))
+	if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16)
+	    && entry->jump[1] == 0x398c0000 + (val & 0xffff))
 		return 1;
 	return 0;
 }
@@ -204,10 +204,9 @@ static uint32_t do_plt_call(void *location,
 		entry++;
 	}
 
-	/* Stolen from Paul Mackerras as well... */
-	entry->jump[0] = 0x3d600000+((val+0x8000)>>16);	/* lis r11,sym@ha */
-	entry->jump[1] = 0x396b0000 + (val&0xffff);	/* addi r11,r11,sym@l*/
-	entry->jump[2] = 0x7d6903a6;			/* mtctr r11 */
+	entry->jump[0] = 0x3d800000+((val+0x8000)>>16);	/* lis r12,sym@ha */
+	entry->jump[1] = 0x398c0000 + (val&0xffff);	/* addi r12,r12,sym@l*/
+	entry->jump[2] = 0x7d8903a6;			/* mtctr r12 */
 	entry->jump[3] = 0x4e800420;			/* bctr */
 
 	DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
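
For readers decoding the opcode words: each PLT slot is a four-instruction trampoline that materialises the 32-bit target address in a register and branches through CTR; the patch simply retargets it from r11 to r12, which is what changes the instruction constants. A sketch of the arithmetic, mirroring the values above (the helper is illustrative, not kernel code); the `+ 0x8000` is the usual @ha adjustment, compensating for the sign extension of the low 16 bits that addi performs:

	#include <stdint.h>

	/* Build "lis r12,hi; addi r12,r12,lo; mtctr r12; bctr" for target val. */
	static void build_plt_entry(uint32_t jump[4], uint32_t val)
	{
		jump[0] = 0x3d800000 + ((val + 0x8000) >> 16);	/* lis r12,sym@ha */
		jump[1] = 0x398c0000 + (val & 0xffff);		/* addi r12,r12,sym@l */
		jump[2] = 0x7d8903a6;				/* mtctr r12 */
		jump[3] = 0x4e800420;				/* bctr */
	}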
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 99a995c2a3f2..be171ee73bf8 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -475,6 +475,7 @@ void timer_interrupt(struct pt_regs * regs)
 	struct pt_regs *old_regs;
 	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
 	struct clock_event_device *evt = &__get_cpu_var(decrementers);
+	u64 now;
 
 	/* Ensure a positive value is written to the decrementer, or else
 	 * some CPUs will continue to take decrementer exceptions.
@@ -509,9 +510,16 @@ void timer_interrupt(struct pt_regs * regs)
 		irq_work_run();
 	}
 
-	*next_tb = ~(u64)0;
-	if (evt->event_handler)
-		evt->event_handler(evt);
+	now = get_tb_or_rtc();
+	if (now >= *next_tb) {
+		*next_tb = ~(u64)0;
+		if (evt->event_handler)
+			evt->event_handler(evt);
+	} else {
+		now = *next_tb - now;
+		if (now <= DECREMENTER_MAX)
+			set_dec((int)now);
+	}
 
 #ifdef CONFIG_PPC64
 	/* collect purr register values often, for accurate calculations */
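
The rewritten tail of timer_interrupt() guards against early or spurious decrementer exceptions: the clockevent handler only runs once the timebase has actually reached the programmed deadline, otherwise the decrementer is re-armed with the remaining delta. The shape of that logic as a standalone sketch (names follow the patch; the function itself and the stand-in externs are our illustration, and DECREMENTER_MAX is assumed to be the 32-bit decrementer limit):

	typedef unsigned long long u64;
	#define DECREMENTER_MAX	0x7fffffff	/* assumption: largest decrementer value */

	extern void run_handler(void);		/* stand-in for evt->event_handler(evt) */
	extern void set_dec(int val);		/* reprogram the decrementer hardware */

	static void on_decrementer_exception(u64 now, u64 *next_tb)
	{
		if (now >= *next_tb) {
			*next_tb = ~(u64)0;		/* no deadline pending */
			run_handler();
		} else {
			u64 delta = *next_tb - now;
			if (delta <= DECREMENTER_MAX)	/* re-arm for the remainder */
				set_dec((int)delta);
		}
	}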
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 7e1fef36bde6..e9c670d7a7fe 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -91,11 +91,6 @@ extern void smp_nap(void);
 /* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
 extern void _cpu_idle(void);
 
-/* Switch boot idle thread to a freshly-allocated stack and free old stack. */
-extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
-				  unsigned long new_sp,
-				  unsigned long new_ss10);
-
 #else /* __ASSEMBLY__ */
 
 /*
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 133c4b56a99e..c31637baff28 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -68,20 +68,6 @@ STD_ENTRY(KBacktraceIterator_init_current)
 	jrp lr   /* keep backtracer happy */
 	STD_ENDPROC(KBacktraceIterator_init_current)
 
-/*
- * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then
- * free the old stack (passed in r0) and re-invoke cpu_idle().
- * We update sp and ksp0 simultaneously to avoid backtracer warnings.
- */
-STD_ENTRY(cpu_idle_on_new_stack)
-	{
-	 move sp, r1
-	 mtspr SPR_SYSTEM_SAVE_K_0, r2
-	}
-	jal free_thread_info
-	j cpu_idle
-	STD_ENDPROC(cpu_idle_on_new_stack)
-
 /* Loop forever on a nap during SMP boot. */
 STD_ENTRY(smp_nap)
 	nap
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 6098ccc59be2..dd87f3420390 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -29,6 +29,7 @@
 #include <linux/smp.h>
 #include <linux/timex.h>
 #include <linux/hugetlb.h>
+#include <linux/start_kernel.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 8bbea6aa40d9..efe5acfc79c3 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -94,10 +94,10 @@ bs_die:
 
 	.section ".bsdata", "a"
 bugger_off_msg:
-	.ascii	"Direct booting from floppy is no longer supported.\r\n"
-	.ascii	"Please use a boot loader program instead.\r\n"
+	.ascii	"Direct floppy boot is not supported. "
+	.ascii	"Use a boot loader program instead.\r\n"
 	.ascii	"\n"
-	.ascii	"Remove disk and press any key to reboot . . .\r\n"
+	.ascii	"Remove disk and press any key to reboot ...\r\n"
 	.byte	0
 
 #ifdef CONFIG_EFI_STUB
@@ -111,7 +111,7 @@ coff_header:
 #else
 	.word	0x8664				# x86-64
 #endif
-	.word	2				# nr_sections
+	.word	3				# nr_sections
 	.long	0				# TimeDateStamp
 	.long	0				# PointerToSymbolTable
 	.long	1				# NumberOfSymbols
@@ -158,8 +158,8 @@ extra_header_fields:
 #else
 	.quad	0				# ImageBase
 #endif
-	.long	0x1000				# SectionAlignment
-	.long	0x200				# FileAlignment
+	.long	0x20				# SectionAlignment
+	.long	0x20				# FileAlignment
 	.word	0				# MajorOperatingSystemVersion
 	.word	0				# MinorOperatingSystemVersion
 	.word	0				# MajorImageVersion
@@ -200,8 +200,10 @@ extra_header_fields:
 
 	# Section table
 section_table:
-	.ascii	".text"
-	.byte	0
+	#
+	# The offset & size fields are filled in by build.c.
+	#
+	.ascii	".setup"
 	.byte	0
 	.byte	0
 	.long	0
@@ -217,9 +219,8 @@ section_table:
 
 	#
 	# The EFI application loader requires a relocation section
-	# because EFI applications must be relocatable. But since
-	# we don't need the loader to fixup any relocs for us, we
-	# just create an empty (zero-length) .reloc section header.
+	# because EFI applications must be relocatable. The .reloc
+	# offset & size fields are filled in by build.c.
 	#
 	.ascii	".reloc"
 	.byte	0
@@ -233,6 +234,25 @@ section_table:
 	.word	0				# NumberOfRelocations
 	.word	0				# NumberOfLineNumbers
 	.long	0x42100040			# Characteristics (section flags)
+
+	#
+	# The offset & size fields are filled in by build.c.
+	#
+	.ascii	".text"
+	.byte	0
+	.byte	0
+	.byte	0
+	.long	0
+	.long	0x0				# startup_{32,64}
+	.long	0				# Size of initialized data
+						# on disk
+	.long	0x0				# startup_{32,64}
+	.long	0				# PointerToRelocations
+	.long	0				# PointerToLineNumbers
+	.word	0				# NumberOfRelocations
+	.word	0				# NumberOfLineNumbers
+	.long	0x60500020			# Characteristics (section flags)
+
 #endif /* CONFIG_EFI_STUB */
 
 	# Kernel attributes; used by setup. This is part 1 of the
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 3f61f6e2b46f..4b8e165ee572 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -50,6 +50,8 @@ typedef unsigned int u32;
 u8 buf[SETUP_SECT_MAX*512];
 int is_big_kernel;
 
+#define PECOFF_RELOC_RESERVE 0x20
+
 /*----------------------------------------------------------------------*/
 
 static const u32 crctab32[] = {
@@ -133,11 +135,103 @@ static void usage(void)
 	die("Usage: build setup system [> image]");
 }
 
-int main(int argc, char ** argv)
-{
 #ifdef CONFIG_EFI_STUB
-	unsigned int file_sz, pe_header;
+
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+{
+	unsigned int pe_header;
+	unsigned short num_sections;
+	u8 *section;
+
+	pe_header = get_unaligned_le32(&buf[0x3c]);
+	num_sections = get_unaligned_le16(&buf[pe_header + 6]);
+
+#ifdef CONFIG_X86_32
+	section = &buf[pe_header + 0xa8];
+#else
+	section = &buf[pe_header + 0xb8];
 #endif
+
+	while (num_sections > 0) {
+		if (strncmp((char*)section, section_name, 8) == 0) {
+			/* section header size field */
+			put_unaligned_le32(size, section + 0x8);
+
+			/* section header vma field */
+			put_unaligned_le32(offset, section + 0xc);
+
+			/* section header 'size of initialised data' field */
+			put_unaligned_le32(size, section + 0x10);
+
+			/* section header 'file offset' field */
+			put_unaligned_le32(offset, section + 0x14);
+
+			break;
+		}
+		section += 0x28;
+		num_sections--;
+	}
+}
+
+static void update_pecoff_setup_and_reloc(unsigned int size)
+{
+	u32 setup_offset = 0x200;
+	u32 reloc_offset = size - PECOFF_RELOC_RESERVE;
+	u32 setup_size = reloc_offset - setup_offset;
+
+	update_pecoff_section_header(".setup", setup_offset, setup_size);
+	update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE);
+
+	/*
+	 * Modify .reloc section contents with a single entry. The
+	 * relocation is applied to offset 10 of the relocation section.
+	 */
+	put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]);
+	put_unaligned_le32(10, &buf[reloc_offset + 4]);
+}
+
+static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
+{
+	unsigned int pe_header;
+	unsigned int text_sz = file_sz - text_start;
+
+	pe_header = get_unaligned_le32(&buf[0x3c]);
+
+	/* Size of image */
+	put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
+
+	/*
+	 * Size of code: Subtract the size of the first sector (512 bytes)
+	 * which includes the header.
+	 */
+	put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]);
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Address of entry point.
+	 *
+	 * The EFI stub entry point is +16 bytes from the start of
+	 * the .text section.
+	 */
+	put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]);
+#else
+	/*
+	 * Address of entry point. startup_32 is at the beginning and
+	 * the 64-bit entry point (startup_64) is always 512 bytes
+	 * after. The EFI stub entry point is 16 bytes after that, as
+	 * the first instruction allows legacy loaders to jump over
+	 * the EFI stub initialisation
+	 */
+	put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]);
+#endif /* CONFIG_X86_32 */
+
+	update_pecoff_section_header(".text", text_start, text_sz);
+}
+
+#endif /* CONFIG_EFI_STUB */
+
+int main(int argc, char ** argv)
+{
 	unsigned int i, sz, setup_sectors;
 	int c;
 	u32 sys_size;
@@ -163,6 +257,12 @@ int main(int argc, char ** argv)
 		die("Boot block hasn't got boot flag (0xAA55)");
 	fclose(file);
 
+#ifdef CONFIG_EFI_STUB
+	/* Reserve 0x20 bytes for .reloc section */
+	memset(buf+c, 0, PECOFF_RELOC_RESERVE);
+	c += PECOFF_RELOC_RESERVE;
+#endif
+
 	/* Pad unused space with zeros */
 	setup_sectors = (c + 511) / 512;
 	if (setup_sectors < SETUP_SECT_MIN)
@@ -170,6 +270,10 @@ int main(int argc, char ** argv)
 	i = setup_sectors*512;
 	memset(buf+c, 0, i-c);
 
+#ifdef CONFIG_EFI_STUB
+	update_pecoff_setup_and_reloc(i);
+#endif
+
 	/* Set the default root device */
 	put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
 
@@ -194,66 +298,8 @@ int main(int argc, char ** argv)
 	put_unaligned_le32(sys_size, &buf[0x1f4]);
 
 #ifdef CONFIG_EFI_STUB
-	file_sz = sz + i + ((sys_size * 16) - sz);
-
-	pe_header = get_unaligned_le32(&buf[0x3c]);
-
-	/* Size of image */
-	put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
-
-	/*
-	 * Subtract the size of the first section (512 bytes) which
-	 * includes the header and .reloc section. The remaining size
-	 * is that of the .text section.
-	 */
-	file_sz -= 512;
-
-	/* Size of code */
-	put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]);
-
-#ifdef CONFIG_X86_32
-	/*
-	 * Address of entry point.
-	 *
-	 * The EFI stub entry point is +16 bytes from the start of
-	 * the .text section.
-	 */
-	put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);
-
-	/* .text size */
-	put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
-
-	/* .text vma */
-	put_unaligned_le32(0x200, &buf[pe_header + 0xb4]);
-
-	/* .text size of initialised data */
-	put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]);
-
-	/* .text file offset */
-	put_unaligned_le32(0x200, &buf[pe_header + 0xbc]);
-#else
-	/*
-	 * Address of entry point. startup_32 is at the beginning and
-	 * the 64-bit entry point (startup_64) is always 512 bytes
-	 * after. The EFI stub entry point is 16 bytes after that, as
-	 * the first instruction allows legacy loaders to jump over
-	 * the EFI stub initialisation
-	 */
-	put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);
-
-	/* .text size */
-	put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
-
-	/* .text vma */
-	put_unaligned_le32(0x200, &buf[pe_header + 0xc4]);
-
-	/* .text size of initialised data */
-	put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]);
-
-	/* .text file offset */
-	put_unaligned_le32(0x200, &buf[pe_header + 0xcc]);
-#endif /* CONFIG_X86_32 */
-#endif /* CONFIG_EFI_STUB */
+	update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+#endif
 
 	crc = partial_crc32(buf, i, crc);
 	if (fwrite(buf, 1, i, stdout) != i)
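
update_pecoff_section_header() walks the section table by raw byte offsets; those offsets land on the standard PE/COFF section header fields. An equivalent struct view, per the PE/COFF specification and laid out here for illustration only (build.c deliberately patches bytes in place rather than using such a struct):

	#include <stdint.h>

	struct pe_section_header {			/* one 0x28-byte table entry */
		char	 name[8];			/* 0x00: ".setup", ".reloc", ".text" */
		uint32_t virtual_size;			/* 0x08: patched with the section size */
		uint32_t virtual_address;		/* 0x0c: patched with the offset (vma) */
		uint32_t size_of_raw_data;		/* 0x10: patched with the section size */
		uint32_t pointer_to_raw_data;		/* 0x14: patched with the file offset */
		uint32_t pointer_to_relocations;	/* 0x18 */
		uint32_t pointer_to_linenumbers;	/* 0x1c */
		uint16_t number_of_relocations;		/* 0x20 */
		uint16_t number_of_linenumbers;		/* 0x22 */
		uint32_t characteristics;		/* 0x24: section flags */
	};	/* sizeof == 0x28, the stride of the while loop above */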
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 0e3793b821ef..dc580c42851c 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -54,6 +54,20 @@ struct nmiaction {
 	__register_nmi_handler((t), &fn##_na);	\
 })
 
+/*
+ * For special handlers that register/unregister in the
+ * init section only.  This should be considered rare.
+ */
+#define register_nmi_handler_initonly(t, fn, fg, n)	\
+({							\
+	static struct nmiaction fn##_na __initdata = {	\
+		.handler = (fn),			\
+		.name = (n),				\
+		.flags = (fg),				\
+	};						\
+	__register_nmi_handler((t), &fn##_na);		\
+})
+
 int __register_nmi_handler(unsigned int, struct nmiaction *);
 
 void unregister_nmi_handler(unsigned int, const char *);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 04cd6882308e..e1f3a17034fc 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -33,9 +33,8 @@
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
 #define user_addr_max() (current_thread_info()->addr_limit.seg)
-#define __addr_ok(addr)	\
-	((unsigned long __force)(addr) <	\
-	 (current_thread_info()->addr_limit.seg))
+#define __addr_ok(addr) \
+	((unsigned long __force)(addr) < user_addr_max())
 
 /*
  * Test whether a block of memory is a valid user space address.
@@ -47,14 +46,14 @@
  * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
  */
 
-#define __range_not_ok(addr, size)					\
+#define __range_not_ok(addr, size, limit)				\
 ({									\
 	unsigned long flag, roksum;					\
 	__chk_user_ptr(addr);						\
 	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
 	    : "=&r" (flag), "=r" (roksum)				\
 	    : "1" (addr), "g" ((long)(size)),				\
-	      "rm" (current_thread_info()->addr_limit.seg));		\
+	      "rm" (limit));						\
 	flag;								\
 })
 
@@ -77,7 +76,8 @@
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
+#define access_ok(type, addr, size) \
+	(likely(__range_not_ok(addr, size, user_addr_max()) == 0))
 
 /*
 * The exception table consists of pairs of addresses relative to the
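
The asm in __range_not_ok() is the classic overflow-safe bounds check: addr + size may carry out of the machine word (the "33-bit arithmetic" the comment mentions), and the sbb instructions fold that carry into the result. In plain C the same predicate reads roughly as follows; this is an illustration, the kernel keeps the asm version, presumably for better code generation:

	/* Non-zero when [addr, addr + size) is not entirely below limit. */
	static inline unsigned long range_not_ok(unsigned long addr,
						 unsigned long size,
						 unsigned long limit)
	{
		unsigned long sum = addr + size;

		return (sum < addr) || (sum > limit);	/* wrapped, or past the limit */
	}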
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index becf47b81735..6149b476d9df 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -149,7 +149,6 @@
 /* 4 bits of software ack period */
 #define UV2_ACK_MASK			0x7UL
 #define UV2_ACK_UNITS_SHFT		3
-#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
 #define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
 
 /*
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 6e76c191a835..d5fd66f0d4cd 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -20,7 +20,6 @@
 #include <linux/bitops.h>
 #include <linux/ioport.h>
 #include <linux/suspend.h>
-#include <linux/kmemleak.h>
 #include <asm/e820.h>
 #include <asm/io.h>
 #include <asm/iommu.h>
@@ -95,11 +94,6 @@ static u32 __init allocate_aperture(void)
 		return 0;
 	}
 	memblock_reserve(addr, aper_size);
-	/*
-	 * Kmemleak should not scan this block as it may not be mapped via the
-	 * kernel direct mapping.
-	 */
-	kmemleak_ignore(phys_to_virt(addr));
 	printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
 			aper_size >> 10, addr);
 	insert_aperture_resource((u32)addr, aper_size);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ac96561d1a99..5f0ff597437c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1195,7 +1195,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 	BUG_ON(!cfg->vector);
 
 	vector = cfg->vector;
-	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+	for_each_cpu(cpu, cfg->domain)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
 	cfg->vector = 0;
@@ -1203,7 +1203,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 
 	if (likely(!cfg->move_in_progress))
 		return;
-	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+	for_each_cpu(cpu, cfg->old_domain) {
 		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
 								vector++) {
 			if (per_cpu(vector_irq, cpu)[vector] != irq)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 0a687fd185e6..da27c5d2168a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1274,7 +1274,7 @@ static void mce_timer_fn(unsigned long data)
 	 */
 	iv = __this_cpu_read(mce_next_interval);
 	if (mce_notify_irq())
-		iv = max(iv, (unsigned long) HZ/100);
+		iv = max(iv / 2, (unsigned long) HZ/100);
 	else
 		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
 	__this_cpu_write(mce_next_interval, iv);
@@ -1557,7 +1557,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 static void __mcheck_cpu_init_timer(void)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
-	unsigned long iv = __this_cpu_read(mce_next_interval);
+	unsigned long iv = check_interval * HZ;
 
 	setup_timer(t, mce_timer_fn, smp_processor_id());
 
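
The two hunks give the MCE poll timer a symmetric adaptive policy: halve the interval when mce_notify_irq() found pending work, double it when the machine stays quiet, and start each CPU from the configured check_interval rather than a stale per-cpu value. Extracted as a standalone sketch (our paraphrase of the policy, with HZ fixed by assumption for the demo):

	#define HZ 100	/* assumed tick rate for the demo */

	/* Clamp the next poll interval into [HZ/100, max_interval]. */
	static unsigned long next_poll_interval(unsigned long iv, int found_work,
						unsigned long max_interval)
	{
		if (found_work)
			return (iv / 2 > HZ / 100) ? iv / 2 : HZ / 100;	/* poll faster */
		return (iv * 2 < max_interval) ? iv * 2 : max_interval;	/* back off */
	}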
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index e049d6da0183..c4706cf9c011 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1496,6 +1496,7 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
 		if (!cpuc->shared_regs)
 			goto error;
 	}
+	cpuc->is_fake = 1;
 	return cpuc;
 error:
 	free_fake_cpuc(cpuc);
@@ -1756,6 +1757,12 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 	dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
 }
 
+static inline int
+valid_user_frame(const void __user *fp, unsigned long size)
+{
+	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
+}
+
 #ifdef CONFIG_COMPAT
 
 #include <asm/compat.h>
@@ -1780,7 +1787,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 		if (bytes != sizeof(frame))
 			break;
 
-		if (fp < compat_ptr(regs->sp))
+		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
 		perf_callchain_store(entry, frame.return_address);
@@ -1826,7 +1833,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 		if (bytes != sizeof(frame))
 			break;
 
-		if ((unsigned long)fp < regs->sp)
+		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
 		perf_callchain_store(entry, frame.return_address);
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 6638aaf54493..7241e2fc3c17 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -117,6 +117,7 @@ struct cpu_hw_events {
 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
 
 	unsigned int		group_flag;
+	int			is_fake;
 
 	/*
	 * Intel DebugStore bits
@@ -364,6 +365,7 @@ struct x86_pmu {
 	int		pebs_record_size;
 	void		(*drain_pebs)(struct pt_regs *regs);
 	struct event_constraint *pebs_constraints;
+	void		(*pebs_aliases)(struct perf_event *event);
 
 	/*
	 * Intel LBR
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 166546ec6aef..187c294bc658 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1119,27 +1119,33 @@ intel_bts_constraints(struct perf_event *event)
 	return NULL;
 }
 
-static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
+static int intel_alt_er(int idx)
 {
 	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
-		return false;
+		return idx;
 
-	if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
-		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
-		event->hw.config |= 0x01bb;
-		event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
-		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
-	} else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
+	if (idx == EXTRA_REG_RSP_0)
+		return EXTRA_REG_RSP_1;
+
+	if (idx == EXTRA_REG_RSP_1)
+		return EXTRA_REG_RSP_0;
+
+	return idx;
+}
+
+static void intel_fixup_er(struct perf_event *event, int idx)
+{
+	event->hw.extra_reg.idx = idx;
+
+	if (idx == EXTRA_REG_RSP_0) {
 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
 		event->hw.config |= 0x01b7;
-		event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
+	} else if (idx == EXTRA_REG_RSP_1) {
+		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
+		event->hw.config |= 0x01bb;
+		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
 	}
-
-	if (event->hw.extra_reg.idx == orig_idx)
-		return false;
-
-	return true;
 }
 
 /*
@@ -1157,14 +1163,18 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
 	struct event_constraint *c = &emptyconstraint;
 	struct er_account *era;
 	unsigned long flags;
-	int orig_idx = reg->idx;
+	int idx = reg->idx;
 
-	/* already allocated shared msr */
-	if (reg->alloc)
+	/*
+	 * reg->alloc can be set due to existing state, so for fake cpuc we
+	 * need to ignore this, otherwise we might fail to allocate proper fake
+	 * state for this extra reg constraint. Also see the comment below.
+	 */
+	if (reg->alloc && !cpuc->is_fake)
 		return NULL; /* call x86_get_event_constraint() */
 
 again:
-	era = &cpuc->shared_regs->regs[reg->idx];
+	era = &cpuc->shared_regs->regs[idx];
 	/*
 	 * we use spin_lock_irqsave() to avoid lockdep issues when
 	 * passing a fake cpuc
@@ -1173,6 +1183,29 @@ again:
 
 	if (!atomic_read(&era->ref) || era->config == reg->config) {
 
+		/*
+		 * If its a fake cpuc -- as per validate_{group,event}() we
+		 * shouldn't touch event state and we can avoid doing so
+		 * since both will only call get_event_constraints() once
+		 * on each event, this avoids the need for reg->alloc.
+		 *
+		 * Not doing the ER fixup will only result in era->reg being
+		 * wrong, but since we won't actually try and program hardware
+		 * this isn't a problem either.
+		 */
+		if (!cpuc->is_fake) {
+			if (idx != reg->idx)
+				intel_fixup_er(event, idx);
+
+			/*
+			 * x86_schedule_events() can call get_event_constraints()
+			 * multiple times on events in the case of incremental
+			 * scheduling(). reg->alloc ensures we only do the ER
+			 * allocation once.
+			 */
+			reg->alloc = 1;
+		}
+
 		/* lock in msr value */
 		era->config = reg->config;
 		era->reg = reg->reg;
@@ -1180,17 +1213,17 @@ again:
 		/* one more user */
 		atomic_inc(&era->ref);
 
-		/* no need to reallocate during incremental event scheduling */
-		reg->alloc = 1;
-
 		/*
 		 * need to call x86_get_event_constraint()
 		 * to check if associated event has constraints
 		 */
 		c = NULL;
-	} else if (intel_try_alt_er(event, orig_idx)) {
-		raw_spin_unlock_irqrestore(&era->lock, flags);
-		goto again;
+	} else {
+		idx = intel_alt_er(idx);
+		if (idx != reg->idx) {
+			raw_spin_unlock_irqrestore(&era->lock, flags);
+			goto again;
+		}
 	}
 	raw_spin_unlock_irqrestore(&era->lock, flags);
 
@@ -1204,11 +1237,14 @@ __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
 	struct er_account *era;
 
 	/*
-	 * only put constraint if extra reg was actually
-	 * allocated. Also takes care of event which do
-	 * not use an extra shared reg
+	 * Only put constraint if extra reg was actually allocated. Also takes
+	 * care of event which do not use an extra shared reg.
+	 *
+	 * Also, if this is a fake cpuc we shouldn't touch any event state
+	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
+	 * either since it'll be thrown out.
 	 */
-	if (!reg->alloc)
+	if (!reg->alloc || cpuc->is_fake)
 		return;
 
 	era = &cpuc->shared_regs->regs[reg->idx];
@@ -1300,15 +1336,9 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
 	intel_put_shared_regs_event_constraints(cpuc, event);
 }
 
-static int intel_pmu_hw_config(struct perf_event *event)
+static void intel_pebs_aliases_core2(struct perf_event *event)
 {
-	int ret = x86_pmu_hw_config(event);
-
-	if (ret)
-		return ret;
-
-	if (event->attr.precise_ip &&
-	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
 		/*
 		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
 		 * (0x003c) so that we can use it with PEBS.
@@ -1329,10 +1359,48 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		 */
 		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
 
+		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
+		event->hw.config = alt_config;
+	}
+}
+
+static void intel_pebs_aliases_snb(struct perf_event *event)
+{
+	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+		/*
+		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
+		 * (0x003c) so that we can use it with PEBS.
+		 *
+		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
+		 * PEBS capable. However we can use UOPS_RETIRED.ALL
+		 * (0x01c2), which is a PEBS capable event, to get the same
+		 * count.
+		 *
+		 * UOPS_RETIRED.ALL counts the number of cycles that retires
+		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
+		 * larger than the maximum number of micro-ops that can be
+		 * retired per cycle (4) and then inverting the condition, we
+		 * count all cycles that retire 16 or less micro-ops, which
+		 * is every cycle.
+		 *
+		 * Thereby we gain a PEBS capable cycle counter.
+		 */
+		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
 
 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
 		event->hw.config = alt_config;
 	}
+}
+
+static int intel_pmu_hw_config(struct perf_event *event)
+{
+	int ret = x86_pmu_hw_config(event);
+
+	if (ret)
+		return ret;
+
+	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
+		x86_pmu.pebs_aliases(event);
 
 	if (intel_pmu_needs_lbr_smpl(event)) {
 		ret = intel_pmu_setup_lbr_filter(event);
@@ -1607,6 +1675,7 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.max_period		= (1ULL << 31) - 1,
 	.get_event_constraints	= intel_get_event_constraints,
 	.put_event_constraints	= intel_put_event_constraints,
+	.pebs_aliases		= intel_pebs_aliases_core2,
 
 	.format_attrs		= intel_arch3_formats_attr,
 
@@ -1840,8 +1909,9 @@ __init int intel_pmu_init(void)
 		break;
 
 	case 42: /* SandyBridge */
-		x86_add_quirk(intel_sandybridge_quirk);
 	case 45: /* SandyBridge, "Romely-EP" */
+		x86_add_quirk(intel_sandybridge_quirk);
+	case 58: /* IvyBridge */
 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -1849,6 +1919,7 @@ __init int intel_pmu_init(void)
 
 		x86_pmu.event_constraints = intel_snb_event_constraints;
 		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
+		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
 		x86_pmu.extra_regs = intel_snb_extra_regs;
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
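
The PEBS alias in intel_pebs_aliases_snb() rewrites the raw event code, and the X86_CONFIG() values map directly onto the IA32_PERFEVTSELx bit layout documented in the Intel SDM: event select in bits 0-7, unit mask in bits 8-15, invert in bit 23, counter mask in bits 24-31. A worked encoding, for illustration only:

	#include <stdint.h>

	static uint64_t perfevtsel(uint64_t event, uint64_t umask,
				   uint64_t inv, uint64_t cmask)
	{
		return event | (umask << 8) | (inv << 23) | (cmask << 24);
	}

	/*
	 * UOPS_RETIRED.ALL (0x01c2) with cmask=16 and inv=1 counts cycles
	 * retiring "16 or fewer" uops, i.e. every cycle, but PEBS-capable:
	 * perfevtsel(0xc2, 0x01, 1, 16) == 0x108001c2
	 */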
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 5a3edc27f6e5..35e2192df9f4 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -400,14 +400,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
 	INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
 	INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
-	INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
-	INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
-	INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
-	INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
-	INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
-	INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
-	INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
-	INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
+	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
 	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
 	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 	INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
index e31bf8d5c4d2..149b8d9c6ad4 100644
--- a/arch/x86/kernel/nmi_selftest.c
+++ b/arch/x86/kernel/nmi_selftest.c
@@ -42,7 +42,7 @@ static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs)
 static void __init init_nmi_testsuite(void)
 {
 	/* trap all the unknown NMIs we may generate */
-	register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
+	register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
 }
 
 static void __init cleanup_nmi_testsuite(void)
@@ -64,7 +64,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
 {
 	unsigned long timeout;
 
-	if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
+	if (register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback,
 				 NMI_FLAG_FIRST, "nmi_selftest")) {
 		nmi_fail = FAILURE;
 		return;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 79c45af81604..25b48edb847c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -639,9 +639,11 @@ void native_machine_shutdown(void)
 	set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
 
 	/*
-	 * O.K Now that I'm on the appropriate processor,
-	 * stop all of the others.
+	 * O.K Now that I'm on the appropriate processor, stop all of the
+	 * others. Also disable the local irq to not receive the per-cpu
+	 * timer interrupt which may trigger scheduler's load balance.
 	 */
+	local_irq_disable();
 	stop_other_cpus();
 #endif
 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f56f96da77f5..3fab55bea29b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -382,6 +382,15 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
382 | if ((i == cpu) || (has_mc && match_llc(c, o))) | 382 | if ((i == cpu) || (has_mc && match_llc(c, o))) |
383 | link_mask(llc_shared, cpu, i); | 383 | link_mask(llc_shared, cpu, i); |
384 | 384 | ||
385 | } | ||
386 | |||
387 | /* | ||
388 | * This needs a separate iteration over the cpus because we rely on all | ||
389 | * cpu_sibling_mask links to be set-up. | ||
390 | */ | ||
391 | for_each_cpu(i, cpu_sibling_setup_mask) { | ||
392 | o = &cpu_data(i); | ||
393 | |||
385 | if ((i == cpu) || (has_mc && match_mc(c, o))) { | 394 | if ((i == cpu) || (has_mc && match_mc(c, o))) { |
386 | link_mask(core, cpu, i); | 395 | link_mask(core, cpu, i); |
387 | 396 | ||
@@ -410,15 +419,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
410 | /* maps the cpu to the sched domain representing multi-core */ | 419 | /* maps the cpu to the sched domain representing multi-core */ |
411 | const struct cpumask *cpu_coregroup_mask(int cpu) | 420 | const struct cpumask *cpu_coregroup_mask(int cpu) |
412 | { | 421 | { |
413 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 422 | return cpu_llc_shared_mask(cpu); |
414 | /* | ||
415 | * For perf, we return last level cache shared map. | ||
416 | * And for power savings, we return cpu_core_map | ||
417 | */ | ||
418 | if (!(cpu_has(c, X86_FEATURE_AMD_DCM))) | ||
419 | return cpu_core_mask(cpu); | ||
420 | else | ||
421 | return cpu_llc_shared_mask(cpu); | ||
422 | } | 423 | } |
423 | 424 | ||
424 | static void impress_friends(void) | 425 | static void impress_friends(void) |
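The core-level match on CPU i consults sibling links that were created while iterating other CPUs, so that pass can only run once the first pass has finished for the whole mask; the new second loop guarantees exactly that. The structure, with hypothetical helper names standing in for the real match/link calls; cpu_coregroup_mask() is likewise simplified so the multi-core scheduling domain is always built from the last-level-cache shared map, with no AMD_DCM special case:

/* pass 1: SMT and LLC links for every cpu in the setup mask */
for_each_cpu(i, cpu_sibling_setup_mask)
	link_smt_and_llc(cpu, i);		/* hypothetical helper */

/* pass 2: core links -- these read links made in pass 1 for
 * *other* cpus, hence the separate iteration */
for_each_cpu(i, cpu_sibling_setup_mask)
	link_core_using_siblings(cpu, i);	/* hypothetical helper */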
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index f61ee67ec00f..677b1ed184c9 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | 9 | ||
10 | #include <asm/word-at-a-time.h> | 10 | #include <asm/word-at-a-time.h> |
11 | #include <linux/sched.h> | ||
11 | 12 | ||
12 | /* | 13 | /* |
13 | * best effort, GUP based copy_from_user() that is NMI-safe | 14 | * best effort, GUP based copy_from_user() that is NMI-safe |
@@ -21,6 +22,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) | |||
21 | void *map; | 22 | void *map; |
22 | int ret; | 23 | int ret; |
23 | 24 | ||
25 | if (__range_not_ok(from, n, TASK_SIZE)) | ||
26 | return len; | ||
27 | |||
24 | do { | 28 | do { |
25 | ret = __get_user_pages_fast(addr, 1, 0, &page); | 29 | ret = __get_user_pages_fast(addr, 1, 0, &page); |
26 | if (!ret) | 30 | if (!ret) |
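The added guard rejects any source range that is not wholly user memory before the GUP loop runs; since this path must stay NMI-safe, refusing up front beats touching a kernel address. A hypothetical caller, assuming (as the early return of len == 0 suggests) that the function returns the number of bytes actually copied:

/* Illustrative fragment: buf, user_sp, and the 64-byte length are made up. */
unsigned long copied = copy_from_user_nmi(buf, (void __user *)user_sp, 64);
if (copied != 64)
	return;	/* partial or refused copy, e.g. the range crossed TASK_SIZE */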
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 819137904428..5d7e51f3fd28 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt | |||
@@ -28,7 +28,7 @@ | |||
28 | # - (66): the last prefix is 0x66 | 28 | # - (66): the last prefix is 0x66 |
29 | # - (F3): the last prefix is 0xF3 | 29 | # - (F3): the last prefix is 0xF3 |
30 | # - (F2): the last prefix is 0xF2 | 30 | # - (F2): the last prefix is 0xF2 |
31 | # | 31 | # - (!F3) : the last prefix is not 0xF3 (including non-last prefix case) |
32 | 32 | ||
33 | Table: one byte opcode | 33 | Table: one byte opcode |
34 | Referrer: | 34 | Referrer: |
@@ -515,12 +515,12 @@ b4: LFS Gv,Mp | |||
515 | b5: LGS Gv,Mp | 515 | b5: LGS Gv,Mp |
516 | b6: MOVZX Gv,Eb | 516 | b6: MOVZX Gv,Eb |
517 | b7: MOVZX Gv,Ew | 517 | b7: MOVZX Gv,Ew |
518 | b8: JMPE | POPCNT Gv,Ev (F3) | 518 | b8: JMPE (!F3) | POPCNT Gv,Ev (F3) |
519 | b9: Grp10 (1A) | 519 | b9: Grp10 (1A) |
520 | ba: Grp8 Ev,Ib (1A) | 520 | ba: Grp8 Ev,Ib (1A) |
521 | bb: BTC Ev,Gv | 521 | bb: BTC Ev,Gv |
522 | bc: BSF Gv,Ev | TZCNT Gv,Ev (F3) | 522 | bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3) |
523 | bd: BSR Gv,Ev | LZCNT Gv,Ev (F3) | 523 | bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3) |
524 | be: MOVSX Gv,Eb | 524 | be: MOVSX Gv,Eb |
525 | bf: MOVSX Gv,Ew | 525 | bf: MOVSX Gv,Ew |
526 | # 0x0f 0xc0-0xcf | 526 | # 0x0f 0xc0-0xcf |
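(!F3) disambiguates opcodes whose meaning flips on a mandatory F3 prefix, as the three entries above show. A worked example for 0x0f 0xbc (register choice arbitrary):

/* 0f bc c8       ->  bsf   %eax,%ecx     last prefix absent   (!F3)
 * f3 0f bc c8    ->  tzcnt %eax,%ecx     last prefix is 0xF3  (F3)  */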
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 97141c26a13a..bc4e9d84157f 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -62,7 +62,8 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en | |||
62 | extra += PMD_SIZE; | 62 | extra += PMD_SIZE; |
63 | #endif | 63 | #endif |
64 | /* The first 2/4M doesn't use large pages. */ | 64 | /* The first 2/4M doesn't use large pages. */ |
65 | extra += mr->end - mr->start; | 65 | if (mr->start < PMD_SIZE) |
66 | extra += mr->end - mr->start; | ||
66 | 67 | ||
67 | ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; | 68 | ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; |
68 | } else | 69 | } else |
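Only a range overlapping the first 2/4M actually needs the extra small-page reservation, so the unconditional add was pure over-allocation. Rough numbers for illustration (sizes assumed, not from the patch):

/* A 1 GiB map_range starting at 4 GiB has mr->start >= PMD_SIZE, so it
 * now reserves nothing extra; before the fix it alone contributed
 *	extra = 1 GiB  ->  ptes = (1 << 30) >> PAGE_SHIFT = 262144
 * needless early PTE slots. */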
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c index 732af3a96183..4599c3e8bcb6 100644 --- a/arch/x86/mm/srat.c +++ b/arch/x86/mm/srat.c | |||
@@ -176,6 +176,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) | |||
176 | return; | 176 | return; |
177 | } | 177 | } |
178 | 178 | ||
179 | node_set(node, numa_nodes_parsed); | ||
180 | |||
179 | printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", | 181 | printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", |
180 | node, pxm, | 182 | node, pxm, |
181 | (unsigned long long) start, (unsigned long long) end - 1); | 183 | (unsigned long long) start, (unsigned long long) end - 1); |
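Marking the node in numa_nodes_parsed from the memory-affinity path presumably keeps memory-only nodes -- ones with SRAT memory entries but no CPU entries -- from being dropped later. The nodemask usage, for reference:

/* numa_nodes_parsed is a nodemask_t bitmap of nodes seen while parsing */
node_set(node, numa_nodes_parsed);	/* record this PXM's node */
/* later NUMA setup can test it with node_isset(node, numa_nodes_parsed) */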
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index e31bcd8f2eee..fd41a9262d65 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c | |||
@@ -782,7 +782,7 @@ BLOCKING_NOTIFIER_HEAD(intel_scu_notifier); | |||
782 | EXPORT_SYMBOL_GPL(intel_scu_notifier); | 782 | EXPORT_SYMBOL_GPL(intel_scu_notifier); |
783 | 783 | ||
784 | /* Called by IPC driver */ | 784 | /* Called by IPC driver */ |
785 | void intel_scu_devices_create(void) | 785 | void __devinit intel_scu_devices_create(void) |
786 | { | 786 | { |
787 | int i; | 787 | int i; |
788 | 788 | ||
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 3ae0e61abd23..59880afa851f 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
@@ -1295,7 +1295,6 @@ static void __init enable_timeouts(void) | |||
1295 | */ | 1295 | */ |
1296 | mmr_image |= (1L << SOFTACK_MSHIFT); | 1296 | mmr_image |= (1L << SOFTACK_MSHIFT); |
1297 | if (is_uv2_hub()) { | 1297 | if (is_uv2_hub()) { |
1298 | mmr_image &= ~(1L << UV2_LEG_SHFT); | ||
1299 | mmr_image |= (1L << UV2_EXT_SHFT); | 1298 | mmr_image |= (1L << UV2_EXT_SHFT); |
1300 | } | 1299 | } |
1301 | write_mmr_misc_control(pnode, mmr_image); | 1300 | write_mmr_misc_control(pnode, mmr_image); |
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index 5f6a5b6c3a15..ddcf39b1a18d 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk | |||
@@ -66,9 +66,10 @@ BEGIN { | |||
66 | rex_expr = "^REX(\\.[XRWB]+)*" | 66 | rex_expr = "^REX(\\.[XRWB]+)*" |
67 | fpu_expr = "^ESC" # TODO | 67 | fpu_expr = "^ESC" # TODO |
68 | 68 | ||
69 | lprefix1_expr = "\\(66\\)" | 69 | lprefix1_expr = "\\((66|!F3)\\)" |
70 | lprefix2_expr = "\\(F3\\)" | 70 | lprefix2_expr = "\\(F3\\)" |
71 | lprefix3_expr = "\\(F2\\)" | 71 | lprefix3_expr = "\\((F2|!F3)\\)" |
72 | lprefix_expr = "\\((66|F2|F3)\\)" | ||
72 | max_lprefix = 4 | 73 | max_lprefix = 4 |
73 | 74 | ||
74 | # All opcodes starting with lower-case 'v' or with (v1) superscript | 75 | # All opcodes starting with lower-case 'v' or with (v1) superscript |
@@ -333,13 +334,16 @@ function convert_operands(count,opnd, i,j,imm,mod) | |||
333 | if (match(ext, lprefix1_expr)) { | 334 | if (match(ext, lprefix1_expr)) { |
334 | lptable1[idx] = add_flags(lptable1[idx],flags) | 335 | lptable1[idx] = add_flags(lptable1[idx],flags) |
335 | variant = "INAT_VARIANT" | 336 | variant = "INAT_VARIANT" |
336 | } else if (match(ext, lprefix2_expr)) { | 337 | } |
338 | if (match(ext, lprefix2_expr)) { | ||
337 | lptable2[idx] = add_flags(lptable2[idx],flags) | 339 | lptable2[idx] = add_flags(lptable2[idx],flags) |
338 | variant = "INAT_VARIANT" | 340 | variant = "INAT_VARIANT" |
339 | } else if (match(ext, lprefix3_expr)) { | 341 | } |
342 | if (match(ext, lprefix3_expr)) { | ||
340 | lptable3[idx] = add_flags(lptable3[idx],flags) | 343 | lptable3[idx] = add_flags(lptable3[idx],flags) |
341 | variant = "INAT_VARIANT" | 344 | variant = "INAT_VARIANT" |
342 | } else { | 345 | } |
346 | if (!match(ext, lprefix_expr)){ | ||
343 | table[idx] = add_flags(table[idx],flags) | 347 | table[idx] = add_flags(table[idx],flags) |
344 | } | 348 | } |
345 | } | 349 | } |
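Since (!F3) now appears in both lprefix1_expr and lprefix3_expr, a single annotation may have to land in two tables; the else-if chain therefore becomes independent ifs, and the default table is used only when no last-prefix annotation matched at all. The resulting mapping:

/* (66)  -> lptable1
 * (F3)  -> lptable2
 * (F2)  -> lptable3
 * (!F3) -> lptable1 AND lptable3   (the reason the else-if had to go)
 * none  -> table                   (the !match(ext, lprefix_expr) case) */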
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index 0b9f2e13c781..c1dacca312f3 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h | |||
@@ -31,5 +31,5 @@ asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp, | |||
31 | asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, | 31 | asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, |
32 | struct timespec __user *tsp, const sigset_t __user *sigmask, | 32 | struct timespec __user *tsp, const sigset_t __user *sigmask, |
33 | size_t sigsetsize); | 33 | size_t sigsetsize); |
34 | 34 | asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, | |
35 | 35 | size_t sigsetsize); | |
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index b9f8e5850d3a..efe4e854b3cd 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c | |||
@@ -493,7 +493,7 @@ static void do_signal(struct pt_regs *regs) | |||
493 | if (ret) | 493 | if (ret) |
494 | return; | 494 | return; |
495 | 495 | ||
496 | signal_delivered(signr, info, ka, regs, 0); | 496 | signal_delivered(signr, &info, &ka, regs, 0); |
497 | if (current->ptrace & PT_SINGLESTEP) | 497 | if (current->ptrace & PT_SINGLESTEP) |
498 | task_pt_regs(current)->icountlevel = 1; | 498 | task_pt_regs(current)->icountlevel = 1; |
499 | 499 | ||
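The &info/&ka change tracks the 3.5 signal-helper rework: info and ka are on-stack structures in xtensa's do_signal(), and the generic helper now takes pointers to them. The prototype the call site appears to be built against (an assumption inferred from the hunk, not quoted from the tree):

void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
		      struct pt_regs *regs, int stepping);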