Diffstat (limited to 'arch/x86')
88 files changed, 1414 insertions, 919 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2817ab5a1204..c9086e6307a5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -34,6 +34,8 @@ config X86
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
+	select HAVE_FTRACE_SYSCALLS
 	select HAVE_KVM
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
@@ -250,16 +252,13 @@ config SMP
 
 config X86_X2APIC
 	bool "Support x2apic"
-	depends on X86_LOCAL_APIC && X86_64
+	depends on X86_LOCAL_APIC && X86_64 && INTR_REMAP
 	---help---
 	  This enables x2apic support on CPUs that have this feature.
 
 	  This allows 32-bit apic IDs (so it can support very large systems),
 	  and accesses the local apic via MSRs not via mmio.
 
-	  ( On certain CPU models you may need to enable INTR_REMAP too,
-	    to get functional x2apic mode. )
-
 	  If you don't know what to do here, say N.
 
 config SPARSE_IRQ
@@ -1145,7 +1144,7 @@ config NODES_SHIFT
 	depends on NEED_MULTIPLE_NODES
 	---help---
 	  Specify the maximum number of NUMA Nodes available on the target
-	  system.  Increases memory reserved to accomodate various tables.
+	  system.  Increases memory reserved to accommodate various tables.
 
 config HAVE_ARCH_BOOTMEM
 	def_bool y
@@ -1323,7 +1322,7 @@ config MTRR_SANITIZER
 	  add writeback entries.
 
 	  Can be disabled with disable_mtrr_cleanup on the kernel command line.
-	  The largest mtrr entry size for a continous block can be set with
+	  The largest mtrr entry size for a continuous block can be set with
 	  mtrr_chunk_size.
 
 	  If unsure, say Y.
@@ -1838,8 +1837,8 @@ config PCI_MMCONFIG
 
 config DMAR
 	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
-	depends on X86_64 && PCI_MSI && ACPI && EXPERIMENTAL
-	---help---
+	depends on PCI_MSI && ACPI && EXPERIMENTAL
+	help
 	  DMA remapping (DMAR) devices support enables independent address
 	  translations for Direct Memory Access (DMA) from devices.
 	  These DMA remapping devices are reported via ACPI tables
@@ -1880,7 +1879,6 @@ config DMAR_FLOPPY_WA
 config INTR_REMAP
 	bool "Support for Interrupt Remapping (EXPERIMENTAL)"
 	depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
-	select X86_X2APIC
 	---help---
 	  Supports Interrupt remapping for IO-APIC and MSI devices.
 	  To use x2apic mode in the CPU's which support x2APIC enhancements or
diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c
index 95d86ce0421c..9e0587a37768 100644
--- a/arch/x86/boot/video-vga.c
+++ b/arch/x86/boot/video-vga.c
@@ -129,22 +129,18 @@ u16 vga_crtc(void)
 	return (inb(0x3cc) & 1) ? 0x3d4 : 0x3b4;
 }
 
-static void vga_set_480_scanlines(int lines)
+static void vga_set_480_scanlines(void)
 {
 	u16 crtc;		/* CRTC base address */
 	u8  csel;		/* CRTC miscellaneous output register */
-	u8  ovfw;		/* CRTC overflow register */
-	int end = lines-1;
 
 	crtc = vga_crtc();
 
-	ovfw = 0x3c | ((end >> (8-1)) & 0x02) | ((end >> (9-6)) & 0x40);
-
 	out_idx(0x0c, crtc, 0x11); /* Vertical sync end, unlock CR0-7 */
 	out_idx(0x0b, crtc, 0x06); /* Vertical total */
-	out_idx(ovfw, crtc, 0x07); /* Vertical overflow */
+	out_idx(0x3e, crtc, 0x07); /* Vertical overflow */
 	out_idx(0xea, crtc, 0x10); /* Vertical sync start */
-	out_idx(end, crtc, 0x12); /* Vertical display end */
+	out_idx(0xdf, crtc, 0x12); /* Vertical display end */
 	out_idx(0xe7, crtc, 0x15); /* Vertical blank start */
 	out_idx(0x04, crtc, 0x16); /* Vertical blank end */
 	csel = inb(0x3cc);
@@ -153,21 +149,38 @@ static void vga_set_480_scanlines(int lines)
 	outb(csel, 0x3c2);
 }
 
+static void vga_set_vertical_end(int lines)
+{
+	u16 crtc;		/* CRTC base address */
+	u8  ovfw;		/* CRTC overflow register */
+	int end = lines-1;
+
+	crtc = vga_crtc();
+
+	ovfw = 0x3c | ((end >> (8-1)) & 0x02) | ((end >> (9-6)) & 0x40);
+
+	out_idx(ovfw, crtc, 0x07); /* Vertical overflow */
+	out_idx(end, crtc, 0x12); /* Vertical display end */
+}
+
 static void vga_set_80x30(void)
 {
-	vga_set_480_scanlines(30*16);
+	vga_set_480_scanlines();
+	vga_set_vertical_end(30*16);
 }
 
 static void vga_set_80x34(void)
 {
+	vga_set_480_scanlines();
 	vga_set_14font();
-	vga_set_480_scanlines(34*14);
+	vga_set_vertical_end(34*14);
 }
 
 static void vga_set_80x60(void)
 {
+	vga_set_480_scanlines();
 	vga_set_8font();
-	vga_set_480_scanlines(60*8);
+	vga_set_vertical_end(60*8);
 }
 
 static int vga_set_mode(struct mode_info *mode)
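
The hardcoded CR07/CR12 values in the new vga_set_480_scanlines() are just the removed ovfw formula evaluated at 480 scan lines; a quick check (illustrative sketch, not part of the patch):

    /* Evaluating the removed formula at lines = 480: */
    int end  = 480 - 1;                 /* 479 == 0x1df */
    u8  ovfw = 0x3c
             | ((end >> (8-1)) & 0x02)  /* bit 8 of end -> CR07 bit 1 = 0x02 */
             | ((end >> (9-6)) & 0x40); /* bit 9 of end -> CR07 bit 6 = 0x00 */
    /* ovfw == 0x3e and (end & 0xff) == 0xdf, matching the constants now
       written for the base 480-line timing; vga_set_vertical_end() then
       reprograms CR07/CR12 for the mode's actual display end. */
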
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index df8a300dfe6c..42f2f8377422 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -107,6 +107,9 @@ extern u32 native_safe_apic_wait_icr_idle(void);
 extern void native_apic_icr_write(u32 low, u32 id);
 extern u64 native_apic_icr_read(void);
 
+#define EIM_8BIT_APIC_ID	0
+#define EIM_32BIT_APIC_ID	1
+
 #ifdef CONFIG_X86_X2APIC
 /*
  * Make previous memory operations globally visible before
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index b3894bf52fcd..e55dfc1ad453 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -126,6 +126,11 @@ void clflush_cache_range(void *addr, unsigned int size);
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 extern const int rodata_test_data;
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
 #endif
 
 #ifdef CONFIG_DEBUG_RODATA_TEST
diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h
old mode 100755
new mode 100644
index 222802029fa6..222802029fa6
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0beba0d1468d..bb83b1c397aa 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -154,6 +154,7 @@
  * CPUID levels like 0x6, 0xA etc
  */
 #define X86_FEATURE_IDA		(7*32+ 0) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT	(7*32+ 1) /* Always Running APIC Timer */
 
 /* Virtualization flags: Linux defined */
 #define X86_FEATURE_TPR_SHADOW	(8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
index a7f3c75f8ad7..61c852fa346b 100644
--- a/arch/x86/include/asm/cpumask.h
+++ b/arch/x86/include/asm/cpumask.h
@@ -3,8 +3,6 @@
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
 
-#ifdef CONFIG_X86_64
-
 extern cpumask_var_t cpu_callin_mask;
 extern cpumask_var_t cpu_callout_mask;
 extern cpumask_var_t cpu_initialized_mask;
@@ -12,21 +10,5 @@ extern cpumask_var_t cpu_sibling_setup_mask;
 
 extern void setup_cpu_local_masks(void);
 
-#else /* CONFIG_X86_32 */
-
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_sibling_setup_map;
-
-#define cpu_callin_mask		((struct cpumask *)&cpu_callin_map)
-#define cpu_callout_mask	((struct cpumask *)&cpu_callout_map)
-#define cpu_initialized_mask	((struct cpumask *)&cpu_initialized)
-#define cpu_sibling_setup_mask	((struct cpumask *)&cpu_sibling_setup_map)
-
-static inline void setup_cpu_local_masks(void) { }
-
-#endif /* CONFIG_X86_32 */
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_CPUMASK_H */
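
Both word sizes now share the cpumask_var_t declarations, so the masks must be allocated before use (with CONFIG_CPUMASK_OFFSTACK=y the bitmap lives off-stack). A minimal allocation sketch, assuming a boot-time caller such as setup_cpu_local_masks(); the real setup code may use a bootmem variant of the allocator:

    /* Sketch: one-time setup of a cpumask_var_t mask. */
    if (!alloc_cpumask_var(&cpu_callin_mask, GFP_KERNEL))
            panic("cannot allocate cpu_callin_mask");
    cpumask_clear(cpu_callin_mask);
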
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index cea7b74963e9..f82fdc412c64 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -238,7 +238,7 @@ static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 
 	dma_mask = dev->coherent_dma_mask;
 	if (!dma_mask)
-		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
+		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
 
 	return dma_mask;
 }
@@ -247,10 +247,10 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 {
 	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
 
-	if (dma_mask <= DMA_24BIT_MASK)
+	if (dma_mask <= DMA_BIT_MASK(24))
 		gfp |= GFP_DMA;
 #ifdef CONFIG_X86_64
-	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
 		gfp |= GFP_DMA32;
 #endif
 	return gfp;
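
DMA_BIT_MASK() from <linux/dma-mapping.h> computes the same constants the old named masks spelled out:

    #define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
    /* DMA_BIT_MASK(24) == 0x0000000000ffffffULL  (was DMA_24BIT_MASK)
       DMA_BIT_MASK(32) == 0x00000000ffffffffULL  (was DMA_32BIT_MASK) */
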
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 63a79c77d220..2d81af3974a0 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -111,6 +111,8 @@ enum fixed_addresses {
 #ifdef CONFIG_PARAVIRT
 	FIX_PARAVIRT_BOOTMAP,
 #endif
+	FIX_TEXT_POKE0,	/* reserve 2 pages for text_poke() */
+	FIX_TEXT_POKE1,
 	__end_of_permanent_fixed_addresses,
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
 	FIX_OHCI1394_BASE,
@@ -149,11 +151,11 @@ extern pte_t *pkmap_page_table;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
 void native_set_fixmap(enum fixed_addresses idx,
-		       unsigned long phys, pgprot_t flags);
+		       phys_addr_t phys, pgprot_t flags);
 
 #ifndef CONFIG_PARAVIRT
 static inline void __set_fixmap(enum fixed_addresses idx,
-				unsigned long phys, pgprot_t flags)
+				phys_addr_t phys, pgprot_t flags)
 {
 	native_set_fixmap(idx, phys, flags);
 }
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index db24c2278be0..bd2c6511c887 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -28,6 +28,13 @@
 
 #endif
 
+/* FIXME: I don't want to stay hardcoded */
+#ifdef CONFIG_X86_64
+# define FTRACE_SYSCALL_MAX     296
+#else
+# define FTRACE_SYSCALL_MAX     333
+#endif
+
 #ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((long)(mcount))
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index e5383e3d2f8c..73739322b6d0 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -193,8 +193,10 @@ extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
  */
 extern void early_ioremap_init(void);
 extern void early_ioremap_reset(void);
-extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
-extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+				   unsigned long size);
+extern void __iomem *early_memremap(resource_size_t phys_addr,
+				    unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
 
 #define IO_SPACE_LIMIT 0xffff
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 373cc2bbcad2..9d826e436010 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -162,10 +162,13 @@ extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 extern void ioapic_init_mappings(void);
 
 #ifdef CONFIG_X86_64
-extern int save_IO_APIC_setup(void);
-extern void mask_IO_APIC_setup(void);
-extern void restore_IO_APIC_setup(void);
-extern void reinit_intr_remapped_IO_APIC(int);
+extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
+extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
+extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
+extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
+extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
+extern void reinit_intr_remapped_IO_APIC(int intr_remapping,
+	struct IO_APIC_route_entry **ioapic_entries);
 #endif
 
 extern void probe_nr_irqs_gsi(void);
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 7727aa8b7dda..378e3691c08c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -347,7 +347,7 @@ struct pv_mmu_ops {
 	/* Sometimes the physical address is a pfn, and sometimes its
 	   an mfn.  We can tell which is which from the index. */
 	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
-			   unsigned long phys, pgprot_t flags);
+			   phys_addr_t phys, pgprot_t flags);
 };
 
 struct raw_spinlock;
@@ -1432,7 +1432,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
 void arch_flush_lazy_mmu_mode(void);
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
-				unsigned long phys, pgprot_t flags)
+				phys_addr_t phys, pgprot_t flags)
 {
 	pv_mmu_ops.set_fixmap(idx, phys, flags);
 }
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index e545ea01abcf..b51a1e8b0baf 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -140,11 +140,6 @@ static inline int __pcibus_to_node(const struct pci_bus *bus)
 	return sd->node;
 }
 
-static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
-{
-	return node_to_cpumask(__pcibus_to_node(bus));
-}
-
 static inline const struct cpumask *
 cpumask_of_pcibus(const struct pci_bus *bus)
 {
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ae85a8d66a30..fcf4d92e7e04 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -94,7 +94,7 @@ struct cpuinfo_x86 {
 	unsigned long		loops_per_jiffy;
 #ifdef CONFIG_SMP
 	/* cpus sharing the last level cache: */
-	cpumask_t		llc_shared_map;
+	cpumask_var_t		llc_shared_map;
 #endif
 	/* cpuid returned max cores value: */
 	u16			 x86_max_cores;
@@ -352,6 +352,11 @@ struct i387_soft_struct {
 	u32			entry_eip;
 };
 
+struct ymmh_struct {
+	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
+	u32 ymmh_space[64];
+};
+
 struct xsave_hdr_struct {
 	u64 xstate_bv;
 	u64 reserved1[2];
@@ -361,6 +366,7 @@ struct xsave_hdr_struct {
 struct xsave_struct {
 	struct i387_fxsave_struct i387;
 	struct xsave_hdr_struct xsave_hdr;
+	struct ymmh_struct ymmh;
 	/* new processor state extensions will go here */
 } __attribute__ ((packed, aligned (64)));
 
@@ -736,6 +742,7 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
+extern void init_c1e_mask(void);
 
 extern unsigned long		boot_option_idle_override;
 extern unsigned long		idle_halt;
diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h
index 8e0f8d199e05..86723035a515 100644
--- a/arch/x86/include/asm/ptrace-abi.h
+++ b/arch/x86/include/asm/ptrace-abi.h
@@ -80,8 +80,6 @@
 
 #define PTRACE_SINGLEBLOCK	33	/* resume execution until next branch */
 
-#ifdef CONFIG_X86_PTRACE_BTS
-
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 
@@ -140,6 +138,5 @@ struct ptrace_bts_config {
    BTS records are read from oldest to newest.
    Returns number of BTS records drained.
 */
-#endif /* CONFIG_X86_PTRACE_BTS */
 
 #endif /* _ASM_X86_PTRACE_ABI_H */
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index d5cd6c586881..a4737dddfd58 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -50,7 +50,7 @@
 #ifdef CONFIG_X86_64
 #define NEED_PSE	0
 #define NEED_MSR	(1<<(X86_FEATURE_MSR & 31))
-#define NEED_PGE	(1<<(X86_FEATURE_PGE & 31))
+#define NEED_PGE	0
 #define NEED_FXSR	(1<<(X86_FEATURE_FXSR & 31))
 #define NEED_XMM	(1<<(X86_FEATURE_XMM & 31))
 #define NEED_XMM2	(1<<(X86_FEATURE_XMM2 & 31))
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index ec666491aaa4..72e5a4491661 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -269,6 +269,11 @@ struct _xsave_hdr {
 	__u64 reserved2[5];
 };
 
+struct _ymmh_state {
+	/* 16 * 16 bytes for each YMMH-reg */
+	__u32 ymmh_space[64];
+};
+
 /*
  * Extended state pointed by the fpstate pointer in the sigcontext.
  * In addition to the fpstate, information encoded in the xstate_hdr
@@ -278,6 +283,7 @@ struct _xsave_hdr {
 struct _xstate {
 	struct _fpstate fpstate;
 	struct _xsave_hdr xstate_hdr;
+	struct _ymmh_state ymmh;
 	/* new processor state extensions go here */
 };
 
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 47d0e21f2b9e..19e0d88b966d 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -21,19 +21,19 @@
 extern int smp_num_siblings;
 extern unsigned int num_processors;
 
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
 DECLARE_PER_CPU(int, cpu_number);
 
 static inline struct cpumask *cpu_sibling_mask(int cpu)
 {
-	return &per_cpu(cpu_sibling_map, cpu);
+	return per_cpu(cpu_sibling_map, cpu);
 }
 
 static inline struct cpumask *cpu_core_mask(int cpu)
 {
-	return &per_cpu(cpu_core_map, cpu);
+	return per_cpu(cpu_core_map, cpu);
 }
 
 DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
@@ -121,9 +121,10 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 	smp_ops.send_call_func_single_ipi(cpu);
 }
 
-static inline void arch_send_call_function_ipi(cpumask_t mask)
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
+static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	smp_ops.send_call_func_ipi(&mask);
+	smp_ops.send_call_func_ipi(mask);
 }
 
 void cpu_disable_common(void);
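
Because cpu_sibling_mask()/cpu_core_mask() still return struct cpumask *, iteration code is unaffected by the cpumask_t to cpumask_var_t switch. A usage sketch (hypothetical caller, not from the patch):

    /* Sketch: walking a CPU's thread siblings via the accessor. */
    int sibling;
    for_each_cpu(sibling, cpu_sibling_mask(cpu))
            pr_info("CPU%d shares a core with CPU%d\n", sibling, cpu);
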
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index df9d5f78385e..8820a73ae090 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -94,6 +94,7 @@ struct thread_info {
 #define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
 #define TIF_DEBUGCTLMSR		25	/* uses thread_struct.debugctlmsr */
 #define TIF_DS_AREA_MSR		26	/* uses thread_struct.ds_area_msr */
+#define TIF_SYSCALL_FTRACE	27	/* for ftrace syscall instrumentation */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -115,15 +116,17 @@ struct thread_info {
 #define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
 #define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
 #define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
+#define _TIF_SYSCALL_FTRACE	(1 << TIF_SYSCALL_FTRACE)
 
 /* work to do in syscall_trace_enter() */
 #define _TIF_WORK_SYSCALL_ENTRY	\
-	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | \
+	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_FTRACE |	\
 	 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP)
 
 /* work to do in syscall_trace_leave() */
 #define _TIF_WORK_SYSCALL_EXIT	\
-	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)
+	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP |	\
+	 _TIF_SYSCALL_FTRACE)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK							\
@@ -132,7 +135,7 @@ struct thread_info {
 	  _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU))
 
 /* work to do on any return to user space */
-#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
+#define _TIF_ALLWORK_MASK ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_FTRACE)
 
 /* Only used for 64 bit */
 #define _TIF_DO_NOTIFY_MASK \
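
The arithmetic behind the _TIF_ALLWORK_MASK change (worked detail, not part of the patch):

    /* TIF_SYSCALL_FTRACE = 27, so:
     *   _TIF_SYSCALL_FTRACE = 1 << 27 = 0x08000000
     * The 0x0000FFFF low word only covers TIF bits 0..15, so bit 27 has
     * to be ORed in explicitly for the flag to be noticed on the
     * return-to-userspace path. */
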
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 744299c0b774..892b119dba6f 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -44,9 +44,6 @@
 
 #ifdef CONFIG_X86_32
 
-/* Mappings between node number and cpus on that node. */
-extern cpumask_t node_to_cpumask_map[];
-
 /* Mappings between logical cpu number and node number */
 extern int cpu_to_node_map[];
 
@@ -57,30 +54,8 @@ static inline int cpu_to_node(int cpu)
 }
 #define early_cpu_to_node(cpu)	cpu_to_node(cpu)
 
-/* Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used.  The
- * cpumask_of_node function should be used whenever possible.
- */
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return node_to_cpumask_map[node];
-}
-
-/* Returns a bitmask of CPUs on Node 'node'. */
-static inline const struct cpumask *cpumask_of_node(int node)
-{
-	return &node_to_cpumask_map[node];
-}
-
-static inline void setup_node_to_cpumask_map(void) { }
-
 #else /* CONFIG_X86_64 */
 
-/* Mappings between node number and cpus on that node. */
-extern cpumask_t *node_to_cpumask_map;
-
 /* Mappings between logical cpu number and node number */
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 
@@ -91,8 +66,6 @@ DECLARE_PER_CPU(int, node_number);
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);
 extern int early_cpu_to_node(int cpu);
-extern const cpumask_t *cpumask_of_node(int node);
-extern cpumask_t node_to_cpumask(int node);
 
 #else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
 
@@ -108,42 +81,32 @@ static inline int early_cpu_to_node(int cpu)
 	return early_per_cpu(x86_cpu_to_node_map, cpu);
 }
 
-/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
-static inline const cpumask_t *cpumask_of_node(int node)
-{
-	return &node_to_cpumask_map[node];
-}
+#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
+
+#endif /* CONFIG_X86_64 */
 
-/* Returns a bitmask of CPUs on Node 'node'. */
-static inline cpumask_t node_to_cpumask(int node)
+/* Mappings between node number and cpus on that node. */
+extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+extern const struct cpumask *cpumask_of_node(int node);
+#else
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
 {
 	return node_to_cpumask_map[node];
 }
-
-#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
+#endif
 
 extern void setup_node_to_cpumask_map(void);
 
 /*
- * Replace default node_to_cpumask_ptr with optimized version
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node)		\
-		const cpumask_t *v = cpumask_of_node(node)
-
-#define node_to_cpumask_ptr_next(v, node)	\
-			   v = cpumask_of_node(node)
-
-#endif /* CONFIG_X86_64 */
-
-/*
  * Returns the number of the node containing Node 'node'.  This
  * architecture is flat, so it is a pretty simple function!
  */
 #define parent_node(node) (node)
 
 #define pcibus_to_node(bus) __pcibus_to_node(bus)
-#define pcibus_to_cpumask(bus) __pcibus_to_cpumask(bus)
 
 #ifdef CONFIG_X86_32
 extern unsigned long node_start_pfn[];
@@ -209,40 +172,24 @@ static inline int early_cpu_to_node(int cpu)
 	return 0;
 }
 
-static inline const cpumask_t *cpumask_of_node(int node)
-{
-	return &cpu_online_map;
-}
-static inline cpumask_t node_to_cpumask(int node)
+static inline const struct cpumask *cpumask_of_node(int node)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 
 static inline void setup_node_to_cpumask_map(void) { }
 
-/*
- * Replace default node_to_cpumask_ptr with optimized version
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node)		\
-		const cpumask_t *v = cpumask_of_node(node)
-
-#define node_to_cpumask_ptr_next(v, node)	\
-			   v = cpumask_of_node(node)
 #endif
 
 #include <asm-generic/topology.h>
 
-extern cpumask_t cpu_coregroup_map(int cpu);
 extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
-#define topology_core_siblings(cpu)		(per_cpu(cpu_core_map, cpu))
-#define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
-#define topology_core_cpumask(cpu)		(&per_cpu(cpu_core_map, cpu))
-#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))
 
 /* indicates that pointers to the topology cpumask_t maps are valid */
 #define arch_provides_topology_pointers		yes
@@ -256,7 +203,7 @@
 void set_pci_bus_resources_arch_default(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
-#define mc_capable()	(cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
+#define mc_capable()	(cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids)
 #define smt_capable()			(smp_num_siblings > 1)
 #endif
 
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 1a918dde46b5..018a0a400799 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -124,7 +124,8 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
 
 /* VIRT <-> MACHINE conversion */
 #define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_mfn(v)		(pfn_to_mfn(PFN_DOWN(__pa(v))))
+#define virt_to_pfn(v)		(PFN_DOWN(__pa(v)))
+#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
 #define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
 static inline unsigned long pte_mfn(pte_t pte)
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 08e9a1ac07a9..727acc152344 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -7,6 +7,7 @@
 
 #define XSTATE_FP	0x1
 #define XSTATE_SSE	0x2
+#define XSTATE_YMM	0x4
 
 #define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)
 
@@ -15,7 +16,7 @@
 /*
  * These are the features that the OS can handle currently.
  */
-#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE)
+#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
 
 #ifdef CONFIG_X86_64
 #define REX_PREFIX	"0x48, "
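
With the new bit, XCNTXT_MASK becomes 0x7 (FP | SSE | YMM). A sketch of how saved YMM state can be detected via the xsave header added in processor.h above (illustrative helper, not from the patch):

    /* Sketch: does this XSAVE area hold valid YMM (AVX high-half) state? */
    static inline int xsave_has_ymm(const struct xsave_struct *xs)
    {
            return (xs->xsave_hdr.xstate_bv & XSTATE_YMM) != 0;
    }
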
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index c611ad64137f..145cce75cda7 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -66,7 +66,8 @@ obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
 obj-y				+= apic/
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)		+= relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_$(BITS).o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index a18eb7ce2236..723989d7f802 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -230,6 +230,35 @@ static void __cpuinit acpi_register_lapic(int id, u8 enabled)
 }
 
 static int __init
+acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
+{
+	struct acpi_madt_local_x2apic *processor = NULL;
+
+	processor = (struct acpi_madt_local_x2apic *)header;
+
+	if (BAD_MADT_ENTRY(processor, end))
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+#ifdef CONFIG_X86_X2APIC
+	/*
+	 * We need to register disabled CPU as well to permit
+	 * counting disabled CPUs. This allows us to size
+	 * cpus_possible_map more accurately, to permit
+	 * to not preallocating memory for all NR_CPUS
+	 * when we use CPU hotplug.
+	 */
+	acpi_register_lapic(processor->local_apic_id,	/* APIC ID */
+			    processor->lapic_flags & ACPI_MADT_ENABLED);
+#else
+	printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
+#endif
+
+	return 0;
+}
+
+static int __init
 acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
 {
 	struct acpi_madt_local_apic *processor = NULL;
@@ -289,6 +318,25 @@ acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
 }
 
 static int __init
+acpi_parse_x2apic_nmi(struct acpi_subtable_header *header,
+		      const unsigned long end)
+{
+	struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL;
+
+	x2apic_nmi = (struct acpi_madt_local_x2apic_nmi *)header;
+
+	if (BAD_MADT_ENTRY(x2apic_nmi, end))
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(header);
+
+	if (x2apic_nmi->lint != 1)
+		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+
+	return 0;
+}
+
+static int __init
 acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
 {
 	struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
@@ -793,6 +841,7 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
 static int __init acpi_parse_madt_lapic_entries(void)
 {
 	int count;
+	int x2count = 0;
 
 	if (!cpu_has_apic)
 		return -ENODEV;
@@ -816,22 +865,28 @@ static int __init acpi_parse_madt_lapic_entries(void)
 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
 				      acpi_parse_sapic, MAX_APICS);
 
-	if (!count)
+	if (!count) {
+		x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
+						acpi_parse_x2apic, MAX_APICS);
 		count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
 					      acpi_parse_lapic, MAX_APICS);
-	if (!count) {
+	}
+	if (!count && !x2count) {
 		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
 		/* TBD: Cleanup to allow fallback to MPS */
 		return -ENODEV;
-	} else if (count < 0) {
+	} else if (count < 0 || x2count < 0) {
 		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
 		/* TBD: Cleanup to allow fallback to MPS */
 		return count;
 	}
 
+	x2count =
+	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC_NMI,
+				  acpi_parse_x2apic_nmi, 0);
 	count =
 	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
-	if (count < 0) {
+	if (count < 0 || x2count < 0) {
 		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
 		/* TBD: Cleanup to allow fallback to MPS */
 		return count;
@@ -1470,7 +1525,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
 
 /*
  * If your system is blacklisted here, but you find that acpi=force
- * works for you, please contact acpi-devel@sourceforge.net
+ * works for you, please contact linux-acpi@vger.kernel.org
  */
 static struct dmi_system_id __initdata acpi_dmi_table[] = {
 /*
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 4c80f1557433..f57658702571 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -5,6 +5,7 @@
 #include <linux/kprobes.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/memory.h>
 #include <asm/alternative.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
@@ -12,7 +13,9 @@
 #include <asm/nmi.h>
 #include <asm/vsyscall.h>
 #include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
 #include <asm/io.h>
+#include <asm/fixmap.h>
 
 #define MAX_PATCH_LEN (255-1)
 
@@ -226,6 +229,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 {
 	u8 **ptr;
 
+	mutex_lock(&text_mutex);
 	for (ptr = start; ptr < end; ptr++) {
 		if (*ptr < text)
 			continue;
@@ -234,6 +238,7 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 		/* turn DS segment override prefix into lock prefix */
 		text_poke(*ptr, ((unsigned char []){0xf0}), 1);
 	};
+	mutex_unlock(&text_mutex);
 }
 
 static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
@@ -243,6 +248,7 @@ static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 	if (noreplace_smp)
 		return;
 
+	mutex_lock(&text_mutex);
 	for (ptr = start; ptr < end; ptr++) {
 		if (*ptr < text)
 			continue;
@@ -251,6 +257,7 @@ static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 		/* turn lock prefix into DS segment override prefix */
 		text_poke(*ptr, ((unsigned char []){0x3E}), 1);
 	};
+	mutex_unlock(&text_mutex);
 }
 
 struct smp_alt_module {
@@ -500,15 +507,16 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
+ *
+ * Note: Must be called under text_mutex.
 */
 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 {
+	unsigned long flags;
 	char *vaddr;
-	int nr_pages = 2;
 	struct page *pages[2];
 	int i;
 
-	might_sleep();
 	if (!core_kernel_text((unsigned long)addr)) {
 		pages[0] = vmalloc_to_page(addr);
 		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -518,18 +526,21 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 		pages[1] = virt_to_page(addr + PAGE_SIZE);
 	}
 	BUG_ON(!pages[0]);
-	if (!pages[1])
-		nr_pages = 1;
-	vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
-	BUG_ON(!vaddr);
-	local_irq_disable();
+	local_irq_save(flags);
+	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
+	if (pages[1])
+		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
+	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
 	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
-	local_irq_enable();
-	vunmap(vaddr);
+	clear_fixmap(FIX_TEXT_POKE0);
+	if (pages[1])
+		clear_fixmap(FIX_TEXT_POKE1);
+	local_flush_tlb();
 	sync_core();
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
 	   that causes hangs on some VIA CPUs. */
 	for (i = 0; i < len; i++)
 		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
+	local_irq_restore(flags);
 	return addr;
 }
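
text_poke() now patches through the two FIX_TEXT_POKE* fixmap slots with interrupts disabled and no longer sleeps; serialization against concurrent pokers is the caller's job via text_mutex (from <linux/memory.h>, included above). A hypothetical caller sketch, not from the patch:

    /* Sketch: callers take text_mutex themselves, as the SMP-alternatives
       paths above now do around their text_poke() loops. */
    static void patch_one_byte(void *addr, u8 byte)
    {
            mutex_lock(&text_mutex);
            text_poke(addr, &byte, 1);
            mutex_unlock(&text_mutex);
    }
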
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index c5962fe3796f..a97db99dad52 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1928,6 +1928,12 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 	return paddr;
 }
 
+static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
+				    unsigned long cap)
+{
+	return 0;
+}
+
 static struct iommu_ops amd_iommu_ops = {
 	.domain_init = amd_iommu_domain_init,
 	.domain_destroy = amd_iommu_domain_destroy,
@@ -1936,5 +1942,6 @@ static struct iommu_ops amd_iommu_ops = {
 	.map = amd_iommu_map_range,
 	.unmap = amd_iommu_unmap_range,
 	.iova_to_phys = amd_iommu_iova_to_phys,
+	.domain_has_cap = amd_iommu_domain_has_cap,
 };
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 85eb8e100818..f2870920f246 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -431,6 +431,12 @@ static void __cpuinit setup_APIC_timer(void) | |||
431 | { | 431 | { |
432 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); | 432 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); |
433 | 433 | ||
434 | if (cpu_has(¤t_cpu_data, X86_FEATURE_ARAT)) { | ||
435 | lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; | ||
436 | /* Make LAPIC timer preferrable over percpu HPET */ | ||
437 | lapic_clockevent.rating = 150; | ||
438 | } | ||
439 | |||
434 | memcpy(levt, &lapic_clockevent, sizeof(*levt)); | 440 | memcpy(levt, &lapic_clockevent, sizeof(*levt)); |
435 | levt->cpumask = cpumask_of(smp_processor_id()); | 441 | levt->cpumask = cpumask_of(smp_processor_id()); |
436 | 442 | ||
@@ -1304,6 +1310,7 @@ void __init enable_IR_x2apic(void) | |||
1304 | #ifdef CONFIG_INTR_REMAP | 1310 | #ifdef CONFIG_INTR_REMAP |
1305 | int ret; | 1311 | int ret; |
1306 | unsigned long flags; | 1312 | unsigned long flags; |
1313 | struct IO_APIC_route_entry **ioapic_entries = NULL; | ||
1307 | 1314 | ||
1308 | if (!cpu_has_x2apic) | 1315 | if (!cpu_has_x2apic) |
1309 | return; | 1316 | return; |
@@ -1334,17 +1341,23 @@ void __init enable_IR_x2apic(void) | |||
1334 | return; | 1341 | return; |
1335 | } | 1342 | } |
1336 | 1343 | ||
1337 | ret = save_IO_APIC_setup(); | 1344 | ioapic_entries = alloc_ioapic_entries(); |
1345 | if (!ioapic_entries) { | ||
1346 | pr_info("Allocate ioapic_entries failed: %d\n", ret); | ||
1347 | goto end; | ||
1348 | } | ||
1349 | |||
1350 | ret = save_IO_APIC_setup(ioapic_entries); | ||
1338 | if (ret) { | 1351 | if (ret) { |
1339 | pr_info("Saving IO-APIC state failed: %d\n", ret); | 1352 | pr_info("Saving IO-APIC state failed: %d\n", ret); |
1340 | goto end; | 1353 | goto end; |
1341 | } | 1354 | } |
1342 | 1355 | ||
1343 | local_irq_save(flags); | 1356 | local_irq_save(flags); |
1344 | mask_IO_APIC_setup(); | 1357 | mask_IO_APIC_setup(ioapic_entries); |
1345 | mask_8259A(); | 1358 | mask_8259A(); |
1346 | 1359 | ||
1347 | ret = enable_intr_remapping(1); | 1360 | ret = enable_intr_remapping(EIM_32BIT_APIC_ID); |
1348 | 1361 | ||
1349 | if (ret && x2apic_preenabled) { | 1362 | if (ret && x2apic_preenabled) { |
1350 | local_irq_restore(flags); | 1363 | local_irq_restore(flags); |
@@ -1364,9 +1377,9 @@ end_restore: | |||
1364 | /* | 1377 | /* |
1365 | * IR enabling failed | 1378 | * IR enabling failed |
1366 | */ | 1379 | */ |
1367 | restore_IO_APIC_setup(); | 1380 | restore_IO_APIC_setup(ioapic_entries); |
1368 | else | 1381 | else |
1369 | reinit_intr_remapped_IO_APIC(x2apic_preenabled); | 1382 | reinit_intr_remapped_IO_APIC(x2apic_preenabled, ioapic_entries); |
1370 | 1383 | ||
1371 | unmask_8259A(); | 1384 | unmask_8259A(); |
1372 | local_irq_restore(flags); | 1385 | local_irq_restore(flags); |
@@ -1379,6 +1392,8 @@ end: | |||
1379 | pr_info("Enabled Interrupt-remapping\n"); | 1392 | pr_info("Enabled Interrupt-remapping\n"); |
1380 | } else | 1393 | } else |
1381 | pr_err("Failed to enable Interrupt-remapping and x2apic\n"); | 1394 | pr_err("Failed to enable Interrupt-remapping and x2apic\n"); |
1395 | if (ioapic_entries) | ||
1396 | free_ioapic_entries(ioapic_entries); | ||
1382 | #else | 1397 | #else |
1383 | if (!cpu_has_x2apic) | 1398 | if (!cpu_has_x2apic) |
1384 | return; | 1399 | return; |
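Pulling the hunks together, the enable path now follows an explicit save/mask/try/rollback sequence over caller-owned RTE buffers. A condensed sketch (error handling trimmed; all helpers are the ones introduced by this patch):

    ioapic_entries = alloc_ioapic_entries();       /* one buffer per IO-APIC */
    save_IO_APIC_setup(ioapic_entries);            /* snapshot the RTEs      */
    local_irq_save(flags);
    mask_IO_APIC_setup(ioapic_entries);            /* mask every live entry  */
    mask_8259A();
    if (enable_intr_remapping(EIM_32BIT_APIC_ID))  /* IR failed to come up   */
            restore_IO_APIC_setup(ioapic_entries); /* roll back verbatim     */
    else
            reinit_intr_remapped_IO_APIC(x2apic_preenabled, ioapic_entries);
    unmask_8259A();
    local_irq_restore(flags);
    free_ioapic_entries(ioapic_entries);           /* caller frees the copy  */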
@@ -1954,6 +1969,10 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state) | |||
1954 | 1969 | ||
1955 | local_irq_save(flags); | 1970 | local_irq_save(flags); |
1956 | disable_local_APIC(); | 1971 | disable_local_APIC(); |
1972 | #ifdef CONFIG_INTR_REMAP | ||
1973 | if (intr_remapping_enabled) | ||
1974 | disable_intr_remapping(); | ||
1975 | #endif | ||
1957 | local_irq_restore(flags); | 1976 | local_irq_restore(flags); |
1958 | return 0; | 1977 | return 0; |
1959 | } | 1978 | } |
@@ -1964,15 +1983,41 @@ static int lapic_resume(struct sys_device *dev) | |||
1964 | unsigned long flags; | 1983 | unsigned long flags; |
1965 | int maxlvt; | 1984 | int maxlvt; |
1966 | 1985 | ||
1986 | #ifdef CONFIG_INTR_REMAP | ||
1987 | int ret; | ||
1988 | struct IO_APIC_route_entry **ioapic_entries = NULL; | ||
1989 | |||
1967 | if (!apic_pm_state.active) | 1990 | if (!apic_pm_state.active) |
1968 | return 0; | 1991 | return 0; |
1969 | 1992 | ||
1970 | maxlvt = lapic_get_maxlvt(); | ||
1971 | |||
1972 | local_irq_save(flags); | 1993 | local_irq_save(flags); |
1994 | if (x2apic) { | ||
1995 | ioapic_entries = alloc_ioapic_entries(); | ||
1996 | if (!ioapic_entries) { | ||
1997 | WARN(1, "Alloc ioapic_entries in lapic resume failed."); | ||
1998 | local_irq_restore(flags); | ||
1999 | return -ENOMEM; | ||
1999 | } | ||
2000 | |||
2001 | ret = save_IO_APIC_setup(ioapic_entries); | ||
2002 | if (ret) { | ||
2003 | WARN(1, "Saving IO-APIC state failed: %d\n", ret); | ||
2004 | free_ioapic_entries(ioapic_entries); | ||
2005 | local_irq_restore(flags); | ||
2006 | return ret; | ||
2006 | } | ||
2007 | |||
2008 | mask_IO_APIC_setup(ioapic_entries); | ||
2009 | mask_8259A(); | ||
2010 | enable_x2apic(); | ||
2011 | } | ||
2012 | #else | ||
2013 | if (!apic_pm_state.active) | ||
2014 | return 0; | ||
1973 | 2015 | ||
2016 | local_irq_save(flags); | ||
1974 | if (x2apic) | 2017 | if (x2apic) |
1975 | enable_x2apic(); | 2018 | enable_x2apic(); |
2019 | #endif | ||
2020 | |||
1976 | else { | 2021 | else { |
1977 | /* | 2022 | /* |
1978 | * Make sure the APICBASE points to the right address | 2023 | * Make sure the APICBASE points to the right address |
@@ -1986,6 +2031,7 @@ static int lapic_resume(struct sys_device *dev) | |||
1986 | wrmsr(MSR_IA32_APICBASE, l, h); | 2031 | wrmsr(MSR_IA32_APICBASE, l, h); |
1987 | } | 2032 | } |
1988 | 2033 | ||
2034 | maxlvt = lapic_get_maxlvt(); | ||
1989 | apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); | 2035 | apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED); |
1990 | apic_write(APIC_ID, apic_pm_state.apic_id); | 2036 | apic_write(APIC_ID, apic_pm_state.apic_id); |
1991 | apic_write(APIC_DFR, apic_pm_state.apic_dfr); | 2037 | apic_write(APIC_DFR, apic_pm_state.apic_dfr); |
@@ -2009,8 +2055,20 @@ static int lapic_resume(struct sys_device *dev) | |||
2009 | apic_write(APIC_ESR, 0); | 2055 | apic_write(APIC_ESR, 0); |
2010 | apic_read(APIC_ESR); | 2056 | apic_read(APIC_ESR); |
2011 | 2057 | ||
2058 | #ifdef CONFIG_INTR_REMAP | ||
2059 | if (intr_remapping_enabled) | ||
2060 | reenable_intr_remapping(EIM_32BIT_APIC_ID); | ||
2061 | |||
2062 | if (x2apic) { | ||
2063 | unmask_8259A(); | ||
2064 | restore_IO_APIC_setup(ioapic_entries); | ||
2065 | free_ioapic_entries(ioapic_entries); | ||
2066 | } | ||
2067 | #endif | ||
2068 | |||
2012 | local_irq_restore(flags); | 2069 | local_irq_restore(flags); |
2013 | 2070 | ||
2071 | |||
2014 | return 0; | 2072 | return 0; |
2015 | } | 2073 | } |
2016 | 2074 | ||
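On resume the same buffers gate re-entry into x2apic mode: the IO-APIC and 8259 must be masked and interrupt remapping re-enabled before the saved RTEs go back in. A condensed sketch of the ordering (register restore and error paths elided):

    save_IO_APIC_setup(ioapic_entries);     /* snapshot before masking      */
    mask_IO_APIC_setup(ioapic_entries);
    mask_8259A();
    enable_x2apic();                        /* APIC back into x2apic mode   */
    /* ... LAPIC register state restored here ... */
    reenable_intr_remapping(EIM_32BIT_APIC_ID);
    unmask_8259A();
    restore_IO_APIC_setup(ioapic_entries);  /* RTEs last, once IR is live   */
    free_ioapic_entries(ioapic_entries);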
@@ -2048,7 +2106,9 @@ static int __init init_lapic_sysfs(void) | |||
2048 | error = sysdev_register(&device_lapic); | 2106 | error = sysdev_register(&device_lapic); |
2049 | return error; | 2107 | return error; |
2050 | } | 2108 | } |
2051 | device_initcall(init_lapic_sysfs); | 2109 | |
2110 | /* local apic needs to resume before other devices access its registers. */ | ||
2111 | core_initcall(init_lapic_sysfs); | ||
2052 | 2112 | ||
2053 | #else /* CONFIG_PM */ | 2113 | #else /* CONFIG_PM */ |
2054 | 2114 | ||
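Initcall levels run in ascending order during boot, so moving init_lapic_sysfs() from device_initcall() (level 6) to core_initcall() (level 1) registers the lapic sysdev well ahead of ordinary drivers, which is what lets its resume callback run before other devices touch APIC registers. Illustrative only:

    /* include/linux/init.h: a lower initcall level runs earlier at boot. */
    core_initcall(init_lapic_sysfs);    /* level 1: before driver init  */
    device_initcall(a_driver_init);     /* level 6: ordinary drivers    */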
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 0014714ea97b..306e5e88fb6f 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c | |||
@@ -212,7 +212,7 @@ struct apic apic_flat = { | |||
212 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, | 212 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, |
213 | .wait_for_init_deassert = NULL, | 213 | .wait_for_init_deassert = NULL, |
214 | .smp_callin_clear_local_apic = NULL, | 214 | .smp_callin_clear_local_apic = NULL, |
215 | .inquire_remote_apic = NULL, | 215 | .inquire_remote_apic = default_inquire_remote_apic, |
216 | 216 | ||
217 | .read = native_apic_mem_read, | 217 | .read = native_apic_mem_read, |
218 | .write = native_apic_mem_write, | 218 | .write = native_apic_mem_write, |
@@ -362,7 +362,7 @@ struct apic apic_physflat = { | |||
362 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, | 362 | .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, |
363 | .wait_for_init_deassert = NULL, | 363 | .wait_for_init_deassert = NULL, |
364 | .smp_callin_clear_local_apic = NULL, | 364 | .smp_callin_clear_local_apic = NULL, |
365 | .inquire_remote_apic = NULL, | 365 | .inquire_remote_apic = default_inquire_remote_apic, |
366 | 366 | ||
367 | .read = native_apic_mem_read, | 367 | .read = native_apic_mem_read, |
368 | .write = native_apic_mem_write, | 368 | .write = native_apic_mem_write, |
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c index d806ecaa948f..676cdac385c0 100644 --- a/arch/x86/kernel/apic/bigsmp_32.c +++ b/arch/x86/kernel/apic/bigsmp_32.c | |||
@@ -26,12 +26,12 @@ static int bigsmp_apic_id_registered(void) | |||
26 | return 1; | 26 | return 1; |
27 | } | 27 | } |
28 | 28 | ||
29 | static const cpumask_t *bigsmp_target_cpus(void) | 29 | static const struct cpumask *bigsmp_target_cpus(void) |
30 | { | 30 | { |
31 | #ifdef CONFIG_SMP | 31 | #ifdef CONFIG_SMP |
32 | return &cpu_online_map; | 32 | return cpu_online_mask; |
33 | #else | 33 | #else |
34 | return &cpumask_of_cpu(0); | 34 | return cpumask_of(0); |
35 | #endif | 35 | #endif |
36 | } | 36 | } |
37 | 37 | ||
@@ -118,9 +118,9 @@ static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid) | |||
118 | } | 118 | } |
119 | 119 | ||
120 | /* As we are using a single CPU as destination, pick only one CPU here */ | 120 | /* As we are using a single CPU as destination, pick only one CPU here */ |
121 | static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask) | 121 | static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask) |
122 | { | 122 | { |
123 | return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask)); | 123 | return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask)); |
124 | } | 124 | } |
125 | 125 | ||
126 | static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | 126 | static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, |
@@ -188,10 +188,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = { | |||
188 | { } /* NULL entry stops DMI scanning */ | 188 | { } /* NULL entry stops DMI scanning */ |
189 | }; | 189 | }; |
190 | 190 | ||
191 | static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask) | 191 | static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask) |
192 | { | 192 | { |
193 | cpus_clear(*retmask); | 193 | cpumask_clear(retmask); |
194 | cpu_set(cpu, *retmask); | 194 | cpumask_set_cpu(cpu, retmask); |
195 | } | 195 | } |
196 | 196 | ||
197 | static int probe_bigsmp(void) | 197 | static int probe_bigsmp(void) |
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 19588f2770ee..1c11b819f245 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c | |||
@@ -410,7 +410,7 @@ static void es7000_enable_apic_mode(void) | |||
410 | WARN(1, "Command failed, status = %x\n", mip_status); | 410 | WARN(1, "Command failed, status = %x\n", mip_status); |
411 | } | 411 | } |
412 | 412 | ||
413 | static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask) | 413 | static void es7000_vector_allocation_domain(int cpu, struct cpumask *retmask) |
414 | { | 414 | { |
415 | /* Careful. Some cpus do not strictly honor the set of cpus | 415 | /* Careful. Some cpus do not strictly honor the set of cpus |
416 | * specified in the interrupt destination when using lowest | 416 | * specified in the interrupt destination when using lowest |
@@ -420,7 +420,8 @@ static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask) | |||
420 | * deliver interrupts to the wrong hyperthread when only one | 420 | * deliver interrupts to the wrong hyperthread when only one |
421 | * hyperthread was specified in the interrupt destination. | 421 | * hyperthread was specified in the interrupt destination. |
422 | */ | 422 | */ |
423 | *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; | 423 | cpumask_clear(retmask); |
424 | cpumask_bits(retmask)[0] = APIC_ALL_CPUS; | ||
424 | } | 425 | } |
425 | 426 | ||
426 | 427 | ||
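The same conversion repeats in numaq_32.c, probe_32.c and summit_32.c below: the old code assigned a whole cpumask_t compound literal, which breaks once CONFIG_CPUMASK_OFFSTACK makes masks variable-sized bitmaps behind pointers. The replacement only writes through the storage the caller provided:

    /* Sketch of the recurring replacement pattern. */
    static void lowest_prio_domain(int cpu, struct cpumask *retmask)
    {
            cpumask_clear(retmask);         /* zero the full bitmap, any size */
            /* set the first BITS_PER_LONG cpus in one store */
            cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
    }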
@@ -455,14 +456,14 @@ static int es7000_apic_id_registered(void) | |||
455 | return 1; | 456 | return 1; |
456 | } | 457 | } |
457 | 458 | ||
458 | static const cpumask_t *target_cpus_cluster(void) | 459 | static const struct cpumask *target_cpus_cluster(void) |
459 | { | 460 | { |
460 | return &CPU_MASK_ALL; | 461 | return cpu_all_mask; |
461 | } | 462 | } |
462 | 463 | ||
463 | static const cpumask_t *es7000_target_cpus(void) | 464 | static const struct cpumask *es7000_target_cpus(void) |
464 | { | 465 | { |
465 | return &cpumask_of_cpu(smp_processor_id()); | 466 | return cpumask_of(smp_processor_id()); |
466 | } | 467 | } |
467 | 468 | ||
468 | static unsigned long | 469 | static unsigned long |
@@ -517,7 +518,7 @@ static void es7000_setup_apic_routing(void) | |||
517 | "Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", | 518 | "Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", |
518 | (apic_version[apic] == 0x14) ? | 519 | (apic_version[apic] == 0x14) ? |
519 | "Physical Cluster" : "Logical Cluster", | 520 | "Physical Cluster" : "Logical Cluster", |
520 | nr_ioapics, cpus_addr(*es7000_target_cpus())[0]); | 521 | nr_ioapics, cpumask_bits(es7000_target_cpus())[0]); |
521 | } | 522 | } |
522 | 523 | ||
523 | static int es7000_apicid_to_node(int logical_apicid) | 524 | static int es7000_apicid_to_node(int logical_apicid) |
@@ -572,7 +573,7 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid) | |||
572 | return 1; | 573 | return 1; |
573 | } | 574 | } |
574 | 575 | ||
575 | static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) | 576 | static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask) |
576 | { | 577 | { |
577 | unsigned int round = 0; | 578 | unsigned int round = 0; |
578 | int cpu, uninitialized_var(apicid); | 579 | int cpu, uninitialized_var(apicid); |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 1bb5c6cee3eb..a2789e42e162 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -851,63 +851,74 @@ __setup("pirq=", ioapic_pirq_setup); | |||
851 | #endif /* CONFIG_X86_32 */ | 851 | #endif /* CONFIG_X86_32 */ |
852 | 852 | ||
853 | #ifdef CONFIG_INTR_REMAP | 853 | #ifdef CONFIG_INTR_REMAP |
854 | /* I/O APIC RTE contents at the OS boot up */ | 854 | struct IO_APIC_route_entry **alloc_ioapic_entries(void) |
855 | static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS]; | 855 | { |
856 | int apic; | ||
857 | struct IO_APIC_route_entry **ioapic_entries; | ||
858 | |||
859 | ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, | ||
860 | GFP_ATOMIC); | ||
861 | if (!ioapic_entries) | ||
862 | return NULL; | ||
863 | |||
864 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
865 | ioapic_entries[apic] = | ||
866 | kzalloc(sizeof(struct IO_APIC_route_entry) * | ||
867 | nr_ioapic_registers[apic], GFP_ATOMIC); | ||
868 | if (!ioapic_entries[apic]) | ||
869 | goto nomem; | ||
870 | } | ||
871 | |||
872 | return ioapic_entries; | ||
873 | |||
874 | nomem: | ||
875 | while (--apic >= 0) | ||
876 | kfree(ioapic_entries[apic]); | ||
877 | kfree(ioapic_entries); | ||
878 | |||
879 | return NULL; | ||
880 | } | ||
856 | 881 | ||
857 | /* | 882 | /* |
858 | * Saves all the IO-APIC RTEs | 883 | * Saves all the IO-APIC RTEs |
859 | */ | 884 | */ |
860 | int save_IO_APIC_setup(void) | 885 | int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) |
861 | { | 886 | { |
862 | union IO_APIC_reg_01 reg_01; | ||
863 | unsigned long flags; | ||
864 | int apic, pin; | 887 | int apic, pin; |
865 | 888 | ||
866 | /* | 889 | if (!ioapic_entries) |
867 | * The number of IO-APIC IRQ registers (== #pins): | 890 | return -ENOMEM; |
868 | */ | ||
869 | for (apic = 0; apic < nr_ioapics; apic++) { | ||
870 | spin_lock_irqsave(&ioapic_lock, flags); | ||
871 | reg_01.raw = io_apic_read(apic, 1); | ||
872 | spin_unlock_irqrestore(&ioapic_lock, flags); | ||
873 | nr_ioapic_registers[apic] = reg_01.bits.entries+1; | ||
874 | } | ||
875 | 891 | ||
876 | for (apic = 0; apic < nr_ioapics; apic++) { | 892 | for (apic = 0; apic < nr_ioapics; apic++) { |
877 | early_ioapic_entries[apic] = | 893 | if (!ioapic_entries[apic]) |
878 | kzalloc(sizeof(struct IO_APIC_route_entry) * | 894 | return -ENOMEM; |
879 | nr_ioapic_registers[apic], GFP_KERNEL); | ||
880 | if (!early_ioapic_entries[apic]) | ||
881 | goto nomem; | ||
882 | } | ||
883 | 895 | ||
884 | for (apic = 0; apic < nr_ioapics; apic++) | ||
885 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) | 896 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) |
886 | early_ioapic_entries[apic][pin] = | 897 | ioapic_entries[apic][pin] = |
887 | ioapic_read_entry(apic, pin); | 898 | ioapic_read_entry(apic, pin); |
899 | } | ||
888 | 900 | ||
889 | return 0; | 901 | return 0; |
890 | |||
891 | nomem: | ||
892 | while (apic >= 0) | ||
893 | kfree(early_ioapic_entries[apic--]); | ||
894 | memset(early_ioapic_entries, 0, | ||
895 | ARRAY_SIZE(early_ioapic_entries)); | ||
896 | |||
897 | return -ENOMEM; | ||
898 | } | 902 | } |
899 | 903 | ||
900 | void mask_IO_APIC_setup(void) | 904 | /* |
905 | * Mask all IO APIC entries. | ||
906 | */ | ||
907 | void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) | ||
901 | { | 908 | { |
902 | int apic, pin; | 909 | int apic, pin; |
903 | 910 | ||
911 | if (!ioapic_entries) | ||
912 | return; | ||
913 | |||
904 | for (apic = 0; apic < nr_ioapics; apic++) { | 914 | for (apic = 0; apic < nr_ioapics; apic++) { |
905 | if (!early_ioapic_entries[apic]) | 915 | if (!ioapic_entries[apic]) |
906 | break; | 916 | break; |
917 | |||
907 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { | 918 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { |
908 | struct IO_APIC_route_entry entry; | 919 | struct IO_APIC_route_entry entry; |
909 | 920 | ||
910 | entry = early_ioapic_entries[apic][pin]; | 921 | entry = ioapic_entries[apic][pin]; |
911 | if (!entry.mask) { | 922 | if (!entry.mask) { |
912 | entry.mask = 1; | 923 | entry.mask = 1; |
913 | ioapic_write_entry(apic, pin, entry); | 924 | ioapic_write_entry(apic, pin, entry); |
@@ -916,22 +927,30 @@ void mask_IO_APIC_setup(void) | |||
916 | } | 927 | } |
917 | } | 928 | } |
918 | 929 | ||
919 | void restore_IO_APIC_setup(void) | 930 | /* |
931 | * Restore IO APIC entries which were saved in ioapic_entries. | ||
932 | */ | ||
933 | int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) | ||
920 | { | 934 | { |
921 | int apic, pin; | 935 | int apic, pin; |
922 | 936 | ||
937 | if (!ioapic_entries) | ||
938 | return -ENOMEM; | ||
939 | |||
923 | for (apic = 0; apic < nr_ioapics; apic++) { | 940 | for (apic = 0; apic < nr_ioapics; apic++) { |
924 | if (!early_ioapic_entries[apic]) | 941 | if (!ioapic_entries[apic]) |
925 | break; | 942 | return -ENOMEM; |
943 | |||
926 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) | 944 | for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) |
927 | ioapic_write_entry(apic, pin, | 945 | ioapic_write_entry(apic, pin, |
928 | early_ioapic_entries[apic][pin]); | 946 | ioapic_entries[apic][pin]); |
929 | kfree(early_ioapic_entries[apic]); | ||
930 | early_ioapic_entries[apic] = NULL; | ||
931 | } | 947 | } |
948 | return 0; | ||
932 | } | 949 | } |
933 | 950 | ||
934 | void reinit_intr_remapped_IO_APIC(int intr_remapping) | 951 | void reinit_intr_remapped_IO_APIC(int intr_remapping, |
952 | struct IO_APIC_route_entry **ioapic_entries) | ||
953 | |||
935 | { | 954 | { |
936 | /* | 955 | /* |
937 | * for now plain restore of previous settings. | 956 | * for now plain restore of previous settings. |
@@ -940,7 +959,17 @@ void reinit_intr_remapped_IO_APIC(int intr_remapping) | |||
940 | * table entries. for now, do a plain restore, and wait for | 959 | * table entries. for now, do a plain restore, and wait for |
941 | * the setup_IO_APIC_irqs() to do proper initialization. | 960 | * the setup_IO_APIC_irqs() to do proper initialization. |
942 | */ | 961 | */ |
943 | restore_IO_APIC_setup(); | 962 | restore_IO_APIC_setup(ioapic_entries); |
963 | } | ||
964 | |||
965 | void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries) | ||
966 | { | ||
967 | int apic; | ||
968 | |||
969 | for (apic = 0; apic < nr_ioapics; apic++) | ||
970 | kfree(ioapic_entries[apic]); | ||
971 | |||
972 | kfree(ioapic_entries); | ||
944 | } | 973 | } |
945 | #endif | 974 | #endif |
946 | 975 | ||
@@ -2495,7 +2524,6 @@ static void irq_complete_move(struct irq_desc **descp) | |||
2495 | static inline void irq_complete_move(struct irq_desc **descp) {} | 2524 | static inline void irq_complete_move(struct irq_desc **descp) {} |
2496 | #endif | 2525 | #endif |
2497 | 2526 | ||
2498 | #ifdef CONFIG_INTR_REMAP | ||
2499 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | 2527 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) |
2500 | { | 2528 | { |
2501 | int apic, pin; | 2529 | int apic, pin; |
@@ -2529,6 +2557,7 @@ eoi_ioapic_irq(struct irq_desc *desc) | |||
2529 | spin_unlock_irqrestore(&ioapic_lock, flags); | 2557 | spin_unlock_irqrestore(&ioapic_lock, flags); |
2530 | } | 2558 | } |
2531 | 2559 | ||
2560 | #ifdef CONFIG_X86_X2APIC | ||
2532 | static void ack_x2apic_level(unsigned int irq) | 2561 | static void ack_x2apic_level(unsigned int irq) |
2533 | { | 2562 | { |
2534 | struct irq_desc *desc = irq_to_desc(irq); | 2563 | struct irq_desc *desc = irq_to_desc(irq); |
@@ -2540,7 +2569,6 @@ static void ack_x2apic_edge(unsigned int irq) | |||
2540 | { | 2569 | { |
2541 | ack_x2APIC_irq(); | 2570 | ack_x2APIC_irq(); |
2542 | } | 2571 | } |
2543 | |||
2544 | #endif | 2572 | #endif |
2545 | 2573 | ||
2546 | static void ack_apic_edge(unsigned int irq) | 2574 | static void ack_apic_edge(unsigned int irq) |
@@ -2606,6 +2634,9 @@ static void ack_apic_level(unsigned int irq) | |||
2606 | */ | 2634 | */ |
2607 | ack_APIC_irq(); | 2635 | ack_APIC_irq(); |
2608 | 2636 | ||
2637 | if (irq_remapped(irq)) | ||
2638 | eoi_ioapic_irq(desc); | ||
2639 | |||
2609 | /* Now we can move and re-enable the irq */ | 2640 | /* Now we can move and re-enable the irq */ |
2610 | if (unlikely(do_unmask_irq)) { | 2641 | if (unlikely(do_unmask_irq)) { |
2611 | /* Only migrate the irq if the ack has been received. | 2642 | /* Only migrate the irq if the ack has been received. |
@@ -2651,6 +2682,26 @@ static void ack_apic_level(unsigned int irq) | |||
2651 | #endif | 2682 | #endif |
2652 | } | 2683 | } |
2653 | 2684 | ||
2685 | #ifdef CONFIG_INTR_REMAP | ||
2686 | static void ir_ack_apic_edge(unsigned int irq) | ||
2687 | { | ||
2688 | #ifdef CONFIG_X86_X2APIC | ||
2689 | if (x2apic_enabled()) | ||
2690 | return ack_x2apic_edge(irq); | ||
2691 | #endif | ||
2692 | return ack_apic_edge(irq); | ||
2693 | } | ||
2694 | |||
2695 | static void ir_ack_apic_level(unsigned int irq) | ||
2696 | { | ||
2697 | #ifdef CONFIG_X86_X2APIC | ||
2698 | if (x2apic_enabled()) | ||
2699 | return ack_x2apic_level(irq); | ||
2700 | #endif | ||
2701 | return ack_apic_level(irq); | ||
2702 | } | ||
2703 | #endif /* CONFIG_INTR_REMAP */ | ||
2704 | |||
2654 | static struct irq_chip ioapic_chip __read_mostly = { | 2705 | static struct irq_chip ioapic_chip __read_mostly = { |
2655 | .name = "IO-APIC", | 2706 | .name = "IO-APIC", |
2656 | .startup = startup_ioapic_irq, | 2707 | .startup = startup_ioapic_irq, |
@@ -2670,8 +2721,8 @@ static struct irq_chip ir_ioapic_chip __read_mostly = { | |||
2670 | .mask = mask_IO_APIC_irq, | 2721 | .mask = mask_IO_APIC_irq, |
2671 | .unmask = unmask_IO_APIC_irq, | 2722 | .unmask = unmask_IO_APIC_irq, |
2672 | #ifdef CONFIG_INTR_REMAP | 2723 | #ifdef CONFIG_INTR_REMAP |
2673 | .ack = ack_x2apic_edge, | 2724 | .ack = ir_ack_apic_edge, |
2674 | .eoi = ack_x2apic_level, | 2725 | .eoi = ir_ack_apic_level, |
2675 | #ifdef CONFIG_SMP | 2726 | #ifdef CONFIG_SMP |
2676 | .set_affinity = set_ir_ioapic_affinity_irq, | 2727 | .set_affinity = set_ir_ioapic_affinity_irq, |
2677 | #endif | 2728 | #endif |
@@ -3397,7 +3448,7 @@ static struct irq_chip msi_ir_chip = { | |||
3397 | .unmask = unmask_msi_irq, | 3448 | .unmask = unmask_msi_irq, |
3398 | .mask = mask_msi_irq, | 3449 | .mask = mask_msi_irq, |
3399 | #ifdef CONFIG_INTR_REMAP | 3450 | #ifdef CONFIG_INTR_REMAP |
3400 | .ack = ack_x2apic_edge, | 3451 | .ack = ir_ack_apic_edge, |
3401 | #ifdef CONFIG_SMP | 3452 | #ifdef CONFIG_SMP |
3402 | .set_affinity = ir_set_msi_irq_affinity, | 3453 | .set_affinity = ir_set_msi_irq_affinity, |
3403 | #endif | 3454 | #endif |
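With the ack helpers hoisted out of CONFIG_INTR_REMAP, the remapped irq_chips no longer hardwire x2apic acks; ir_ack_apic_edge() and ir_ack_apic_level() pick the right EOI at runtime, so one kernel binary handles interrupt remapping in both xapic and x2apic modes. Sketch of how a chip consumes the wrappers:

    /* Illustrative chip only; the real users are ir_ioapic_chip and
     * msi_ir_chip above. */
    static struct irq_chip some_ir_chip = {
            .name = "IR-EXAMPLE",
    #ifdef CONFIG_INTR_REMAP
            .ack  = ir_ack_apic_edge,       /* dispatches on x2apic_enabled() */
            .eoi  = ir_ack_apic_level,
    #endif
    };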
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index bdfad80c3cf1..d6bd62407152 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c | |||
@@ -39,7 +39,7 @@ | |||
39 | int unknown_nmi_panic; | 39 | int unknown_nmi_panic; |
40 | int nmi_watchdog_enabled; | 40 | int nmi_watchdog_enabled; |
41 | 41 | ||
42 | static cpumask_t backtrace_mask = CPU_MASK_NONE; | 42 | static cpumask_var_t backtrace_mask; |
43 | 43 | ||
44 | /* nmi_active: | 44 | /* nmi_active: |
45 | * >0: the lapic NMI watchdog is active, but can be disabled | 45 | * >0: the lapic NMI watchdog is active, but can be disabled |
@@ -138,6 +138,7 @@ int __init check_nmi_watchdog(void) | |||
138 | if (!prev_nmi_count) | 138 | if (!prev_nmi_count) |
139 | goto error; | 139 | goto error; |
140 | 140 | ||
141 | alloc_cpumask_var(&backtrace_mask, GFP_KERNEL); | ||
141 | printk(KERN_INFO "Testing NMI watchdog ... "); | 142 | printk(KERN_INFO "Testing NMI watchdog ... "); |
142 | 143 | ||
143 | #ifdef CONFIG_SMP | 144 | #ifdef CONFIG_SMP |
@@ -413,14 +414,14 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) | |||
413 | touched = 1; | 414 | touched = 1; |
414 | } | 415 | } |
415 | 416 | ||
416 | if (cpu_isset(cpu, backtrace_mask)) { | 417 | if (cpumask_test_cpu(cpu, backtrace_mask)) { |
417 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ | 418 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ |
418 | 419 | ||
419 | spin_lock(&lock); | 420 | spin_lock(&lock); |
420 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); | 421 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); |
421 | dump_stack(); | 422 | dump_stack(); |
422 | spin_unlock(&lock); | 423 | spin_unlock(&lock); |
423 | cpu_clear(cpu, backtrace_mask); | 424 | cpumask_clear_cpu(cpu, backtrace_mask); |
424 | } | 425 | } |
425 | 426 | ||
426 | /* Could check oops_in_progress here too, but it's safer not to */ | 427 | /* Could check oops_in_progress here too, but it's safer not to */ |
@@ -554,10 +555,10 @@ void __trigger_all_cpu_backtrace(void) | |||
554 | { | 555 | { |
555 | int i; | 556 | int i; |
556 | 557 | ||
557 | backtrace_mask = cpu_online_map; | 558 | cpumask_copy(backtrace_mask, cpu_online_mask); |
558 | /* Wait for up to 10 seconds for all CPUs to do the backtrace */ | 559 | /* Wait for up to 10 seconds for all CPUs to do the backtrace */ |
559 | for (i = 0; i < 10 * 1000; i++) { | 560 | for (i = 0; i < 10 * 1000; i++) { |
560 | if (cpus_empty(backtrace_mask)) | 561 | if (cpumask_empty(backtrace_mask)) |
561 | break; | 562 | break; |
562 | mdelay(1); | 563 | mdelay(1); |
563 | } | 564 | } |
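backtrace_mask is now a cpumask_var_t: with CONFIG_CPUMASK_OFFSTACK it is heap-allocated (note the patch does not check the alloc_cpumask_var() return), otherwise it degenerates to a plain array and allocation is free. The general idiom, as a hedged standalone sketch:

    #include <linux/init.h>
    #include <linux/cpumask.h>

    static cpumask_var_t scratch_mask;

    static int __init scratch_init(void)
    {
            /* can fail only when CONFIG_CPUMASK_OFFSTACK=y */
            if (!alloc_cpumask_var(&scratch_mask, GFP_KERNEL))
                    return -ENOMEM;
            cpumask_copy(scratch_mask, cpu_online_mask);
            cpumask_clear_cpu(0, scratch_mask);
            return 0;
    }

    static void __exit scratch_exit(void)
    {
            free_cpumask_var(scratch_mask);
    }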
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index ba2fc6465534..533e59c6fc82 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c | |||
@@ -334,9 +334,9 @@ static inline void numaq_smp_callin_clear_local_apic(void) | |||
334 | clear_local_APIC(); | 334 | clear_local_APIC(); |
335 | } | 335 | } |
336 | 336 | ||
337 | static inline const cpumask_t *numaq_target_cpus(void) | 337 | static inline const struct cpumask *numaq_target_cpus(void) |
338 | { | 338 | { |
339 | return &CPU_MASK_ALL; | 339 | return cpu_all_mask; |
340 | } | 340 | } |
341 | 341 | ||
342 | static inline unsigned long | 342 | static inline unsigned long |
@@ -427,7 +427,7 @@ static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid) | |||
427 | * We use physical apicids here, not logical, so just return the default | 427 | * We use physical apicids here, not logical, so just return the default |
428 | * physical broadcast to stop people from breaking us | 428 | * physical broadcast to stop people from breaking us |
429 | */ | 429 | */ |
430 | static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask) | 430 | static unsigned int numaq_cpu_mask_to_apicid(const struct cpumask *cpumask) |
431 | { | 431 | { |
432 | return 0x0F; | 432 | return 0x0F; |
433 | } | 433 | } |
@@ -462,7 +462,7 @@ static int probe_numaq(void) | |||
462 | return found_numaq; | 462 | return found_numaq; |
463 | } | 463 | } |
464 | 464 | ||
465 | static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask) | 465 | static void numaq_vector_allocation_domain(int cpu, struct cpumask *retmask) |
466 | { | 466 | { |
467 | /* Careful. Some cpus do not strictly honor the set of cpus | 467 | /* Careful. Some cpus do not strictly honor the set of cpus |
468 | * specified in the interrupt destination when using lowest | 468 | * specified in the interrupt destination when using lowest |
@@ -472,7 +472,8 @@ static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask) | |||
472 | * deliver interrupts to the wrong hyperthread when only one | 472 | * deliver interrupts to the wrong hyperthread when only one |
473 | * hyperthread was specified in the interrupt destination. | 473 | * hyperthread was specified in the interrupt destination. |
474 | */ | 474 | */ |
475 | *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; | 475 | cpumask_clear(retmask); |
476 | cpumask_bits(retmask)[0] = APIC_ALL_CPUS; | ||
476 | } | 477 | } |
477 | 478 | ||
478 | static void numaq_setup_portio_remap(void) | 479 | static void numaq_setup_portio_remap(void) |
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 141c99a1c264..01eda2ac65e4 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c | |||
@@ -83,7 +83,8 @@ static void default_vector_allocation_domain(int cpu, struct cpumask *retmask) | |||
83 | * deliver interrupts to the wrong hyperthread when only one | 83 | * deliver interrupts to the wrong hyperthread when only one |
84 | * hyperthread was specified in the interrupt destination. | 84 | * hyperthread was specified in the interrupt destination. |
85 | */ | 85 | */ |
86 | *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } }; | 86 | cpumask_clear(retmask); |
87 | cpumask_bits(retmask)[0] = APIC_ALL_CPUS; | ||
87 | } | 88 | } |
88 | 89 | ||
89 | /* should be called last. */ | 90 | /* should be called last. */ |
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index aac52fa873ff..9cfe1f415d81 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c | |||
@@ -53,23 +53,19 @@ static unsigned summit_get_apic_id(unsigned long x) | |||
53 | return (x >> 24) & 0xFF; | 53 | return (x >> 24) & 0xFF; |
54 | } | 54 | } |
55 | 55 | ||
56 | static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector) | 56 | static inline void summit_send_IPI_mask(const struct cpumask *mask, int vector) |
57 | { | 57 | { |
58 | default_send_IPI_mask_sequence_logical(mask, vector); | 58 | default_send_IPI_mask_sequence_logical(mask, vector); |
59 | } | 59 | } |
60 | 60 | ||
61 | static void summit_send_IPI_allbutself(int vector) | 61 | static void summit_send_IPI_allbutself(int vector) |
62 | { | 62 | { |
63 | cpumask_t mask = cpu_online_map; | 63 | default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector); |
64 | cpu_clear(smp_processor_id(), mask); | ||
65 | |||
66 | if (!cpus_empty(mask)) | ||
67 | summit_send_IPI_mask(&mask, vector); | ||
68 | } | 64 | } |
69 | 65 | ||
70 | static void summit_send_IPI_all(int vector) | 66 | static void summit_send_IPI_all(int vector) |
71 | { | 67 | { |
72 | summit_send_IPI_mask(&cpu_online_map, vector); | 68 | summit_send_IPI_mask(cpu_online_mask, vector); |
73 | } | 69 | } |
74 | 70 | ||
75 | #include <asm/tsc.h> | 71 | #include <asm/tsc.h> |
@@ -186,13 +182,13 @@ static inline int is_WPEG(struct rio_detail *rio){ | |||
186 | 182 | ||
187 | #define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) | 183 | #define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) |
188 | 184 | ||
189 | static const cpumask_t *summit_target_cpus(void) | 185 | static const struct cpumask *summit_target_cpus(void) |
190 | { | 186 | { |
191 | /* CPU_MASK_ALL (0xff) has undefined behaviour with | 187 | /* CPU_MASK_ALL (0xff) has undefined behaviour with |
192 | * dest_LowestPrio mode logical clustered apic interrupt routing | 188 | * dest_LowestPrio mode logical clustered apic interrupt routing |
193 | * Just start on cpu 0. IRQ balancing will spread load | 189 | * Just start on cpu 0. IRQ balancing will spread load |
194 | */ | 190 | */ |
195 | return &cpumask_of_cpu(0); | 191 | return cpumask_of(0); |
196 | } | 192 | } |
197 | 193 | ||
198 | static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid) | 194 | static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid) |
@@ -289,7 +285,7 @@ static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid) | |||
289 | return 1; | 285 | return 1; |
290 | } | 286 | } |
291 | 287 | ||
292 | static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) | 288 | static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask) |
293 | { | 289 | { |
294 | unsigned int round = 0; | 290 | unsigned int round = 0; |
295 | int cpu, apicid = 0; | 291 | int cpu, apicid = 0; |
@@ -346,7 +342,7 @@ static int probe_summit(void) | |||
346 | return 0; | 342 | return 0; |
347 | } | 343 | } |
348 | 344 | ||
349 | static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask) | 345 | static void summit_vector_allocation_domain(int cpu, struct cpumask *retmask) |
350 | { | 346 | { |
351 | /* Careful. Some cpus do not strictly honor the set of cpus | 347 | /* Careful. Some cpus do not strictly honor the set of cpus |
352 | * specified in the interrupt destination when using lowest | 348 | * specified in the interrupt destination when using lowest |
@@ -356,7 +352,8 @@ static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask) | |||
356 | * deliver interrupts to the wrong hyperthread when only one | 352 | * deliver interrupts to the wrong hyperthread when only one |
357 | * hyperthread was specified in the interrupt destination. | 353 | * hyperthread was specified in the interrupt destination. |
358 | */ | 354 | */ |
359 | *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; | 355 | cpumask_clear(retmask); |
356 | cpumask_bits(retmask)[0] = APIC_ALL_CPUS; | ||
360 | } | 357 | } |
361 | 358 | ||
362 | #ifdef CONFIG_X86_SUMMIT_NUMA | 359 | #ifdef CONFIG_X86_SUMMIT_NUMA |
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index ac7783a67432..49e0939bac42 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -466,7 +466,7 @@ static const lookup_t error_table[] = { | |||
466 | * @err: APM BIOS return code | 466 | * @err: APM BIOS return code |
467 | * | 467 | * |
468 | * Write a meaningful log entry to the kernel log in the event of | 468 | * Write a meaningful log entry to the kernel log in the event of |
469 | * an APM error. | 469 | * an APM error. Note that this also handles (negative) kernel errors. |
470 | */ | 470 | */ |
471 | 471 | ||
472 | static void apm_error(char *str, int err) | 472 | static void apm_error(char *str, int err) |
@@ -478,43 +478,14 @@ static void apm_error(char *str, int err) | |||
478 | break; | 478 | break; |
479 | if (i < ERROR_COUNT) | 479 | if (i < ERROR_COUNT) |
480 | printk(KERN_NOTICE "apm: %s: %s\n", str, error_table[i].msg); | 480 | printk(KERN_NOTICE "apm: %s: %s\n", str, error_table[i].msg); |
481 | else if (err < 0) | ||
482 | printk(KERN_NOTICE "apm: %s: linux error code %i\n", str, err); | ||
481 | else | 483 | else |
482 | printk(KERN_NOTICE "apm: %s: unknown error code %#2.2x\n", | 484 | printk(KERN_NOTICE "apm: %s: unknown error code %#2.2x\n", |
483 | str, err); | 485 | str, err); |
484 | } | 486 | } |
485 | 487 | ||
486 | /* | 488 | /* |
487 | * Lock APM functionality to physical CPU 0 | ||
488 | */ | ||
489 | |||
490 | #ifdef CONFIG_SMP | ||
491 | |||
492 | static cpumask_t apm_save_cpus(void) | ||
493 | { | ||
494 | cpumask_t x = current->cpus_allowed; | ||
495 | /* Some bioses don't like being called from CPU != 0 */ | ||
496 | set_cpus_allowed(current, cpumask_of_cpu(0)); | ||
497 | BUG_ON(smp_processor_id() != 0); | ||
498 | return x; | ||
499 | } | ||
500 | |||
501 | static inline void apm_restore_cpus(cpumask_t mask) | ||
502 | { | ||
503 | set_cpus_allowed(current, mask); | ||
504 | } | ||
505 | |||
506 | #else | ||
507 | |||
508 | /* | ||
509 | * No CPU lockdown needed on a uniprocessor | ||
510 | */ | ||
511 | |||
512 | #define apm_save_cpus() (current->cpus_allowed) | ||
513 | #define apm_restore_cpus(x) (void)(x) | ||
514 | |||
515 | #endif | ||
516 | |||
517 | /* | ||
518 | * These are the actual BIOS calls. Depending on APM_ZERO_SEGS and | 489 | * These are the actual BIOS calls. Depending on APM_ZERO_SEGS and |
519 | * apm_info.allow_ints, we are being really paranoid here! Not only | 490 | * apm_info.allow_ints, we are being really paranoid here! Not only |
520 | * are interrupts disabled, but all the segment registers (except SS) | 491 | * are interrupts disabled, but all the segment registers (except SS) |
@@ -568,16 +539,23 @@ static inline void apm_irq_restore(unsigned long flags) | |||
568 | # define APM_DO_RESTORE_SEGS | 539 | # define APM_DO_RESTORE_SEGS |
569 | #endif | 540 | #endif |
570 | 541 | ||
542 | struct apm_bios_call { | ||
543 | u32 func; | ||
544 | /* In and out */ | ||
545 | u32 ebx; | ||
546 | u32 ecx; | ||
547 | /* Out only */ | ||
548 | u32 eax; | ||
549 | u32 edx; | ||
550 | u32 esi; | ||
551 | |||
552 | /* Error: -ENOMEM, or bits 8-15 of eax */ | ||
553 | int err; | ||
554 | }; | ||
555 | |||
571 | /** | 556 | /** |
572 | * apm_bios_call - Make an APM BIOS 32bit call | 557 | * __apm_bios_call - Make an APM BIOS 32bit call |
573 | * @func: APM function to execute | 558 | * @_call: pointer to struct apm_bios_call. |
574 | * @ebx_in: EBX register for call entry | ||
575 | * @ecx_in: ECX register for call entry | ||
576 | * @eax: EAX register return | ||
577 | * @ebx: EBX register return | ||
578 | * @ecx: ECX register return | ||
579 | * @edx: EDX register return | ||
580 | * @esi: ESI register return | ||
581 | * | 559 | * |
582 | * Make an APM call using the 32bit protected mode interface. The | 560 | * Make an APM call using the 32bit protected mode interface. The |
583 | * caller is responsible for knowing if APM BIOS is configured and | 561 | * caller is responsible for knowing if APM BIOS is configured and |
@@ -586,80 +564,142 @@ static inline void apm_irq_restore(unsigned long flags) | |||
586 | * flag is loaded into AL. If there is an error, then the error | 564 | * flag is loaded into AL. If there is an error, then the error |
587 | * code is returned in AH (bits 8-15 of eax) and this function | 565 | * code is returned in AH (bits 8-15 of eax) and this function |
588 | * returns non-zero. | 566 | * returns non-zero. |
567 | * | ||
568 | * Note: this makes the call on the current CPU. | ||
589 | */ | 569 | */ |
590 | 570 | static long __apm_bios_call(void *_call) | |
591 | static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in, | ||
592 | u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, u32 *esi) | ||
593 | { | 571 | { |
594 | APM_DECL_SEGS | 572 | APM_DECL_SEGS |
595 | unsigned long flags; | 573 | unsigned long flags; |
596 | cpumask_t cpus; | ||
597 | int cpu; | 574 | int cpu; |
598 | struct desc_struct save_desc_40; | 575 | struct desc_struct save_desc_40; |
599 | struct desc_struct *gdt; | 576 | struct desc_struct *gdt; |
600 | 577 | struct apm_bios_call *call = _call; | |
601 | cpus = apm_save_cpus(); | ||
602 | 578 | ||
603 | cpu = get_cpu(); | 579 | cpu = get_cpu(); |
580 | BUG_ON(cpu != 0); | ||
604 | gdt = get_cpu_gdt_table(cpu); | 581 | gdt = get_cpu_gdt_table(cpu); |
605 | save_desc_40 = gdt[0x40 / 8]; | 582 | save_desc_40 = gdt[0x40 / 8]; |
606 | gdt[0x40 / 8] = bad_bios_desc; | 583 | gdt[0x40 / 8] = bad_bios_desc; |
607 | 584 | ||
608 | apm_irq_save(flags); | 585 | apm_irq_save(flags); |
609 | APM_DO_SAVE_SEGS; | 586 | APM_DO_SAVE_SEGS; |
610 | apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi); | 587 | apm_bios_call_asm(call->func, call->ebx, call->ecx, |
588 | &call->eax, &call->ebx, &call->ecx, &call->edx, | ||
589 | &call->esi); | ||
611 | APM_DO_RESTORE_SEGS; | 590 | APM_DO_RESTORE_SEGS; |
612 | apm_irq_restore(flags); | 591 | apm_irq_restore(flags); |
613 | gdt[0x40 / 8] = save_desc_40; | 592 | gdt[0x40 / 8] = save_desc_40; |
614 | put_cpu(); | 593 | put_cpu(); |
615 | apm_restore_cpus(cpus); | ||
616 | 594 | ||
617 | return *eax & 0xff; | 595 | return call->eax & 0xff; |
596 | } | ||
597 | |||
598 | /* Run __apm_bios_call or __apm_bios_call_simple on CPU 0 */ | ||
599 | static int on_cpu0(long (*fn)(void *), struct apm_bios_call *call) | ||
600 | { | ||
601 | int ret; | ||
602 | |||
603 | /* Don't bother with work_on_cpu in the common case, so we don't | ||
604 | * have to worry about OOM or overhead. */ | ||
605 | if (get_cpu() == 0) { | ||
606 | ret = fn(call); | ||
607 | put_cpu(); | ||
608 | } else { | ||
609 | put_cpu(); | ||
610 | ret = work_on_cpu(0, fn, call); | ||
611 | } | ||
612 | |||
613 | /* work_on_cpu can fail with -ENOMEM */ | ||
614 | if (ret < 0) | ||
615 | call->err = ret; | ||
616 | else | ||
617 | call->err = (call->eax >> 8) & 0xff; | ||
618 | |||
619 | return ret; | ||
618 | } | 620 | } |
619 | 621 | ||
620 | /** | 622 | /** |
621 | * apm_bios_call_simple - make a simple APM BIOS 32bit call | 623 | * apm_bios_call - Make an APM BIOS 32bit call (on CPU 0) |
622 | * @func: APM function to invoke | 624 | * @call: the apm_bios_call registers. |
623 | * @ebx_in: EBX register value for BIOS call | 625 | * |
624 | * @ecx_in: ECX register value for BIOS call | 626 | * If there is an error, it is returned in @call.err. |
625 | * @eax: EAX register on return from the BIOS call | 627 | */ |
628 | static int apm_bios_call(struct apm_bios_call *call) | ||
629 | { | ||
630 | return on_cpu0(__apm_bios_call, call); | ||
631 | } | ||
632 | |||
633 | /** | ||
634 | * __apm_bios_call_simple - Make an APM BIOS 32bit call (on CPU 0) | ||
635 | * @_call: pointer to struct apm_bios_call. | ||
626 | * | 636 | * |
627 | * Make a BIOS call that returns one value only, or just status. | 637 | * Make a BIOS call that returns one value only, or just status. |
628 | * If there is an error, then the error code is returned in AH | 638 | * If there is an error, then the error code is returned in AH |
629 | * (bits 8-15 of eax) and this function returns non-zero. This is | 639 | * (bits 8-15 of eax) and this function returns non-zero (it can |
630 | * used for simpler BIOS operations. This call may hold interrupts | 640 | * also return -ENOMEM). This is used for simpler BIOS operations. |
631 | * off for a long time on some laptops. | 641 | * This call may hold interrupts off for a long time on some laptops. |
642 | * | ||
643 | * Note: this makes the call on the current CPU. | ||
632 | */ | 644 | */ |
633 | 645 | static long __apm_bios_call_simple(void *_call) | |
634 | static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax) | ||
635 | { | 646 | { |
636 | u8 error; | 647 | u8 error; |
637 | APM_DECL_SEGS | 648 | APM_DECL_SEGS |
638 | unsigned long flags; | 649 | unsigned long flags; |
639 | cpumask_t cpus; | ||
640 | int cpu; | 650 | int cpu; |
641 | struct desc_struct save_desc_40; | 651 | struct desc_struct save_desc_40; |
642 | struct desc_struct *gdt; | 652 | struct desc_struct *gdt; |
643 | 653 | struct apm_bios_call *call = _call; | |
644 | cpus = apm_save_cpus(); | ||
645 | 654 | ||
646 | cpu = get_cpu(); | 655 | cpu = get_cpu(); |
656 | BUG_ON(cpu != 0); | ||
647 | gdt = get_cpu_gdt_table(cpu); | 657 | gdt = get_cpu_gdt_table(cpu); |
648 | save_desc_40 = gdt[0x40 / 8]; | 658 | save_desc_40 = gdt[0x40 / 8]; |
649 | gdt[0x40 / 8] = bad_bios_desc; | 659 | gdt[0x40 / 8] = bad_bios_desc; |
650 | 660 | ||
651 | apm_irq_save(flags); | 661 | apm_irq_save(flags); |
652 | APM_DO_SAVE_SEGS; | 662 | APM_DO_SAVE_SEGS; |
653 | error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax); | 663 | error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx, |
664 | &call->eax); | ||
654 | APM_DO_RESTORE_SEGS; | 665 | APM_DO_RESTORE_SEGS; |
655 | apm_irq_restore(flags); | 666 | apm_irq_restore(flags); |
656 | gdt[0x40 / 8] = save_desc_40; | 667 | gdt[0x40 / 8] = save_desc_40; |
657 | put_cpu(); | 668 | put_cpu(); |
658 | apm_restore_cpus(cpus); | ||
659 | return error; | 669 | return error; |
660 | } | 670 | } |
661 | 671 | ||
662 | /** | 672 | /** |
673 | * apm_bios_call_simple - make a simple APM BIOS 32bit call | ||
674 | * @func: APM function to invoke | ||
675 | * @ebx_in: EBX register value for BIOS call | ||
676 | * @ecx_in: ECX register value for BIOS call | ||
677 | * @eax: EAX register on return from the BIOS call | ||
678 | * @err: APM error code (bits 8-15 of eax), or a negative kernel error | ||
679 | * | ||
680 | * Make a BIOS call that returns one value only, or just status. | ||
681 | * If there is an error, then the error code is returned in @err | ||
682 | * and this function returns non-zero. This is used for simpler | ||
683 | * BIOS operations. This call may hold interrupts off for a long | ||
684 | * time on some laptops. | ||
685 | */ | ||
686 | static int apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax, | ||
687 | int *err) | ||
688 | { | ||
689 | struct apm_bios_call call; | ||
690 | int ret; | ||
691 | |||
692 | call.func = func; | ||
693 | call.ebx = ebx_in; | ||
694 | call.ecx = ecx_in; | ||
695 | |||
696 | ret = on_cpu0(__apm_bios_call_simple, &call); | ||
697 | *eax = call.eax; | ||
698 | *err = call.err; | ||
699 | return ret; | ||
700 | } | ||
701 | |||
702 | /** | ||
663 | * apm_driver_version - APM driver version | 703 | * apm_driver_version - APM driver version |
664 | * @val: loaded with the APM version on return | 704 | * @val: loaded with the APM version on return |
665 | * | 705 | * |
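Every call site below follows the same mechanical conversion: instead of decoding bits 8-15 of eax by hand, callers read the error that on_cpu0() already translated (an APM code, or -ENOMEM when work_on_cpu() fails). Before and after, schematically:

    /* before: caller decodes the APM error out of eax */
    if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax))
            return (eax >> 8) & 0xff;

    /* after: err carries the APM code or a negative kernel error */
    if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax, &err))
            return err;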
@@ -678,9 +718,10 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax) | |||
678 | static int apm_driver_version(u_short *val) | 718 | static int apm_driver_version(u_short *val) |
679 | { | 719 | { |
680 | u32 eax; | 720 | u32 eax; |
721 | int err; | ||
681 | 722 | ||
682 | if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax)) | 723 | if (apm_bios_call_simple(APM_FUNC_VERSION, 0, *val, &eax, &err)) |
683 | return (eax >> 8) & 0xff; | 724 | return err; |
684 | *val = eax; | 725 | *val = eax; |
685 | return APM_SUCCESS; | 726 | return APM_SUCCESS; |
686 | } | 727 | } |
@@ -701,22 +742,21 @@ static int apm_driver_version(u_short *val) | |||
701 | * that APM 1.2 is in use. If no messages are pending the value 0x80 | 742 | * that APM 1.2 is in use. If no messages are pending the value 0x80 |
702 | * is returned (No power management events pending). | 743 | * is returned (No power management events pending). |
703 | */ | 744 | */ |
704 | |||
705 | static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info) | 745 | static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info) |
706 | { | 746 | { |
707 | u32 eax; | 747 | struct apm_bios_call call; |
708 | u32 ebx; | ||
709 | u32 ecx; | ||
710 | u32 dummy; | ||
711 | 748 | ||
712 | if (apm_bios_call(APM_FUNC_GET_EVENT, 0, 0, &eax, &ebx, &ecx, | 749 | call.func = APM_FUNC_GET_EVENT; |
713 | &dummy, &dummy)) | 750 | call.ebx = call.ecx = 0; |
714 | return (eax >> 8) & 0xff; | 751 | |
715 | *event = ebx; | 752 | if (apm_bios_call(&call)) |
753 | return call.err; | ||
754 | |||
755 | *event = call.ebx; | ||
716 | if (apm_info.connection_version < 0x0102) | 756 | if (apm_info.connection_version < 0x0102) |
717 | *info = ~0; /* indicate info not valid */ | 757 | *info = ~0; /* indicate info not valid */ |
718 | else | 758 | else |
719 | *info = ecx; | 759 | *info = call.ecx; |
720 | return APM_SUCCESS; | 760 | return APM_SUCCESS; |
721 | } | 761 | } |
722 | 762 | ||
@@ -737,9 +777,10 @@ static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info) | |||
737 | static int set_power_state(u_short what, u_short state) | 777 | static int set_power_state(u_short what, u_short state) |
738 | { | 778 | { |
739 | u32 eax; | 779 | u32 eax; |
780 | int err; | ||
740 | 781 | ||
741 | if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax)) | 782 | if (apm_bios_call_simple(APM_FUNC_SET_STATE, what, state, &eax, &err)) |
742 | return (eax >> 8) & 0xff; | 783 | return err; |
743 | return APM_SUCCESS; | 784 | return APM_SUCCESS; |
744 | } | 785 | } |
745 | 786 | ||
@@ -770,6 +811,7 @@ static int apm_do_idle(void) | |||
770 | u8 ret = 0; | 811 | u8 ret = 0; |
771 | int idled = 0; | 812 | int idled = 0; |
772 | int polling; | 813 | int polling; |
814 | int err; | ||
773 | 815 | ||
774 | polling = !!(current_thread_info()->status & TS_POLLING); | 816 | polling = !!(current_thread_info()->status & TS_POLLING); |
775 | if (polling) { | 817 | if (polling) { |
@@ -782,7 +824,7 @@ static int apm_do_idle(void) | |||
782 | } | 824 | } |
783 | if (!need_resched()) { | 825 | if (!need_resched()) { |
784 | idled = 1; | 826 | idled = 1; |
785 | ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax); | 827 | ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax, &err); |
786 | } | 828 | } |
787 | if (polling) | 829 | if (polling) |
788 | current_thread_info()->status |= TS_POLLING; | 830 | current_thread_info()->status |= TS_POLLING; |
@@ -797,8 +839,7 @@ static int apm_do_idle(void) | |||
797 | * Only report the failure the first 5 times. | 839 | * Only report the failure the first 5 times. |
798 | */ | 840 | */ |
799 | if (++t < 5) { | 841 | if (++t < 5) { |
800 | printk(KERN_DEBUG "apm_do_idle failed (%d)\n", | 842 | printk(KERN_DEBUG "apm_do_idle failed (%d)\n", err); |
801 | (eax >> 8) & 0xff); | ||
802 | t = jiffies; | 843 | t = jiffies; |
803 | } | 844 | } |
804 | return -1; | 845 | return -1; |
@@ -816,9 +857,10 @@ static int apm_do_idle(void) | |||
816 | static void apm_do_busy(void) | 857 | static void apm_do_busy(void) |
817 | { | 858 | { |
818 | u32 dummy; | 859 | u32 dummy; |
860 | int err; | ||
819 | 861 | ||
820 | if (clock_slowed || ALWAYS_CALL_BUSY) { | 862 | if (clock_slowed || ALWAYS_CALL_BUSY) { |
821 | (void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy); | 863 | (void)apm_bios_call_simple(APM_FUNC_BUSY, 0, 0, &dummy, &err); |
822 | clock_slowed = 0; | 864 | clock_slowed = 0; |
823 | } | 865 | } |
824 | } | 866 | } |
@@ -937,7 +979,7 @@ static void apm_power_off(void) | |||
937 | 979 | ||
938 | /* Some bioses don't like being called from CPU != 0 */ | 980 | /* Some bioses don't like being called from CPU != 0 */ |
939 | if (apm_info.realmode_power_off) { | 981 | if (apm_info.realmode_power_off) { |
940 | (void)apm_save_cpus(); | 982 | set_cpus_allowed_ptr(current, cpumask_of(0)); |
941 | machine_real_restart(po_bios_call, sizeof(po_bios_call)); | 983 | machine_real_restart(po_bios_call, sizeof(po_bios_call)); |
942 | } else { | 984 | } else { |
943 | (void)set_system_power_state(APM_STATE_OFF); | 985 | (void)set_system_power_state(APM_STATE_OFF); |
@@ -956,12 +998,13 @@ static void apm_power_off(void) | |||
956 | static int apm_enable_power_management(int enable) | 998 | static int apm_enable_power_management(int enable) |
957 | { | 999 | { |
958 | u32 eax; | 1000 | u32 eax; |
1001 | int err; | ||
959 | 1002 | ||
960 | if ((enable == 0) && (apm_info.bios.flags & APM_BIOS_DISENGAGED)) | 1003 | if ((enable == 0) && (apm_info.bios.flags & APM_BIOS_DISENGAGED)) |
961 | return APM_NOT_ENGAGED; | 1004 | return APM_NOT_ENGAGED; |
962 | if (apm_bios_call_simple(APM_FUNC_ENABLE_PM, APM_DEVICE_BALL, | 1005 | if (apm_bios_call_simple(APM_FUNC_ENABLE_PM, APM_DEVICE_BALL, |
963 | enable, &eax)) | 1006 | enable, &eax, &err)) |
964 | return (eax >> 8) & 0xff; | 1007 | return err; |
965 | if (enable) | 1008 | if (enable) |
966 | apm_info.bios.flags &= ~APM_BIOS_DISABLED; | 1009 | apm_info.bios.flags &= ~APM_BIOS_DISABLED; |
967 | else | 1010 | else |
@@ -986,24 +1029,23 @@ static int apm_enable_power_management(int enable) | |||
986 | 1029 | ||
987 | static int apm_get_power_status(u_short *status, u_short *bat, u_short *life) | 1030 | static int apm_get_power_status(u_short *status, u_short *bat, u_short *life) |
988 | { | 1031 | { |
989 | u32 eax; | 1032 | struct apm_bios_call call; |
990 | u32 ebx; | 1033 | |
991 | u32 ecx; | 1034 | call.func = APM_FUNC_GET_STATUS; |
992 | u32 edx; | 1035 | call.ebx = APM_DEVICE_ALL; |
993 | u32 dummy; | 1036 | call.ecx = 0; |
994 | 1037 | ||
995 | if (apm_info.get_power_status_broken) | 1038 | if (apm_info.get_power_status_broken) |
996 | return APM_32_UNSUPPORTED; | 1039 | return APM_32_UNSUPPORTED; |
997 | if (apm_bios_call(APM_FUNC_GET_STATUS, APM_DEVICE_ALL, 0, | 1040 | if (apm_bios_call(&call)) |
998 | &eax, &ebx, &ecx, &edx, &dummy)) | 1041 | return call.err; |
999 | return (eax >> 8) & 0xff; | 1042 | *status = call.ebx; |
1000 | *status = ebx; | 1043 | *bat = call.ecx; |
1001 | *bat = ecx; | ||
1002 | if (apm_info.get_power_status_swabinminutes) { | 1044 | if (apm_info.get_power_status_swabinminutes) { |
1003 | *life = swab16((u16)edx); | 1045 | *life = swab16((u16)call.edx); |
1004 | *life |= 0x8000; | 1046 | *life |= 0x8000; |
1005 | } else | 1047 | } else |
1006 | *life = edx; | 1048 | *life = call.edx; |
1007 | return APM_SUCCESS; | 1049 | return APM_SUCCESS; |
1008 | } | 1050 | } |
1009 | 1051 | ||
@@ -1048,12 +1090,14 @@ static int apm_get_battery_status(u_short which, u_short *status, | |||
1048 | static int apm_engage_power_management(u_short device, int enable) | 1090 | static int apm_engage_power_management(u_short device, int enable) |
1049 | { | 1091 | { |
1050 | u32 eax; | 1092 | u32 eax; |
1093 | int err; | ||
1051 | 1094 | ||
1052 | if ((enable == 0) && (device == APM_DEVICE_ALL) | 1095 | if ((enable == 0) && (device == APM_DEVICE_ALL) |
1053 | && (apm_info.bios.flags & APM_BIOS_DISABLED)) | 1096 | && (apm_info.bios.flags & APM_BIOS_DISABLED)) |
1054 | return APM_DISABLED; | 1097 | return APM_DISABLED; |
1055 | if (apm_bios_call_simple(APM_FUNC_ENGAGE_PM, device, enable, &eax)) | 1098 | if (apm_bios_call_simple(APM_FUNC_ENGAGE_PM, device, enable, |
1056 | return (eax >> 8) & 0xff; | 1099 | &eax, &err)) |
1100 | return err; | ||
1057 | if (device == APM_DEVICE_ALL) { | 1101 | if (device == APM_DEVICE_ALL) { |
1058 | if (enable) | 1102 | if (enable) |
1059 | apm_info.bios.flags &= ~APM_BIOS_DISENGAGED; | 1103 | apm_info.bios.flags &= ~APM_BIOS_DISENGAGED; |
@@ -1689,16 +1733,14 @@ static int apm(void *unused) | |||
1689 | char *power_stat; | 1733 | char *power_stat; |
1690 | char *bat_stat; | 1734 | char *bat_stat; |
1691 | 1735 | ||
1692 | #ifdef CONFIG_SMP | ||
1693 | /* 2002/08/01 - WT | 1736 | /* 2002/08/01 - WT |
1694 | * This is to avoid random crashes at boot time during initialization | 1737 | * This is to avoid random crashes at boot time during initialization |
1695 | * on SMP systems in case of "apm=power-off" mode. Seen on ASUS A7M266D. | 1738 | * on SMP systems in case of "apm=power-off" mode. Seen on ASUS A7M266D. |
1696 | * Some bioses don't like being called from CPU != 0. | 1739 | * Some bioses don't like being called from CPU != 0. |
1697 | * Method suggested by Ingo Molnar. | 1740 | * Method suggested by Ingo Molnar. |
1698 | */ | 1741 | */ |
1699 | set_cpus_allowed(current, cpumask_of_cpu(0)); | 1742 | set_cpus_allowed_ptr(current, cpumask_of(0)); |
1700 | BUG_ON(smp_processor_id() != 0); | 1743 | BUG_ON(smp_processor_id() != 0); |
1701 | #endif | ||
1702 | 1744 | ||
1703 | if (apm_info.connection_version == 0) { | 1745 | if (apm_info.connection_version == 0) { |
1704 | apm_info.connection_version = apm_info.bios.version; | 1746 | apm_info.connection_version = apm_info.bios.version; |
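The net effect of the apm_32.c rework: instead of temporarily rebinding the current task's allowed-cpus mask around every BIOS call (which clobbered userspace affinity and raced with hotplug), the call is shipped to CPU 0 as a work item. The reusable shape of the pattern, as a hedged sketch generalized from on_cpu0() above:

    /* Run fn(arg) on CPU 0: directly when already there, otherwise via
     * work_on_cpu(), which queues on CPU 0's workqueue and waits. */
    static long run_on_cpu0(long (*fn)(void *), void *arg)
    {
            long ret;

            if (get_cpu() == 0) {           /* fast path, stay put */
                    ret = fn(arg);
                    put_cpu();
            } else {
                    put_cpu();
                    ret = work_on_cpu(0, fn, arg);  /* may return -ENOMEM */
            }
            return ret;
    }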
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c index 8220ae69849d..c965e5212714 100644 --- a/arch/x86/kernel/cpu/addon_cpuid_features.c +++ b/arch/x86/kernel/cpu/addon_cpuid_features.c | |||
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) | |||
31 | 31 | ||
32 | static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { | 32 | static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { |
33 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 }, | 33 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 }, |
34 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006 }, | ||
34 | { 0, 0, 0, 0 } | 35 | { 0, 0, 0, 0 } |
35 | }; | 36 | }; |
36 | 37 | ||
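The scattered-feature table entry says ARAT lives in CPUID leaf 0x6, EAX bit 2. A self-contained userspace check of the same bit, using GCC's <cpuid.h>:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(6, &eax, &ebx, &ecx, &edx))
                    return 1;               /* leaf 6 not available */
            printf("ARAT: %s\n", (eax & (1u << 2)) ? "yes" : "no");
            return 0;
    }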
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index e2962cc1e27b..c4f667896c28 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -41,8 +41,6 @@ | |||
41 | 41 | ||
42 | #include "cpu.h" | 42 | #include "cpu.h" |
43 | 43 | ||
44 | #ifdef CONFIG_X86_64 | ||
45 | |||
46 | /* all of these masks are initialized in setup_cpu_local_masks() */ | 44 | /* all of these masks are initialized in setup_cpu_local_masks() */ |
47 | cpumask_var_t cpu_initialized_mask; | 45 | cpumask_var_t cpu_initialized_mask; |
48 | cpumask_var_t cpu_callout_mask; | 46 | cpumask_var_t cpu_callout_mask; |
@@ -60,16 +58,6 @@ void __init setup_cpu_local_masks(void) | |||
60 | alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); | 58 | alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); |
61 | } | 59 | } |
62 | 60 | ||
63 | #else /* CONFIG_X86_32 */ | ||
64 | |||
65 | cpumask_t cpu_sibling_setup_map; | ||
66 | cpumask_t cpu_callout_map; | ||
67 | cpumask_t cpu_initialized; | ||
68 | cpumask_t cpu_callin_map; | ||
69 | |||
70 | #endif /* CONFIG_X86_32 */ | ||
71 | |||
72 | |||
73 | static const struct cpu_dev *this_cpu __cpuinitdata; | 61 | static const struct cpu_dev *this_cpu __cpuinitdata; |
74 | 62 | ||
75 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | 63 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { |
@@ -859,6 +847,7 @@ static void vgetcpu_set_mode(void) | |||
859 | void __init identify_boot_cpu(void) | 847 | void __init identify_boot_cpu(void) |
860 | { | 848 | { |
861 | identify_cpu(&boot_cpu_data); | 849 | identify_cpu(&boot_cpu_data); |
850 | init_c1e_mask(); | ||
862 | #ifdef CONFIG_X86_32 | 851 | #ifdef CONFIG_X86_32 |
863 | sysenter_setup(); | 852 | sysenter_setup(); |
864 | enable_sep_cpu(); | 853 | enable_sep_cpu(); |
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c index 46e29ab96c6a..46e29ab96c6a 100755..100644 --- a/arch/x86/kernel/cpu/cpu_debug.c +++ b/arch/x86/kernel/cpu/cpu_debug.c | |||
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 23da96e57b17..ecdb682ab516 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include <linux/cpufreq.h> | 33 | #include <linux/cpufreq.h> |
34 | #include <linux/compiler.h> | 34 | #include <linux/compiler.h> |
35 | #include <linux/dmi.h> | 35 | #include <linux/dmi.h> |
36 | #include <linux/ftrace.h> | 36 | #include <trace/power.h> |
37 | 37 | ||
38 | #include <linux/acpi.h> | 38 | #include <linux/acpi.h> |
39 | #include <linux/io.h> | 39 | #include <linux/io.h> |
@@ -68,10 +68,13 @@ struct acpi_cpufreq_data { | |||
68 | unsigned int max_freq; | 68 | unsigned int max_freq; |
69 | unsigned int resume; | 69 | unsigned int resume; |
70 | unsigned int cpu_feature; | 70 | unsigned int cpu_feature; |
71 | u64 saved_aperf, saved_mperf; | ||
71 | }; | 72 | }; |
72 | 73 | ||
73 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data); | 74 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data); |
74 | 75 | ||
76 | DEFINE_TRACE(power_mark); | ||
77 | |||
75 | /* acpi_perf_data is a pointer to percpu data. */ | 78 | /* acpi_perf_data is a pointer to percpu data. */ |
76 | static struct acpi_processor_performance *acpi_perf_data; | 79 | static struct acpi_processor_performance *acpi_perf_data; |
77 | 80 | ||
@@ -150,7 +153,8 @@ struct drv_cmd { | |||
150 | u32 val; | 153 | u32 val; |
151 | }; | 154 | }; |
152 | 155 | ||
153 | static long do_drv_read(void *_cmd) | 156 | /* Called via smp_call_function_single(), on the target CPU */ |
157 | static void do_drv_read(void *_cmd) | ||
154 | { | 158 | { |
155 | struct drv_cmd *cmd = _cmd; | 159 | struct drv_cmd *cmd = _cmd; |
156 | u32 h; | 160 | u32 h; |
@@ -167,10 +171,10 @@ static long do_drv_read(void *_cmd) | |||
167 | default: | 171 | default: |
168 | break; | 172 | break; |
169 | } | 173 | } |
170 | return 0; | ||
171 | } | 174 | } |
172 | 175 | ||
173 | static long do_drv_write(void *_cmd) | 176 | /* Called via smp_call_function_many(), on the target CPUs */ |
177 | static void do_drv_write(void *_cmd) | ||
174 | { | 178 | { |
175 | struct drv_cmd *cmd = _cmd; | 179 | struct drv_cmd *cmd = _cmd; |
176 | u32 lo, hi; | 180 | u32 lo, hi; |
@@ -189,23 +193,24 @@ static long do_drv_write(void *_cmd) | |||
189 | default: | 193 | default: |
190 | break; | 194 | break; |
191 | } | 195 | } |
192 | return 0; | ||
193 | } | 196 | } |
194 | 197 | ||
195 | static void drv_read(struct drv_cmd *cmd) | 198 | static void drv_read(struct drv_cmd *cmd) |
196 | { | 199 | { |
197 | cmd->val = 0; | 200 | cmd->val = 0; |
198 | 201 | ||
199 | work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd); | 202 | smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1); |
200 | } | 203 | } |
201 | 204 | ||
202 | static void drv_write(struct drv_cmd *cmd) | 205 | static void drv_write(struct drv_cmd *cmd) |
203 | { | 206 | { |
204 | unsigned int i; | 207 | int this_cpu; |
205 | 208 | ||
206 | for_each_cpu(i, cmd->mask) { | 209 | this_cpu = get_cpu(); |
207 | work_on_cpu(i, do_drv_write, cmd); | 210 | if (cpumask_test_cpu(this_cpu, cmd->mask)) |
208 | } | 211 | do_drv_write(cmd); |
212 | smp_call_function_many(cmd->mask, do_drv_write, cmd, 1); | ||
213 | put_cpu(); | ||
209 | } | 214 | } |
210 | 215 | ||
211 | static u32 get_cur_val(const struct cpumask *mask) | 216 | static u32 get_cur_val(const struct cpumask *mask) |
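smp_call_function_many() never runs the callback on the calling CPU, so the rewritten drv_write() handles the local CPU explicitly before fanning out. A sketch of that pattern in isolation, with run_on_mask() as a hypothetical name:

    /* get_cpu() pins us so "this CPU" cannot change underneath us;
     * the local callback runs first, then the cross-call (wait == 1)
     * covers the remaining CPUs in the mask. */
    static void run_on_mask(const struct cpumask *mask,
                            void (*fn)(void *), void *arg)
    {
            int this_cpu = get_cpu();

            if (cpumask_test_cpu(this_cpu, mask))
                    fn(arg);
            smp_call_function_many(mask, fn, arg, 1);
            put_cpu();
    }

This is also why do_drv_read() and do_drv_write() lose their return values above: smp_call_function callbacks are void, unlike the long-returning work_on_cpu() workers they replace.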
@@ -239,28 +244,23 @@ static u32 get_cur_val(const struct cpumask *mask) | |||
239 | return cmd.val; | 244 | return cmd.val; |
240 | } | 245 | } |
241 | 246 | ||
242 | struct perf_cur { | 247 | struct perf_pair { |
243 | union { | 248 | union { |
244 | struct { | 249 | struct { |
245 | u32 lo; | 250 | u32 lo; |
246 | u32 hi; | 251 | u32 hi; |
247 | } split; | 252 | } split; |
248 | u64 whole; | 253 | u64 whole; |
249 | } aperf_cur, mperf_cur; | 254 | } aperf, mperf; |
250 | }; | 255 | }; |
251 | 256 | ||
252 | 257 | /* Called via smp_call_function_single(), on the target CPU */ | |
253 | static long read_measured_perf_ctrs(void *_cur) | 258 | static void read_measured_perf_ctrs(void *_cur) |
254 | { | 259 | { |
255 | struct perf_cur *cur = _cur; | 260 | struct perf_pair *cur = _cur; |
256 | |||
257 | rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi); | ||
258 | rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi); | ||
259 | 261 | ||
260 | wrmsr(MSR_IA32_APERF, 0, 0); | 262 | rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi); |
261 | wrmsr(MSR_IA32_MPERF, 0, 0); | 263 | rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi); |
262 | |||
263 | return 0; | ||
264 | } | 264 | } |
265 | 265 | ||
266 | /* | 266 | /* |
@@ -279,52 +279,57 @@ static long read_measured_perf_ctrs(void *_cur) | |||
279 | static unsigned int get_measured_perf(struct cpufreq_policy *policy, | 279 | static unsigned int get_measured_perf(struct cpufreq_policy *policy, |
280 | unsigned int cpu) | 280 | unsigned int cpu) |
281 | { | 281 | { |
282 | struct perf_cur cur; | 282 | struct perf_pair readin, cur; |
283 | unsigned int perf_percent; | 283 | unsigned int perf_percent; |
284 | unsigned int retval; | 284 | unsigned int retval; |
285 | 285 | ||
286 | if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur)) | 286 | if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1)) |
287 | return 0; | 287 | return 0; |
288 | 288 | ||
289 | cur.aperf.whole = readin.aperf.whole - | ||
290 | per_cpu(drv_data, cpu)->saved_aperf; | ||
291 | cur.mperf.whole = readin.mperf.whole - | ||
292 | per_cpu(drv_data, cpu)->saved_mperf; | ||
293 | per_cpu(drv_data, cpu)->saved_aperf = readin.aperf.whole; | ||
294 | per_cpu(drv_data, cpu)->saved_mperf = readin.mperf.whole; | ||
295 | |||
289 | #ifdef __i386__ | 296 | #ifdef __i386__ |
290 | /* | 297 | /* |
291 | * We dont want to do 64 bit divide with 32 bit kernel | 298 | * We dont want to do 64 bit divide with 32 bit kernel |
292 | * Get an approximate value. Return failure in case we cannot get | 299 | * Get an approximate value. Return failure in case we cannot get |
293 | * an approximate value. | 300 | * an approximate value. |
294 | */ | 301 | */ |
295 | if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) { | 302 | if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) { |
296 | int shift_count; | 303 | int shift_count; |
297 | u32 h; | 304 | u32 h; |
298 | 305 | ||
299 | h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi); | 306 | h = max_t(u32, cur.aperf.split.hi, cur.mperf.split.hi); |
300 | shift_count = fls(h); | 307 | shift_count = fls(h); |
301 | 308 | ||
302 | cur.aperf_cur.whole >>= shift_count; | 309 | cur.aperf.whole >>= shift_count; |
303 | cur.mperf_cur.whole >>= shift_count; | 310 | cur.mperf.whole >>= shift_count; |
304 | } | 311 | } |
305 | 312 | ||
306 | if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) { | 313 | if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) { |
307 | int shift_count = 7; | 314 | int shift_count = 7; |
308 | cur.aperf_cur.split.lo >>= shift_count; | 315 | cur.aperf.split.lo >>= shift_count; |
309 | cur.mperf_cur.split.lo >>= shift_count; | 316 | cur.mperf.split.lo >>= shift_count; |
310 | } | 317 | } |
311 | 318 | ||
312 | if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo) | 319 | if (cur.aperf.split.lo && cur.mperf.split.lo) |
313 | perf_percent = (cur.aperf_cur.split.lo * 100) / | 320 | perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo; |
314 | cur.mperf_cur.split.lo; | ||
315 | else | 321 | else |
316 | perf_percent = 0; | 322 | perf_percent = 0; |
317 | 323 | ||
318 | #else | 324 | #else |
319 | if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) { | 325 | if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) { |
320 | int shift_count = 7; | 326 | int shift_count = 7; |
321 | cur.aperf_cur.whole >>= shift_count; | 327 | cur.aperf.whole >>= shift_count; |
322 | cur.mperf_cur.whole >>= shift_count; | 328 | cur.mperf.whole >>= shift_count; |
323 | } | 329 | } |
324 | 330 | ||
325 | if (cur.aperf_cur.whole && cur.mperf_cur.whole) | 331 | if (cur.aperf.whole && cur.mperf.whole) |
326 | perf_percent = (cur.aperf_cur.whole * 100) / | 332 | perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole; |
327 | cur.mperf_cur.whole; | ||
328 | else | 333 | else |
329 | perf_percent = 0; | 334 | perf_percent = 0; |
330 | 335 | ||
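With the per-CPU saved_aperf/saved_mperf fields the counters are no longer reset with wrmsr on every sample; each reading is differenced against the previous one instead. A sketch of the 64-bit percentage computation, assuming the deltas have already been taken:

    static unsigned int perf_percent(unsigned long long aperf,
                                     unsigned long long mperf)
    {
            /* scale both counters down if aperf * 100 could overflow */
            if (aperf > (unsigned long long)-1 / 100) {
                    aperf >>= 7;
                    mperf >>= 7;
            }
            return (aperf && mperf) ? (aperf * 100) / mperf : 0;
    }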
@@ -680,6 +685,18 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
680 | perf->states[i].transition_latency * 1000; | 685 | perf->states[i].transition_latency * 1000; |
681 | } | 686 | } |
682 | 687 | ||
688 | /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */ | ||
689 | if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && | ||
690 | policy->cpuinfo.transition_latency > 20 * 1000) { | ||
691 | static int print_once; | ||
692 | policy->cpuinfo.transition_latency = 20 * 1000; | ||
693 | if (!print_once) { | ||
694 | print_once = 1; | ||
694 | printk(KERN_INFO "Capping off P-state transition latency" | ||
696 | " at 20 uS\n"); | ||
697 | } | ||
698 | } | ||
699 | |||
683 | data->max_freq = perf->states[0].core_frequency * 1000; | 700 | data->max_freq = perf->states[0].core_frequency * 1000; |
684 | /* table init */ | 701 | /* table init */ |
685 | for (i = 0; i < perf->state_count; i++) { | 702 | for (i = 0; i < perf->state_count; i++) { |
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c index f1c51aea064d..ce2ed3e4aad9 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/timex.h> | 33 | #include <linux/timex.h> |
34 | #include <linux/io.h> | 34 | #include <linux/io.h> |
35 | #include <linux/acpi.h> | 35 | #include <linux/acpi.h> |
36 | #include <linux/kernel.h> | ||
37 | 36 | ||
38 | #include <asm/msr.h> | 37 | #include <asm/msr.h> |
39 | #include <acpi/processor.h> | 38 | #include <acpi/processor.h> |
@@ -305,7 +304,7 @@ retry_loop: | |||
305 | outb(3, 0x22); | 304 | outb(3, 0x22); |
306 | } else if ((pr != NULL) && pr->flags.bm_control) { | 305 | } else if ((pr != NULL) && pr->flags.bm_control) { |
307 | /* Disable bus master arbitration */ | 306 | /* Disable bus master arbitration */ |
308 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); | 307 | acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); |
309 | } | 308 | } |
310 | switch (longhaul_version) { | 309 | switch (longhaul_version) { |
311 | 310 | ||
@@ -328,7 +327,7 @@ retry_loop: | |||
328 | case TYPE_POWERSAVER: | 327 | case TYPE_POWERSAVER: |
329 | if (longhaul_flags & USE_ACPI_C3) { | 328 | if (longhaul_flags & USE_ACPI_C3) { |
330 | /* Don't allow wakeup */ | 329 | /* Don't allow wakeup */ |
331 | acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); | 330 | acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 0); |
332 | do_powersaver(cx->address, mults_index, dir); | 331 | do_powersaver(cx->address, mults_index, dir); |
333 | } else { | 332 | } else { |
334 | do_powersaver(0, mults_index, dir); | 333 | do_powersaver(0, mults_index, dir); |
@@ -341,7 +340,7 @@ retry_loop: | |||
341 | outb(0, 0x22); | 340 | outb(0, 0x22); |
342 | } else if ((pr != NULL) && pr->flags.bm_control) { | 341 | } else if ((pr != NULL) && pr->flags.bm_control) { |
343 | /* Enable bus master arbitration */ | 342 | /* Enable bus master arbitration */ |
344 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); | 343 | acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); |
345 | } | 344 | } |
346 | outb(pic2_mask, 0xA1); /* restore mask */ | 345 | outb(pic2_mask, 0xA1); /* restore mask */ |
347 | outb(pic1_mask, 0x21); | 346 | outb(pic1_mask, 0x21); |
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 41ed94915f97..6ac55bd341ae 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | |||
@@ -211,7 +211,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) | |||
211 | unsigned int i; | 211 | unsigned int i; |
212 | 212 | ||
213 | #ifdef CONFIG_SMP | 213 | #ifdef CONFIG_SMP |
214 | cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu)); | 214 | cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); |
215 | #endif | 215 | #endif |
216 | 216 | ||
217 | /* Errata workaround */ | 217 | /* Errata workaround */ |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index a15ac94e0b9b..4709ead2db52 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -54,7 +54,10 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data); | |||
54 | static int cpu_family = CPU_OPTERON; | 54 | static int cpu_family = CPU_OPTERON; |
55 | 55 | ||
56 | #ifndef CONFIG_SMP | 56 | #ifndef CONFIG_SMP |
57 | DEFINE_PER_CPU(cpumask_t, cpu_core_map); | 57 | static inline const struct cpumask *cpu_core_mask(int cpu) |
58 | { | ||
59 | return cpumask_of(0); | ||
60 | } | ||
58 | #endif | 61 | #endif |
59 | 62 | ||
60 | /* Return a frequency in MHz, given an input fid */ | 63 | /* Return a frequency in MHz, given an input fid */ |
@@ -699,7 +702,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, | |||
699 | 702 | ||
700 | dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); | 703 | dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); |
701 | data->powernow_table = powernow_table; | 704 | data->powernow_table = powernow_table; |
702 | if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) | 705 | if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) |
703 | print_basics(data); | 706 | print_basics(data); |
704 | 707 | ||
705 | for (j = 0; j < data->numps; j++) | 708 | for (j = 0; j < data->numps; j++) |
@@ -862,7 +865,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
862 | 865 | ||
863 | /* fill in data */ | 866 | /* fill in data */ |
864 | data->numps = data->acpi_data.state_count; | 867 | data->numps = data->acpi_data.state_count; |
865 | if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) | 868 | if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) |
866 | print_basics(data); | 869 | print_basics(data); |
867 | powernow_k8_acpi_pst_values(data, 0); | 870 | powernow_k8_acpi_pst_values(data, 0); |
868 | 871 | ||
@@ -1300,7 +1303,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1300 | if (cpu_family == CPU_HW_PSTATE) | 1303 | if (cpu_family == CPU_HW_PSTATE) |
1301 | cpumask_copy(pol->cpus, cpumask_of(pol->cpu)); | 1304 | cpumask_copy(pol->cpus, cpumask_of(pol->cpu)); |
1302 | else | 1305 | else |
1303 | cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu)); | 1306 | cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); |
1304 | data->available_cores = pol->cpus; | 1307 | data->available_cores = pol->cpus; |
1305 | 1308 | ||
1306 | if (cpu_family == CPU_HW_PSTATE) | 1309 | if (cpu_family == CPU_HW_PSTATE) |
@@ -1365,7 +1368,7 @@ static unsigned int powernowk8_get(unsigned int cpu) | |||
1365 | unsigned int khz = 0; | 1368 | unsigned int khz = 0; |
1366 | unsigned int first; | 1369 | unsigned int first; |
1367 | 1370 | ||
1368 | first = first_cpu(per_cpu(cpu_core_map, cpu)); | 1371 | first = cpumask_first(cpu_core_mask(cpu)); |
1369 | data = per_cpu(powernow_data, first); | 1372 | data = per_cpu(powernow_data, first); |
1370 | 1373 | ||
1371 | if (!data) | 1374 | if (!data) |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 8bbb11adb315..016c1a4fa3fc 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | |||
@@ -321,7 +321,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
321 | 321 | ||
322 | /* only run on CPU to be set, or on its sibling */ | 322 | /* only run on CPU to be set, or on its sibling */ |
323 | #ifdef CONFIG_SMP | 323 | #ifdef CONFIG_SMP |
324 | cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu)); | 324 | cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); |
325 | #endif | 325 | #endif |
326 | 326 | ||
327 | cpus_allowed = current->cpus_allowed; | 327 | cpus_allowed = current->cpus_allowed; |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index c471eb1a389c..483eda96e102 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -159,7 +159,7 @@ struct _cpuid4_info_regs { | |||
159 | unsigned long can_disable; | 159 | unsigned long can_disable; |
160 | }; | 160 | }; |
161 | 161 | ||
162 | #ifdef CONFIG_PCI | 162 | #if defined(CONFIG_PCI) && defined(CONFIG_SYSFS) |
163 | static struct pci_device_id k8_nb_id[] = { | 163 | static struct pci_device_id k8_nb_id[] = { |
164 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, | 164 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) }, |
165 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) }, | 165 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) }, |
@@ -324,15 +324,6 @@ __cpuinit cpuid4_cache_lookup_regs(int index, | |||
324 | return 0; | 324 | return 0; |
325 | } | 325 | } |
326 | 326 | ||
327 | static int | ||
328 | __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | ||
329 | { | ||
330 | struct _cpuid4_info_regs *leaf_regs = | ||
331 | (struct _cpuid4_info_regs *)this_leaf; | ||
332 | |||
333 | return cpuid4_cache_lookup_regs(index, leaf_regs); | ||
334 | } | ||
335 | |||
336 | static int __cpuinit find_num_cache_leaves(void) | 327 | static int __cpuinit find_num_cache_leaves(void) |
337 | { | 328 | { |
338 | unsigned int eax, ebx, ecx, edx; | 329 | unsigned int eax, ebx, ecx, edx; |
@@ -508,6 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
508 | return l2; | 499 | return l2; |
509 | } | 500 | } |
510 | 501 | ||
502 | #ifdef CONFIG_SYSFS | ||
503 | |||
511 | /* pointer to _cpuid4_info array (for each cache leaf) */ | 504 | /* pointer to _cpuid4_info array (for each cache leaf) */ |
512 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); | 505 | static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info); |
513 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) | 506 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y])) |
@@ -571,6 +564,15 @@ static void __cpuinit free_cache_attributes(unsigned int cpu) | |||
571 | per_cpu(cpuid4_info, cpu) = NULL; | 564 | per_cpu(cpuid4_info, cpu) = NULL; |
572 | } | 565 | } |
573 | 566 | ||
567 | static int | ||
568 | __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) | ||
569 | { | ||
570 | struct _cpuid4_info_regs *leaf_regs = | ||
571 | (struct _cpuid4_info_regs *)this_leaf; | ||
572 | |||
573 | return cpuid4_cache_lookup_regs(index, leaf_regs); | ||
574 | } | ||
575 | |||
574 | static void __cpuinit get_cpu_leaves(void *_retval) | 576 | static void __cpuinit get_cpu_leaves(void *_retval) |
575 | { | 577 | { |
576 | int j, *retval = _retval, cpu = smp_processor_id(); | 578 | int j, *retval = _retval, cpu = smp_processor_id(); |
@@ -612,8 +614,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) | |||
612 | return retval; | 614 | return retval; |
613 | } | 615 | } |
614 | 616 | ||
615 | #ifdef CONFIG_SYSFS | ||
616 | |||
617 | #include <linux/kobject.h> | 617 | #include <linux/kobject.h> |
618 | #include <linux/sysfs.h> | 618 | #include <linux/sysfs.h> |
619 | 619 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index ca14604611ec..863f89568b1a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c | |||
@@ -990,7 +990,7 @@ static struct sysdev_attribute *mce_attributes[] = { | |||
990 | NULL | 990 | NULL |
991 | }; | 991 | }; |
992 | 992 | ||
993 | static cpumask_t mce_device_initialized = CPU_MASK_NONE; | 993 | static cpumask_var_t mce_device_initialized; |
994 | 994 | ||
995 | /* Per cpu sysdev init. All of the cpus still share the same ctl bank */ | 995 | /* Per cpu sysdev init. All of the cpus still share the same ctl bank */ |
996 | static __cpuinit int mce_create_device(unsigned int cpu) | 996 | static __cpuinit int mce_create_device(unsigned int cpu) |
@@ -1021,7 +1021,7 @@ static __cpuinit int mce_create_device(unsigned int cpu) | |||
1021 | if (err) | 1021 | if (err) |
1022 | goto error2; | 1022 | goto error2; |
1023 | } | 1023 | } |
1024 | cpu_set(cpu, mce_device_initialized); | 1024 | cpumask_set_cpu(cpu, mce_device_initialized); |
1025 | 1025 | ||
1026 | return 0; | 1026 | return 0; |
1027 | error2: | 1027 | error2: |
@@ -1043,7 +1043,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu) | |||
1043 | { | 1043 | { |
1044 | int i; | 1044 | int i; |
1045 | 1045 | ||
1046 | if (!cpu_isset(cpu, mce_device_initialized)) | 1046 | if (!cpumask_test_cpu(cpu, mce_device_initialized)) |
1047 | return; | 1047 | return; |
1048 | 1048 | ||
1049 | for (i = 0; mce_attributes[i]; i++) | 1049 | for (i = 0; mce_attributes[i]; i++) |
@@ -1053,7 +1053,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu) | |||
1053 | sysdev_remove_file(&per_cpu(device_mce, cpu), | 1053 | sysdev_remove_file(&per_cpu(device_mce, cpu), |
1054 | &bank_attrs[i]); | 1054 | &bank_attrs[i]); |
1055 | sysdev_unregister(&per_cpu(device_mce,cpu)); | 1055 | sysdev_unregister(&per_cpu(device_mce,cpu)); |
1056 | cpu_clear(cpu, mce_device_initialized); | 1056 | cpumask_clear_cpu(cpu, mce_device_initialized); |
1057 | } | 1057 | } |
1058 | 1058 | ||
1059 | /* Make sure there are no machine checks on offlined CPUs. */ | 1059 | /* Make sure there are no machine checks on offlined CPUs. */ |
@@ -1162,6 +1162,8 @@ static __init int mce_init_device(void) | |||
1162 | if (!mce_available(&boot_cpu_data)) | 1162 | if (!mce_available(&boot_cpu_data)) |
1163 | return -EIO; | 1163 | return -EIO; |
1164 | 1164 | ||
1165 | alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); | ||
1166 | |||
1165 | err = mce_init_banks(); | 1167 | err = mce_init_banks(); |
1166 | if (err) | 1168 | if (err) |
1167 | return err; | 1169 | return err; |
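mce_device_initialized becomes a cpumask_var_t, which with CONFIG_CPUMASK_OFFSTACK is heap-allocated rather than a fixed NR_CPUS-bit object in BSS. A sketch of the full lifecycle, with my_mask as a hypothetical module-level mask; note that alloc_cpumask_var() can fail and does not zero the mask, so callers normally check its result and clear the mask first:

    static cpumask_var_t my_mask;

    static int __init my_init(void)
    {
            if (!alloc_cpumask_var(&my_mask, GFP_KERNEL))
                    return -ENOMEM;
            cpumask_clear(my_mask);
            cpumask_set_cpu(0, my_mask);
            return 0;
    }

    static void __exit my_exit(void)
    {
            free_cpumask_var(my_mask);
    }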
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 7d01be868870..56dde9c4bc96 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c | |||
@@ -485,7 +485,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
485 | 485 | ||
486 | #ifdef CONFIG_SMP | 486 | #ifdef CONFIG_SMP |
487 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ | 487 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ |
488 | i = cpumask_first(&per_cpu(cpu_core_map, cpu)); | 488 | i = cpumask_first(cpu_core_mask(cpu)); |
489 | 489 | ||
490 | /* first core not up yet */ | 490 | /* first core not up yet */ |
491 | if (cpu_data(i).cpu_core_id) | 491 | if (cpu_data(i).cpu_core_id) |
@@ -505,7 +505,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
505 | if (err) | 505 | if (err) |
506 | goto out; | 506 | goto out; |
507 | 507 | ||
508 | cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu)); | 508 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); |
509 | per_cpu(threshold_banks, cpu)[bank] = b; | 509 | per_cpu(threshold_banks, cpu)[bank] = b; |
510 | goto out; | 510 | goto out; |
511 | } | 511 | } |
@@ -529,7 +529,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
529 | #ifndef CONFIG_SMP | 529 | #ifndef CONFIG_SMP |
530 | cpumask_setall(b->cpus); | 530 | cpumask_setall(b->cpus); |
531 | #else | 531 | #else |
532 | cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu)); | 532 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); |
533 | #endif | 533 | #endif |
534 | 534 | ||
535 | per_cpu(threshold_banks, cpu)[bank] = b; | 535 | per_cpu(threshold_banks, cpu)[bank] = b; |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c index 57df3d383470..d6b72df89d69 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c | |||
@@ -249,7 +249,7 @@ void cmci_rediscover(int dying) | |||
249 | for_each_online_cpu (cpu) { | 249 | for_each_online_cpu (cpu) { |
250 | if (cpu == dying) | 250 | if (cpu == dying) |
251 | continue; | 251 | continue; |
252 | if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu))) | 252 | if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) |
253 | continue; | 253 | continue; |
254 | /* Recheck banks in case CPUs don't all have the same */ | 254 | /* Recheck banks in case CPUs don't all have the same */ |
255 | if (cmci_supported(&banks)) | 255 | if (cmci_supported(&banks)) |
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 37f28fc7cf95..0b776c09aff3 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
@@ -462,9 +462,6 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
462 | *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; | 462 | *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; |
463 | *type = base_lo & 0xff; | 463 | *type = base_lo & 0xff; |
464 | 464 | ||
465 | printk(KERN_DEBUG " get_mtrr: cpu%d reg%02d base=%010lx size=%010lx %s\n", | ||
466 | cpu, reg, *base, *size, | ||
467 | mtrr_attrib_to_str(*type & 0xff)); | ||
468 | out_put_cpu: | 465 | out_put_cpu: |
469 | put_cpu(); | 466 | put_cpu(); |
470 | } | 467 | } |
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index d67e0e48bc2d..f93047fed791 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
@@ -14,7 +14,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, | |||
14 | if (c->x86_max_cores * smp_num_siblings > 1) { | 14 | if (c->x86_max_cores * smp_num_siblings > 1) { |
15 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); | 15 | seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); |
16 | seq_printf(m, "siblings\t: %d\n", | 16 | seq_printf(m, "siblings\t: %d\n", |
17 | cpus_weight(per_cpu(cpu_core_map, cpu))); | 17 | cpumask_weight(cpu_sibling_mask(cpu))); |
18 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); | 18 | seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); |
19 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); | 19 | seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); |
20 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); | 20 | seq_printf(m, "apicid\t\t: %d\n", c->apicid); |
@@ -143,9 +143,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
143 | static void *c_start(struct seq_file *m, loff_t *pos) | 143 | static void *c_start(struct seq_file *m, loff_t *pos) |
144 | { | 144 | { |
145 | if (*pos == 0) /* just in case, cpu 0 is not the first */ | 145 | if (*pos == 0) /* just in case, cpu 0 is not the first */ |
146 | *pos = first_cpu(cpu_online_map); | 146 | *pos = cpumask_first(cpu_online_mask); |
147 | else | 147 | else |
148 | *pos = next_cpu_nr(*pos - 1, cpu_online_map); | 148 | *pos = cpumask_next(*pos - 1, cpu_online_mask); |
149 | if ((*pos) < nr_cpu_ids) | 149 | if ((*pos) < nr_cpu_ids) |
150 | return &cpu_data(*pos); | 150 | return &cpu_data(*pos); |
151 | return NULL; | 151 | return NULL; |
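cpumask_first() and cpumask_next() both return a value >= nr_cpu_ids once the mask is exhausted, which is exactly the condition the seq_file ->start handler above tests. The same walk in a standalone sketch:

    /* visit every online CPU; fn is a hypothetical per-CPU callback */
    static void visit_online_cpus(void (*fn)(int cpu))
    {
            int cpu;

            for (cpu = cpumask_first(cpu_online_mask);
                 cpu < nr_cpu_ids;
                 cpu = cpumask_next(cpu, cpu_online_mask))
                    fn(cpu);
    }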
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index dd2130b0fb3e..95ea5fa7d444 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/bug.h> | 15 | #include <linux/bug.h> |
16 | #include <linux/nmi.h> | 16 | #include <linux/nmi.h> |
17 | #include <linux/sysfs.h> | 17 | #include <linux/sysfs.h> |
18 | #include <linux/ftrace.h> | ||
18 | 19 | ||
19 | #include <asm/stacktrace.h> | 20 | #include <asm/stacktrace.h> |
20 | 21 | ||
@@ -196,6 +197,11 @@ unsigned __kprobes long oops_begin(void) | |||
196 | int cpu; | 197 | int cpu; |
197 | unsigned long flags; | 198 | unsigned long flags; |
198 | 199 | ||
200 | /* notify the hw-branch tracer so it may disable tracing and | ||
201 | add the last trace to the trace buffer - | ||
202 | the earlier this happens, the more useful the trace. */ | ||
203 | trace_hw_branch_oops(); | ||
204 | |||
199 | oops_enter(); | 205 | oops_enter(); |
200 | 206 | ||
201 | /* racy, but better than risking deadlock. */ | 207 | /* racy, but better than risking deadlock. */ |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 76f7141e0f91..18dfa30795c9 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -18,14 +18,28 @@ | |||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/list.h> | 19 | #include <linux/list.h> |
20 | 20 | ||
21 | #include <trace/syscall.h> | ||
22 | |||
23 | #include <asm/cacheflush.h> | ||
21 | #include <asm/ftrace.h> | 24 | #include <asm/ftrace.h> |
22 | #include <linux/ftrace.h> | ||
23 | #include <asm/nops.h> | 25 | #include <asm/nops.h> |
24 | #include <asm/nmi.h> | 26 | #include <asm/nmi.h> |
25 | 27 | ||
26 | 28 | ||
27 | #ifdef CONFIG_DYNAMIC_FTRACE | 29 | #ifdef CONFIG_DYNAMIC_FTRACE |
28 | 30 | ||
31 | int ftrace_arch_code_modify_prepare(void) | ||
32 | { | ||
33 | set_kernel_text_rw(); | ||
34 | return 0; | ||
35 | } | ||
36 | |||
37 | int ftrace_arch_code_modify_post_process(void) | ||
38 | { | ||
39 | set_kernel_text_ro(); | ||
40 | return 0; | ||
41 | } | ||
42 | |||
29 | union ftrace_code_union { | 43 | union ftrace_code_union { |
30 | char code[MCOUNT_INSN_SIZE]; | 44 | char code[MCOUNT_INSN_SIZE]; |
31 | struct { | 45 | struct { |
@@ -66,11 +80,11 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) | |||
66 | * | 80 | * |
67 | * 1) Put the instruction pointer into the IP buffer | 81 | * 1) Put the instruction pointer into the IP buffer |
68 | * and the new code into the "code" buffer. | 82 | * and the new code into the "code" buffer. |
69 | * 2) Set a flag that says we are modifying code | 83 | * 2) Wait for any running NMIs to finish and set a flag that says |
70 | * 3) Wait for any running NMIs to finish. | 84 | * we are modifying code, it is done in an atomic operation. |
71 | * 4) Write the code | 85 | * 3) Write the code |
72 | * 5) clear the flag. | 86 | * 4) clear the flag. |
73 | * 6) Wait for any running NMIs to finish. | 87 | * 5) Wait for any running NMIs to finish. |
74 | * | 88 | * |
75 | * If an NMI is executed, the first thing it does is to call | 89 | * If an NMI is executed, the first thing it does is to call |
76 | * "ftrace_nmi_enter". This will check if the flag is set to write | 90 | * "ftrace_nmi_enter". This will check if the flag is set to write |
@@ -82,9 +96,9 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) | |||
82 | * are the same as what exists. | 96 | * are the same as what exists. |
83 | */ | 97 | */ |
84 | 98 | ||
85 | static atomic_t in_nmi = ATOMIC_INIT(0); | 99 | #define MOD_CODE_WRITE_FLAG (1 << 31) /* set when NMI should do the write */ |
100 | static atomic_t nmi_running = ATOMIC_INIT(0); | ||
86 | static int mod_code_status; /* holds return value of text write */ | 101 | static int mod_code_status; /* holds return value of text write */ |
87 | static int mod_code_write; /* set when NMI should do the write */ | ||
88 | static void *mod_code_ip; /* holds the IP to write to */ | 102 | static void *mod_code_ip; /* holds the IP to write to */ |
89 | static void *mod_code_newcode; /* holds the text to write to the IP */ | 103 | static void *mod_code_newcode; /* holds the text to write to the IP */ |
90 | 104 | ||
@@ -101,6 +115,20 @@ int ftrace_arch_read_dyn_info(char *buf, int size) | |||
101 | return r; | 115 | return r; |
102 | } | 116 | } |
103 | 117 | ||
118 | static void clear_mod_flag(void) | ||
119 | { | ||
120 | int old = atomic_read(&nmi_running); | ||
121 | |||
122 | for (;;) { | ||
123 | int new = old & ~MOD_CODE_WRITE_FLAG; | ||
124 | |||
125 | if (old == new) | ||
126 | break; | ||
127 | |||
128 | old = atomic_cmpxchg(&nmi_running, old, new); | ||
129 | } | ||
130 | } | ||
131 | |||
104 | static void ftrace_mod_code(void) | 132 | static void ftrace_mod_code(void) |
105 | { | 133 | { |
106 | /* | 134 | /* |
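The rename from in_nmi to nmi_running reflects a change of encoding: one atomic word now carries both the count of NMIs in flight (low bits) and the "write in progress" flag (bit 31), so clearing the flag must preserve the count, hence the cmpxchg loop. A self-contained userspace sketch of the same encoding with C11 atomics:

    #include <stdatomic.h>

    #define WRITE_FLAG (1u << 31)
    static atomic_uint nmi_state;

    static void clear_write_flag(void)
    {
            unsigned int old = atomic_load(&nmi_state);

            /* on failure, 'old' is reloaded and we retry; the NMI
             * count in the low bits is carried over untouched */
            while (!atomic_compare_exchange_weak(&nmi_state, &old,
                                                 old & ~WRITE_FLAG))
                    ;
    }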
@@ -111,37 +139,52 @@ static void ftrace_mod_code(void) | |||
111 | */ | 139 | */ |
112 | mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode, | 140 | mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode, |
113 | MCOUNT_INSN_SIZE); | 141 | MCOUNT_INSN_SIZE); |
142 | |||
143 | /* if we fail, then kill any new writers */ | ||
144 | if (mod_code_status) | ||
145 | clear_mod_flag(); | ||
114 | } | 146 | } |
115 | 147 | ||
116 | void ftrace_nmi_enter(void) | 148 | void ftrace_nmi_enter(void) |
117 | { | 149 | { |
118 | atomic_inc(&in_nmi); | 150 | if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { |
119 | /* Must have in_nmi seen before reading write flag */ | 151 | smp_rmb(); |
120 | smp_mb(); | ||
121 | if (mod_code_write) { | ||
122 | ftrace_mod_code(); | 152 | ftrace_mod_code(); |
123 | atomic_inc(&nmi_update_count); | 153 | atomic_inc(&nmi_update_count); |
124 | } | 154 | } |
155 | /* Must have previous changes seen before executions */ | ||
156 | smp_mb(); | ||
125 | } | 157 | } |
126 | 158 | ||
127 | void ftrace_nmi_exit(void) | 159 | void ftrace_nmi_exit(void) |
128 | { | 160 | { |
129 | /* Finish all executions before clearing in_nmi */ | 161 | /* Finish all executions before clearing nmi_running */ |
130 | smp_wmb(); | 162 | smp_mb(); |
131 | atomic_dec(&in_nmi); | 163 | atomic_dec(&nmi_running); |
164 | } | ||
165 | |||
166 | static void wait_for_nmi_and_set_mod_flag(void) | ||
167 | { | ||
168 | if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG)) | ||
169 | return; | ||
170 | |||
171 | do { | ||
172 | cpu_relax(); | ||
173 | } while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG)); | ||
174 | |||
175 | nmi_wait_count++; | ||
132 | } | 176 | } |
133 | 177 | ||
134 | static void wait_for_nmi(void) | 178 | static void wait_for_nmi(void) |
135 | { | 179 | { |
136 | int waited = 0; | 180 | if (!atomic_read(&nmi_running)) |
181 | return; | ||
137 | 182 | ||
138 | while (atomic_read(&in_nmi)) { | 183 | do { |
139 | waited = 1; | ||
140 | cpu_relax(); | 184 | cpu_relax(); |
141 | } | 185 | } while (atomic_read(&nmi_running)); |
142 | 186 | ||
143 | if (waited) | 187 | nmi_wait_count++; |
144 | nmi_wait_count++; | ||
145 | } | 188 | } |
146 | 189 | ||
147 | static int | 190 | static int |
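wait_for_nmi_and_set_mod_flag() leans on the fact that atomic_cmpxchg(&nmi_running, 0, FLAG) can only succeed while the NMI count is zero: waiting for quiescence and claiming the flag collapse into one atomic step, leaving no window in which a freshly entered NMI could miss the flag. Reduced to its core, with hypothetical names:

    static void wait_and_claim(atomic_t *v, int flag)
    {
            /* returns the old value; nonzero means an NMI is running */
            while (atomic_cmpxchg(v, 0, flag) != 0)
                    cpu_relax();
    }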
@@ -151,14 +194,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code) | |||
151 | mod_code_newcode = new_code; | 194 | mod_code_newcode = new_code; |
152 | 195 | ||
153 | /* The buffers need to be visible before we let NMIs write them */ | 196 | /* The buffers need to be visible before we let NMIs write them */ |
154 | smp_wmb(); | ||
155 | |||
156 | mod_code_write = 1; | ||
157 | |||
158 | /* Make sure write bit is visible before we wait on NMIs */ | ||
159 | smp_mb(); | 197 | smp_mb(); |
160 | 198 | ||
161 | wait_for_nmi(); | 199 | wait_for_nmi_and_set_mod_flag(); |
162 | 200 | ||
163 | /* Make sure all running NMIs have finished before we write the code */ | 201 | /* Make sure all running NMIs have finished before we write the code */ |
164 | smp_mb(); | 202 | smp_mb(); |
@@ -166,13 +204,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code) | |||
166 | ftrace_mod_code(); | 204 | ftrace_mod_code(); |
167 | 205 | ||
168 | /* Make sure the write happens before clearing the bit */ | 206 | /* Make sure the write happens before clearing the bit */ |
169 | smp_wmb(); | ||
170 | |||
171 | mod_code_write = 0; | ||
172 | |||
173 | /* make sure NMIs see the cleared bit */ | ||
174 | smp_mb(); | 207 | smp_mb(); |
175 | 208 | ||
209 | clear_mod_flag(); | ||
176 | wait_for_nmi(); | 210 | wait_for_nmi(); |
177 | 211 | ||
178 | return mod_code_status; | 212 | return mod_code_status; |
@@ -368,25 +402,6 @@ int ftrace_disable_ftrace_graph_caller(void) | |||
368 | return ftrace_mod_jmp(ip, old_offset, new_offset); | 402 | return ftrace_mod_jmp(ip, old_offset, new_offset); |
369 | } | 403 | } |
370 | 404 | ||
371 | #else /* CONFIG_DYNAMIC_FTRACE */ | ||
372 | |||
373 | /* | ||
374 | * These functions are picked from those used on | ||
375 | * this page for dynamic ftrace. They have been | ||
376 | * simplified to ignore all traces in NMI context. | ||
377 | */ | ||
378 | static atomic_t in_nmi; | ||
379 | |||
380 | void ftrace_nmi_enter(void) | ||
381 | { | ||
382 | atomic_inc(&in_nmi); | ||
383 | } | ||
384 | |||
385 | void ftrace_nmi_exit(void) | ||
386 | { | ||
387 | atomic_dec(&in_nmi); | ||
388 | } | ||
389 | |||
390 | #endif /* !CONFIG_DYNAMIC_FTRACE */ | 405 | #endif /* !CONFIG_DYNAMIC_FTRACE */ |
391 | 406 | ||
392 | /* | 407 | /* |
@@ -396,14 +411,13 @@ void ftrace_nmi_exit(void) | |||
396 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | 411 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) |
397 | { | 412 | { |
398 | unsigned long old; | 413 | unsigned long old; |
399 | unsigned long long calltime; | ||
400 | int faulted; | 414 | int faulted; |
401 | struct ftrace_graph_ent trace; | 415 | struct ftrace_graph_ent trace; |
402 | unsigned long return_hooker = (unsigned long) | 416 | unsigned long return_hooker = (unsigned long) |
403 | &return_to_handler; | 417 | &return_to_handler; |
404 | 418 | ||
405 | /* NMIs are currently unsupported */ | 419 | /* NMIs are currently unsupported */ |
406 | if (unlikely(atomic_read(&in_nmi))) | 420 | if (unlikely(in_nmi())) |
407 | return; | 421 | return; |
408 | 422 | ||
409 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 423 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
@@ -439,17 +453,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
439 | return; | 453 | return; |
440 | } | 454 | } |
441 | 455 | ||
442 | if (unlikely(!__kernel_text_address(old))) { | 456 | if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) { |
443 | ftrace_graph_stop(); | ||
444 | *parent = old; | ||
445 | WARN_ON(1); | ||
446 | return; | ||
447 | } | ||
448 | |||
449 | calltime = cpu_clock(raw_smp_processor_id()); | ||
450 | |||
451 | if (ftrace_push_return_trace(old, calltime, | ||
452 | self_addr, &trace.depth) == -EBUSY) { | ||
453 | *parent = old; | 457 | *parent = old; |
454 | return; | 458 | return; |
455 | } | 459 | } |
@@ -463,3 +467,66 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
463 | } | 467 | } |
464 | } | 468 | } |
465 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 469 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
470 | |||
471 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
472 | |||
473 | extern unsigned long __start_syscalls_metadata[]; | ||
474 | extern unsigned long __stop_syscalls_metadata[]; | ||
475 | extern unsigned long *sys_call_table; | ||
476 | |||
477 | static struct syscall_metadata **syscalls_metadata; | ||
478 | |||
479 | static struct syscall_metadata *find_syscall_meta(unsigned long *syscall) | ||
480 | { | ||
481 | struct syscall_metadata *start; | ||
482 | struct syscall_metadata *stop; | ||
483 | char str[KSYM_SYMBOL_LEN]; | ||
484 | |||
485 | |||
486 | start = (struct syscall_metadata *)__start_syscalls_metadata; | ||
487 | stop = (struct syscall_metadata *)__stop_syscalls_metadata; | ||
488 | kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str); | ||
489 | |||
490 | for ( ; start < stop; start++) { | ||
491 | if (start->name && !strcmp(start->name, str)) | ||
492 | return start; | ||
493 | } | ||
494 | return NULL; | ||
495 | } | ||
496 | |||
497 | struct syscall_metadata *syscall_nr_to_meta(int nr) | ||
498 | { | ||
499 | if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0) | ||
500 | return NULL; | ||
501 | |||
502 | return syscalls_metadata[nr]; | ||
503 | } | ||
504 | |||
505 | void arch_init_ftrace_syscalls(void) | ||
506 | { | ||
507 | int i; | ||
508 | struct syscall_metadata *meta; | ||
509 | unsigned long **psys_syscall_table = &sys_call_table; | ||
510 | static atomic_t refs; | ||
511 | |||
512 | if (atomic_inc_return(&refs) != 1) | ||
513 | goto end; | ||
514 | |||
515 | syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * | ||
516 | FTRACE_SYSCALL_MAX, GFP_KERNEL); | ||
517 | if (!syscalls_metadata) { | ||
518 | WARN_ON(1); | ||
519 | return; | ||
520 | } | ||
521 | |||
522 | for (i = 0; i < FTRACE_SYSCALL_MAX; i++) { | ||
523 | meta = find_syscall_meta(psys_syscall_table[i]); | ||
524 | syscalls_metadata[i] = meta; | ||
525 | } | ||
526 | return; | ||
527 | |||
528 | /* Paranoid: avoid overflow */ | ||
529 | end: | ||
530 | atomic_dec(&refs); | ||
531 | } | ||
532 | #endif | ||
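A hedged sketch of how a tracer might consume the new table; syscall_nr_to_meta() returns NULL for out-of-range or unmapped entries, so the caller must handle that:

    static const char *syscall_name(int nr)
    {
            struct syscall_metadata *meta = syscall_nr_to_meta(nr);

            return meta ? meta->name : "unknown";
    }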
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 3aaf7b9e3a8b..c3fe010d74c8 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -65,7 +65,7 @@ static int show_other_interrupts(struct seq_file *p, int prec) | |||
65 | seq_printf(p, " Spurious interrupts\n"); | 65 | seq_printf(p, " Spurious interrupts\n"); |
66 | #endif | 66 | #endif |
67 | if (generic_interrupt_extension) { | 67 | if (generic_interrupt_extension) { |
68 | seq_printf(p, "PLT: "); | 68 | seq_printf(p, "%*s: ", prec, "PLT"); |
69 | for_each_online_cpu(j) | 69 | for_each_online_cpu(j) |
70 | seq_printf(p, "%10u ", irq_stats(j)->generic_irqs); | 70 | seq_printf(p, "%10u ", irq_stats(j)->generic_irqs); |
71 | seq_printf(p, " Platform interrupts\n"); | 71 | seq_printf(p, " Platform interrupts\n"); |
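The "%*s" conversion takes the field width from the prec argument, so the "PLT" label is right-justified to the same column as the other interrupt rows. A quick userspace illustration:

    #include <stdio.h>

    int main(void)
    {
            printf("%*s: %10u\n", 4, "PLT", 42u);  /* " PLT:         42" */
            printf("%*s: %10u\n", 4, "NMI", 7u);   /* " NMI:          7" */
            return 0;
    }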
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 55b94614e348..7b5169d2b000 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -638,13 +638,13 @@ static void __used __kprobes kretprobe_trampoline_holder(void) | |||
638 | #else | 638 | #else |
639 | " pushf\n" | 639 | " pushf\n" |
640 | /* | 640 | /* |
641 | * Skip cs, ip, orig_ax. | 641 | * Skip cs, ip, orig_ax and gs. |
642 | * trampoline_handler() will plug in these values | 642 | * trampoline_handler() will plug in these values |
643 | */ | 643 | */ |
644 | " subl $12, %esp\n" | 644 | " subl $16, %esp\n" |
645 | " pushl %fs\n" | 645 | " pushl %fs\n" |
646 | " pushl %ds\n" | ||
647 | " pushl %es\n" | 646 | " pushl %es\n" |
647 | " pushl %ds\n" | ||
648 | " pushl %eax\n" | 648 | " pushl %eax\n" |
649 | " pushl %ebp\n" | 649 | " pushl %ebp\n" |
650 | " pushl %edi\n" | 650 | " pushl %edi\n" |
@@ -655,10 +655,10 @@ static void __used __kprobes kretprobe_trampoline_holder(void) | |||
655 | " movl %esp, %eax\n" | 655 | " movl %esp, %eax\n" |
656 | " call trampoline_handler\n" | 656 | " call trampoline_handler\n" |
657 | /* Move flags to cs */ | 657 | /* Move flags to cs */ |
658 | " movl 52(%esp), %edx\n" | 658 | " movl 56(%esp), %edx\n" |
659 | " movl %edx, 48(%esp)\n" | 659 | " movl %edx, 52(%esp)\n" |
660 | /* Replace saved flags with true return address. */ | 660 | /* Replace saved flags with true return address. */ |
661 | " movl %eax, 52(%esp)\n" | 661 | " movl %eax, 56(%esp)\n" |
662 | " popl %ebx\n" | 662 | " popl %ebx\n" |
663 | " popl %ecx\n" | 663 | " popl %ecx\n" |
664 | " popl %edx\n" | 664 | " popl %edx\n" |
@@ -666,8 +666,8 @@ static void __used __kprobes kretprobe_trampoline_holder(void) | |||
666 | " popl %edi\n" | 666 | " popl %edi\n" |
667 | " popl %ebp\n" | 667 | " popl %ebp\n" |
668 | " popl %eax\n" | 668 | " popl %eax\n" |
669 | /* Skip ip, orig_ax, es, ds, fs */ | 669 | /* Skip ds, es, fs, gs, orig_ax and ip */ |
670 | " addl $20, %esp\n" | 670 | " addl $24, %esp\n" |
671 | " popf\n" | 671 | " popf\n" |
672 | #endif | 672 | #endif |
673 | " ret\n"); | 673 | " ret\n"); |
@@ -691,6 +691,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) | |||
691 | regs->cs = __KERNEL_CS; | 691 | regs->cs = __KERNEL_CS; |
692 | #else | 692 | #else |
693 | regs->cs = __KERNEL_CS | get_kernel_rpl(); | 693 | regs->cs = __KERNEL_CS | get_kernel_rpl(); |
694 | regs->gs = 0; | ||
694 | #endif | 695 | #endif |
695 | regs->ip = trampoline_address; | 696 | regs->ip = trampoline_address; |
696 | regs->orig_ax = ~0UL; | 697 | regs->orig_ax = ~0UL; |
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index c25fdb382292..453b5795a5c6 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
@@ -12,31 +12,30 @@ | |||
12 | * | 12 | * |
13 | * Licensed under the terms of the GNU General Public | 13 | * Licensed under the terms of the GNU General Public |
14 | * License version 2. See file COPYING for details. | 14 | * License version 2. See file COPYING for details. |
15 | */ | 15 | */ |
16 | 16 | #include <linux/platform_device.h> | |
17 | #include <linux/capability.h> | 17 | #include <linux/capability.h> |
18 | #include <linux/kernel.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/cpumask.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/vmalloc.h> | ||
25 | #include <linux/miscdevice.h> | 18 | #include <linux/miscdevice.h> |
19 | #include <linux/firmware.h> | ||
26 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
27 | #include <linux/mm.h> | 21 | #include <linux/cpumask.h> |
28 | #include <linux/fs.h> | 22 | #include <linux/pci_ids.h> |
23 | #include <linux/uaccess.h> | ||
24 | #include <linux/vmalloc.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/module.h> | ||
29 | #include <linux/mutex.h> | 27 | #include <linux/mutex.h> |
28 | #include <linux/sched.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/slab.h> | ||
30 | #include <linux/cpu.h> | 31 | #include <linux/cpu.h> |
31 | #include <linux/firmware.h> | ||
32 | #include <linux/platform_device.h> | ||
33 | #include <linux/pci.h> | 32 | #include <linux/pci.h> |
34 | #include <linux/pci_ids.h> | 33 | #include <linux/fs.h> |
35 | #include <linux/uaccess.h> | 34 | #include <linux/mm.h> |
36 | 35 | ||
37 | #include <asm/msr.h> | ||
38 | #include <asm/processor.h> | ||
39 | #include <asm/microcode.h> | 36 | #include <asm/microcode.h> |
37 | #include <asm/processor.h> | ||
38 | #include <asm/msr.h> | ||
40 | 39 | ||
41 | MODULE_DESCRIPTION("AMD Microcode Update Driver"); | 40 | MODULE_DESCRIPTION("AMD Microcode Update Driver"); |
42 | MODULE_AUTHOR("Peter Oruba"); | 41 | MODULE_AUTHOR("Peter Oruba"); |
@@ -72,8 +71,8 @@ struct microcode_header_amd { | |||
72 | } __attribute__((packed)); | 71 | } __attribute__((packed)); |
73 | 72 | ||
74 | struct microcode_amd { | 73 | struct microcode_amd { |
75 | struct microcode_header_amd hdr; | 74 | struct microcode_header_amd hdr; |
76 | unsigned int mpb[0]; | 75 | unsigned int mpb[0]; |
77 | }; | 76 | }; |
78 | 77 | ||
79 | #define UCODE_MAX_SIZE 2048 | 78 | #define UCODE_MAX_SIZE 2048 |
@@ -184,8 +183,8 @@ static int get_ucode_data(void *to, const u8 *from, size_t n) | |||
184 | return 0; | 183 | return 0; |
185 | } | 184 | } |
186 | 185 | ||
187 | static void *get_next_ucode(const u8 *buf, unsigned int size, | 186 | static void * |
188 | unsigned int *mc_size) | 187 | get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size) |
189 | { | 188 | { |
190 | unsigned int total_size; | 189 | unsigned int total_size; |
191 | u8 section_hdr[UCODE_CONTAINER_SECTION_HDR]; | 190 | u8 section_hdr[UCODE_CONTAINER_SECTION_HDR]; |
@@ -223,7 +222,6 @@ static void *get_next_ucode(const u8 *buf, unsigned int size, | |||
223 | return mc; | 222 | return mc; |
224 | } | 223 | } |
225 | 224 | ||
226 | |||
227 | static int install_equiv_cpu_table(const u8 *buf) | 225 | static int install_equiv_cpu_table(const u8 *buf) |
228 | { | 226 | { |
229 | u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE]; | 227 | u8 *container_hdr[UCODE_CONTAINER_HEADER_SIZE]; |
@@ -372,4 +370,3 @@ struct microcode_ops * __init init_amd_microcode(void) | |||
372 | { | 370 | { |
373 | return µcode_amd_ops; | 371 | return µcode_amd_ops; |
374 | } | 372 | } |
375 | |||
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index c9b721ba968c..2e0eb4140951 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c | |||
@@ -70,41 +70,41 @@ | |||
70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. | 70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. |
71 | * Thanks to Stuart Swales for pointing out this bug. | 71 | * Thanks to Stuart Swales for pointing out this bug. |
72 | */ | 72 | */ |
73 | #include <linux/platform_device.h> | ||
73 | #include <linux/capability.h> | 74 | #include <linux/capability.h> |
74 | #include <linux/kernel.h> | 75 | #include <linux/miscdevice.h> |
75 | #include <linux/init.h> | 76 | #include <linux/firmware.h> |
76 | #include <linux/sched.h> | ||
77 | #include <linux/smp_lock.h> | 77 | #include <linux/smp_lock.h> |
78 | #include <linux/spinlock.h> | ||
78 | #include <linux/cpumask.h> | 79 | #include <linux/cpumask.h> |
79 | #include <linux/module.h> | 80 | #include <linux/uaccess.h> |
80 | #include <linux/slab.h> | ||
81 | #include <linux/vmalloc.h> | 81 | #include <linux/vmalloc.h> |
82 | #include <linux/miscdevice.h> | 82 | #include <linux/kernel.h> |
83 | #include <linux/spinlock.h> | 83 | #include <linux/module.h> |
84 | #include <linux/mm.h> | ||
85 | #include <linux/fs.h> | ||
86 | #include <linux/mutex.h> | 84 | #include <linux/mutex.h> |
85 | #include <linux/sched.h> | ||
86 | #include <linux/init.h> | ||
87 | #include <linux/slab.h> | ||
87 | #include <linux/cpu.h> | 88 | #include <linux/cpu.h> |
88 | #include <linux/firmware.h> | 89 | #include <linux/fs.h> |
89 | #include <linux/platform_device.h> | 90 | #include <linux/mm.h> |
90 | 91 | ||
91 | #include <asm/msr.h> | ||
92 | #include <asm/uaccess.h> | ||
93 | #include <asm/processor.h> | ||
94 | #include <asm/microcode.h> | 92 | #include <asm/microcode.h> |
93 | #include <asm/processor.h> | ||
94 | #include <asm/msr.h> | ||
95 | 95 | ||
96 | MODULE_DESCRIPTION("Microcode Update Driver"); | 96 | MODULE_DESCRIPTION("Microcode Update Driver"); |
97 | MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); | 97 | MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); |
98 | MODULE_LICENSE("GPL"); | 98 | MODULE_LICENSE("GPL"); |
99 | 99 | ||
100 | #define MICROCODE_VERSION "2.00" | 100 | #define MICROCODE_VERSION "2.00" |
101 | 101 | ||
102 | static struct microcode_ops *microcode_ops; | 102 | static struct microcode_ops *microcode_ops; |
103 | 103 | ||
104 | /* no concurrent ->write()s are allowed on /dev/cpu/microcode */ | 104 | /* no concurrent ->write()s are allowed on /dev/cpu/microcode */ |
105 | static DEFINE_MUTEX(microcode_mutex); | 105 | static DEFINE_MUTEX(microcode_mutex); |
106 | 106 | ||
107 | struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; | 107 | struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; |
108 | EXPORT_SYMBOL_GPL(ucode_cpu_info); | 108 | EXPORT_SYMBOL_GPL(ucode_cpu_info); |
109 | 109 | ||
110 | #ifdef CONFIG_MICROCODE_OLD_INTERFACE | 110 | #ifdef CONFIG_MICROCODE_OLD_INTERFACE |
@@ -198,18 +198,33 @@ static void microcode_dev_exit(void) | |||
198 | 198 | ||
199 | MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); | 199 | MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); |
200 | #else | 200 | #else |
201 | #define microcode_dev_init() 0 | 201 | #define microcode_dev_init() 0 |
202 | #define microcode_dev_exit() do { } while (0) | 202 | #define microcode_dev_exit() do { } while (0) |
203 | #endif | 203 | #endif |
204 | 204 | ||
205 | /* fake device for request_firmware */ | 205 | /* fake device for request_firmware */ |
206 | static struct platform_device *microcode_pdev; | 206 | static struct platform_device *microcode_pdev; |
207 | |||
208 | static long reload_for_cpu(void *unused) | ||
209 | { | ||
210 | struct ucode_cpu_info *uci = ucode_cpu_info + smp_processor_id(); | ||
211 | int err = 0; | ||
212 | |||
213 | mutex_lock(µcode_mutex); | ||
214 | if (uci->valid) { | ||
215 | err = microcode_ops->request_microcode_fw(smp_processor_id(), | ||
216 | µcode_pdev->dev); | ||
217 | if (!err) | ||
218 | microcode_ops->apply_microcode(smp_processor_id()); | ||
219 | } | ||
220 | mutex_unlock(µcode_mutex); | ||
221 | return err; | ||
222 | } | ||
207 | 223 | ||
208 | static ssize_t reload_store(struct sys_device *dev, | 224 | static ssize_t reload_store(struct sys_device *dev, |
209 | struct sysdev_attribute *attr, | 225 | struct sysdev_attribute *attr, |
210 | const char *buf, size_t sz) | 226 | const char *buf, size_t sz) |
211 | { | 227 | { |
212 | struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; | ||
213 | char *end; | 228 | char *end; |
214 | unsigned long val = simple_strtoul(buf, &end, 0); | 229 | unsigned long val = simple_strtoul(buf, &end, 0); |
215 | int err = 0; | 230 | int err = 0; |
@@ -218,21 +233,9 @@ static ssize_t reload_store(struct sys_device *dev, | |||
218 | if (end == buf) | 233 | if (end == buf) |
219 | return -EINVAL; | 234 | return -EINVAL; |
220 | if (val == 1) { | 235 | if (val == 1) { |
221 | cpumask_t old = current->cpus_allowed; | ||
222 | |||
223 | get_online_cpus(); | 236 | get_online_cpus(); |
224 | if (cpu_online(cpu)) { | 237 | if (cpu_online(cpu)) |
225 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | 238 | err = work_on_cpu(cpu, reload_for_cpu, NULL); |
226 | mutex_lock(µcode_mutex); | ||
227 | if (uci->valid) { | ||
228 | err = microcode_ops->request_microcode_fw(cpu, | ||
229 | µcode_pdev->dev); | ||
230 | if (!err) | ||
231 | microcode_ops->apply_microcode(cpu); | ||
232 | } | ||
233 | mutex_unlock(µcode_mutex); | ||
234 | set_cpus_allowed_ptr(current, &old); | ||
235 | } | ||
236 | put_online_cpus(); | 239 | put_online_cpus(); |
237 | } | 240 | } |
238 | if (err) | 241 | if (err) |
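work_on_cpu() queues the callback on a workqueue bound to the target CPU and waits for it, so the caller no longer has to save and restore its own cpus_allowed mask; the callback signature is long (*fn)(void *). A sketch of the idiom, with do_on_target() as a hypothetical worker:

    static long do_on_target(void *arg)
    {
            /* executes on the CPU handed to work_on_cpu() */
            return 0;
    }

    static int run_bound(int cpu)
    {
            long err = -ENODEV;

            get_online_cpus();
            if (cpu_online(cpu))
                    err = work_on_cpu(cpu, do_on_target, NULL);
            put_online_cpus();
            return err;
    }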
@@ -268,8 +271,8 @@ static struct attribute *mc_default_attrs[] = { | |||
268 | }; | 271 | }; |
269 | 272 | ||
270 | static struct attribute_group mc_attr_group = { | 273 | static struct attribute_group mc_attr_group = { |
271 | .attrs = mc_default_attrs, | 274 | .attrs = mc_default_attrs, |
272 | .name = "microcode", | 275 | .name = "microcode", |
273 | }; | 276 | }; |
274 | 277 | ||
275 | static void __microcode_fini_cpu(int cpu) | 278 | static void __microcode_fini_cpu(int cpu) |
@@ -328,9 +331,9 @@ static int microcode_resume_cpu(int cpu) | |||
328 | return 0; | 331 | return 0; |
329 | } | 332 | } |
330 | 333 | ||
331 | static void microcode_update_cpu(int cpu) | 334 | static long microcode_update_cpu(void *unused) |
332 | { | 335 | { |
333 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 336 | struct ucode_cpu_info *uci = ucode_cpu_info + smp_processor_id(); |
334 | int err = 0; | 337 | int err = 0; |
335 | 338 | ||
336 | /* | 339 | /* |
@@ -338,30 +341,27 @@ static void microcode_update_cpu(int cpu) | |||
338 | * otherwise just request a firmware: | 341 | * otherwise just request a firmware: |
339 | */ | 342 | */ |
340 | if (uci->valid) { | 343 | if (uci->valid) { |
341 | err = microcode_resume_cpu(cpu); | 344 | err = microcode_resume_cpu(smp_processor_id()); |
342 | } else { | 345 | } else { |
343 | collect_cpu_info(cpu); | 346 | collect_cpu_info(smp_processor_id()); |
344 | if (uci->valid && system_state == SYSTEM_RUNNING) | 347 | if (uci->valid && system_state == SYSTEM_RUNNING) |
345 | err = microcode_ops->request_microcode_fw(cpu, | 348 | err = microcode_ops->request_microcode_fw( |
349 | smp_processor_id(), | ||
346 | µcode_pdev->dev); | 350 | µcode_pdev->dev); |
347 | } | 351 | } |
348 | if (!err) | 352 | if (!err) |
349 | microcode_ops->apply_microcode(cpu); | 353 | microcode_ops->apply_microcode(smp_processor_id()); |
354 | return err; | ||
350 | } | 355 | } |
351 | 356 | ||
352 | static void microcode_init_cpu(int cpu) | 357 | static int microcode_init_cpu(int cpu) |
353 | { | 358 | { |
354 | cpumask_t old = current->cpus_allowed; | 359 | int err; |
355 | |||
356 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | ||
357 | /* We should bind the task to the CPU */ | ||
358 | BUG_ON(raw_smp_processor_id() != cpu); | ||
359 | |||
360 | mutex_lock(&microcode_mutex); | 360 | mutex_lock(&microcode_mutex); |
361 | microcode_update_cpu(cpu); | 361 | err = work_on_cpu(cpu, microcode_update_cpu, NULL); |
362 | mutex_unlock(&microcode_mutex); | 362 | mutex_unlock(&microcode_mutex); |
363 | 363 | ||
364 | set_cpus_allowed_ptr(current, &old); | 364 | return err; |
365 | } | 365 | } |
366 | 366 | ||
367 | static int mc_sysdev_add(struct sys_device *sys_dev) | 367 | static int mc_sysdev_add(struct sys_device *sys_dev) |
@@ -379,8 +379,11 @@ static int mc_sysdev_add(struct sys_device *sys_dev) | |||
379 | if (err) | 379 | if (err) |
380 | return err; | 380 | return err; |
381 | 381 | ||
382 | microcode_init_cpu(cpu); | 382 | err = microcode_init_cpu(cpu); |
383 | return 0; | 383 | if (err) |
384 | sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); | ||
385 | |||
386 | return err; | ||
384 | } | 387 | } |
385 | 388 | ||
386 | static int mc_sysdev_remove(struct sys_device *sys_dev) | 389 | static int mc_sysdev_remove(struct sys_device *sys_dev) |
@@ -404,14 +407,14 @@ static int mc_sysdev_resume(struct sys_device *dev) | |||
404 | return 0; | 407 | return 0; |
405 | 408 | ||
406 | /* only CPU 0 will apply ucode here */ | 409 | /* only CPU 0 will apply ucode here */ |
407 | microcode_update_cpu(0); | 410 | microcode_update_cpu(NULL); |
408 | return 0; | 411 | return 0; |
409 | } | 412 | } |
410 | 413 | ||
411 | static struct sysdev_driver mc_sysdev_driver = { | 414 | static struct sysdev_driver mc_sysdev_driver = { |
412 | .add = mc_sysdev_add, | 415 | .add = mc_sysdev_add, |
413 | .remove = mc_sysdev_remove, | 416 | .remove = mc_sysdev_remove, |
414 | .resume = mc_sysdev_resume, | 417 | .resume = mc_sysdev_resume, |
415 | }; | 418 | }; |
416 | 419 | ||
417 | static __cpuinit int | 420 | static __cpuinit int |
@@ -424,7 +427,9 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) | |||
424 | switch (action) { | 427 | switch (action) { |
425 | case CPU_ONLINE: | 428 | case CPU_ONLINE: |
426 | case CPU_ONLINE_FROZEN: | 429 | case CPU_ONLINE_FROZEN: |
427 | microcode_init_cpu(cpu); | 430 | if (microcode_init_cpu(cpu)) |
431 | printk(KERN_ERR "microcode: failed to init CPU%d\n", | ||
432 | cpu); | ||
428 | case CPU_DOWN_FAILED: | 433 | case CPU_DOWN_FAILED: |
429 | case CPU_DOWN_FAILED_FROZEN: | 434 | case CPU_DOWN_FAILED_FROZEN: |
430 | pr_debug("microcode: CPU%d added\n", cpu); | 435 | pr_debug("microcode: CPU%d added\n", cpu); |
@@ -448,7 +453,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) | |||
448 | } | 453 | } |
449 | 454 | ||
450 | static struct notifier_block __refdata mc_cpu_notifier = { | 455 | static struct notifier_block __refdata mc_cpu_notifier = { |
451 | .notifier_call = mc_cpu_callback, | 456 | .notifier_call = mc_cpu_callback, |
452 | }; | 457 | }; |
453 | 458 | ||
454 | static int __init microcode_init(void) | 459 | static int __init microcode_init(void) |
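
The microcode_core.c conversion above replaces the old pattern of temporarily rebinding the calling task with set_cpus_allowed_ptr() by work_on_cpu(), which runs a callback in a kernel thread bound to the target CPU and hands back its return value. A minimal sketch of that pattern under hotplug protection, as reload_store() now does it (my_per_cpu_op and run_on are illustrative names, not from this patch):

#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/smp.h>

static long my_per_cpu_op(void *arg)
{
        /* Executes in a workqueue thread pinned to the chosen CPU. */
        pr_debug("running on CPU%d\n", smp_processor_id());
        return 0;
}

static int run_on(int cpu, void *arg)
{
        long err = -ENODEV;

        get_online_cpus();              /* keep the CPU from going away */
        if (cpu_online(cpu))
                err = work_on_cpu(cpu, my_per_cpu_op, arg);
        put_online_cpus();

        return (int)err;
}
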
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c index 5e9f4fc51385..149b9ec7c1ab 100644 --- a/arch/x86/kernel/microcode_intel.c +++ b/arch/x86/kernel/microcode_intel.c | |||
@@ -70,28 +70,28 @@ | |||
70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. | 70 | * Fix sigmatch() macro to handle old CPUs with pf == 0. |
71 | * Thanks to Stuart Swales for pointing out this bug. | 71 | * Thanks to Stuart Swales for pointing out this bug. |
72 | */ | 72 | */ |
73 | #include <linux/platform_device.h> | ||
73 | #include <linux/capability.h> | 74 | #include <linux/capability.h> |
74 | #include <linux/kernel.h> | 75 | #include <linux/miscdevice.h> |
75 | #include <linux/init.h> | 76 | #include <linux/firmware.h> |
76 | #include <linux/sched.h> | ||
77 | #include <linux/smp_lock.h> | 77 | #include <linux/smp_lock.h> |
78 | #include <linux/spinlock.h> | ||
78 | #include <linux/cpumask.h> | 79 | #include <linux/cpumask.h> |
79 | #include <linux/module.h> | 80 | #include <linux/uaccess.h> |
80 | #include <linux/slab.h> | ||
81 | #include <linux/vmalloc.h> | 81 | #include <linux/vmalloc.h> |
82 | #include <linux/miscdevice.h> | 82 | #include <linux/kernel.h> |
83 | #include <linux/spinlock.h> | 83 | #include <linux/module.h> |
84 | #include <linux/mm.h> | ||
85 | #include <linux/fs.h> | ||
86 | #include <linux/mutex.h> | 84 | #include <linux/mutex.h> |
85 | #include <linux/sched.h> | ||
86 | #include <linux/init.h> | ||
87 | #include <linux/slab.h> | ||
87 | #include <linux/cpu.h> | 88 | #include <linux/cpu.h> |
88 | #include <linux/firmware.h> | 89 | #include <linux/fs.h> |
89 | #include <linux/platform_device.h> | 90 | #include <linux/mm.h> |
90 | #include <linux/uaccess.h> | ||
91 | 91 | ||
92 | #include <asm/msr.h> | ||
93 | #include <asm/processor.h> | ||
94 | #include <asm/microcode.h> | 92 | #include <asm/microcode.h> |
93 | #include <asm/processor.h> | ||
94 | #include <asm/msr.h> | ||
95 | 95 | ||
96 | MODULE_DESCRIPTION("Microcode Update Driver"); | 96 | MODULE_DESCRIPTION("Microcode Update Driver"); |
97 | MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); | 97 | MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); |
@@ -129,12 +129,13 @@ struct extended_sigtable { | |||
129 | struct extended_signature sigs[0]; | 129 | struct extended_signature sigs[0]; |
130 | }; | 130 | }; |
131 | 131 | ||
132 | #define DEFAULT_UCODE_DATASIZE (2000) | 132 | #define DEFAULT_UCODE_DATASIZE (2000) |
133 | #define MC_HEADER_SIZE (sizeof(struct microcode_header_intel)) | 133 | #define MC_HEADER_SIZE (sizeof(struct microcode_header_intel)) |
134 | #define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) | 134 | #define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) |
135 | #define EXT_HEADER_SIZE (sizeof(struct extended_sigtable)) | 135 | #define EXT_HEADER_SIZE (sizeof(struct extended_sigtable)) |
136 | #define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature)) | 136 | #define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature)) |
137 | #define DWSIZE (sizeof(u32)) | 137 | #define DWSIZE (sizeof(u32)) |
138 | |||
138 | #define get_totalsize(mc) \ | 139 | #define get_totalsize(mc) \ |
139 | (((struct microcode_intel *)mc)->hdr.totalsize ? \ | 140 | (((struct microcode_intel *)mc)->hdr.totalsize ? \ |
140 | ((struct microcode_intel *)mc)->hdr.totalsize : \ | 141 | ((struct microcode_intel *)mc)->hdr.totalsize : \ |
@@ -197,30 +198,31 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf) | |||
197 | } | 198 | } |
198 | 199 | ||
199 | static inline int | 200 | static inline int |
200 | update_match_revision(struct microcode_header_intel *mc_header, int rev) | 201 | update_match_revision(struct microcode_header_intel *mc_header, int rev) |
201 | { | 202 | { |
202 | return (mc_header->rev <= rev) ? 0 : 1; | 203 | return (mc_header->rev <= rev) ? 0 : 1; |
203 | } | 204 | } |
204 | 205 | ||
205 | static int microcode_sanity_check(void *mc) | 206 | static int microcode_sanity_check(void *mc) |
206 | { | 207 | { |
208 | unsigned long total_size, data_size, ext_table_size; | ||
207 | struct microcode_header_intel *mc_header = mc; | 209 | struct microcode_header_intel *mc_header = mc; |
208 | struct extended_sigtable *ext_header = NULL; | 210 | struct extended_sigtable *ext_header = NULL; |
209 | struct extended_signature *ext_sig; | ||
210 | unsigned long total_size, data_size, ext_table_size; | ||
211 | int sum, orig_sum, ext_sigcount = 0, i; | 211 | int sum, orig_sum, ext_sigcount = 0, i; |
212 | struct extended_signature *ext_sig; | ||
212 | 213 | ||
213 | total_size = get_totalsize(mc_header); | 214 | total_size = get_totalsize(mc_header); |
214 | data_size = get_datasize(mc_header); | 215 | data_size = get_datasize(mc_header); |
216 | |||
215 | if (data_size + MC_HEADER_SIZE > total_size) { | 217 | if (data_size + MC_HEADER_SIZE > total_size) { |
216 | printk(KERN_ERR "microcode: error! " | 218 | printk(KERN_ERR "microcode: error! " |
217 | "Bad data size in microcode data file\n"); | 219 | "Bad data size in microcode data file\n"); |
218 | return -EINVAL; | 220 | return -EINVAL; |
219 | } | 221 | } |
220 | 222 | ||
221 | if (mc_header->ldrver != 1 || mc_header->hdrver != 1) { | 223 | if (mc_header->ldrver != 1 || mc_header->hdrver != 1) { |
222 | printk(KERN_ERR "microcode: error! " | 224 | printk(KERN_ERR "microcode: error! " |
223 | "Unknown microcode update format\n"); | 225 | "Unknown microcode update format\n"); |
224 | return -EINVAL; | 226 | return -EINVAL; |
225 | } | 227 | } |
226 | ext_table_size = total_size - (MC_HEADER_SIZE + data_size); | 228 | ext_table_size = total_size - (MC_HEADER_SIZE + data_size); |
@@ -318,11 +320,15 @@ get_matching_microcode(struct cpu_signature *cpu_sig, void *mc, int rev) | |||
318 | 320 | ||
319 | static void apply_microcode(int cpu) | 321 | static void apply_microcode(int cpu) |
320 | { | 322 | { |
323 | struct microcode_intel *mc_intel; | ||
324 | struct ucode_cpu_info *uci; | ||
321 | unsigned long flags; | 325 | unsigned long flags; |
322 | unsigned int val[2]; | 326 | unsigned int val[2]; |
323 | int cpu_num = raw_smp_processor_id(); | 327 | int cpu_num; |
324 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 328 | |
325 | struct microcode_intel *mc_intel = uci->mc; | 329 | cpu_num = raw_smp_processor_id(); |
330 | uci = ucode_cpu_info + cpu; | ||
331 | mc_intel = uci->mc; | ||
326 | 332 | ||
327 | /* We should bind the task to the CPU */ | 333 | /* We should bind the task to the CPU */ |
328 | BUG_ON(cpu_num != cpu); | 334 | BUG_ON(cpu_num != cpu); |
@@ -348,15 +354,17 @@ static void apply_microcode(int cpu) | |||
348 | spin_unlock_irqrestore(&microcode_update_lock, flags); | 354 | spin_unlock_irqrestore(&microcode_update_lock, flags); |
349 | if (val[1] != mc_intel->hdr.rev) { | 355 | if (val[1] != mc_intel->hdr.rev) { |
350 | printk(KERN_ERR "microcode: CPU%d update from revision " | 356 | printk(KERN_ERR "microcode: CPU%d update from revision " |
351 | "0x%x to 0x%x failed\n", cpu_num, uci->cpu_sig.rev, val[1]); | 357 | "0x%x to 0x%x failed\n", |
358 | cpu_num, uci->cpu_sig.rev, val[1]); | ||
352 | return; | 359 | return; |
353 | } | 360 | } |
354 | printk(KERN_INFO "microcode: CPU%d updated from revision " | 361 | printk(KERN_INFO "microcode: CPU%d updated from revision " |
355 | "0x%x to 0x%x, date = %04x-%02x-%02x \n", | 362 | "0x%x to 0x%x, date = %04x-%02x-%02x \n", |
356 | cpu_num, uci->cpu_sig.rev, val[1], | 363 | cpu_num, uci->cpu_sig.rev, val[1], |
357 | mc_intel->hdr.date & 0xffff, | 364 | mc_intel->hdr.date & 0xffff, |
358 | mc_intel->hdr.date >> 24, | 365 | mc_intel->hdr.date >> 24, |
359 | (mc_intel->hdr.date >> 16) & 0xff); | 366 | (mc_intel->hdr.date >> 16) & 0xff); |
367 | |||
360 | uci->cpu_sig.rev = val[1]; | 368 | uci->cpu_sig.rev = val[1]; |
361 | } | 369 | } |
362 | 370 | ||
@@ -404,18 +412,23 @@ static int generic_load_microcode(int cpu, void *data, size_t size, | |||
404 | leftover -= mc_size; | 412 | leftover -= mc_size; |
405 | } | 413 | } |
406 | 414 | ||
407 | if (new_mc) { | 415 | if (!new_mc) |
408 | if (!leftover) { | 416 | goto out; |
409 | if (uci->mc) | 417 | |
410 | vfree(uci->mc); | 418 | if (leftover) { |
411 | uci->mc = (struct microcode_intel *)new_mc; | 419 | vfree(new_mc); |
412 | pr_debug("microcode: CPU%d found a matching microcode update with" | 420 | goto out; |
413 | " version 0x%x (current=0x%x)\n", | ||
414 | cpu, new_rev, uci->cpu_sig.rev); | ||
415 | } else | ||
416 | vfree(new_mc); | ||
417 | } | 421 | } |
418 | 422 | ||
423 | if (uci->mc) | ||
424 | vfree(uci->mc); | ||
425 | uci->mc = (struct microcode_intel *)new_mc; | ||
426 | |||
427 | pr_debug("microcode: CPU%d found a matching microcode update with" | ||
428 | " version 0x%x (current=0x%x)\n", | ||
429 | cpu, new_rev, uci->cpu_sig.rev); | ||
430 | |||
431 | out: | ||
419 | return (int)leftover; | 432 | return (int)leftover; |
420 | } | 433 | } |
421 | 434 | ||
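
The apply_microcode() printk above unpacks the date field of the Intel microcode header, which (as the format string implies) is BCD-packed: month in bits 31:24, day in bits 23:16, year in bits 15:0. A hedged sketch of just that decoding, assuming only the layout the driver's own printk relies on:

#include <linux/kernel.h>

/* date as found in struct microcode_header_intel's hdr.date */
static void print_ucode_date(unsigned int date)
{
        printk(KERN_INFO "microcode date = %04x-%02x-%02x\n",
               date & 0xffff,           /* year, BCD */
               date >> 24,              /* month, BCD */
               (date >> 16) & 0xff);    /* day, BCD */
}
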
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index dce99dca6cf8..70fd7e414c15 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -679,7 +679,7 @@ void __init get_smp_config(void) | |||
679 | __get_smp_config(0); | 679 | __get_smp_config(0); |
680 | } | 680 | } |
681 | 681 | ||
682 | static void smp_reserve_bootmem(struct mpf_intel *mpf) | 682 | static void __init smp_reserve_bootmem(struct mpf_intel *mpf) |
683 | { | 683 | { |
684 | unsigned long size = get_mpc_size(mpf->physptr); | 684 | unsigned long size = get_mpc_size(mpf->physptr); |
685 | #ifdef CONFIG_X86_32 | 685 | #ifdef CONFIG_X86_32 |
@@ -838,7 +838,7 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m) | |||
838 | 838 | ||
839 | static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM]; | 839 | static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM]; |
840 | 840 | ||
841 | static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) | 841 | static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) |
842 | { | 842 | { |
843 | int i; | 843 | int i; |
844 | 844 | ||
@@ -866,7 +866,8 @@ static void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) | |||
866 | } | 866 | } |
867 | } | 867 | } |
868 | #else /* CONFIG_X86_IO_APIC */ | 868 | #else /* CONFIG_X86_IO_APIC */ |
869 | static inline void check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {} | 869 | static |
870 | inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {} | ||
870 | #endif /* CONFIG_X86_IO_APIC */ | 871 | #endif /* CONFIG_X86_IO_APIC */ |
871 | 872 | ||
872 | static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, | 873 | static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, |
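
The mpparse.c hunks only add annotations: smp_reserve_bootmem() and check_irq_src() are called exclusively during boot, so marking them __init moves their text into .init.text, which the kernel frees once initialization completes (modpost warns if a non-init caller references them). A generic sketch of the convention, with illustrative names:

#include <linux/init.h>
#include <linux/kernel.h>

static int boot_only_value __initdata = 42;     /* freed after boot */

static int __init my_early_setup(void)
{
        /* Safe: an __init function using __initdata, at boot time only. */
        pr_info("consumed %d during boot\n", boot_only_value);
        return 0;
}
early_initcall(my_early_setup);
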
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 90f5b9ef5def..745579bc8256 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -40,7 +40,7 @@ EXPORT_SYMBOL(bad_dma_address); | |||
40 | to older i386. */ | 40 | to older i386. */ |
41 | struct device x86_dma_fallback_dev = { | 41 | struct device x86_dma_fallback_dev = { |
42 | .init_name = "fallback device", | 42 | .init_name = "fallback device", |
43 | .coherent_dma_mask = DMA_32BIT_MASK, | 43 | .coherent_dma_mask = DMA_BIT_MASK(32), |
44 | .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask, | 44 | .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask, |
45 | }; | 45 | }; |
46 | EXPORT_SYMBOL(x86_dma_fallback_dev); | 46 | EXPORT_SYMBOL(x86_dma_fallback_dev); |
@@ -148,7 +148,7 @@ again: | |||
148 | if (!is_buffer_dma_capable(dma_mask, addr, size)) { | 148 | if (!is_buffer_dma_capable(dma_mask, addr, size)) { |
149 | __free_pages(page, get_order(size)); | 149 | __free_pages(page, get_order(size)); |
150 | 150 | ||
151 | if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) { | 151 | if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) { |
152 | flag = (flag & ~GFP_DMA32) | GFP_DMA; | 152 | flag = (flag & ~GFP_DMA32) | GFP_DMA; |
153 | goto again; | 153 | goto again; |
154 | } | 154 | } |
@@ -243,7 +243,7 @@ int dma_supported(struct device *dev, u64 mask) | |||
243 | /* Copied from i386. Doesn't make much sense, because it will | 243 | /* Copied from i386. Doesn't make much sense, because it will |
244 | only work for pci_alloc_coherent. | 244 | only work for pci_alloc_coherent. |
245 | The caller just has to use GFP_DMA in this case. */ | 245 | The caller just has to use GFP_DMA in this case. */ |
246 | if (mask < DMA_24BIT_MASK) | 246 | if (mask < DMA_BIT_MASK(24)) |
247 | return 0; | 247 | return 0; |
248 | 248 | ||
249 | /* Tell the device to use SAC when IOMMU force is on. This | 249 | /* Tell the device to use SAC when IOMMU force is on. This |
@@ -258,7 +258,7 @@ int dma_supported(struct device *dev, u64 mask) | |||
258 | SAC for these. Assume all masks <= 40 bits are of this | 258 | SAC for these. Assume all masks <= 40 bits are of this |
259 | type. Normally this doesn't make any difference, but gives | 259 | type. Normally this doesn't make any difference, but gives |
260 | more gentle handling of IOMMU overflow. */ | 260 | more gentle handling of IOMMU overflow. */ |
261 | if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) { | 261 | if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) { |
262 | dev_info(dev, "Force SAC with mask %Lx\n", mask); | 262 | dev_info(dev, "Force SAC with mask %Lx\n", mask); |
263 | return 0; | 263 | return 0; |
264 | } | 264 | } |
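
The pci-dma.c hunks above (and the pci-nommu.c one below) swap the fixed DMA_24/32/40BIT_MASK constants for the parameterized DMA_BIT_MASK(n), a mask with the low n bits set. A rough open-coded equivalent of the real macro from <linux/dma-mapping.h>:

/* Low n bits set; 64 is special-cased to avoid an undefined shift. */
#define MY_DMA_BIT_MASK(n)      (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/*
 * MY_DMA_BIT_MASK(24) == 0x0000000000ffffffULL
 * MY_DMA_BIT_MASK(32) == 0x00000000ffffffffULL
 * MY_DMA_BIT_MASK(40) == 0x000000ffffffffffULL
 */
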
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index c6d703b39326..71d412a09f30 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c | |||
@@ -15,7 +15,7 @@ static int | |||
15 | check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) | 15 | check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) |
16 | { | 16 | { |
17 | if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) { | 17 | if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) { |
18 | if (*hwdev->dma_mask >= DMA_32BIT_MASK) | 18 | if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) |
19 | printk(KERN_ERR | 19 | printk(KERN_ERR |
20 | "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", | 20 | "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", |
21 | name, (long long)bus, size, | 21 | name, (long long)bus, size, |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 156f87582c6c..ca989158e847 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -8,7 +8,7 @@ | |||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/pm.h> | 9 | #include <linux/pm.h> |
10 | #include <linux/clockchips.h> | 10 | #include <linux/clockchips.h> |
11 | #include <linux/ftrace.h> | 11 | #include <trace/power.h> |
12 | #include <asm/system.h> | 12 | #include <asm/system.h> |
13 | #include <asm/apic.h> | 13 | #include <asm/apic.h> |
14 | #include <asm/idle.h> | 14 | #include <asm/idle.h> |
@@ -22,6 +22,9 @@ EXPORT_SYMBOL(idle_nomwait); | |||
22 | 22 | ||
23 | struct kmem_cache *task_xstate_cachep; | 23 | struct kmem_cache *task_xstate_cachep; |
24 | 24 | ||
25 | DEFINE_TRACE(power_start); | ||
26 | DEFINE_TRACE(power_end); | ||
27 | |||
25 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) | 28 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
26 | { | 29 | { |
27 | *dst = *src; | 30 | *dst = *src; |
@@ -325,7 +328,7 @@ void stop_this_cpu(void *dummy) | |||
325 | /* | 328 | /* |
326 | * Remove this CPU: | 329 | * Remove this CPU: |
327 | */ | 330 | */ |
328 | cpu_clear(smp_processor_id(), cpu_online_map); | 331 | set_cpu_online(smp_processor_id(), false); |
329 | disable_local_APIC(); | 332 | disable_local_APIC(); |
330 | 333 | ||
331 | for (;;) { | 334 | for (;;) { |
@@ -475,12 +478,13 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) | |||
475 | return 1; | 478 | return 1; |
476 | } | 479 | } |
477 | 480 | ||
478 | static cpumask_t c1e_mask = CPU_MASK_NONE; | 481 | static cpumask_var_t c1e_mask; |
479 | static int c1e_detected; | 482 | static int c1e_detected; |
480 | 483 | ||
481 | void c1e_remove_cpu(int cpu) | 484 | void c1e_remove_cpu(int cpu) |
482 | { | 485 | { |
483 | cpu_clear(cpu, c1e_mask); | 486 | if (c1e_mask != NULL) |
487 | cpumask_clear_cpu(cpu, c1e_mask); | ||
484 | } | 488 | } |
485 | 489 | ||
486 | /* | 490 | /* |
@@ -509,8 +513,8 @@ static void c1e_idle(void) | |||
509 | if (c1e_detected) { | 513 | if (c1e_detected) { |
510 | int cpu = smp_processor_id(); | 514 | int cpu = smp_processor_id(); |
511 | 515 | ||
512 | if (!cpu_isset(cpu, c1e_mask)) { | 516 | if (!cpumask_test_cpu(cpu, c1e_mask)) { |
513 | cpu_set(cpu, c1e_mask); | 517 | cpumask_set_cpu(cpu, c1e_mask); |
514 | /* | 518 | /* |
515 | * Force broadcast so ACPI can not interfere. Needs | 519 | * Force broadcast so ACPI can not interfere. Needs |
516 | * to run with interrupts enabled as it uses | 520 | * to run with interrupts enabled as it uses |
@@ -562,6 +566,15 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) | |||
562 | pm_idle = default_idle; | 566 | pm_idle = default_idle; |
563 | } | 567 | } |
564 | 568 | ||
569 | void __init init_c1e_mask(void) | ||
570 | { | ||
571 | /* If we're using c1e_idle, we need to allocate c1e_mask. */ | ||
572 | if (pm_idle == c1e_idle) { | ||
573 | alloc_cpumask_var(&c1e_mask, GFP_KERNEL); | ||
574 | cpumask_clear(c1e_mask); | ||
575 | } | ||
576 | } | ||
577 | |||
565 | static int __init idle_setup(char *str) | 578 | static int __init idle_setup(char *str) |
566 | { | 579 | { |
567 | if (!str) | 580 | if (!str) |
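
c1e_mask above becomes a cpumask_var_t: with CONFIG_CPUMASK_OFFSTACK=y it is a pointer that must be allocated before use (hence the NULL guard in c1e_remove_cpu() and the allocation in init_c1e_mask()), while without that option the alloc/free calls cost essentially nothing. A sketch of the lifecycle, with my_mask as an illustrative stand-in:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

static cpumask_var_t my_mask;

static int my_mask_init(void)
{
        if (!alloc_cpumask_var(&my_mask, GFP_KERNEL))
                return -ENOMEM;         /* can only fail when offstack */
        cpumask_clear(my_mask);         /* alloc_cpumask_var() doesn't zero */
        return 0;
}

static void my_mask_exit(void)
{
        free_cpumask_var(my_mask);
}
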
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index b7cc21bc6ae0..23b7c8f017e2 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -34,6 +34,8 @@ | |||
34 | #include <asm/proto.h> | 34 | #include <asm/proto.h> |
35 | #include <asm/ds.h> | 35 | #include <asm/ds.h> |
36 | 36 | ||
37 | #include <trace/syscall.h> | ||
38 | |||
37 | #include "tls.h" | 39 | #include "tls.h" |
38 | 40 | ||
39 | enum x86_regset { | 41 | enum x86_regset { |
@@ -1415,6 +1417,9 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs) | |||
1415 | tracehook_report_syscall_entry(regs)) | 1417 | tracehook_report_syscall_entry(regs)) |
1416 | ret = -1L; | 1418 | ret = -1L; |
1417 | 1419 | ||
1420 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | ||
1421 | ftrace_syscall_enter(regs); | ||
1422 | |||
1418 | if (unlikely(current->audit_context)) { | 1423 | if (unlikely(current->audit_context)) { |
1419 | if (IS_IA32) | 1424 | if (IS_IA32) |
1420 | audit_syscall_entry(AUDIT_ARCH_I386, | 1425 | audit_syscall_entry(AUDIT_ARCH_I386, |
@@ -1438,6 +1443,9 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs) | |||
1438 | if (unlikely(current->audit_context)) | 1443 | if (unlikely(current->audit_context)) |
1439 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); | 1444 | audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); |
1440 | 1445 | ||
1446 | if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) | ||
1447 | ftrace_syscall_exit(regs); | ||
1448 | |||
1441 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | 1449 | if (test_thread_flag(TIF_SYSCALL_TRACE)) |
1442 | tracehook_report_syscall_exit(regs, 0); | 1450 | tracehook_report_syscall_exit(regs, 0); |
1443 | 1451 | ||
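
The ptrace.c hooks keep the syscall fast path cheap: when tracing is off, the only added cost is one thread-flag test marked unlikely(); the tracer is entered only for tasks with TIF_SYSCALL_FTRACE set. The gating pattern, reduced to a sketch (trace_my_event() is hypothetical):

#include <linux/sched.h>
#include <asm/ptrace.h>

extern void trace_my_event(struct pt_regs *regs);      /* hypothetical */

static void my_syscall_hook(struct pt_regs *regs)
{
        /* Predicted-not-taken branch while tracing is disabled. */
        if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
                trace_my_event(regs);
}
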
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 2aef36d8aca2..1340dad417f4 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -224,6 +224,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
224 | DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"), | 224 | DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"), |
225 | }, | 225 | }, |
226 | }, | 226 | }, |
227 | { /* Handle problems with rebooting on Dell DXP061 */ | ||
228 | .callback = set_bios_reboot, | ||
229 | .ident = "Dell DXP061", | ||
230 | .matches = { | ||
231 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
232 | DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"), | ||
233 | }, | ||
234 | }, | ||
227 | { } | 235 | { } |
228 | }; | 236 | }; |
229 | 237 | ||
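
Reboot quirks like the DXP061 entry above are keyed off DMI identity strings: when both vendor and product match, the callback forces the BIOS reboot method on machines whose default reboot path hangs. A sketch of such a table, reusing reboot.c's set_bios_reboot() callback (the vendor/product strings here are placeholders, not real hardware):

#include <linux/dmi.h>
#include <linux/init.h>

static struct dmi_system_id __initdata my_reboot_dmi_table[] = {
        {       /* placeholder machine */
                .callback = set_bios_reboot,
                .ident = "Example Board",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
                },
        },
        { }     /* zeroed sentinel terminates the table */
};
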
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ef7d10170c30..58d24ef917d8 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -101,11 +101,11 @@ EXPORT_SYMBOL(smp_num_siblings); | |||
101 | DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; | 101 | DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; |
102 | 102 | ||
103 | /* representing HT siblings of each logical CPU */ | 103 | /* representing HT siblings of each logical CPU */ |
104 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); | 104 | DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); |
105 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); | 105 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); |
106 | 106 | ||
107 | /* representing HT and core siblings of each logical CPU */ | 107 | /* representing HT and core siblings of each logical CPU */ |
108 | DEFINE_PER_CPU(cpumask_t, cpu_core_map); | 108 | DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); |
109 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); | 109 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); |
110 | 110 | ||
111 | /* Per CPU bogomips and other parameters */ | 111 | /* Per CPU bogomips and other parameters */ |
@@ -115,11 +115,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); | |||
115 | atomic_t init_deasserted; | 115 | atomic_t init_deasserted; |
116 | 116 | ||
117 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) | 117 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) |
118 | |||
119 | /* which logical CPUs are on which nodes */ | ||
120 | cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly = | ||
121 | { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE }; | ||
122 | EXPORT_SYMBOL(node_to_cpumask_map); | ||
123 | /* which node each logical CPU is on */ | 118 | /* which node each logical CPU is on */ |
124 | int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; | 119 | int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 }; |
125 | EXPORT_SYMBOL(cpu_to_node_map); | 120 | EXPORT_SYMBOL(cpu_to_node_map); |
@@ -128,7 +123,7 @@ EXPORT_SYMBOL(cpu_to_node_map); | |||
128 | static void map_cpu_to_node(int cpu, int node) | 123 | static void map_cpu_to_node(int cpu, int node) |
129 | { | 124 | { |
130 | printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); | 125 | printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); |
131 | cpumask_set_cpu(cpu, &node_to_cpumask_map[node]); | 126 | cpumask_set_cpu(cpu, node_to_cpumask_map[node]); |
132 | cpu_to_node_map[cpu] = node; | 127 | cpu_to_node_map[cpu] = node; |
133 | } | 128 | } |
134 | 129 | ||
@@ -139,7 +134,7 @@ static void unmap_cpu_to_node(int cpu) | |||
139 | 134 | ||
140 | printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); | 135 | printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); |
141 | for (node = 0; node < MAX_NUMNODES; node++) | 136 | for (node = 0; node < MAX_NUMNODES; node++) |
142 | cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]); | 137 | cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); |
143 | cpu_to_node_map[cpu] = 0; | 138 | cpu_to_node_map[cpu] = 0; |
144 | } | 139 | } |
145 | #else /* !(CONFIG_NUMA && CONFIG_X86_32) */ | 140 | #else /* !(CONFIG_NUMA && CONFIG_X86_32) */ |
@@ -301,7 +296,7 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
301 | __flush_tlb_all(); | 296 | __flush_tlb_all(); |
302 | #endif | 297 | #endif |
303 | 298 | ||
304 | /* This must be done before setting cpu_online_map */ | 299 | /* This must be done before setting cpu_online_mask */ |
305 | set_cpu_sibling_map(raw_smp_processor_id()); | 300 | set_cpu_sibling_map(raw_smp_processor_id()); |
306 | wmb(); | 301 | wmb(); |
307 | 302 | ||
@@ -334,6 +329,23 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
334 | cpu_idle(); | 329 | cpu_idle(); |
335 | } | 330 | } |
336 | 331 | ||
332 | #ifdef CONFIG_CPUMASK_OFFSTACK | ||
333 | /* In this case, llc_shared_map is a pointer to a cpumask. */ | ||
334 | static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst, | ||
335 | const struct cpuinfo_x86 *src) | ||
336 | { | ||
337 | struct cpumask *llc = dst->llc_shared_map; | ||
338 | *dst = *src; | ||
339 | dst->llc_shared_map = llc; | ||
340 | } | ||
341 | #else | ||
342 | static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst, | ||
343 | const struct cpuinfo_x86 *src) | ||
344 | { | ||
345 | *dst = *src; | ||
346 | } | ||
347 | #endif /* CONFIG_CPUMASK_OFFSTACK */ | ||
348 | |||
337 | /* | 349 | /* |
338 | * The bootstrap kernel entry code has set these up. Save them for | 350 | * The bootstrap kernel entry code has set these up. Save them for |
339 | * a given CPU | 351 | * a given CPU |
@@ -343,7 +355,7 @@ void __cpuinit smp_store_cpu_info(int id) | |||
343 | { | 355 | { |
344 | struct cpuinfo_x86 *c = &cpu_data(id); | 356 | struct cpuinfo_x86 *c = &cpu_data(id); |
345 | 357 | ||
346 | *c = boot_cpu_data; | 358 | copy_cpuinfo_x86(c, &boot_cpu_data); |
347 | c->cpu_index = id; | 359 | c->cpu_index = id; |
348 | if (id != 0) | 360 | if (id != 0) |
349 | identify_secondary_cpu(c); | 361 | identify_secondary_cpu(c); |
@@ -367,15 +379,15 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
367 | cpumask_set_cpu(cpu, cpu_sibling_mask(i)); | 379 | cpumask_set_cpu(cpu, cpu_sibling_mask(i)); |
368 | cpumask_set_cpu(i, cpu_core_mask(cpu)); | 380 | cpumask_set_cpu(i, cpu_core_mask(cpu)); |
369 | cpumask_set_cpu(cpu, cpu_core_mask(i)); | 381 | cpumask_set_cpu(cpu, cpu_core_mask(i)); |
370 | cpumask_set_cpu(i, &c->llc_shared_map); | 382 | cpumask_set_cpu(i, c->llc_shared_map); |
371 | cpumask_set_cpu(cpu, &o->llc_shared_map); | 383 | cpumask_set_cpu(cpu, o->llc_shared_map); |
372 | } | 384 | } |
373 | } | 385 | } |
374 | } else { | 386 | } else { |
375 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); | 387 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); |
376 | } | 388 | } |
377 | 389 | ||
378 | cpumask_set_cpu(cpu, &c->llc_shared_map); | 390 | cpumask_set_cpu(cpu, c->llc_shared_map); |
379 | 391 | ||
380 | if (current_cpu_data.x86_max_cores == 1) { | 392 | if (current_cpu_data.x86_max_cores == 1) { |
381 | cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); | 393 | cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); |
@@ -386,8 +398,8 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
386 | for_each_cpu(i, cpu_sibling_setup_mask) { | 398 | for_each_cpu(i, cpu_sibling_setup_mask) { |
387 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && | 399 | if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && |
388 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | 400 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { |
389 | cpumask_set_cpu(i, &c->llc_shared_map); | 401 | cpumask_set_cpu(i, c->llc_shared_map); |
390 | cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map); | 402 | cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map); |
391 | } | 403 | } |
392 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { | 404 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { |
393 | cpumask_set_cpu(i, cpu_core_mask(cpu)); | 405 | cpumask_set_cpu(i, cpu_core_mask(cpu)); |
@@ -425,12 +437,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu) | |||
425 | if (sched_mc_power_savings || sched_smt_power_savings) | 437 | if (sched_mc_power_savings || sched_smt_power_savings) |
426 | return cpu_core_mask(cpu); | 438 | return cpu_core_mask(cpu); |
427 | else | 439 | else |
428 | return &c->llc_shared_map; | 440 | return c->llc_shared_map; |
429 | } | ||
430 | |||
431 | cpumask_t cpu_coregroup_map(int cpu) | ||
432 | { | ||
433 | return *cpu_coregroup_mask(cpu); | ||
434 | } | 441 | } |
435 | 442 | ||
436 | static void impress_friends(void) | 443 | static void impress_friends(void) |
@@ -897,9 +904,8 @@ int __cpuinit native_cpu_up(unsigned int cpu) | |||
897 | */ | 904 | */ |
898 | static __init void disable_smp(void) | 905 | static __init void disable_smp(void) |
899 | { | 906 | { |
900 | /* use the read/write pointers to the present and possible maps */ | 907 | init_cpu_present(cpumask_of(0)); |
901 | cpumask_copy(&cpu_present_map, cpumask_of(0)); | 908 | init_cpu_possible(cpumask_of(0)); |
902 | cpumask_copy(&cpu_possible_map, cpumask_of(0)); | ||
903 | smpboot_clear_io_apic_irqs(); | 909 | smpboot_clear_io_apic_irqs(); |
904 | 910 | ||
905 | if (smp_found_config) | 911 | if (smp_found_config) |
@@ -1031,6 +1037,8 @@ static void __init smp_cpu_index_default(void) | |||
1031 | */ | 1037 | */ |
1032 | void __init native_smp_prepare_cpus(unsigned int max_cpus) | 1038 | void __init native_smp_prepare_cpus(unsigned int max_cpus) |
1033 | { | 1039 | { |
1040 | unsigned int i; | ||
1041 | |||
1034 | preempt_disable(); | 1042 | preempt_disable(); |
1035 | smp_cpu_index_default(); | 1043 | smp_cpu_index_default(); |
1036 | current_cpu_data = boot_cpu_data; | 1044 | current_cpu_data = boot_cpu_data; |
@@ -1044,6 +1052,14 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1044 | boot_cpu_logical_apicid = logical_smp_processor_id(); | 1052 | boot_cpu_logical_apicid = logical_smp_processor_id(); |
1045 | #endif | 1053 | #endif |
1046 | current_thread_info()->cpu = 0; /* needed? */ | 1054 | current_thread_info()->cpu = 0; /* needed? */ |
1055 | for_each_possible_cpu(i) { | ||
1056 | alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); | ||
1057 | alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); | ||
1058 | alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL); | ||
1059 | cpumask_clear(per_cpu(cpu_core_map, i)); | ||
1060 | cpumask_clear(per_cpu(cpu_sibling_map, i)); | ||
1061 | cpumask_clear(cpu_data(i).llc_shared_map); | ||
1062 | } | ||
1047 | set_cpu_sibling_map(0); | 1063 | set_cpu_sibling_map(0); |
1048 | 1064 | ||
1049 | enable_IR_x2apic(); | 1065 | enable_IR_x2apic(); |
@@ -1132,11 +1148,11 @@ early_param("possible_cpus", _setup_possible_cpus); | |||
1132 | 1148 | ||
1133 | 1149 | ||
1134 | /* | 1150 | /* |
1135 | * cpu_possible_map should be static; it cannot change as CPUs | 1151 | * cpu_possible_mask should be static; it cannot change as CPUs |
1136 | * are onlined or offlined. The reason is that per-cpu data structures | 1152 | * are onlined or offlined. The reason is that per-cpu data structures |
1137 | * are allocated by some modules at init time, and don't expect to | 1153 | * are allocated by some modules at init time, and don't expect to |
1138 | * do this dynamically on cpu arrival/departure. | 1154 | * do this dynamically on cpu arrival/departure. |
1139 | * cpu_present_map on the other hand can change dynamically. | 1155 | * cpu_present_mask on the other hand can change dynamically. |
1140 | * In case when cpu_hotplug is not compiled, then we resort to current | 1156 | * In case when cpu_hotplug is not compiled, then we resort to current |
1141 | * behaviour, which is cpu_possible == cpu_present. | 1157 | * behaviour, which is cpu_possible == cpu_present. |
1142 | * - Ashok Raj | 1158 | * - Ashok Raj |
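
copy_cpuinfo_x86() above exists because, once llc_shared_map is a pointer (CONFIG_CPUMASK_OFFSTACK), a plain structure assignment from boot_cpu_data would silently replace each secondary CPU's private mask pointer with CPU 0's. The hazard and its fix, reduced to a toy structure:

#include <linux/cpumask.h>

struct toy_info {                               /* stand-in for cpuinfo_x86 */
        int cpu_index;
        struct cpumask *llc_shared_map;         /* per-CPU allocation */
};

static void toy_copy(struct toy_info *dst, const struct toy_info *src)
{
        struct cpumask *llc = dst->llc_shared_map;      /* save ours */

        *dst = *src;                    /* copies src's pointer too... */
        dst->llc_shared_map = llc;      /* ...so restore it afterwards */
}
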
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 78422336ddea..ed0c33761e6d 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c | |||
@@ -305,6 +305,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode, | |||
305 | return NULL; | 305 | return NULL; |
306 | } | 306 | } |
307 | 307 | ||
308 | static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask); | ||
309 | |||
308 | /** | 310 | /** |
309 | * uv_flush_tlb_others - globally purge translation cache of a virtual | 311 | * uv_flush_tlb_others - globally purge translation cache of a virtual |
310 | * address or all TLB's | 312 | * address or all TLB's |
@@ -334,8 +336,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | |||
334 | struct mm_struct *mm, | 336 | struct mm_struct *mm, |
335 | unsigned long va, unsigned int cpu) | 337 | unsigned long va, unsigned int cpu) |
336 | { | 338 | { |
337 | static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask); | 339 | struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask); |
338 | struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask); | ||
339 | int i; | 340 | int i; |
340 | int bit; | 341 | int bit; |
341 | int pnode; | 342 | int pnode; |
@@ -830,6 +831,10 @@ static int __init uv_bau_init(void) | |||
830 | if (!is_uv_system()) | 831 | if (!is_uv_system()) |
831 | return 0; | 832 | return 0; |
832 | 833 | ||
834 | for_each_possible_cpu(cur_cpu) | ||
835 | alloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), | ||
836 | GFP_KERNEL, cpu_to_node(cur_cpu)); | ||
837 | |||
833 | uv_bau_retry_limit = 1; | 838 | uv_bau_retry_limit = 1; |
834 | uv_nshift = uv_hub_info->n_val; | 839 | uv_nshift = uv_hub_info->n_val; |
835 | uv_mmask = (1UL << uv_hub_info->n_val) - 1; | 840 | uv_mmask = (1UL << uv_hub_info->n_val) - 1; |
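
tlb_uv.c moves its per-CPU scratch mask out of function scope and allocates it with alloc_cpumask_var_node(), placing each CPU's bitmap in memory on that CPU's own node; for a mask touched on every TLB shootdown, that locality matters on a large NUMA machine. A sketch (my_scratch_mask is illustrative; the patch itself ignores allocation failure):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/slab.h>
#include <linux/init.h>

static DEFINE_PER_CPU(cpumask_var_t, my_scratch_mask);

static int __init my_scratch_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                if (!alloc_cpumask_var_node(&per_cpu(my_scratch_mask, cpu),
                                            GFP_KERNEL, cpu_to_node(cpu)))
                        return -ENOMEM; /* bitmap lives on cpu's node */
        return 0;
}
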
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 2b54fe002e94..0a5b04aa98f1 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c | |||
@@ -324,7 +324,7 @@ void __ref xsave_cntxt_init(void) | |||
324 | } | 324 | } |
325 | 325 | ||
326 | /* | 326 | /* |
327 | * for now OS knows only about FP/SSE | 327 | * Support only the state known to OS. |
328 | */ | 328 | */ |
329 | pcntxt_mask = pcntxt_mask & XCNTXT_MASK; | 329 | pcntxt_mask = pcntxt_mask & XCNTXT_MASK; |
330 | xsave_init(); | 330 | xsave_init(); |
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 0a303c3ed11f..a58504ea78cc 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig | |||
@@ -59,7 +59,8 @@ config KVM_AMD | |||
59 | 59 | ||
60 | config KVM_TRACE | 60 | config KVM_TRACE |
61 | bool "KVM trace support" | 61 | bool "KVM trace support" |
62 | depends on KVM && MARKERS && SYSFS | 62 | depends on KVM && SYSFS |
63 | select MARKERS | ||
63 | select RELAY | 64 | select RELAY |
64 | select DEBUG_FS | 65 | select DEBUG_FS |
65 | default n | 66 | default n |
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 08537747cb58..fdd30d08ab52 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile | |||
@@ -14,7 +14,7 @@ obj-$(CONFIG_MMIOTRACE) += mmiotrace.o | |||
14 | mmiotrace-y := kmmio.o pf_in.o mmio-mod.o | 14 | mmiotrace-y := kmmio.o pf_in.o mmio-mod.o |
15 | obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o | 15 | obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o |
16 | 16 | ||
17 | obj-$(CONFIG_NUMA) += numa_$(BITS).o | 17 | obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o |
18 | obj-$(CONFIG_K8_NUMA) += k8topology_64.o | 18 | obj-$(CONFIG_K8_NUMA) += k8topology_64.o |
19 | obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o | 19 | obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o |
20 | 20 | ||
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index be54176e9eb2..6340cef6798a 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c | |||
@@ -219,6 +219,22 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, | |||
219 | return 1; | 219 | return 1; |
220 | } | 220 | } |
221 | 221 | ||
222 | /** | ||
223 | * get_user_pages_fast() - pin user pages in memory | ||
224 | * @start: starting user address | ||
225 | * @nr_pages: number of pages from start to pin | ||
226 | * @write: whether pages will be written to | ||
227 | * @pages: array that receives pointers to the pages pinned. | ||
228 | * Should be at least nr_pages long. | ||
229 | * | ||
230 | * Attempt to pin user pages in memory without taking mm->mmap_sem. | ||
231 | * If not successful, it will fall back to taking the lock and | ||
232 | * calling get_user_pages(). | ||
233 | * | ||
234 | * Returns number of pages pinned. This may be fewer than the number | ||
235 | * requested. If nr_pages is 0 or negative, returns 0. If no pages | ||
236 | * were pinned, returns -errno. | ||
237 | */ | ||
222 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, | 238 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, |
223 | struct page **pages) | 239 | struct page **pages) |
224 | { | 240 | { |
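
The new kerneldoc above pins down the get_user_pages_fast() contract: it may pin fewer pages than requested, and every pinned page carries a reference the caller must drop. A hedged usage sketch (pin_user_buffer() is illustrative):

#include <linux/mm.h>
#include <linux/errno.h>

static int pin_user_buffer(unsigned long uaddr, int nr_pages,
                           struct page **pages)
{
        int i, got;

        got = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
        if (got < 0)
                return got;                     /* nothing was pinned */

        /* ... use pages[0..got-1], e.g. to build a DMA scatterlist ... */

        for (i = 0; i < got; i++)
                put_page(pages[i]);             /* drop our references */

        return got == nr_pages ? 0 : -EFAULT;
}
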
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index db81e9a8556b..749559ed80f5 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -1054,17 +1054,47 @@ static noinline int do_test_wp_bit(void) | |||
1054 | const int rodata_test_data = 0xC3; | 1054 | const int rodata_test_data = 0xC3; |
1055 | EXPORT_SYMBOL_GPL(rodata_test_data); | 1055 | EXPORT_SYMBOL_GPL(rodata_test_data); |
1056 | 1056 | ||
1057 | static int kernel_set_to_readonly; | ||
1058 | |||
1059 | void set_kernel_text_rw(void) | ||
1060 | { | ||
1061 | unsigned long start = PFN_ALIGN(_text); | ||
1062 | unsigned long size = PFN_ALIGN(_etext) - start; | ||
1063 | |||
1064 | if (!kernel_set_to_readonly) | ||
1065 | return; | ||
1066 | |||
1067 | pr_debug("Set kernel text: %lx - %lx for read write\n", | ||
1068 | start, start+size); | ||
1069 | |||
1070 | set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); | ||
1071 | } | ||
1072 | |||
1073 | void set_kernel_text_ro(void) | ||
1074 | { | ||
1075 | unsigned long start = PFN_ALIGN(_text); | ||
1076 | unsigned long size = PFN_ALIGN(_etext) - start; | ||
1077 | |||
1078 | if (!kernel_set_to_readonly) | ||
1079 | return; | ||
1080 | |||
1081 | pr_debug("Set kernel text: %lx - %lx for read only\n", | ||
1082 | start, start+size); | ||
1083 | |||
1084 | set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); | ||
1085 | } | ||
1086 | |||
1057 | void mark_rodata_ro(void) | 1087 | void mark_rodata_ro(void) |
1058 | { | 1088 | { |
1059 | unsigned long start = PFN_ALIGN(_text); | 1089 | unsigned long start = PFN_ALIGN(_text); |
1060 | unsigned long size = PFN_ALIGN(_etext) - start; | 1090 | unsigned long size = PFN_ALIGN(_etext) - start; |
1061 | 1091 | ||
1062 | #ifndef CONFIG_DYNAMIC_FTRACE | ||
1063 | /* Dynamic tracing modifies the kernel text section */ | ||
1064 | set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); | 1092 | set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); |
1065 | printk(KERN_INFO "Write protecting the kernel text: %luk\n", | 1093 | printk(KERN_INFO "Write protecting the kernel text: %luk\n", |
1066 | size >> 10); | 1094 | size >> 10); |
1067 | 1095 | ||
1096 | kernel_set_to_readonly = 1; | ||
1097 | |||
1068 | #ifdef CONFIG_CPA_DEBUG | 1098 | #ifdef CONFIG_CPA_DEBUG |
1069 | printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", | 1099 | printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", |
1070 | start, start+size); | 1100 | start, start+size); |
@@ -1073,7 +1103,6 @@ void mark_rodata_ro(void) | |||
1073 | printk(KERN_INFO "Testing CPA: write protecting again\n"); | 1103 | printk(KERN_INFO "Testing CPA: write protecting again\n"); |
1074 | set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); | 1104 | set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); |
1075 | #endif | 1105 | #endif |
1076 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
1077 | 1106 | ||
1078 | start += size; | 1107 | start += size; |
1079 | size = (unsigned long)__end_rodata - start; | 1108 | size = (unsigned long)__end_rodata - start; |
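
The init_32.c change drops the old "leave the whole text writable under CONFIG_DYNAMIC_FTRACE" #ifdef: the text is now always write-protected, and set_kernel_text_rw()/set_kernel_text_ro() let ftrace lift and restore that protection just around its patching. The kernel_set_to_readonly latch keeps both helpers inert until mark_rodata_ro() has run. A sketch of a caller (patch_call_sites() is hypothetical):

static void patch_kernel_text(void)
{
        set_kernel_text_rw();   /* no-op before mark_rodata_ro() */
        patch_call_sites();     /* hypothetical: rewrite mcount call sites */
        set_kernel_text_ro();   /* re-apply write protection */
}
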
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 54efa57d1c03..1753e8020df6 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -734,21 +734,48 @@ void __init mem_init(void) | |||
734 | const int rodata_test_data = 0xC3; | 734 | const int rodata_test_data = 0xC3; |
735 | EXPORT_SYMBOL_GPL(rodata_test_data); | 735 | EXPORT_SYMBOL_GPL(rodata_test_data); |
736 | 736 | ||
737 | static int kernel_set_to_readonly; | ||
738 | |||
739 | void set_kernel_text_rw(void) | ||
740 | { | ||
741 | unsigned long start = PFN_ALIGN(_stext); | ||
742 | unsigned long end = PFN_ALIGN(__start_rodata); | ||
743 | |||
744 | if (!kernel_set_to_readonly) | ||
745 | return; | ||
746 | |||
747 | pr_debug("Set kernel text: %lx - %lx for read write\n", | ||
748 | start, end); | ||
749 | |||
750 | set_memory_rw(start, (end - start) >> PAGE_SHIFT); | ||
751 | } | ||
752 | |||
753 | void set_kernel_text_ro(void) | ||
754 | { | ||
755 | unsigned long start = PFN_ALIGN(_stext); | ||
756 | unsigned long end = PFN_ALIGN(__start_rodata); | ||
757 | |||
758 | if (!kernel_set_to_readonly) | ||
759 | return; | ||
760 | |||
761 | pr_debug("Set kernel text: %lx - %lx for read only\n", | ||
762 | start, end); | ||
763 | |||
764 | set_memory_ro(start, (end - start) >> PAGE_SHIFT); | ||
765 | } | ||
766 | |||
737 | void mark_rodata_ro(void) | 767 | void mark_rodata_ro(void) |
738 | { | 768 | { |
739 | unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata); | 769 | unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata); |
740 | unsigned long rodata_start = | 770 | unsigned long rodata_start = |
741 | ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK; | 771 | ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK; |
742 | 772 | ||
743 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
744 | /* Dynamic tracing modifies the kernel text section */ | ||
745 | start = rodata_start; | ||
746 | #endif | ||
747 | |||
748 | printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", | 773 | printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", |
749 | (end - start) >> 10); | 774 | (end - start) >> 10); |
750 | set_memory_ro(start, (end - start) >> PAGE_SHIFT); | 775 | set_memory_ro(start, (end - start) >> PAGE_SHIFT); |
751 | 776 | ||
777 | kernel_set_to_readonly = 1; | ||
778 | |||
752 | /* | 779 | /* |
753 | * The rodata section (but not the kernel text!) should also be | 780 | * The rodata section (but not the kernel text!) should also be |
754 | * not-executable. | 781 | * not-executable. |
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index e331f77348a7..8056545e2d39 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c | |||
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | int is_io_mapping_possible(resource_size_t base, unsigned long size) | 24 | int is_io_mapping_possible(resource_size_t base, unsigned long size) |
25 | { | 25 | { |
26 | #ifndef CONFIG_X86_PAE | 26 | #if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT) |
27 | /* There is no way to map greater than 1 << 32 address without PAE */ | 27 | /* There is no way to map greater than 1 << 32 address without PAE */ |
28 | if (base + size > 0x100000000ULL) | 28 | if (base + size > 0x100000000ULL) |
29 | return 0; | 29 | return 0; |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 0dfa09d69e80..09daebfdb11c 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -547,7 +547,7 @@ void __init early_ioremap_reset(void) | |||
547 | } | 547 | } |
548 | 548 | ||
549 | static void __init __early_set_fixmap(enum fixed_addresses idx, | 549 | static void __init __early_set_fixmap(enum fixed_addresses idx, |
550 | unsigned long phys, pgprot_t flags) | 550 | phys_addr_t phys, pgprot_t flags) |
551 | { | 551 | { |
552 | unsigned long addr = __fix_to_virt(idx); | 552 | unsigned long addr = __fix_to_virt(idx); |
553 | pte_t *pte; | 553 | pte_t *pte; |
@@ -566,7 +566,7 @@ static void __init __early_set_fixmap(enum fixed_addresses idx, | |||
566 | } | 566 | } |
567 | 567 | ||
568 | static inline void __init early_set_fixmap(enum fixed_addresses idx, | 568 | static inline void __init early_set_fixmap(enum fixed_addresses idx, |
569 | unsigned long phys, pgprot_t prot) | 569 | phys_addr_t phys, pgprot_t prot) |
570 | { | 570 | { |
571 | if (after_paging_init) | 571 | if (after_paging_init) |
572 | __set_fixmap(idx, phys, prot); | 572 | __set_fixmap(idx, phys, prot); |
@@ -607,9 +607,10 @@ static int __init check_early_ioremap_leak(void) | |||
607 | late_initcall(check_early_ioremap_leak); | 607 | late_initcall(check_early_ioremap_leak); |
608 | 608 | ||
609 | static void __init __iomem * | 609 | static void __init __iomem * |
610 | __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot) | 610 | __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot) |
611 | { | 611 | { |
612 | unsigned long offset, last_addr; | 612 | unsigned long offset; |
613 | resource_size_t last_addr; | ||
613 | unsigned int nrpages; | 614 | unsigned int nrpages; |
614 | enum fixed_addresses idx0, idx; | 615 | enum fixed_addresses idx0, idx; |
615 | int i, slot; | 616 | int i, slot; |
@@ -625,15 +626,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot) | |||
625 | } | 626 | } |
626 | 627 | ||
627 | if (slot < 0) { | 628 | if (slot < 0) { |
628 | printk(KERN_INFO "early_iomap(%08lx, %08lx) not found slot\n", | 629 | printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n", |
629 | phys_addr, size); | 630 | (u64)phys_addr, size); |
630 | WARN_ON(1); | 631 | WARN_ON(1); |
631 | return NULL; | 632 | return NULL; |
632 | } | 633 | } |
633 | 634 | ||
634 | if (early_ioremap_debug) { | 635 | if (early_ioremap_debug) { |
635 | printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ", | 636 | printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ", |
636 | phys_addr, size, slot); | 637 | (u64)phys_addr, size, slot); |
637 | dump_stack(); | 638 | dump_stack(); |
638 | } | 639 | } |
639 | 640 | ||
@@ -680,13 +681,15 @@ __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot) | |||
680 | } | 681 | } |
681 | 682 | ||
682 | /* Remap an IO device */ | 683 | /* Remap an IO device */ |
683 | void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size) | 684 | void __init __iomem * |
685 | early_ioremap(resource_size_t phys_addr, unsigned long size) | ||
684 | { | 686 | { |
685 | return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO); | 687 | return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO); |
686 | } | 688 | } |
687 | 689 | ||
688 | /* Remap memory */ | 690 | /* Remap memory */ |
689 | void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size) | 691 | void __init __iomem * |
692 | early_memremap(resource_size_t phys_addr, unsigned long size) | ||
690 | { | 693 | { |
691 | return __early_ioremap(phys_addr, size, PAGE_KERNEL); | 694 | return __early_ioremap(phys_addr, size, PAGE_KERNEL); |
692 | } | 695 | } |
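
The ioremap.c hunks widen early_ioremap()'s address argument from unsigned long to resource_size_t, since with PAE a physical address can exceed 32 bits while unsigned long stays 32-bit. Printing such a value portably takes a u64 cast and %llx, as the printks above now do; in sketch form:

#include <linux/types.h>
#include <linux/kernel.h>

static void show_phys_range(phys_addr_t base, unsigned long size)
{
        /* phys_addr_t may be 64-bit even on a 32-bit (PAE) kernel */
        printk(KERN_DEBUG "phys range %08llx-%08llx\n",
               (u64)base, (u64)base + size - 1);
}
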
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c index 2c4baa88f2cb..c9342ed8b402 100644 --- a/arch/x86/mm/mmio-mod.c +++ b/arch/x86/mm/mmio-mod.c | |||
@@ -378,27 +378,34 @@ static void clear_trace_list(void) | |||
378 | } | 378 | } |
379 | 379 | ||
380 | #ifdef CONFIG_HOTPLUG_CPU | 380 | #ifdef CONFIG_HOTPLUG_CPU |
381 | static cpumask_t downed_cpus; | 381 | static cpumask_var_t downed_cpus; |
382 | 382 | ||
383 | static void enter_uniprocessor(void) | 383 | static void enter_uniprocessor(void) |
384 | { | 384 | { |
385 | int cpu; | 385 | int cpu; |
386 | int err; | 386 | int err; |
387 | 387 | ||
388 | if (downed_cpus == NULL && | ||
389 | !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) { | ||
390 | pr_notice(NAME "Failed to allocate mask\n"); | ||
391 | goto out; | ||
392 | } | ||
393 | |||
388 | get_online_cpus(); | 394 | get_online_cpus(); |
389 | downed_cpus = cpu_online_map; | 395 | cpumask_copy(downed_cpus, cpu_online_mask); |
390 | cpu_clear(first_cpu(cpu_online_map), downed_cpus); | 396 | cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus); |
391 | if (num_online_cpus() > 1) | 397 | if (num_online_cpus() > 1) |
392 | pr_notice(NAME "Disabling non-boot CPUs...\n"); | 398 | pr_notice(NAME "Disabling non-boot CPUs...\n"); |
393 | put_online_cpus(); | 399 | put_online_cpus(); |
394 | 400 | ||
395 | for_each_cpu_mask(cpu, downed_cpus) { | 401 | for_each_cpu(cpu, downed_cpus) { |
396 | err = cpu_down(cpu); | 402 | err = cpu_down(cpu); |
397 | if (!err) | 403 | if (!err) |
398 | pr_info(NAME "CPU%d is down.\n", cpu); | 404 | pr_info(NAME "CPU%d is down.\n", cpu); |
399 | else | 405 | else |
400 | pr_err(NAME "Error taking CPU%d down: %d\n", cpu, err); | 406 | pr_err(NAME "Error taking CPU%d down: %d\n", cpu, err); |
401 | } | 407 | } |
408 | out: | ||
402 | if (num_online_cpus() > 1) | 409 | if (num_online_cpus() > 1) |
403 | pr_warning(NAME "multiple CPUs still online, " | 410 | pr_warning(NAME "multiple CPUs still online, " |
404 | "may miss events.\n"); | 411 | "may miss events.\n"); |
@@ -411,10 +418,10 @@ static void __ref leave_uniprocessor(void) | |||
411 | int cpu; | 418 | int cpu; |
412 | int err; | 419 | int err; |
413 | 420 | ||
414 | if (cpus_weight(downed_cpus) == 0) | 421 | if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0) |
415 | return; | 422 | return; |
416 | pr_notice(NAME "Re-enabling CPUs...\n"); | 423 | pr_notice(NAME "Re-enabling CPUs...\n"); |
417 | for_each_cpu_mask(cpu, downed_cpus) { | 424 | for_each_cpu(cpu, downed_cpus) { |
418 | err = cpu_up(cpu); | 425 | err = cpu_up(cpu); |
419 | if (!err) | 426 | if (!err) |
420 | pr_info(NAME "enabled CPU%d.\n", cpu); | 427 | pr_info(NAME "enabled CPU%d.\n", cpu); |
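
mmio-mod.c now allocates downed_cpus on first use and lets every consumer tolerate a NULL mask if that allocation failed, instead of relying on a static cpumask_t. The guard, reduced to a sketch (my_lazy_mask is illustrative; the NULL case can only arise with CONFIG_CPUMASK_OFFSTACK=y):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

static cpumask_var_t my_lazy_mask;

static int my_lazy_mask_get(void)
{
        if (my_lazy_mask == NULL &&
            !alloc_cpumask_var(&my_lazy_mask, GFP_KERNEL))
                return -ENOMEM;         /* callers must cope, as above */
        return 0;
}
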
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c new file mode 100644 index 000000000000..550df481accd --- /dev/null +++ b/arch/x86/mm/numa.c | |||
@@ -0,0 +1,67 @@ | |||
1 | /* Common code for 32 and 64-bit NUMA */ | ||
2 | #include <linux/topology.h> | ||
3 | #include <linux/module.h> | ||
4 | #include <linux/bootmem.h> | ||
5 | |||
6 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | ||
7 | # define DBG(x...) printk(KERN_DEBUG x) | ||
8 | #else | ||
9 | # define DBG(x...) | ||
10 | #endif | ||
11 | |||
12 | /* | ||
13 | * Which logical CPUs are on which nodes | ||
14 | */ | ||
15 | cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; | ||
16 | EXPORT_SYMBOL(node_to_cpumask_map); | ||
17 | |||
18 | /* | ||
19 | * Allocate node_to_cpumask_map based on number of available nodes | ||
20 | * Requires node_possible_map to be valid. | ||
21 | * | ||
22 | * Note: node_to_cpumask() is not valid until after this is done. | ||
23 | * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) | ||
24 | */ | ||
25 | void __init setup_node_to_cpumask_map(void) | ||
26 | { | ||
27 | unsigned int node, num = 0; | ||
28 | |||
29 | /* setup nr_node_ids if not done yet */ | ||
30 | if (nr_node_ids == MAX_NUMNODES) { | ||
31 | for_each_node_mask(node, node_possible_map) | ||
32 | num = node; | ||
33 | nr_node_ids = num + 1; | ||
34 | } | ||
35 | |||
36 | /* allocate the map */ | ||
37 | for (node = 0; node < nr_node_ids; node++) | ||
38 | alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); | ||
39 | |||
40 | /* cpumask_of_node() will now work */ | ||
41 | pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); | ||
42 | } | ||
43 | |||
44 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | ||
45 | /* | ||
46 | * Returns a pointer to the bitmask of CPUs on Node 'node'. | ||
47 | */ | ||
48 | const struct cpumask *cpumask_of_node(int node) | ||
49 | { | ||
50 | if (node >= nr_node_ids) { | ||
51 | printk(KERN_WARNING | ||
52 | "cpumask_of_node(%d): node > nr_node_ids(%d)\n", | ||
53 | node, nr_node_ids); | ||
54 | dump_stack(); | ||
55 | return cpu_none_mask; | ||
56 | } | ||
57 | if (node_to_cpumask_map[node] == NULL) { | ||
58 | printk(KERN_WARNING | ||
59 | "cpumask_of_node(%d): no node_to_cpumask_map!\n", | ||
60 | node); | ||
61 | dump_stack(); | ||
62 | return cpu_online_mask; | ||
63 | } | ||
64 | return node_to_cpumask_map[node]; | ||
65 | } | ||
66 | EXPORT_SYMBOL(cpumask_of_node); | ||
67 | #endif | ||
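
The new numa.c centralizes the node-to-cpumask map for both 32- and 64-bit and hands out pointers through cpumask_of_node(), which degrades gracefully (cpu_none_mask for an out-of-range node, cpu_online_mask before the map is set up) rather than oopsing. A usage sketch (count_node_cpus() is illustrative):

#include <linux/cpumask.h>
#include <linux/topology.h>

static int count_node_cpus(int node)
{
        /* cpumask_weight() counts the set bits in the node's mask */
        return cpumask_weight(cpumask_of_node(node));
}
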
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 64c9cf043cdd..d73aaa892371 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -20,12 +20,6 @@ | |||
20 | #include <asm/acpi.h> | 20 | #include <asm/acpi.h> |
21 | #include <asm/k8.h> | 21 | #include <asm/k8.h> |
22 | 22 | ||
23 | #ifdef CONFIG_DEBUG_PER_CPU_MAPS | ||
24 | # define DBG(x...) printk(KERN_DEBUG x) | ||
25 | #else | ||
26 | # define DBG(x...) | ||
27 | #endif | ||
28 | |||
29 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; | 23 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; |
30 | EXPORT_SYMBOL(node_data); | 24 | EXPORT_SYMBOL(node_data); |
31 | 25 | ||
@@ -49,12 +43,6 @@ DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); | |||
49 | EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); | 43 | EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); |
50 | 44 | ||
51 | /* | 45 | /* |
52 | * Which logical CPUs are on which nodes | ||
53 | */ | ||
54 | cpumask_t *node_to_cpumask_map; | ||
55 | EXPORT_SYMBOL(node_to_cpumask_map); | ||
56 | |||
57 | /* | ||
58 | * Given a shift value, try to populate memnodemap[] | 46 | * Given a shift value, try to populate memnodemap[] |
59 | * Returns : | 47 | * Returns : |
60 | * 1 if OK | 48 | * 1 if OK |
@@ -661,36 +649,6 @@ void __init init_cpu_to_node(void) | |||
661 | #endif | 649 | #endif |
662 | 650 | ||
663 | 651 | ||
664 | /* | ||
665 | * Allocate node_to_cpumask_map based on number of available nodes | ||
666 | * Requires node_possible_map to be valid. | ||
667 | * | ||
668 | * Note: node_to_cpumask() is not valid until after this is done. | ||
669 | * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) | ||
670 | */ | ||
671 | void __init setup_node_to_cpumask_map(void) | ||
672 | { | ||
673 | unsigned int node, num = 0; | ||
674 | cpumask_t *map; | ||
675 | |||
676 | /* setup nr_node_ids if not done yet */ | ||
677 | if (nr_node_ids == MAX_NUMNODES) { | ||
678 | for_each_node_mask(node, node_possible_map) | ||
679 | num = node; | ||
680 | nr_node_ids = num + 1; | ||
681 | } | ||
682 | |||
683 | /* allocate the map */ | ||
684 | map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t)); | ||
685 | DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids); | ||
686 | |||
687 | pr_debug("Node to cpumask map at %p for %d nodes\n", | ||
688 | map, nr_node_ids); | ||
689 | |||
690 | /* node_to_cpumask() will now work */ | ||
691 | node_to_cpumask_map = map; | ||
692 | } | ||
693 | |||
694 | void __cpuinit numa_set_node(int cpu, int node) | 652 | void __cpuinit numa_set_node(int cpu, int node) |
695 | { | 653 | { |
696 | int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); | 654 | int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); |
@@ -723,12 +681,12 @@ void __cpuinit numa_clear_node(int cpu) | |||
723 | 681 | ||
724 | void __cpuinit numa_add_cpu(int cpu) | 682 | void __cpuinit numa_add_cpu(int cpu) |
725 | { | 683 | { |
726 | cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); | 684 | cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); |
727 | } | 685 | } |
728 | 686 | ||
729 | void __cpuinit numa_remove_cpu(int cpu) | 687 | void __cpuinit numa_remove_cpu(int cpu) |
730 | { | 688 | { |
731 | cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); | 689 | cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); |
732 | } | 690 | } |
733 | 691 | ||
734 | #else /* CONFIG_DEBUG_PER_CPU_MAPS */ | 692 | #else /* CONFIG_DEBUG_PER_CPU_MAPS */ |
@@ -739,20 +697,20 @@ void __cpuinit numa_remove_cpu(int cpu) | |||
739 | static void __cpuinit numa_set_cpumask(int cpu, int enable) | 697 | static void __cpuinit numa_set_cpumask(int cpu, int enable) |
740 | { | 698 | { |
741 | int node = early_cpu_to_node(cpu); | 699 | int node = early_cpu_to_node(cpu); |
742 | cpumask_t *mask; | 700 | struct cpumask *mask; |
743 | char buf[64]; | 701 | char buf[64]; |
744 | 702 | ||
745 | if (node_to_cpumask_map == NULL) { | 703 | mask = node_to_cpumask_map[node]; |
746 | printk(KERN_ERR "node_to_cpumask_map NULL\n"); | 704 | if (mask == NULL) { |
705 | printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node); | ||
747 | dump_stack(); | 706 | dump_stack(); |
748 | return; | 707 | return; |
749 | } | 708 | } |
750 | 709 | ||
751 | mask = &node_to_cpumask_map[node]; | ||
752 | if (enable) | 710 | if (enable) |
753 | cpu_set(cpu, *mask); | 711 | cpumask_set_cpu(cpu, mask); |
754 | else | 712 | else |
755 | cpu_clear(cpu, *mask); | 713 | cpumask_clear_cpu(cpu, mask); |
756 | 714 | ||
757 | cpulist_scnprintf(buf, sizeof(buf), mask); | 715 | cpulist_scnprintf(buf, sizeof(buf), mask); |
758 | printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", | 716 | printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", |
@@ -799,59 +757,6 @@ int early_cpu_to_node(int cpu) | |||
799 | return per_cpu(x86_cpu_to_node_map, cpu); | 757 | return per_cpu(x86_cpu_to_node_map, cpu); |
800 | } | 758 | } |
801 | 759 | ||
802 | |||
803 | /* empty cpumask */ | ||
804 | static const cpumask_t cpu_mask_none; | ||
805 | |||
806 | /* | ||
807 | * Returns a pointer to the bitmask of CPUs on Node 'node'. | ||
808 | */ | ||
809 | const cpumask_t *cpumask_of_node(int node) | ||
810 | { | ||
811 | if (node_to_cpumask_map == NULL) { | ||
812 | printk(KERN_WARNING | ||
813 | "cpumask_of_node(%d): no node_to_cpumask_map!\n", | ||
814 | node); | ||
815 | dump_stack(); | ||
816 | return (const cpumask_t *)&cpu_online_map; | ||
817 | } | ||
818 | if (node >= nr_node_ids) { | ||
819 | printk(KERN_WARNING | ||
820 | "cpumask_of_node(%d): node > nr_node_ids(%d)\n", | ||
821 | node, nr_node_ids); | ||
822 | dump_stack(); | ||
823 | return &cpu_mask_none; | ||
824 | } | ||
825 | return &node_to_cpumask_map[node]; | ||
826 | } | ||
827 | EXPORT_SYMBOL(cpumask_of_node); | ||
828 | |||
829 | /* | ||
830 | * Returns a bitmask of CPUs on Node 'node'. | ||
831 | * | ||
832 | * Side note: this function creates the returned cpumask on the stack | ||
833 | * so with a high NR_CPUS count, excessive stack space is used. The | ||
834 | * node_to_cpumask_ptr function should be used whenever possible. | ||
835 | */ | ||
836 | cpumask_t node_to_cpumask(int node) | ||
837 | { | ||
838 | if (node_to_cpumask_map == NULL) { | ||
839 | printk(KERN_WARNING | ||
840 | "node_to_cpumask(%d): no node_to_cpumask_map!\n", node); | ||
841 | dump_stack(); | ||
842 | return cpu_online_map; | ||
843 | } | ||
844 | if (node >= nr_node_ids) { | ||
845 | printk(KERN_WARNING | ||
846 | "node_to_cpumask(%d): node > nr_node_ids(%d)\n", | ||
847 | node, nr_node_ids); | ||
848 | dump_stack(); | ||
849 | return cpu_mask_none; | ||
850 | } | ||
851 | return node_to_cpumask_map[node]; | ||
852 | } | ||
853 | EXPORT_SYMBOL(node_to_cpumask); | ||
854 | |||
855 | /* | 760 | /* |
856 | * --------- end of debug versions of the numa functions --------- | 761 | * --------- end of debug versions of the numa functions --------- |
857 | */ | 762 | */ |
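
numa_add_cpu()/numa_remove_cpu() now go through the pointer-based cpumask API (cpumask_set_cpu()/cpumask_clear_cpu()) instead of cpu_set()/cpu_clear() on a cpumask_t lvalue. A small model of the call-site change, with a one-word mask standing in for struct cpumask:

#include <stdio.h>

struct cpumask { unsigned long bits; };   /* one word is enough for a sketch */

static inline void cpumask_set_cpu(int cpu, struct cpumask *m)   { m->bits |=  1UL << cpu; }
static inline void cpumask_clear_cpu(int cpu, struct cpumask *m) { m->bits &= ~(1UL << cpu); }

static struct cpumask node0_mask;

/* Mirrors numa_add_cpu()/numa_remove_cpu() after the patch: the map entry
 * is already a pointer, so no address-of is taken at the call site. */
static void numa_add_cpu(int cpu)    { cpumask_set_cpu(cpu, &node0_mask); }
static void numa_remove_cpu(int cpu) { cpumask_clear_cpu(cpu, &node0_mask); }

int main(void)
{
    numa_add_cpu(3);
    numa_add_cpu(5);
    numa_remove_cpu(3);
    printf("mask = %#lx\n", node0_mask.bits);   /* 0x20 */
    return 0;
}
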
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 640339ee4fb2..c009a241d562 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #ifdef CONFIG_X86_PAT | 31 | #ifdef CONFIG_X86_PAT |
32 | int __read_mostly pat_enabled = 1; | 32 | int __read_mostly pat_enabled = 1; |
33 | 33 | ||
34 | void __cpuinit pat_disable(const char *reason) | 34 | static inline void pat_disable(const char *reason) |
35 | { | 35 | { |
36 | pat_enabled = 0; | 36 | pat_enabled = 0; |
37 | printk(KERN_INFO "%s\n", reason); | 37 | printk(KERN_INFO "%s\n", reason); |
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 5b7c7c8464fe..7aa03a5389f5 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -345,7 +345,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) | |||
345 | fixmaps_set++; | 345 | fixmaps_set++; |
346 | } | 346 | } |
347 | 347 | ||
348 | void native_set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags) | 348 | void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, |
349 | pgprot_t flags) | ||
349 | { | 350 | { |
350 | __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags)); | 351 | __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags)); |
351 | } | 352 | } |
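
native_set_fixmap() now takes phys_addr_t rather than unsigned long: on a 32-bit PAE kernel unsigned long is 32 bits while physical addresses reach 36 bits or more, so the old prototype silently truncated anything above 4 GiB before the pfn shift. A sketch of the failure mode the wider type avoids (the address value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
typedef uint64_t phys_addr_t;   /* matches PAE; 'unsigned long' would not */

int main(void)
{
    phys_addr_t phys = 0x100000000ULL;           /* 4 GiB */
    uint32_t truncated = (uint32_t)phys;         /* what a 32-bit 'unsigned long' kept */

    printf("pfn (correct)   = %llu\n", (unsigned long long)(phys >> PAGE_SHIFT));
    printf("pfn (truncated) = %llu\n", (unsigned long long)(truncated >> PAGE_SHIFT)); /* 0 */
    return 0;
}
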
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 574c8bc95ef0..c7d272b8574c 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
@@ -116,6 +116,36 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) | |||
116 | reserve_early(phys, phys + length, "ACPI SLIT"); | 116 | reserve_early(phys, phys + length, "ACPI SLIT"); |
117 | } | 117 | } |
118 | 118 | ||
119 | /* Callback for Proximity Domain -> x2APIC mapping */ | ||
120 | void __init | ||
121 | acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) | ||
122 | { | ||
123 | int pxm, node; | ||
124 | int apic_id; | ||
125 | |||
126 | if (srat_disabled()) | ||
127 | return; | ||
128 | if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) { | ||
129 | bad_srat(); | ||
130 | return; | ||
131 | } | ||
132 | if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0) | ||
133 | return; | ||
134 | pxm = pa->proximity_domain; | ||
135 | node = setup_node(pxm); | ||
136 | if (node < 0) { | ||
137 | printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm); | ||
138 | bad_srat(); | ||
139 | return; | ||
140 | } | ||
141 | |||
142 | apic_id = pa->apic_id; | ||
143 | apicid_to_node[apic_id] = node; | ||
144 | acpi_numa = 1; | ||
145 | printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", | ||
146 | pxm, apic_id, node); | ||
147 | } | ||
148 | |||
119 | /* Callback for Proximity Domain -> LAPIC mapping */ | 149 | /* Callback for Proximity Domain -> LAPIC mapping */ |
120 | void __init | 150 | void __init |
121 | acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) | 151 | acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) |
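
The new acpi_numa_x2apic_affinity_init() follows the same validate-then-record shape as the LAPIC callback below it: reject short or disabled entries, translate the proximity domain to a node, then record the apic-id-to-node mapping. A userspace model of that flow (the struct layout and the setup_node() stand-in are illustrative, not the ACPI definitions):

#include <stdint.h>
#include <stdio.h>

#define ACPI_SRAT_CPU_ENABLED 1
#define MAX_NODES 64
#define MAX_APICS 1024

struct srat_x2apic_affinity {
    uint8_t  type, length;       /* common SRAT subtable header */
    uint32_t proximity_domain;
    uint32_t apic_id;
    uint32_t flags;
};

static int apicid_to_node[MAX_APICS];

static int setup_node(uint32_t pxm)   /* tiny stand-in for the real setup_node() */
{
    return pxm < MAX_NODES ? (int)pxm : -1;
}

static void x2apic_affinity_init(const struct srat_x2apic_affinity *pa)
{
    int node;

    if (pa->length < sizeof(*pa))
        return;                              /* malformed entry: bad_srat() in the kernel */
    if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
        return;                              /* disabled CPUs carry no topology info */

    node = setup_node(pa->proximity_domain);
    if (node < 0) {
        fprintf(stderr, "SRAT: Too many proximity domains %x\n", pa->proximity_domain);
        return;
    }
    apicid_to_node[pa->apic_id] = node;
    printf("SRAT: PXM %u -> APIC %u -> Node %u\n", pa->proximity_domain, pa->apic_id, node);
}

int main(void)
{
    struct srat_x2apic_affinity e = { .type = 2, .length = sizeof(e),
                                      .proximity_domain = 1, .apic_id = 300,
                                      .flags = ACPI_SRAT_CPU_ENABLED };
    x2apic_affinity_init(&e);
    return 0;
}
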
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c index 4c4a51c90bc2..819b131fd752 100644 --- a/arch/x86/oprofile/op_model_p4.c +++ b/arch/x86/oprofile/op_model_p4.c | |||
@@ -380,7 +380,7 @@ static unsigned int get_stagger(void) | |||
380 | { | 380 | { |
381 | #ifdef CONFIG_SMP | 381 | #ifdef CONFIG_SMP |
382 | int cpu = smp_processor_id(); | 382 | int cpu = smp_processor_id(); |
383 | return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu))); | 383 | return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map)); |
384 | #endif | 384 | #endif |
385 | return 0; | 385 | return 0; |
386 | } | 386 | } |
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index f234a37bd428..f1817f71e009 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -258,24 +258,7 @@ void pcibios_set_master(struct pci_dev *dev) | |||
258 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); | 258 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); |
259 | } | 259 | } |
260 | 260 | ||
261 | static void pci_unmap_page_range(struct vm_area_struct *vma) | ||
262 | { | ||
263 | u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT; | ||
264 | free_memtype(addr, addr + vma->vm_end - vma->vm_start); | ||
265 | } | ||
266 | |||
267 | static void pci_track_mmap_page_range(struct vm_area_struct *vma) | ||
268 | { | ||
269 | u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT; | ||
270 | unsigned long flags = pgprot_val(vma->vm_page_prot) | ||
271 | & _PAGE_CACHE_MASK; | ||
272 | |||
273 | reserve_memtype(addr, addr + vma->vm_end - vma->vm_start, flags, NULL); | ||
274 | } | ||
275 | |||
276 | static struct vm_operations_struct pci_mmap_ops = { | 261 | static struct vm_operations_struct pci_mmap_ops = { |
277 | .open = pci_track_mmap_page_range, | ||
278 | .close = pci_unmap_page_range, | ||
279 | .access = generic_access_phys, | 262 | .access = generic_access_phys, |
280 | }; | 263 | }; |
281 | 264 | ||
@@ -283,11 +266,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | |||
283 | enum pci_mmap_state mmap_state, int write_combine) | 266 | enum pci_mmap_state mmap_state, int write_combine) |
284 | { | 267 | { |
285 | unsigned long prot; | 268 | unsigned long prot; |
286 | u64 addr = vma->vm_pgoff << PAGE_SHIFT; | ||
287 | unsigned long len = vma->vm_end - vma->vm_start; | ||
288 | unsigned long flags; | ||
289 | unsigned long new_flags; | ||
290 | int retval; | ||
291 | 269 | ||
292 | /* I/O space cannot be accessed via normal processor loads and | 270 | /* I/O space cannot be accessed via normal processor loads and |
293 | * stores on this platform. | 271 | * stores on this platform. |
@@ -308,30 +286,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | |||
308 | 286 | ||
309 | vma->vm_page_prot = __pgprot(prot); | 287 | vma->vm_page_prot = __pgprot(prot); |
310 | 288 | ||
311 | flags = pgprot_val(vma->vm_page_prot) & _PAGE_CACHE_MASK; | ||
312 | retval = reserve_memtype(addr, addr + len, flags, &new_flags); | ||
313 | if (retval) | ||
314 | return retval; | ||
315 | |||
316 | if (flags != new_flags) { | ||
317 | if (!is_new_memtype_allowed(flags, new_flags)) { | ||
318 | free_memtype(addr, addr+len); | ||
319 | return -EINVAL; | ||
320 | } | ||
321 | flags = new_flags; | ||
322 | vma->vm_page_prot = __pgprot( | ||
323 | (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK) | | ||
324 | flags); | ||
325 | } | ||
326 | |||
327 | if (((vma->vm_pgoff < max_low_pfn_mapped) || | ||
328 | (vma->vm_pgoff >= (1UL<<(32 - PAGE_SHIFT)) && | ||
329 | vma->vm_pgoff < max_pfn_mapped)) && | ||
330 | ioremap_change_attr((unsigned long)__va(addr), len, flags)) { | ||
331 | free_memtype(addr, addr + len); | ||
332 | return -EINVAL; | ||
333 | } | ||
334 | |||
335 | if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | 289 | if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, |
336 | vma->vm_end - vma->vm_start, | 290 | vma->vm_end - vma->vm_start, |
337 | vma->vm_page_prot)) | 291 | vma->vm_page_prot)) |
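
Both deleted vm_operations hooks recomputed the physical span of the mapping from the vma before reserving or freeing its memtype; that bookkeeping now lives in generic code rather than in this driver path. For reference, a sketch of the pgoff-to-physical-range computation they shared (field values invented):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct vm_area_struct {                      /* trimmed to the fields used */
    unsigned long vm_start, vm_end, vm_pgoff;
};

int main(void)
{
    struct vm_area_struct vma = {
        .vm_start = 0x7f0000000000UL,
        .vm_end   = 0x7f0000004000UL,        /* four pages mapped */
        .vm_pgoff = 0xfd000,                 /* BAR at phys 0xfd000000 */
    };
    uint64_t addr = (uint64_t)vma.vm_pgoff << PAGE_SHIFT;
    uint64_t len  = vma.vm_end - vma.vm_start;

    printf("memtype range: %#llx - %#llx\n",
           (unsigned long long)addr, (unsigned long long)(addr + len));
    return 0;
}
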
diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile index 9ff4d5b55ad1..58b32db33125 100644 --- a/arch/x86/power/Makefile +++ b/arch/x86/power/Makefile | |||
@@ -1,2 +1,7 @@ | |||
1 | # __restore_processor_state() restores %gs after S3 resume and so should not | ||
2 | # itself be stack-protected | ||
3 | nostackp := $(call cc-option, -fno-stack-protector) | ||
4 | CFLAGS_cpu_$(BITS).o := $(nostackp) | ||
5 | |||
1 | obj-$(CONFIG_PM_SLEEP) += cpu_$(BITS).o | 6 | obj-$(CONFIG_PM_SLEEP) += cpu_$(BITS).o |
2 | obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o | 7 | obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o |
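
The new Makefile lines matter because gcc's stack protector reads the frame canary through a segment register: %gs in the kernel (userspace gcc on x86-64 uses %fs). __restore_processor_state() runs on S3 resume before %gs is reloaded, so a guarded frame there would check a canary through a stale segment base. A userspace illustration of the kind of frame that gets instrumented (build with -fstack-protector-all and disassemble to see the guard):

#include <stdio.h>
#include <string.h>

void copy_name(const char *src)
{
    char buf[32];                 /* local array: gcc inserts a canary here */
    strncpy(buf, src, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';
    printf("%s\n", buf);          /* the epilogue re-checks the canary */
}

int main(void)
{
    copy_name("resume path");
    return 0;
}
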
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 82cd39a6cbd3..f09e8c36ee80 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <asm/xen/hypervisor.h> | 42 | #include <asm/xen/hypervisor.h> |
43 | #include <asm/fixmap.h> | 43 | #include <asm/fixmap.h> |
44 | #include <asm/processor.h> | 44 | #include <asm/processor.h> |
45 | #include <asm/proto.h> | ||
45 | #include <asm/msr-index.h> | 46 | #include <asm/msr-index.h> |
46 | #include <asm/setup.h> | 47 | #include <asm/setup.h> |
47 | #include <asm/desc.h> | 48 | #include <asm/desc.h> |
@@ -168,21 +169,23 @@ static void __init xen_banner(void) | |||
168 | xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); | 169 | xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); |
169 | } | 170 | } |
170 | 171 | ||
172 | static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; | ||
173 | static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; | ||
174 | |||
171 | static void xen_cpuid(unsigned int *ax, unsigned int *bx, | 175 | static void xen_cpuid(unsigned int *ax, unsigned int *bx, |
172 | unsigned int *cx, unsigned int *dx) | 176 | unsigned int *cx, unsigned int *dx) |
173 | { | 177 | { |
178 | unsigned maskecx = ~0; | ||
174 | unsigned maskedx = ~0; | 179 | unsigned maskedx = ~0; |
175 | 180 | ||
176 | /* | 181 | /* |
177 | * Mask out inconvenient features, to try and disable as many | 182 | * Mask out inconvenient features, to try and disable as many |
178 | * unsupported kernel subsystems as possible. | 183 | * unsupported kernel subsystems as possible. |
179 | */ | 184 | */ |
180 | if (*ax == 1) | 185 | if (*ax == 1) { |
181 | maskedx = ~((1 << X86_FEATURE_APIC) | /* disable APIC */ | 186 | maskecx = cpuid_leaf1_ecx_mask; |
182 | (1 << X86_FEATURE_ACPI) | /* disable ACPI */ | 187 | maskedx = cpuid_leaf1_edx_mask; |
183 | (1 << X86_FEATURE_MCE) | /* disable MCE */ | 188 | } |
184 | (1 << X86_FEATURE_MCA) | /* disable MCA */ | ||
185 | (1 << X86_FEATURE_ACC)); /* thermal monitoring */ | ||
186 | 189 | ||
187 | asm(XEN_EMULATE_PREFIX "cpuid" | 190 | asm(XEN_EMULATE_PREFIX "cpuid" |
188 | : "=a" (*ax), | 191 | : "=a" (*ax), |
@@ -190,9 +193,43 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, | |||
190 | "=c" (*cx), | 193 | "=c" (*cx), |
191 | "=d" (*dx) | 194 | "=d" (*dx) |
192 | : "0" (*ax), "2" (*cx)); | 195 | : "0" (*ax), "2" (*cx)); |
196 | |||
197 | *cx &= maskecx; | ||
193 | *dx &= maskedx; | 198 | *dx &= maskedx; |
194 | } | 199 | } |
195 | 200 | ||
201 | static __init void xen_init_cpuid_mask(void) | ||
202 | { | ||
203 | unsigned int ax, bx, cx, dx; | ||
204 | |||
205 | cpuid_leaf1_edx_mask = | ||
206 | ~((1 << X86_FEATURE_MCE) | /* disable MCE */ | ||
207 | (1 << X86_FEATURE_MCA) | /* disable MCA */ | ||
208 | (1 << X86_FEATURE_ACC)); /* thermal monitoring */ | ||
209 | |||
210 | if (!xen_initial_domain()) | ||
211 | cpuid_leaf1_edx_mask &= | ||
212 | ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ | ||
213 | (1 << X86_FEATURE_ACPI)); /* disable ACPI */ | ||
214 | |||
215 | ax = 1; | ||
216 | xen_cpuid(&ax, &bx, &cx, &dx); | ||
217 | |||
218 | /* cpuid claims we support xsave; try enabling it to see what happens */ | ||
219 | if (cx & (1 << (X86_FEATURE_XSAVE % 32))) { | ||
220 | unsigned long cr4; | ||
221 | |||
222 | set_in_cr4(X86_CR4_OSXSAVE); | ||
223 | |||
224 | cr4 = read_cr4(); | ||
225 | |||
226 | if ((cr4 & X86_CR4_OSXSAVE) == 0) | ||
227 | cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32)); | ||
228 | |||
229 | clear_in_cr4(X86_CR4_OSXSAVE); | ||
230 | } | ||
231 | } | ||
232 | |||
196 | static void xen_set_debugreg(int reg, unsigned long val) | 233 | static void xen_set_debugreg(int reg, unsigned long val) |
197 | { | 234 | { |
198 | HYPERVISOR_set_debugreg(reg, val); | 235 | HYPERVISOR_set_debugreg(reg, val); |
@@ -284,12 +321,11 @@ static void xen_set_ldt(const void *addr, unsigned entries) | |||
284 | 321 | ||
285 | static void xen_load_gdt(const struct desc_ptr *dtr) | 322 | static void xen_load_gdt(const struct desc_ptr *dtr) |
286 | { | 323 | { |
287 | unsigned long *frames; | ||
288 | unsigned long va = dtr->address; | 324 | unsigned long va = dtr->address; |
289 | unsigned int size = dtr->size + 1; | 325 | unsigned int size = dtr->size + 1; |
290 | unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; | 326 | unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; |
327 | unsigned long frames[pages]; | ||
291 | int f; | 328 | int f; |
292 | struct multicall_space mcs; | ||
293 | 329 | ||
294 | /* A GDT can be up to 64k in size, which corresponds to 8192 | 330 | /* A GDT can be up to 64k in size, which corresponds to 8192 |
295 | 8-byte entries, or 16 4k pages. */ | 331 | 8-byte entries, or 16 4k pages. */ |
@@ -297,19 +333,26 @@ static void xen_load_gdt(const struct desc_ptr *dtr) | |||
297 | BUG_ON(size > 65536); | 333 | BUG_ON(size > 65536); |
298 | BUG_ON(va & ~PAGE_MASK); | 334 | BUG_ON(va & ~PAGE_MASK); |
299 | 335 | ||
300 | mcs = xen_mc_entry(sizeof(*frames) * pages); | ||
301 | frames = mcs.args; | ||
302 | |||
303 | for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { | 336 | for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { |
304 | frames[f] = arbitrary_virt_to_mfn((void *)va); | 337 | int level; |
338 | pte_t *ptep = lookup_address(va, &level); | ||
339 | unsigned long pfn, mfn; | ||
340 | void *virt; | ||
341 | |||
342 | BUG_ON(ptep == NULL); | ||
343 | |||
344 | pfn = pte_pfn(*ptep); | ||
345 | mfn = pfn_to_mfn(pfn); | ||
346 | virt = __va(PFN_PHYS(pfn)); | ||
347 | |||
348 | frames[f] = mfn; | ||
305 | 349 | ||
306 | make_lowmem_page_readonly((void *)va); | 350 | make_lowmem_page_readonly((void *)va); |
307 | make_lowmem_page_readonly(mfn_to_virt(frames[f])); | 351 | make_lowmem_page_readonly(virt); |
308 | } | 352 | } |
309 | 353 | ||
310 | MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct)); | 354 | if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct))) |
311 | 355 | BUG(); | |
312 | xen_mc_issue(PARAVIRT_LAZY_CPU); | ||
313 | } | 356 | } |
314 | 357 | ||
315 | static void load_TLS_descriptor(struct thread_struct *t, | 358 | static void load_TLS_descriptor(struct thread_struct *t, |
@@ -385,7 +428,7 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum, | |||
385 | static int cvt_gate_to_trap(int vector, const gate_desc *val, | 428 | static int cvt_gate_to_trap(int vector, const gate_desc *val, |
386 | struct trap_info *info) | 429 | struct trap_info *info) |
387 | { | 430 | { |
388 | if (val->type != 0xf && val->type != 0xe) | 431 | if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT) |
389 | return 0; | 432 | return 0; |
390 | 433 | ||
391 | info->vector = vector; | 434 | info->vector = vector; |
@@ -393,8 +436,8 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val, | |||
393 | info->cs = gate_segment(*val); | 436 | info->cs = gate_segment(*val); |
394 | info->flags = val->dpl; | 437 | info->flags = val->dpl; |
395 | /* interrupt gates clear IF */ | 438 | /* interrupt gates clear IF */ |
396 | if (val->type == 0xe) | 439 | if (val->type == GATE_INTERRUPT) |
397 | info->flags |= 4; | 440 | info->flags |= 1 << 2; |
398 | 441 | ||
399 | return 1; | 442 | return 1; |
400 | } | 443 | } |
@@ -872,7 +915,6 @@ static const struct machine_ops __initdata xen_machine_ops = { | |||
872 | .emergency_restart = xen_emergency_restart, | 915 | .emergency_restart = xen_emergency_restart, |
873 | }; | 916 | }; |
874 | 917 | ||
875 | |||
876 | /* First C function to be called on Xen boot */ | 918 | /* First C function to be called on Xen boot */ |
877 | asmlinkage void __init xen_start_kernel(void) | 919 | asmlinkage void __init xen_start_kernel(void) |
878 | { | 920 | { |
@@ -897,6 +939,8 @@ asmlinkage void __init xen_start_kernel(void) | |||
897 | 939 | ||
898 | xen_init_irq_ops(); | 940 | xen_init_irq_ops(); |
899 | 941 | ||
942 | xen_init_cpuid_mask(); | ||
943 | |||
900 | #ifdef CONFIG_X86_LOCAL_APIC | 944 | #ifdef CONFIG_X86_LOCAL_APIC |
901 | /* | 945 | /* |
902 | * set up the basic apic ops. | 946 | * set up the basic apic ops. |
@@ -938,6 +982,11 @@ asmlinkage void __init xen_start_kernel(void) | |||
938 | if (!xen_initial_domain()) | 982 | if (!xen_initial_domain()) |
939 | __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); | 983 | __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD); |
940 | 984 | ||
985 | #ifdef CONFIG_X86_64 | ||
986 | /* Work out if we support NX */ | ||
987 | check_efer(); | ||
988 | #endif | ||
989 | |||
941 | /* Don't do the full vcpu_info placement stuff until we have a | 990 | /* Don't do the full vcpu_info placement stuff until we have a |
942 | possible map and a non-dummy shared_info. */ | 991 | possible map and a non-dummy shared_info. */ |
943 | per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; | 992 | per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; |
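
The cpuid rework above splits policy from mechanism: xen_init_cpuid_mask() computes the leaf-1 ECX/EDX masks once at boot (a domU additionally loses APIC and ACPI, and XSAVE is probed through CR4), and xen_cpuid() then just ANDs the masks into the emulated result. A userspace model of the EDX side (feature bit positions as in the kernel; the sample EDX value is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Feature bit positions within leaf 1 EDX, as in the kernel. */
#define X86_FEATURE_MCE   7
#define X86_FEATURE_APIC  9
#define X86_FEATURE_MCA  14
#define X86_FEATURE_ACPI 22
#define X86_FEATURE_ACC  29

static uint32_t cpuid_leaf1_edx_mask = ~0u;

static void init_cpuid_mask(int initial_domain)
{
    cpuid_leaf1_edx_mask = ~((1u << X86_FEATURE_MCE) |
                             (1u << X86_FEATURE_MCA) |
                             (1u << X86_FEATURE_ACC));
    if (!initial_domain)          /* a domU additionally loses APIC and ACPI */
        cpuid_leaf1_edx_mask &= ~((1u << X86_FEATURE_APIC) |
                                  (1u << X86_FEATURE_ACPI));
}

int main(void)
{
    uint32_t hw_edx = 0xbfebfbffu;          /* a sample leaf-1 EDX value */

    init_cpuid_mask(0);                     /* pretend we are a domU */
    printf("guest-visible edx = %#x\n", hw_edx & cpuid_leaf1_edx_mask);
    return 0;
}
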
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index db3802fb7b84..9842b1212407 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -184,7 +184,7 @@ static inline unsigned p2m_index(unsigned long pfn) | |||
184 | } | 184 | } |
185 | 185 | ||
186 | /* Build the parallel p2m_top_mfn structures */ | 186 | /* Build the parallel p2m_top_mfn structures */ |
187 | void xen_setup_mfn_list_list(void) | 187 | static void __init xen_build_mfn_list_list(void) |
188 | { | 188 | { |
189 | unsigned pfn, idx; | 189 | unsigned pfn, idx; |
190 | 190 | ||
@@ -198,7 +198,10 @@ void xen_setup_mfn_list_list(void) | |||
198 | unsigned topidx = idx * P2M_ENTRIES_PER_PAGE; | 198 | unsigned topidx = idx * P2M_ENTRIES_PER_PAGE; |
199 | p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]); | 199 | p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]); |
200 | } | 200 | } |
201 | } | ||
201 | 202 | ||
203 | void xen_setup_mfn_list_list(void) | ||
204 | { | ||
202 | BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); | 205 | BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); |
203 | 206 | ||
204 | HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = | 207 | HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = |
@@ -218,6 +221,8 @@ void __init xen_build_dynamic_phys_to_machine(void) | |||
218 | 221 | ||
219 | p2m_top[topidx] = &mfn_list[pfn]; | 222 | p2m_top[topidx] = &mfn_list[pfn]; |
220 | } | 223 | } |
224 | |||
225 | xen_build_mfn_list_list(); | ||
221 | } | 226 | } |
222 | 227 | ||
223 | unsigned long get_phys_to_machine(unsigned long pfn) | 228 | unsigned long get_phys_to_machine(unsigned long pfn) |
@@ -233,47 +238,74 @@ unsigned long get_phys_to_machine(unsigned long pfn) | |||
233 | } | 238 | } |
234 | EXPORT_SYMBOL_GPL(get_phys_to_machine); | 239 | EXPORT_SYMBOL_GPL(get_phys_to_machine); |
235 | 240 | ||
236 | static void alloc_p2m(unsigned long **pp, unsigned long *mfnp) | 241 | /* install a new p2m_top page */ |
242 | bool install_p2mtop_page(unsigned long pfn, unsigned long *p) | ||
237 | { | 243 | { |
238 | unsigned long *p; | 244 | unsigned topidx = p2m_top_index(pfn); |
245 | unsigned long **pfnp, *mfnp; | ||
239 | unsigned i; | 246 | unsigned i; |
240 | 247 | ||
241 | p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL); | 248 | pfnp = &p2m_top[topidx]; |
242 | BUG_ON(p == NULL); | 249 | mfnp = &p2m_top_mfn[topidx]; |
243 | 250 | ||
244 | for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++) | 251 | for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++) |
245 | p[i] = INVALID_P2M_ENTRY; | 252 | p[i] = INVALID_P2M_ENTRY; |
246 | 253 | ||
247 | if (cmpxchg(pp, p2m_missing, p) != p2m_missing) | 254 | if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) { |
248 | free_page((unsigned long)p); | ||
249 | else | ||
250 | *mfnp = virt_to_mfn(p); | 255 | *mfnp = virt_to_mfn(p); |
256 | return true; | ||
257 | } | ||
258 | |||
259 | return false; | ||
251 | } | 260 | } |
252 | 261 | ||
253 | void set_phys_to_machine(unsigned long pfn, unsigned long mfn) | 262 | static void alloc_p2m(unsigned long pfn) |
254 | { | 263 | { |
255 | unsigned topidx, idx; | 264 | unsigned long *p; |
256 | 265 | ||
257 | if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { | 266 | p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL); |
258 | BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); | 267 | BUG_ON(p == NULL); |
259 | return; | 268 | |
260 | } | 269 | if (!install_p2mtop_page(pfn, p)) |
270 | free_page((unsigned long)p); | ||
271 | } | ||
272 | |||
273 | /* Try to install p2m mapping; fail if intermediate bits missing */ | ||
274 | bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) | ||
275 | { | ||
276 | unsigned topidx, idx; | ||
261 | 277 | ||
262 | if (unlikely(pfn >= MAX_DOMAIN_PAGES)) { | 278 | if (unlikely(pfn >= MAX_DOMAIN_PAGES)) { |
263 | BUG_ON(mfn != INVALID_P2M_ENTRY); | 279 | BUG_ON(mfn != INVALID_P2M_ENTRY); |
264 | return; | 280 | return true; |
265 | } | 281 | } |
266 | 282 | ||
267 | topidx = p2m_top_index(pfn); | 283 | topidx = p2m_top_index(pfn); |
268 | if (p2m_top[topidx] == p2m_missing) { | 284 | if (p2m_top[topidx] == p2m_missing) { |
269 | /* no need to allocate a page to store an invalid entry */ | ||
270 | if (mfn == INVALID_P2M_ENTRY) | 285 | if (mfn == INVALID_P2M_ENTRY) |
271 | return; | 286 | return true; |
272 | alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]); | 287 | return false; |
273 | } | 288 | } |
274 | 289 | ||
275 | idx = p2m_index(pfn); | 290 | idx = p2m_index(pfn); |
276 | p2m_top[topidx][idx] = mfn; | 291 | p2m_top[topidx][idx] = mfn; |
292 | |||
293 | return true; | ||
294 | } | ||
295 | |||
296 | void set_phys_to_machine(unsigned long pfn, unsigned long mfn) | ||
297 | { | ||
298 | if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { | ||
299 | BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); | ||
300 | return; | ||
301 | } | ||
302 | |||
303 | if (unlikely(!__set_phys_to_machine(pfn, mfn))) { | ||
304 | alloc_p2m(pfn); | ||
305 | |||
306 | if (!__set_phys_to_machine(pfn, mfn)) | ||
307 | BUG(); | ||
308 | } | ||
277 | } | 309 | } |
278 | 310 | ||
279 | unsigned long arbitrary_virt_to_mfn(void *vaddr) | 311 | unsigned long arbitrary_virt_to_mfn(void *vaddr) |
@@ -987,7 +1019,7 @@ static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page, | |||
987 | return 0; | 1019 | return 0; |
988 | } | 1020 | } |
989 | 1021 | ||
990 | void __init xen_mark_init_mm_pinned(void) | 1022 | static void __init xen_mark_init_mm_pinned(void) |
991 | { | 1023 | { |
992 | xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); | 1024 | xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); |
993 | } | 1025 | } |
@@ -1270,8 +1302,8 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, | |||
1270 | } *args; | 1302 | } *args; |
1271 | struct multicall_space mcs; | 1303 | struct multicall_space mcs; |
1272 | 1304 | ||
1273 | BUG_ON(cpumask_empty(cpus)); | 1305 | if (cpumask_empty(cpus)) |
1274 | BUG_ON(!mm); | 1306 | return; /* nothing to do */ |
1275 | 1307 | ||
1276 | mcs = xen_mc_entry(sizeof(*args)); | 1308 | mcs = xen_mc_entry(sizeof(*args)); |
1277 | args = mcs.args; | 1309 | args = mcs.args; |
@@ -1438,6 +1470,15 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) | |||
1438 | } | 1470 | } |
1439 | #endif | 1471 | #endif |
1440 | 1472 | ||
1473 | static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) | ||
1474 | { | ||
1475 | struct mmuext_op op; | ||
1476 | op.cmd = cmd; | ||
1477 | op.arg1.mfn = pfn_to_mfn(pfn); | ||
1478 | if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) | ||
1479 | BUG(); | ||
1480 | } | ||
1481 | |||
1441 | /* Early in boot, while setting up the initial pagetable, assume | 1482 | /* Early in boot, while setting up the initial pagetable, assume |
1442 | everything is pinned. */ | 1483 | everything is pinned. */ |
1443 | static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) | 1484 | static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) |
@@ -1446,22 +1487,29 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) | |||
1446 | BUG_ON(mem_map); /* should only be used early */ | 1487 | BUG_ON(mem_map); /* should only be used early */ |
1447 | #endif | 1488 | #endif |
1448 | make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); | 1489 | make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); |
1490 | pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); | ||
1491 | } | ||
1492 | |||
1493 | /* Used for pmd and pud */ | ||
1494 | static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) | ||
1495 | { | ||
1496 | #ifdef CONFIG_FLATMEM | ||
1497 | BUG_ON(mem_map); /* should only be used early */ | ||
1498 | #endif | ||
1499 | make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); | ||
1449 | } | 1500 | } |
1450 | 1501 | ||
1451 | /* Early release_pte assumes that all pts are pinned, since there's | 1502 | /* Early release_pte assumes that all pts are pinned, since there's |
1452 | only init_mm and anything attached to that is pinned. */ | 1503 | only init_mm and anything attached to that is pinned. */ |
1453 | static void xen_release_pte_init(unsigned long pfn) | 1504 | static __init void xen_release_pte_init(unsigned long pfn) |
1454 | { | 1505 | { |
1506 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); | ||
1455 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); | 1507 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
1456 | } | 1508 | } |
1457 | 1509 | ||
1458 | static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) | 1510 | static __init void xen_release_pmd_init(unsigned long pfn) |
1459 | { | 1511 | { |
1460 | struct mmuext_op op; | 1512 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
1461 | op.cmd = cmd; | ||
1462 | op.arg1.mfn = pfn_to_mfn(pfn); | ||
1463 | if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) | ||
1464 | BUG(); | ||
1465 | } | 1513 | } |
1466 | 1514 | ||
1467 | /* This needs to make sure the new pte page is pinned iff it's being | 1515 |
@@ -1750,7 +1798,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | |||
1750 | } | 1798 | } |
1751 | #endif /* CONFIG_X86_64 */ | 1799 | #endif /* CONFIG_X86_64 */ |
1752 | 1800 | ||
1753 | static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot) | 1801 | static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) |
1754 | { | 1802 | { |
1755 | pte_t pte; | 1803 | pte_t pte; |
1756 | 1804 | ||
@@ -1773,6 +1821,9 @@ static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot) | |||
1773 | #ifdef CONFIG_X86_LOCAL_APIC | 1821 | #ifdef CONFIG_X86_LOCAL_APIC |
1774 | case FIX_APIC_BASE: /* maps dummy local APIC */ | 1822 | case FIX_APIC_BASE: /* maps dummy local APIC */ |
1775 | #endif | 1823 | #endif |
1824 | case FIX_TEXT_POKE0: | ||
1825 | case FIX_TEXT_POKE1: | ||
1826 | /* All local page mappings */ | ||
1776 | pte = pfn_pte(phys, prot); | 1827 | pte = pfn_pte(phys, prot); |
1777 | break; | 1828 | break; |
1778 | 1829 | ||
@@ -1819,7 +1870,6 @@ __init void xen_post_allocator_init(void) | |||
1819 | xen_mark_init_mm_pinned(); | 1870 | xen_mark_init_mm_pinned(); |
1820 | } | 1871 | } |
1821 | 1872 | ||
1822 | |||
1823 | const struct pv_mmu_ops xen_mmu_ops __initdata = { | 1873 | const struct pv_mmu_ops xen_mmu_ops __initdata = { |
1824 | .pagetable_setup_start = xen_pagetable_setup_start, | 1874 | .pagetable_setup_start = xen_pagetable_setup_start, |
1825 | .pagetable_setup_done = xen_pagetable_setup_done, | 1875 | .pagetable_setup_done = xen_pagetable_setup_done, |
@@ -1843,9 +1893,9 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = { | |||
1843 | 1893 | ||
1844 | .alloc_pte = xen_alloc_pte_init, | 1894 | .alloc_pte = xen_alloc_pte_init, |
1845 | .release_pte = xen_release_pte_init, | 1895 | .release_pte = xen_release_pte_init, |
1846 | .alloc_pmd = xen_alloc_pte_init, | 1896 | .alloc_pmd = xen_alloc_pmd_init, |
1847 | .alloc_pmd_clone = paravirt_nop, | 1897 | .alloc_pmd_clone = paravirt_nop, |
1848 | .release_pmd = xen_release_pte_init, | 1898 | .release_pmd = xen_release_pmd_init, |
1849 | 1899 | ||
1850 | #ifdef CONFIG_HIGHPTE | 1900 | #ifdef CONFIG_HIGHPTE |
1851 | .kmap_atomic_pte = xen_kmap_atomic_pte, | 1901 | .kmap_atomic_pte = xen_kmap_atomic_pte, |
@@ -1883,8 +1933,8 @@ const struct pv_mmu_ops xen_mmu_ops __initdata = { | |||
1883 | .make_pud = PV_CALLEE_SAVE(xen_make_pud), | 1933 | .make_pud = PV_CALLEE_SAVE(xen_make_pud), |
1884 | .set_pgd = xen_set_pgd_hyper, | 1934 | .set_pgd = xen_set_pgd_hyper, |
1885 | 1935 | ||
1886 | .alloc_pud = xen_alloc_pte_init, | 1936 | .alloc_pud = xen_alloc_pmd_init, |
1887 | .release_pud = xen_release_pte_init, | 1937 | .release_pud = xen_release_pmd_init, |
1888 | #endif /* PAGETABLE_LEVELS == 4 */ | 1938 | #endif /* PAGETABLE_LEVELS == 4 */ |
1889 | 1939 | ||
1890 | .activate_mm = xen_activate_mm, | 1940 | .activate_mm = xen_activate_mm, |
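
The p2m rework above separates the fast path from allocation: __set_phys_to_machine() only succeeds when the leaf page already exists, and set_phys_to_machine() allocates a leaf and retries on failure. A runnable, shrunken model of that two-level structure (the sizes and the malloc stand-in for __get_free_page are not the kernel's):

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_PAGE 16
#define TOP_ENTRIES       8
#define INVALID_ENTRY    (~0UL)

static unsigned long p2m_missing[ENTRIES_PER_PAGE];   /* shared "absent" leaf */
static unsigned long *p2m_top[TOP_ENTRIES];

static void p2m_init(void)
{
    for (int i = 0; i < ENTRIES_PER_PAGE; i++)
        p2m_missing[i] = INVALID_ENTRY;
    for (int i = 0; i < TOP_ENTRIES; i++)
        p2m_top[i] = p2m_missing;
}

/* Fast path: fails if the leaf is still the shared missing page. */
static int __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
    unsigned topidx = pfn / ENTRIES_PER_PAGE;

    if (p2m_top[topidx] == p2m_missing)
        return mfn == INVALID_ENTRY;      /* storing "invalid" needs no page */
    p2m_top[topidx][pfn % ENTRIES_PER_PAGE] = mfn;
    return 1;
}

static void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
    if (__set_phys_to_machine(pfn, mfn))
        return;

    /* Slow path: install a fresh leaf, after which the store must succeed. */
    unsigned long *p = malloc(sizeof(p2m_missing));
    for (int i = 0; i < ENTRIES_PER_PAGE; i++)
        p[i] = INVALID_ENTRY;
    p2m_top[pfn / ENTRIES_PER_PAGE] = p;
    if (!__set_phys_to_machine(pfn, mfn))
        abort();
}

int main(void)
{
    p2m_init();
    set_phys_to_machine(37, 12345);
    printf("p2m(37) = %lu\n", p2m_top[37 / ENTRIES_PER_PAGE][37 % ENTRIES_PER_PAGE]);
    return 0;
}
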
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h index 24d1b44a337d..da7302624897 100644 --- a/arch/x86/xen/mmu.h +++ b/arch/x86/xen/mmu.h | |||
@@ -11,6 +11,9 @@ enum pt_level { | |||
11 | }; | 11 | }; |
12 | 12 | ||
13 | 13 | ||
14 | bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); | ||
15 | bool install_p2mtop_page(unsigned long pfn, unsigned long *p); | ||
16 | |||
14 | void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); | 17 | void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); |
15 | 18 | ||
16 | 19 | ||
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 8d470562ffc9..429834ec1687 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void) | |||
158 | rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); | 158 | rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); |
159 | if (rc >= 0) { | 159 | if (rc >= 0) { |
160 | num_processors++; | 160 | num_processors++; |
161 | cpu_set(i, cpu_possible_map); | 161 | set_cpu_possible(i, true); |
162 | } | 162 | } |
163 | } | 163 | } |
164 | } | 164 | } |
@@ -197,7 +197,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) | |||
197 | while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { | 197 | while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { |
198 | for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) | 198 | for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) |
199 | continue; | 199 | continue; |
200 | cpu_clear(cpu, cpu_possible_map); | 200 | set_cpu_possible(cpu, false); |
201 | } | 201 | } |
202 | 202 | ||
203 | for_each_possible_cpu (cpu) { | 203 | for_each_possible_cpu (cpu) { |
@@ -210,7 +210,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) | |||
210 | if (IS_ERR(idle)) | 210 | if (IS_ERR(idle)) |
211 | panic("failed fork for CPU %d", cpu); | 211 | panic("failed fork for CPU %d", cpu); |
212 | 212 | ||
213 | cpu_set(cpu, cpu_present_map); | 213 | set_cpu_present(cpu, true); |
214 | } | 214 | } |
215 | } | 215 | } |
216 | 216 | ||
@@ -317,7 +317,7 @@ static int __cpuinit xen_cpu_up(unsigned int cpu) | |||
317 | BUG_ON(rc); | 317 | BUG_ON(rc); |
318 | 318 | ||
319 | while(per_cpu(cpu_state, cpu) != CPU_ONLINE) { | 319 | while(per_cpu(cpu_state, cpu) != CPU_ONLINE) { |
320 | HYPERVISOR_sched_op(SCHEDOP_yield, 0); | 320 | HYPERVISOR_sched_op(SCHEDOP_yield, NULL); |
321 | barrier(); | 321 | barrier(); |
322 | } | 322 | } |
323 | 323 | ||
@@ -422,7 +422,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask) | |||
422 | /* Make sure other vcpus get a chance to run if they need to. */ | 422 | /* Make sure other vcpus get a chance to run if they need to. */ |
423 | for_each_cpu(cpu, mask) { | 423 | for_each_cpu(cpu, mask) { |
424 | if (xen_vcpu_stolen(cpu)) { | 424 | if (xen_vcpu_stolen(cpu)) { |
425 | HYPERVISOR_sched_op(SCHEDOP_yield, 0); | 425 | HYPERVISOR_sched_op(SCHEDOP_yield, NULL); |
426 | break; | 426 | break; |
427 | } | 427 | } |
428 | } | 428 | } |
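
The smp.c hunks replace direct cpu_set()/cpu_clear() on the global maps with the set_cpu_possible()/set_cpu_present() accessors. A minimal model of why the accessor form is preferred: the published mask can be a const pointer whose backing storage only the accessors are allowed to touch:

#include <stdbool.h>
#include <stdio.h>

static unsigned long __cpu_possible_bits;
const unsigned long *const cpu_possible_mask = &__cpu_possible_bits;

/* Only this accessor writes the map; readers see a read-only view. */
static void set_cpu_possible(unsigned int cpu, bool possible)
{
    if (possible)
        __cpu_possible_bits |=  1UL << cpu;
    else
        __cpu_possible_bits &= ~(1UL << cpu);
}

int main(void)
{
    set_cpu_possible(0, true);
    set_cpu_possible(3, true);
    printf("possible mask = %#lx\n", *cpu_possible_mask);   /* 0x9 */
    return 0;
}
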
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 2f5ef2632ea2..20139464943c 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -57,8 +57,6 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id); | |||
57 | 57 | ||
58 | bool xen_vcpu_stolen(int vcpu); | 58 | bool xen_vcpu_stolen(int vcpu); |
59 | 59 | ||
60 | void xen_mark_init_mm_pinned(void); | ||
61 | |||
62 | void xen_setup_vcpu_info_placement(void); | 60 | void xen_setup_vcpu_info_placement(void); |
63 | 61 | ||
64 | #ifdef CONFIG_SMP | 62 | #ifdef CONFIG_SMP |