Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/apic.h          | 13
-rw-r--r--  arch/x86/include/asm/string_32.h     |  1
-rw-r--r--  arch/x86/include/asm/uaccess_32.h    |  2
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h     | 19
-rw-r--r--  arch/x86/kernel/apic/apic.c          | 31
-rw-r--r--  arch/x86/kernel/apic/io_apic.c       |  4
-rw-r--r--  arch/x86/kernel/apic/probe_64.c      | 15
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c   | 11
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c |  3
-rw-r--r--  arch/x86/kernel/cpu/mtrr/if.c        | 12
-rw-r--r--  arch/x86/kernel/cpu/vmware.c         |  6
-rw-r--r--  arch/x86/kernel/early_printk.c       |  2
-rw-r--r--  arch/x86/kernel/setup.c              | 30
-rw-r--r--  arch/x86/kernel/trampoline.c         |  4
-rw-r--r--  arch/x86/kernel/trampoline_32.S      |  8
-rw-r--r--  arch/x86/kernel/trampoline_64.S      |  5
-rw-r--r--  arch/x86/kernel/tsc.c                |  2
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S        |  4
-rw-r--r--  arch/x86/power/cpu.c                 |  4
19 files changed, 98 insertions(+), 78 deletions(-)
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index c6d21b18806c..474d80d3e6cc 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -66,6 +66,19 @@ static inline void default_inquire_remote_apic(int apicid)
 }
 
 /*
+ * With 82489DX we can't rely on apic feature bit
+ * retrieved via cpuid but still have to deal with
+ * such an apic chip so we assume that SMP configuration
+ * is found from MP table (64bit case uses ACPI mostly
+ * which set smp presence flag as well so we are safe
+ * to use this helper too).
+ */
+static inline bool apic_from_smp_config(void)
+{
+        return smp_found_config && !disable_apic;
+}
+
+/*
  * Basic functions accessing APICs.
  */
 #ifdef CONFIG_PARAVIRT
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
index c86f452256de..ae907e617181 100644
--- a/arch/x86/include/asm/string_32.h
+++ b/arch/x86/include/asm/string_32.h
@@ -65,7 +65,6 @@ static __always_inline void *__constant_memcpy(void *to, const void *from,
         case 4:
                 *(int *)to = *(int *)from;
                 return to;
-
         case 3:
                 *(short *)to = *(short *)from;
                 *((char *)to + 2) = *((char *)from + 2);
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 5e06259e90e5..632fb44b4cb5 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -33,7 +33,7 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
  * Copy data from kernel space to user space. Caller must check
  * the specified block with access_ok() before calling this function.
  * The caller should also make sure he pins the user space address
- * so that the we don't result in page fault and sleep.
+ * so that we don't result in page fault and sleep.
  *
  * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
  * we return the initial request size (1, 2 or 4), as copy_*_user should do.
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 77a68505419a..04eb6c958b9d 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -15,6 +15,7 @@
 #include <linux/numa.h>
 #include <linux/percpu.h>
 #include <linux/timer.h>
+#include <linux/io.h>
 #include <asm/types.h>
 #include <asm/percpu.h>
 #include <asm/uv/uv_mmrs.h>
@@ -258,13 +259,13 @@ static inline unsigned long *uv_global_mmr32_address(int pnode,
 static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
                                          unsigned long val)
 {
-        *uv_global_mmr32_address(pnode, offset) = val;
+        writeq(val, uv_global_mmr32_address(pnode, offset));
 }
 
 static inline unsigned long uv_read_global_mmr32(int pnode,
                                                  unsigned long offset)
 {
-        return *uv_global_mmr32_address(pnode, offset);
+        return readq(uv_global_mmr32_address(pnode, offset));
 }
 
 /*
@@ -281,13 +282,13 @@ static inline unsigned long *uv_global_mmr64_address(int pnode,
 static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
                                          unsigned long val)
 {
-        *uv_global_mmr64_address(pnode, offset) = val;
+        writeq(val, uv_global_mmr64_address(pnode, offset));
 }
 
 static inline unsigned long uv_read_global_mmr64(int pnode,
                                                  unsigned long offset)
 {
-        return *uv_global_mmr64_address(pnode, offset);
+        return readq(uv_global_mmr64_address(pnode, offset));
 }
 
 /*
@@ -301,22 +302,22 @@ static inline unsigned long *uv_local_mmr_address(unsigned long offset)
 
 static inline unsigned long uv_read_local_mmr(unsigned long offset)
 {
-        return *uv_local_mmr_address(offset);
+        return readq(uv_local_mmr_address(offset));
 }
 
 static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
 {
-        *uv_local_mmr_address(offset) = val;
+        writeq(val, uv_local_mmr_address(offset));
 }
 
 static inline unsigned char uv_read_local_mmr8(unsigned long offset)
 {
-        return *((unsigned char *)uv_local_mmr_address(offset));
+        return readb(uv_local_mmr_address(offset));
 }
 
 static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
 {
-        *((unsigned char *)uv_local_mmr_address(offset)) = val;
+        writeb(val, uv_local_mmr_address(offset));
 }
 
 /*
@@ -422,7 +423,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
         unsigned long val;
 
         val = (1UL << UVH_IPI_INT_SEND_SHFT) |
-              ((apicid & 0x3f) << UVH_IPI_INT_APIC_ID_SHFT) |
+              ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
               (vector << UVH_IPI_INT_VECTOR_SHFT);
         uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a34601f52987..a58ef98be155 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -62,7 +62,7 @@ unsigned int boot_cpu_physical_apicid = -1U;
 /*
  * The highest APIC ID seen during enumeration.
  *
- * This determines the messaging protocol we can use: if all APIC IDs
+ * On AMD, this determines the messaging protocol we can use: if all APIC IDs
  * are in the 0 ... 7 range, then we can use logical addressing which
  * has some performance advantages (better broadcasting).
  *
@@ -979,7 +979,7 @@ void lapic_shutdown(void)
 {
         unsigned long flags;
 
-        if (!cpu_has_apic)
+        if (!cpu_has_apic && !apic_from_smp_config())
                 return;
 
         local_irq_save(flags);
@@ -1197,8 +1197,7 @@ void __cpuinit setup_local_APIC(void)
          * Double-check whether this APIC is really registered.
          * This is meaningless in clustered apic mode, so we skip it.
          */
-        if (!apic->apic_id_registered())
-                BUG();
+        BUG_ON(!apic->apic_id_registered());
 
         /*
          * Intel recommends to set DFR, LDR and TPR before enabling
@@ -1917,24 +1916,14 @@ void __cpuinit generic_processor_info(int apicid, int version)
                 max_physical_apicid = apicid;
 
 #ifdef CONFIG_X86_32
-        /*
-         * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
-         * but we need to work other dependencies like SMP_SUSPEND etc
-         * before this can be done without some confusion.
-         * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
-         *      - Ashok Raj <ashok.raj@intel.com>
-         */
-        if (max_physical_apicid >= 8) {
-                switch (boot_cpu_data.x86_vendor) {
-                case X86_VENDOR_INTEL:
-                        if (!APIC_XAPIC(version)) {
-                                def_to_bigsmp = 0;
-                                break;
-                        }
-                        /* If P4 and above fall through */
-                case X86_VENDOR_AMD:
-                        def_to_bigsmp = 1;
-                }
-        }
+        switch (boot_cpu_data.x86_vendor) {
+        case X86_VENDOR_INTEL:
+                if (num_processors > 8)
+                        def_to_bigsmp = 1;
+                break;
+        case X86_VENDOR_AMD:
+                if (max_physical_apicid >= 8)
+                        def_to_bigsmp = 1;
+        }
 #endif
 
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 809e1cf86d6b..64970b9885f2 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1874,7 +1874,7 @@ __apicdebuginit(int) print_all_ICs(void)
         print_PIC();
 
         /* don't print out if apic is not there */
-        if (!cpu_has_apic || disable_apic)
+        if (!cpu_has_apic && !apic_from_smp_config())
                 return 0;
 
         print_all_local_APICs();
@@ -1999,7 +1999,7 @@ void disable_IO_APIC(void)
         /*
          * Use virtual wire A mode when interrupt remapping is enabled.
          */
-        if (cpu_has_apic)
+        if (cpu_has_apic || apic_from_smp_config())
                 disconnect_bsp_APIC(!intr_remapping_enabled &&
                                 ioapic_i8259.pin != -1);
 }
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 65edc180fc82..c4cbd3080c1c 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -64,16 +64,23 @@ void __init default_setup_apic_routing(void)
                         apic = &apic_x2apic_phys;
                 else
                         apic = &apic_x2apic_cluster;
-                printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
         }
 #endif
 
         if (apic == &apic_flat) {
-                if (max_physical_apicid >= 8)
-                        apic = &apic_physflat;
-                printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
+                switch (boot_cpu_data.x86_vendor) {
+                case X86_VENDOR_INTEL:
+                        if (num_processors > 8)
+                                apic = &apic_physflat;
+                        break;
+                case X86_VENDOR_AMD:
+                        if (max_physical_apicid >= 8)
+                                apic = &apic_physflat;
+                }
         }
 
+        printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
+
         if (is_vsmp_box()) {
                 /* need to update phys_pkg_id */
                 apic->phys_pkg_id = apicid_phys_pkg_id;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 601159374e87..f5f5886a6b53 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -389,6 +389,16 @@ static __init void map_gru_high(int max_pnode)
                 map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
 }
 
+static __init void map_mmr_high(int max_pnode)
+{
+        union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
+        int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;
+
+        mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
+        if (mmr.s.enable)
+                map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
+}
+
 static __init void map_mmioh_high(int max_pnode)
 {
         union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
@@ -643,6 +653,7 @@ void __init uv_system_init(void)
         }
 
         map_gru_high(max_pnode);
+        map_mmr_high(max_pnode);
         map_mmioh_high(max_pnode);
 
         uv_cpu_init();
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 8cd5224943b5..83a3d1f4efca 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -489,8 +489,9 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
         int i, err = 0;
         struct threshold_bank *b = NULL;
         char name[32];
+#ifdef CONFIG_SMP
         struct cpuinfo_x86 *c = &cpu_data(cpu);
-
+#endif
 
         sprintf(name, "threshold_bank%i", bank);
 
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 08b6ea4c62b4..f04e72527604 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -126,8 +126,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
                 return -EINVAL;
 
         base = simple_strtoull(line + 5, &ptr, 0);
-        for (; isspace(*ptr); ++ptr)
-                ;
+        while (isspace(*ptr))
+                ptr++;
 
         if (strncmp(ptr, "size=", 5))
                 return -EINVAL;
@@ -135,14 +135,14 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
         size = simple_strtoull(ptr + 5, &ptr, 0);
         if ((base & 0xfff) || (size & 0xfff))
                 return -EINVAL;
-        for (; isspace(*ptr); ++ptr)
-                ;
+        while (isspace(*ptr))
+                ptr++;
 
         if (strncmp(ptr, "type=", 5))
                 return -EINVAL;
         ptr += 5;
-        for (; isspace(*ptr); ++ptr)
-                ;
+        while (isspace(*ptr))
+                ptr++;
 
         for (i = 0; i < MTRR_NUM_TYPES; ++i) {
                 if (strcmp(ptr, mtrr_strings[i]))
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 0a46b4df5d80..1cbed97b59cf 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -58,6 +58,9 @@ static unsigned long vmware_get_tsc_khz(void)
         tsc_hz = eax | (((uint64_t)ebx) << 32);
         do_div(tsc_hz, 1000);
         BUG_ON(tsc_hz >> 32);
+        printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
+                         (unsigned long) tsc_hz / 1000,
+                         (unsigned long) tsc_hz % 1000);
         return tsc_hz;
 }
 
@@ -69,6 +72,9 @@ void __init vmware_platform_setup(void)
 
         if (ebx != UINT_MAX)
                 x86_platform.calibrate_tsc = vmware_get_tsc_khz;
+        else
+                printk(KERN_WARNING
+                       "Failed to get TSC freq from the hypervisor\n");
 }
 
 /*
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 335f049d110f..b11cab3c323a 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -624,7 +624,7 @@ try_next_port:
                 return -1;
         }
 
-        loop = 10;
+        loop = 100000;
         /* Reset the EHCI controller */
         cmd = readl(&ehci_regs->command);
         cmd |= CMD_RESET;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index a55f6609fe1f..f327bccf5089 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -697,21 +697,6 @@ void __init setup_arch(char **cmdline_p)
         printk(KERN_INFO "Command line: %s\n", boot_command_line);
 #endif
 
-        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
-        *cmdline_p = command_line;
-
-#ifdef CONFIG_X86_64
-        /*
-         * Must call this twice: Once just to detect whether hardware doesn't
-         * support NX (so that the early EHCI debug console setup can safely
-         * call set_fixmap(), and then again after parsing early parameters to
-         * honor the respective command line option.
-         */
-        check_efer();
-#endif
-
-        parse_early_param();
-
         /* VMI may relocate the fixmap; do this before touching ioremap area */
         vmi_init();
 
@@ -794,6 +779,21 @@ void __init setup_arch(char **cmdline_p)
 #endif
 #endif
 
+        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+        *cmdline_p = command_line;
+
+#ifdef CONFIG_X86_64
+        /*
+         * Must call this twice: Once just to detect whether hardware doesn't
+         * support NX (so that the early EHCI debug console setup can safely
+         * call set_fixmap(), and then again after parsing early parameters to
+         * honor the respective command line option.
+         */
+        check_efer();
+#endif
+
+        parse_early_param();
+
 #ifdef CONFIG_X86_64
         check_efer();
 #endif
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index 808031a5ba19..699f7eeb896a 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -4,7 +4,7 @@
 #include <asm/e820.h>
 
 /* ready for x86_64 and x86 */
-unsigned char *trampoline_base = __va(TRAMPOLINE_BASE);
+unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE);
 
 void __init reserve_trampoline_memory(void)
 {
@@ -26,7 +26,7 @@ void __init reserve_trampoline_memory(void)
  * bootstrap into the page concerned. The caller
  * has made sure it's suitably aligned.
  */
-unsigned long setup_trampoline(void)
+unsigned long __cpuinit setup_trampoline(void)
 {
         memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
         return virt_to_phys(trampoline_base);
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
index 66d874e5404c..8508237e8e43 100644
--- a/arch/x86/kernel/trampoline_32.S
+++ b/arch/x86/kernel/trampoline_32.S
@@ -28,16 +28,12 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/init.h>
 #include <asm/segment.h>
 #include <asm/page_types.h>
 
 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
-#ifndef CONFIG_HOTPLUG_CPU
-.section ".cpuinit.data","aw",@progbits
-#else
-.section .rodata,"a",@progbits
-#endif
-
+__CPUINITRODATA
 .code16
 
 ENTRY(trampoline_data)
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index cddfb8d386b9..596d54c660a5 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -25,14 +25,15 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/init.h>
 #include <asm/pgtable_types.h>
 #include <asm/page_types.h>
 #include <asm/msr.h>
 #include <asm/segment.h>
 #include <asm/processor-flags.h>
 
-.section .rodata, "a", @progbits
-
+/* We can free up the trampoline after bootup if cpu hotplug is not supported. */
+__CPUINITRODATA
 .code16
 
 ENTRY(trampoline_data)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 17409e8d1097..cd982f48e23e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -666,7 +666,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
         if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
                         (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
                         (val == CPUFREQ_RESUMECHANGE)) {
-                *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
+                *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
 
                 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
                 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 0ccb57d5ee35..a46acccec38a 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -45,9 +45,9 @@ PHDRS {
         text PT_LOAD FLAGS(5);          /* R_E */
         data PT_LOAD FLAGS(7);          /* RWE */
 #ifdef CONFIG_X86_64
-        user PT_LOAD FLAGS(7);          /* RWE */
+        user PT_LOAD FLAGS(5);          /* R_E */
 #ifdef CONFIG_SMP
-        percpu PT_LOAD FLAGS(7);        /* RWE */
+        percpu PT_LOAD FLAGS(6);        /* RW_ */
 #endif
         init PT_LOAD FLAGS(7);          /* RWE */
 #endif
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 417c9f5b4afa..8aa85f17667e 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -243,10 +243,6 @@ static void __restore_processor_state(struct saved_context *ctxt)
 
         do_fpu_end();
         mtrr_bp_restore();
-
-#ifdef CONFIG_X86_OLD_MCE
-        mcheck_init(&boot_cpu_data);
-#endif
 }
 
 /* Needed by apm.c */