Diffstat (limited to 'arch/x86/kernel')
38 files changed, 298 insertions, 182 deletions
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 8c44c232efcb..59cdfa4686b2 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -48,7 +48,7 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
 	 * P4, Core and beyond CPUs
 	 */
 	if (c->x86_vendor == X86_VENDOR_INTEL &&
-	    (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)))
+	    (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 14)))
 		flags->bm_control = 0;
 }
 EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
index 7da00b799cda..060fff8f5c5b 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.lds.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
@@ -57,5 +57,8 @@ SECTIONS
 		*(.note*)
 	}
 
+	/*
+	 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
+	 */
 	. = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!");
 }
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 98f230f6a28d..0285521e0a99 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1220,6 +1220,8 @@ static void __detach_device(struct protection_domain *domain, u16 devid)
 	amd_iommu_dev_table[devid].data[1] = 0;
 	amd_iommu_dev_table[devid].data[2] = 0;
 
+	amd_iommu_apply_erratum_63(devid);
+
 	/* decrease reference counter */
 	domain->dev_cnt -= 1;
 
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index b4b61d462dcc..c20001e4f556 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -240,7 +240,7 @@ static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
 	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 }
 
-static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
+static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 {
 	u32 ctrl;
 
@@ -519,6 +519,26 @@ static void set_dev_entry_bit(u16 devid, u8 bit)
 	amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
 }
 
+static int get_dev_entry_bit(u16 devid, u8 bit)
+{
+	int i = (bit >> 5) & 0x07;
+	int _bit = bit & 0x1f;
+
+	return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
+}
+
+
+void amd_iommu_apply_erratum_63(u16 devid)
+{
+	int sysmgt;
+
+	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
+		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
+
+	if (sysmgt == 0x01)
+		set_dev_entry_bit(devid, DEV_ENTRY_IW);
+}
+
 /* Writes the specific IOMMU for a device into the rlookup table */
 static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
 {
@@ -547,6 +567,8 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
 	if (flags & ACPI_DEVFLAG_LINT1)
 		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
 
+	amd_iommu_apply_erratum_63(devid);
+
 	set_iommu_for_device(iommu, devid);
 }
 
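
The get_dev_entry_bit()/set_dev_entry_bit() pair added above indexes one bit out of a 256-bit device table entry: bit >> 5 selects a 32-bit word, bit & 0x1f the bit within it. A standalone sketch of that indexing, with a plain array standing in for amd_iommu_dev_table (illustrative only, not kernel code):

/* Model the 256-bit entry as eight 32-bit words. */
#include <stdio.h>
#include <stdint.h>

static uint32_t entry[8];	/* stands in for amd_iommu_dev_table[devid].data[] */

static void set_bit32(uint8_t bit)
{
	entry[(bit >> 5) & 0x07] |= 1u << (bit & 0x1f);
}

static int get_bit32(uint8_t bit)
{
	return (entry[(bit >> 5) & 0x07] >> (bit & 0x1f)) & 1;
}

int main(void)
{
	set_bit32(98);		/* word 3 (98 >> 5), bit 2 (98 & 0x1f) */
	printf("%d %08x\n", get_bit32(98), (unsigned)entry[3]);	/* 1 00000004 */
	return 0;
}

Erratum 63 then reads the two SYSMGT bits this way and, for the 0x01 combination, sets the IW bit.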
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index f5f5886a6b53..326c25477d3d 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -352,14 +352,14 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
 
 	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
 		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
-		if (alias.s.base == 0) {
+		if (alias.s.enable && alias.s.base == 0) {
 			*size = (1UL << alias.s.m_alias);
 			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
 			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
 			return;
 		}
 	}
-	BUG();
+	*base = *size = 0;
 }
 
 enum map_type {map_wb, map_uc};
@@ -619,12 +619,12 @@ void __init uv_system_init(void)
 		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
 		uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
 		uv_cpu_hub_info(cpu)->m_val = m_val;
-		uv_cpu_hub_info(cpu)->n_val = m_val;
+		uv_cpu_hub_info(cpu)->n_val = n_val;
 		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
 		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
 		uv_cpu_hub_info(cpu)->pnode = pnode;
 		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
-		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
+		uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
 		uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
 		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
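
The gpa_mask change above is an integer-width fix: the literal 1 is a 32-bit int, so 1 << (m_val + n_val) is undefined once the shift count reaches 32, while 1UL is 64 bits wide on x86_64. A minimal sketch (assumes an LP64 target such as x86_64 Linux):

#include <stdio.h>

int main(void)
{
	int shift = 39;				/* e.g. m_val + n_val on a UV hub */
	unsigned long mask = (1UL << shift) - 1;	/* 64-bit arithmetic */

	printf("mask = %#lx\n", mask);		/* 0x7fffffffff */
	/* (1 << shift) - 1 would shift a 32-bit int by 39: undefined
	 * behavior, and never the intended 39-bit mask. */
	return 0;
}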
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 7d5c3b0ea8da..8b581d3905cb 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -526,15 +526,21 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
 
 static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 {
-	/* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+	/* Intel Xeon Processor 7100 Series Specification Update
+	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
 	 * AL30: A Machine Check Exception (MCE) Occurring during an
 	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
-	 * Both Processor Cores to Lock Up when HT is enabled*/
+	 * Both Processor Cores to Lock Up. */
 	if (c->x86_vendor == X86_VENDOR_INTEL) {
 		if ((c->x86 == 15) &&
 		    (c->x86_model == 6) &&
-		    (c->x86_mask == 8) && smt_capable())
+		    (c->x86_mask == 8)) {
+			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
+			    "Xeon(R) 7100 Errata AL30, processors may "
+			    "lock up on frequency changes: disabling "
+			    "acpi-cpufreq.\n");
 			return -ENODEV;
+		    }
 	}
 	return 0;
 }
@@ -549,13 +555,18 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	unsigned int result = 0;
 	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
 	struct acpi_processor_performance *perf;
+#ifdef CONFIG_SMP
+	static int blacklisted;
+#endif
 
 	dprintk("acpi_cpufreq_cpu_init\n");
 
 #ifdef CONFIG_SMP
-	result = acpi_cpufreq_blacklist(c);
-	if (result)
-		return result;
+	if (blacklisted)
+		return blacklisted;
+	blacklisted = acpi_cpufreq_blacklist(c);
+	if (blacklisted)
+		return blacklisted;
 #endif
 
 	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
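
The static blacklisted variable above memoizes the blacklist verdict, so the check and its log message run once rather than once per CPU. The same caching pattern in a standalone sketch, with a hypothetical check_blacklist() standing in for acpi_cpufreq_blacklist():

#include <stdio.h>

static int check_blacklist(void)
{
	puts("running blacklist check");	/* expensive / noisy part */
	return -19;				/* pretend -ENODEV */
}

static int cpu_init(void)
{
	static int blacklisted;			/* persists across calls */

	if (blacklisted)
		return blacklisted;
	blacklisted = check_blacklist();
	if (blacklisted)
		return blacklisted;
	return 0;
}

int main(void)
{
	printf("%d\n", cpu_init());	/* runs the check, returns -19 */
	printf("%d\n", cpu_init());	/* cached: -19, no second message */
	return 0;
}

Note that, as in the patch, only a nonzero (blacklisted) verdict is cached; a clean result is re-checked on each call.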
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index ce2ed3e4aad9..cabd2fa3fc93 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -813,7 +813,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 		memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr));
 		break;
 	case 1 ... 15:
-		longhaul_version = TYPE_LONGHAUL_V1;
+		longhaul_version = TYPE_LONGHAUL_V2;
 		if (c->x86_mask < 8) {
 			cpu_model = CPU_SAMUEL2;
 			cpuname = "C3 'Samuel 2' [C5B]";
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 6394aa5c7985..3f12dabeab52 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1022,7 +1022,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
 	 * set it to 1 to avoid problems in the future.
 	 * For all others it's a BIOS bug.
 	 */
-	if (!boot_cpu_data.x86 == 0x11)
+	if (boot_cpu_data.x86 != 0x11)
 		printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
 				"latency\n");
 	max_latency = 1;
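
The powernow-k8 fix above is an operator-precedence bug: ! binds tighter than ==, so !boot_cpu_data.x86 == 0x11 compares 0 or 1 against 0x11 and is never true, which silently suppressed the firmware warning. A small demonstration:

#include <stdio.h>

int main(void)
{
	int family;

	for (family = 0x10; family <= 0x12; family++)
		printf("family %#x: buggy=%d fixed=%d\n",
		       family, !family == 0x11, family != 0x11);
	/* buggy is 0 for every family: (!family) is 0 or 1, never 0x11 */
	return 0;
}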
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 6911e91fb4f6..3ae5a7a3a500 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -232,28 +232,23 @@ static unsigned int speedstep_detect_chipset(void)
 	return 0;
 }
 
-struct get_freq_data {
-	unsigned int speed;
-	unsigned int processor;
-};
-
-static void get_freq_data(void *_data)
+static void get_freq_data(void *_speed)
 {
-	struct get_freq_data *data = _data;
+	unsigned int *speed = _speed;
 
-	data->speed = speedstep_get_frequency(data->processor);
+	*speed = speedstep_get_frequency(speedstep_processor);
 }
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-	struct get_freq_data data = { .processor = cpu };
+	unsigned int speed;
 
 	/* You're supposed to ensure CPU is online. */
-	if (smp_call_function_single(cpu, get_freq_data, &data, 1) != 0)
+	if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
 		BUG();
 
-	dprintk("detected %u kHz as current frequency\n", data.speed);
-	return data.speed;
+	dprintk("detected %u kHz as current frequency\n", speed);
+	return speed;
 }
 
 /**
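
Passing &speed into smp_call_function_single() is safe only because the wait=1 argument blocks until the remote handler has run and stored through the pointer. A userspace analogue of that contract, using a joined thread (illustrative only; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static void *get_freq(void *_speed)
{
	*(unsigned int *)_speed = 1600000;	/* pretend kHz reading */
	return NULL;
}

int main(void)
{
	unsigned int speed;		/* lives on the caller's stack */
	pthread_t t;

	pthread_create(&t, NULL, get_freq, &speed);
	pthread_join(&t, NULL);		/* like wait=1: result is ready */
	printf("detected %u kHz\n", speed);
	return 0;
}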
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 7029f0e2acad..472763d92098 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -98,8 +98,9 @@ static struct notifier_block mce_raise_nb = {
 };
 
 /* Inject mce on current CPU */
-static int raise_local(struct mce *m)
+static int raise_local(void)
 {
+	struct mce *m = &__get_cpu_var(injectm);
 	int context = MCJ_CTX(m->inject_flags);
 	int ret = 0;
 	int cpu = m->extcpu;
@@ -167,12 +168,12 @@ static void raise_mce(struct mce *m)
 			}
 			cpu_relax();
 		}
-		raise_local(m);
+		raise_local();
 		put_cpu();
 		put_online_cpus();
 	} else
 #endif
-		raise_local(m);
+		raise_local();
 }
 
 /* Error injection interface */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 2f5aab26320e..721a77ca8115 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -85,6 +85,18 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
 static DEFINE_PER_CPU(struct mce, mces_seen);
 static int cpu_missing;
 
+static void default_decode_mce(struct mce *m)
+{
+	pr_emerg("No human readable MCE decoding support on this CPU type.\n");
+	pr_emerg("Run the message through 'mcelog --ascii' to decode.\n");
+}
+
+/*
+ * CPU/chipset specific EDAC code can register a callback here to print
+ * MCE errors in a human-readable form:
+ */
+void (*x86_mce_decode_callback)(struct mce *m) = default_decode_mce;
+EXPORT_SYMBOL(x86_mce_decode_callback);
 
 /* MCA banks polled by the period polling timer for corrected events */
 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
@@ -165,49 +177,46 @@ void mce_log(struct mce *mce)
 	set_bit(0, &mce_need_notify);
 }
 
-void __weak decode_mce(struct mce *m)
-{
-	return;
-}
-
 static void print_mce(struct mce *m)
 {
-	printk(KERN_EMERG
-	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
+	pr_emerg("CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
 	       m->extcpu, m->mcgstatus, m->bank, m->status);
+
 	if (m->ip) {
-		printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
+		pr_emerg("RIP%s %02x:<%016Lx> ",
 			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
 			m->cs, m->ip);
+
 		if (m->cs == __KERNEL_CS)
 			print_symbol("{%s}", m->ip);
-		printk(KERN_CONT "\n");
+		pr_cont("\n");
 	}
-	printk(KERN_EMERG "TSC %llx ", m->tsc);
+
+	pr_emerg("TSC %llx ", m->tsc);
 	if (m->addr)
-		printk(KERN_CONT "ADDR %llx ", m->addr);
+		pr_cont("ADDR %llx ", m->addr);
 	if (m->misc)
-		printk(KERN_CONT "MISC %llx ", m->misc);
-	printk(KERN_CONT "\n");
-	printk(KERN_EMERG "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
-			m->cpuvendor, m->cpuid, m->time, m->socketid,
-			m->apicid);
 
-	decode_mce(m);
+	pr_cont("\n");
+	pr_emerg("PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
+		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid);
+
+	/*
+	 * Print out human-readable details about the MCE error,
+	 * (if the CPU has an implementation for that):
+	 */
+	x86_mce_decode_callback(m);
 }
 
 static void print_mce_head(void)
 {
-	printk(KERN_EMERG "\nHARDWARE ERROR\n");
+	pr_emerg("\nHARDWARE ERROR\n");
 }
 
 static void print_mce_tail(void)
 {
-	printk(KERN_EMERG "This is not a software problem!\n"
-#if (!defined(CONFIG_EDAC) || !defined(CONFIG_CPU_SUP_AMD))
-	"Run through mcelog --ascii to decode and contact your hardware vendor\n"
-#endif
-	);
+	pr_emerg("This is not a software problem!\n");
 }
 
 #define PANIC_TIMEOUT 5 /* 5 seconds */
@@ -221,6 +230,7 @@ static atomic_t mce_fake_paniced;
 static void wait_for_panic(void)
 {
 	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
+
 	preempt_disable();
 	local_irq_enable();
 	while (timeout-- > 0)
@@ -288,6 +298,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
 static int msr_to_offset(u32 msr)
 {
 	unsigned bank = __get_cpu_var(injectm.bank);
+
 	if (msr == rip_msr)
 		return offsetof(struct mce, ip);
 	if (msr == MSR_IA32_MCx_STATUS(bank))
@@ -305,13 +316,25 @@ static int msr_to_offset(u32 msr)
 static u64 mce_rdmsrl(u32 msr)
 {
 	u64 v;
+
 	if (__get_cpu_var(injectm).finished) {
 		int offset = msr_to_offset(msr);
+
 		if (offset < 0)
 			return 0;
 		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
 	}
-	rdmsrl(msr, v);
+
+	if (rdmsrl_safe(msr, &v)) {
+		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
+		/*
+		 * Return zero in case the access faulted. This should
+		 * not happen normally but can happen if the CPU does
+		 * something weird, or if the code is buggy.
+		 */
+		v = 0;
+	}
+
 	return v;
 }
 
@@ -319,6 +342,7 @@ static void mce_wrmsrl(u32 msr, u64 v)
 {
 	if (__get_cpu_var(injectm).finished) {
 		int offset = msr_to_offset(msr);
+
 		if (offset >= 0)
 			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
 		return;
@@ -415,7 +439,7 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
 		m->ip = mce_rdmsrl(rip_msr);
 }
 
-#ifdef CONFIG_X86_LOCAL_APIC 
+#ifdef CONFIG_X86_LOCAL_APIC
 /*
  * Called after interrupts have been reenabled again
  * when a MCE happened during an interrupts off region
@@ -1172,6 +1196,7 @@ static int mce_banks_init(void)
 		return -ENOMEM;
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		b->ctl = -1ULL;
 		b->init = 1;
 	}
@@ -1189,7 +1214,8 @@ static int __cpuinit mce_cap_init(void)
 	rdmsrl(MSR_IA32_MCG_CAP, cap);
 
 	b = cap & MCG_BANKCNT_MASK;
-	printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);
+	if (!banks)
+		printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);
 
 	if (b > MAX_NR_BANKS) {
 		printk(KERN_WARNING
@@ -1203,6 +1229,7 @@ static int __cpuinit mce_cap_init(void)
 	banks = b;
 	if (!mce_banks) {
 		int err = mce_banks_init();
+
 		if (err)
 			return err;
 	}
@@ -1237,6 +1264,7 @@ static void mce_init(void)
 
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		if (!b->init)
 			continue;
 		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
@@ -1626,6 +1654,7 @@ static int mce_disable(void)
 
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
 	}
@@ -1911,6 +1940,7 @@ static void mce_disable_cpu(void *h)
 		cmci_clear();
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
 	}
@@ -1928,6 +1958,7 @@ static void mce_reenable_cpu(void *h)
 		cmci_reenable();
 	for (i = 0; i < banks; i++) {
 		struct mce_bank *b = &mce_banks[i];
+
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
 	}
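
The x86_mce_decode_callback hunk replaces a __weak function with an exported function pointer preloaded with a default, so an EDAC module can install a decoder at runtime. The pattern in a standalone sketch (invented names, not the kernel API):

#include <stdio.h>

struct mce { int bank; };

static void default_decode(struct mce *m)
{
	printf("no decoder for bank %d; run mcelog --ascii\n", m->bank);
}

/* the role EXPORT_SYMBOL(x86_mce_decode_callback) plays for EDAC */
void (*decode_callback)(struct mce *m) = default_decode;

static void edac_decode(struct mce *m)
{
	printf("bank %d: decoded by EDAC\n", m->bank);
}

int main(void)
{
	struct mce m = { .bank = 4 };

	decode_callback(&m);		/* default path */
	decode_callback = edac_decode;	/* what a decoder module would do */
	decode_callback(&m);		/* registered decoder */
	return 0;
}

Unlike a __weak override, which is resolved once at link time, the pointer can be set (and reset) by a loadable module.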
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 889f665fe93d..7c785634af2b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -8,6 +8,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
+#include <linux/sched.h>
 #include <asm/apic.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 63a56d147e4a..b3a1dba75330 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -34,20 +34,31 @@
 /* How long to wait between reporting thermal events */
 #define CHECK_INTERVAL		(300 * HZ)
 
-static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
-static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);
-static DEFINE_PER_CPU(bool, thermal_throttle_active);
+/*
+ * Current thermal throttling state:
+ */
+struct thermal_state {
+	bool			is_throttled;
+
+	u64			next_check;
+	unsigned long		throttle_count;
+	unsigned long		last_throttle_count;
+};
+
+static DEFINE_PER_CPU(struct thermal_state, thermal_state);
 
 static atomic_t therm_throt_en	= ATOMIC_INIT(0);
 
 #ifdef CONFIG_SYSFS
 #define define_therm_throt_sysdev_one_ro(_name)				\
 	static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL)
 
 #define define_therm_throt_sysdev_show_func(name)			\
-static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev,	\
-			struct sysdev_attribute *attr,			\
-			char *buf)					\
+									\
+static ssize_t therm_throt_sysdev_show_##name(				\
+			struct sys_device *dev,				\
+			struct sysdev_attribute *attr,			\
+			char *buf)					\
 {									\
 	unsigned int cpu = dev->id;					\
 	ssize_t ret;							\
@@ -55,7 +66,7 @@ static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \
 	preempt_disable();	/* CPU hotplug */			\
 	if (cpu_online(cpu))						\
 		ret = sprintf(buf, "%lu\n",				\
-			      per_cpu(thermal_throttle_##name, cpu));	\
+			      per_cpu(thermal_state, cpu).name);	\
 	else								\
 		ret = 0;						\
 	preempt_enable();						\
@@ -63,11 +74,11 @@ static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \
 	return ret;							\
 }
 
-define_therm_throt_sysdev_show_func(count);
-define_therm_throt_sysdev_one_ro(count);
+define_therm_throt_sysdev_show_func(throttle_count);
+define_therm_throt_sysdev_one_ro(throttle_count);
 
 static struct attribute *thermal_throttle_attrs[] = {
-	&attr_count.attr,
+	&attr_throttle_count.attr,
 	NULL
 };
 
@@ -93,33 +104,39 @@ static struct attribute_group thermal_throttle_attr_group = {
  * 1 : Event should be logged further, and a message has been
  *     printed to the syslog.
  */
-static int therm_throt_process(int curr)
+static int therm_throt_process(bool is_throttled)
 {
-	unsigned int cpu = smp_processor_id();
-	__u64 tmp_jiffs = get_jiffies_64();
-	bool was_throttled = __get_cpu_var(thermal_throttle_active);
-	bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr;
+	struct thermal_state *state;
+	unsigned int this_cpu;
+	bool was_throttled;
+	u64 now;
+
+	this_cpu = smp_processor_id();
+	now = get_jiffies_64();
+	state = &per_cpu(thermal_state, this_cpu);
+
+	was_throttled = state->is_throttled;
+	state->is_throttled = is_throttled;
 
 	if (is_throttled)
-		__get_cpu_var(thermal_throttle_count)++;
+		state->throttle_count++;
 
-	if (!(was_throttled ^ is_throttled) &&
-	    time_before64(tmp_jiffs, __get_cpu_var(next_check)))
+	if (time_before64(now, state->next_check) &&
+	    state->throttle_count != state->last_throttle_count)
 		return 0;
 
-	__get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL;
+	state->next_check = now + CHECK_INTERVAL;
+	state->last_throttle_count = state->throttle_count;
 
 	/* if we just entered the thermal event */
 	if (is_throttled) {
-		printk(KERN_CRIT "CPU%d: Temperature above threshold, "
-		       "cpu clock throttled (total events = %lu)\n",
-		       cpu, __get_cpu_var(thermal_throttle_count));
+		printk(KERN_CRIT "CPU%d: Temperature above threshold, cpu clock throttled (total events = %lu)\n", this_cpu, state->throttle_count);
 
 		add_taint(TAINT_MACHINE_CHECK);
 		return 1;
 	}
 	if (was_throttled) {
-		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
+		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", this_cpu);
 		return 1;
 	}
 
@@ -213,7 +230,7 @@ static void intel_thermal_interrupt(void)
 	__u64 msr_val;
 
 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
-	if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT))
+	if (therm_throt_process((msr_val & THERM_STATUS_PROCHOT) != 0))
 		mce_log_therm_throt_event(msr_val);
 }
 
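
therm_throt_process() above folds the per-CPU variables into one struct and rate-limits logging: a report is suppressed while the check interval has not expired and new throttle events keep arriving. A condensed userspace model of that logic (plain integers stand in for jiffies; not the kernel's per-CPU machinery):

#include <stdio.h>
#include <stdbool.h>

#define CHECK_INTERVAL 300

struct thermal_state {
	bool is_throttled;
	unsigned long long next_check;
	unsigned long throttle_count;
	unsigned long last_throttle_count;
};

static int process(struct thermal_state *s, bool throttled,
		   unsigned long long now)
{
	bool was = s->is_throttled;

	s->is_throttled = throttled;
	if (throttled)
		s->throttle_count++;

	if (now < s->next_check &&
	    s->throttle_count != s->last_throttle_count)
		return 0;			/* suppressed */

	s->next_check = now + CHECK_INTERVAL;
	s->last_throttle_count = s->throttle_count;

	if (throttled) {
		printf("t=%llu: throttled (total %lu)\n", now, s->throttle_count);
		return 1;
	}
	if (was) {
		printf("t=%llu: back to normal\n", now);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct thermal_state s = { 0 };
	unsigned long long t;

	for (t = 0; t < 10; t++)		/* a throttling storm: */
		process(&s, true, t);		/* only t=0 is reported */
	process(&s, false, 400);		/* interval over: "normal" prints */
	return 0;
}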
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 315738c74aad..73c86db5acbe 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -846,7 +846,7 @@ int __init mtrr_cleanup(unsigned address_bits)
 	sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
 
 	range_sums = sum_ranges(range, nr_range);
-	printk(KERN_INFO "total RAM coverred: %ldM\n",
+	printk(KERN_INFO "total RAM covered: %ldM\n",
 	       range_sums >> (20 - PAGE_SHIFT));
 
 	if (mtrr_chunk_size && mtrr_gran_size) {
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index f04e72527604..3c1b12d461d1 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -96,17 +96,24 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
 	unsigned long long base, size;
 	char *ptr;
 	char line[LINE_SIZE];
+	int length;
 	size_t linelen;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
-	if (!len)
-		return -EINVAL;
 
 	memset(line, 0, LINE_SIZE);
-	if (len > LINE_SIZE)
-		len = LINE_SIZE;
-	if (copy_from_user(line, buf, len - 1))
+
+	length = len;
+	length--;
+
+	if (length > LINE_SIZE - 1)
+		length = LINE_SIZE - 1;
+
+	if (length < 0)
+		return -EINVAL;
+
+	if (copy_from_user(line, buf, length))
 		return -EFAULT;
 
 	linelen = strlen(line);
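
The mtrr_write() change above funnels the unsigned size_t length through a signed int before subtracting, so a zero-byte write becomes -1 and is rejected instead of wrapping around. The clamping logic in isolation:

#include <stdio.h>

#define LINE_SIZE 80

static int clamp_write_len(size_t len)
{
	int length = len;		/* signed copy of the user length */

	length--;			/* leave room for the NUL terminator */
	if (length > LINE_SIZE - 1)
		length = LINE_SIZE - 1;
	if (length < 0)
		return -1;		/* stands in for -EINVAL */
	return length;
}

int main(void)
{
	printf("%d\n", clamp_write_len(0));	/* -1: rejected, no wraparound */
	printf("%d\n", clamp_write_len(10));	/* 9 */
	printf("%d\n", clamp_write_len(500));	/* 79: clamped to the buffer */
	return 0;
}

With the old unsigned arithmetic, len == 0 made len - 1 enormous, and the copy_from_user() size depended on a check that sat far away from the subtraction.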
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index a3c7adb06b78..b5801c311846 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1790,6 +1790,9 @@ void smp_perf_pending_interrupt(struct pt_regs *regs)
 void set_perf_event_pending(void)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
+	if (!x86_pmu.apic || !x86_pmu_initialized())
+		return;
+
 	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
 #endif
 }
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index f7cdb3b457aa..cd97ce18c29d 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -16,6 +16,22 @@ static void *kdump_buf_page;
 /* Stores the physical address of elf header of crash image. */
 unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
 
+static inline bool is_crashed_pfn_valid(unsigned long pfn)
+{
+#ifndef CONFIG_X86_PAE
+	/*
+	 * non-PAE kdump kernel executed from a PAE one will crop high pte
+	 * bits and poke unwanted space counting again from address 0, we
+	 * don't want that. pte must fit into unsigned long. In fact the
+	 * test checks high 12 bits for being zero (pfn will be shifted left
+	 * by PAGE_SHIFT).
+	 */
+	return pte_pfn(pfn_pte(pfn, __pgprot(0))) == pfn;
+#else
+	return true;
+#endif
+}
+
 /**
  * copy_oldmem_page - copy one page from "oldmem"
  * @pfn: page frame number to be copied
@@ -41,6 +57,9 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	if (!csize)
 		return 0;
 
+	if (!is_crashed_pfn_valid(pfn))
+		return -EFAULT;
+
 	vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
 
 	if (!userbuf) {
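
is_crashed_pfn_valid() uses a round-trip test: pack the pfn into a pte and unpack it; if any high bits were cropped, the result differs. A sketch of the same idea with a 32-bit "pte" and a 12-bit page shift modeling the non-PAE layout (illustrative types, not the kernel's pte definitions):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

static uint32_t pfn_to_pte(uint64_t pfn) { return (uint32_t)(pfn << PAGE_SHIFT); }
static uint64_t pte_to_pfn(uint32_t pte) { return pte >> PAGE_SHIFT; }

static int pfn_valid_roundtrip(uint64_t pfn)
{
	/* valid only if the narrow format can represent the pfn */
	return pte_to_pfn(pfn_to_pte(pfn)) == pfn;
}

int main(void)
{
	printf("%d\n", pfn_valid_roundtrip(0x12345));	/* 1: fits in 20 bits */
	printf("%d\n", pfn_valid_roundtrip(1ULL << 21));/* 0: high bits cropped */
	return 0;
}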
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 85419bb7d4ab..d17d482a04f4 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1378,8 +1378,8 @@ static unsigned long ram_alignment(resource_size_t pos)
 	if (mb < 16)
 		return 1024*1024;
 
-	/* To 32MB for anything above that */
-	return 32*1024*1024;
+	/* To 64MB for anything above that */
+	return 64*1024*1024;
 }
 
 #define MAX_RESOURCE_SIZE ((resource_size_t)-1)
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 2acfd3fdc0cc..b9c830c12b4a 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -178,6 +178,11 @@ asmlinkage void early_printk(const char *fmt, ...)
 
 static inline void early_console_register(struct console *con, int keep_early)
 {
+	if (early_console->index != -1) {
+		printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
+		       con->name);
+		return;
+	}
 	early_console = con;
 	if (keep_early)
 		early_console->flags &= ~CON_BOOT;
@@ -201,8 +206,11 @@ static int __init setup_early_printk(char *buf)
 
 	while (*buf != '\0') {
 		if (!strncmp(buf, "serial", 6)) {
-			early_serial_init(buf + 6);
+			buf += 6;
+			early_serial_init(buf);
 			early_console_register(&early_serial_console, keep);
+			if (!strncmp(buf, ",ttyS", 5))
+				buf += 5;
 		}
 		if (!strncmp(buf, "ttyS", 4)) {
 			early_serial_init(buf + 4);
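
The setup_early_printk() change advances buf past "serial" and then swallows a following ",ttyS" so the later "ttyS" branch cannot register the same console a second time. A condensed sketch of that scan (the real function also handles keep, vga, usb and more):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char *buf = "serial,ttyS0,115200";

	while (*buf != '\0') {
		if (!strncmp(buf, "serial", 6)) {
			buf += 6;
			printf("register serial console\n");
			if (!strncmp(buf, ",ttyS", 5))
				buf += 5;	/* swallow ",ttyS" */
		}
		if (!strncmp(buf, "ttyS", 4))
			printf("register ttyS console\n");
		buf++;
	}
	return 0;	/* prints "register serial console" exactly once */
}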
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index ad5bd988fb79..cdcfb122f256 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -454,8 +454,10 @@ void __init efi_init(void)
 	if (add_efi_memmap)
 		do_add_efi_memmap();
 
+#ifdef CONFIG_X86_32
 	x86_platform.get_wallclock = efi_get_time;
 	x86_platform.set_wallclock = efi_set_rtc_mmss;
+#endif
 
 	/* Setup for EFI runtime service */
 	reboot_type = BOOT_EFI;
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 218aad7ee76e..050c278481b1 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -79,7 +79,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  * any particular GDT layout, because we load our own as soon as we
  * can.
  */
-.section .text.head,"ax",@progbits
+__HEAD
 ENTRY(startup_32)
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 	   us to not reload segments */
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index d0bc0a13a437..780cd928fcd5 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -40,7 +40,7 @@ L4_START_KERNEL = pgd_index(__START_KERNEL_map)
 L3_START_KERNEL = pud_index(__START_KERNEL_map)
 
 	.text
-	.section .text.head
+	__HEAD
 	.code64
 	.globl startup_64
 startup_64:
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 43cec6bdda63..9c3bd4a2050e 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -10,6 +10,16 @@
 EXPORT_SYMBOL(mcount);
 #endif
 
+/*
+ * Note, this is a prototype to get at the symbol for
+ * the export, but dont use it from C code, it is used
+ * by assembly code and is not using C calling convention!
+ */
+#ifndef CONFIG_X86_CMPXCHG64
+extern void cmpxchg8b_emu(void);
+EXPORT_SYMBOL(cmpxchg8b_emu);
+#endif
+
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
 
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 74656d1d4e30..04bbd5278568 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -63,10 +63,10 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
 	seq_printf(p, "  Spurious interrupts\n");
-	seq_printf(p, "%*s: ", prec, "CNT");
+	seq_printf(p, "%*s: ", prec, "PMI");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
-	seq_printf(p, "  Performance counter interrupts\n");
+	seq_printf(p, "  Performance monitoring interrupts\n");
 	seq_printf(p, "%*s: ", prec, "PND");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 366baa179913..f4c538b681ca 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -317,6 +317,12 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
 		return UCODE_NFOUND;
 	}
 
+	if (*(u32 *)firmware->data != UCODE_MAGIC) {
+		printk(KERN_ERR "microcode: invalid UCODE_MAGIC (0x%08x)\n",
+		       *(u32 *)firmware->data);
+		return UCODE_ERROR;
+	}
+
 	ret = generic_load_microcode(cpu, firmware->data, firmware->size);
 
 	release_firmware(firmware);
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 64b838eac18c..a6e804d16c35 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -35,7 +35,7 @@ int iommu_detected __read_mostly = 0;
 
 /*
  * This variable becomes 1 if iommu=pt is passed on the kernel command line.
- * If this variable is 1, IOMMU implementations do no DMA ranslation for
+ * If this variable is 1, IOMMU implementations do no DMA translation for
  * devices and allow every device to access to whole physical memory. This is
  * useful if a user want to use an IOMMU only for KVM device assignment to
  * guests and not for driver dma translation.
@@ -45,12 +45,10 @@ int iommu_pass_through __read_mostly;
 dma_addr_t bad_dma_address __read_mostly = 0;
 EXPORT_SYMBOL(bad_dma_address);
 
-/* Dummy device used for NULL arguments (normally ISA). Better would
-   be probably a smaller DMA mask, but this is bug-to-bug compatible
-   to older i386. */
+/* Dummy device used for NULL arguments (normally ISA). */
 struct device x86_dma_fallback_dev = {
 	.init_name = "fallback device",
-	.coherent_dma_mask = DMA_BIT_MASK(32),
+	.coherent_dma_mask = ISA_DMA_BIT_MASK,
 	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
 };
 EXPORT_SYMBOL(x86_dma_fallback_dev);
@@ -311,7 +309,7 @@ void pci_iommu_shutdown(void)
 		amd_iommu_shutdown();
 }
 /* Must execute after PCI subsystem */
-fs_initcall(pci_iommu_init);
+rootfs_initcall(pci_iommu_init);
 
 #ifdef CONFIG_PCI
 /* Many VIA bridges seem to corrupt data for DAC. Disable it here */
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 98a827ee9ed7..a7f1b64f86e0 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -16,6 +16,7 @@
 #include <linux/agp_backend.h>
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/spinlock.h>
 #include <linux/pci.h>
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ad535b683170..eb62cbcaa490 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -664,3 +664,8 @@ long sys_arch_prctl(int code, unsigned long addr)
 	return do_arch_prctl(current, code, addr);
 }
 
+unsigned long KSTK_ESP(struct task_struct *task)
+{
+	return (test_tsk_thread_flag(task, TIF_IA32)) ?
+			(task_pt_regs(task)->sp) : ((task)->thread.usersp);
+}
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 27349f92a6d7..f93078746e00 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -4,6 +4,7 @@
 #include <linux/pm.h>
 #include <linux/efi.h>
 #include <linux/dmi.h>
+#include <linux/sched.h>
 #include <linux/tboot.h>
 #include <acpi/reboot.h>
 #include <asm/io.h>
@@ -435,6 +436,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
 		},
 	},
+	{	/* Handle problems with rebooting on Apple Macmini3,1 */
+		.callback = set_pci_reboot,
+		.ident = "Apple Macmini3,1",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
+		},
+	},
 	{ }
 };
 
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index e09f0e2c14b5..2a34f9c5be21 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -660,6 +660,13 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
 		},
 	},
 	{
+		.callback = dmi_low_memory_corruption,
+		.ident = "Phoenix/MSC BIOS",
+		.matches = {
+			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"),
+		},
+	},
+	{
 	/*
 	 * AMI BIOS with low memory corruption was found on Intel DG45ID board.
 	 * It hase different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index dcb00d278512..be2573448ed9 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -38,7 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
 #ifdef CONFIG_FRAME_POINTER
 		return *(unsigned long *)(regs->bp + sizeof(long));
 #else
-		unsigned long *sp = (unsigned long *)regs->sp;
+		unsigned long *sp =
+			(unsigned long *)kernel_stack_pointer(regs);
 		/*
 		 * Return address is either directly at stack pointer
 		 * or above a saved flags. Eflags has bits 22-31 zero,
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 503c1f2e8835..1740c85e24bb 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -23,8 +23,6 @@
 static struct bau_control	**uv_bau_table_bases __read_mostly;
 static int			uv_bau_retry_limit __read_mostly;
 
-/* position of pnode (which is nasid>>1): */
-static int			uv_nshift __read_mostly;
 /* base pnode in this partition */
 static int			uv_partition_base_pnode __read_mostly;
 
@@ -723,7 +721,7 @@ uv_activation_descriptor_init(int node, int pnode)
 	BUG_ON(!adp);
 
 	pa = uv_gpa(adp); /* need the real nasid*/
-	n = pa >> uv_nshift;
+	n = uv_gpa_to_pnode(pa);
 	m = pa & uv_mmask;
 
 	uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
@@ -778,7 +776,7 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 	 * need the pnode of where the memory was really allocated
 	 */
 	pa = uv_gpa(pqp);
-	pn = pa >> uv_nshift;
+	pn = uv_gpa_to_pnode(pa);
 	uv_write_global_mmr64(pnode,
 			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
 			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
@@ -843,8 +841,7 @@ static int __init uv_bau_init(void)
 				       GFP_KERNEL, cpu_to_node(cur_cpu));
 
 	uv_bau_retry_limit = 1;
-	uv_nshift = uv_hub_info->n_val;
-	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
+	uv_mmask = (1UL << uv_hub_info->m_val) - 1;
 	nblades = uv_num_possible_blades();
 
 	uv_bau_table_bases = (struct bau_control **)
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index 699f7eeb896a..cd022121cab6 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -3,8 +3,16 @@
 #include <asm/trampoline.h>
 #include <asm/e820.h>
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
+#define __trampinit
+#define __trampinitdata
+#else
+#define __trampinit __cpuinit
+#define __trampinitdata __cpuinitdata
+#endif
+
 /* ready for x86_64 and x86 */
-unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE);
+unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);
 
 void __init reserve_trampoline_memory(void)
 {
@@ -26,7 +34,7 @@ void __init reserve_trampoline_memory(void)
  * bootstrap into the page concerned. The caller
  * has made sure it's suitably aligned.
  */
-unsigned long __cpuinit setup_trampoline(void)
+unsigned long __trampinit setup_trampoline(void)
 {
 	memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
 	return virt_to_phys(trampoline_base);
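
The __trampinit/__trampinitdata macros above are conditional annotations: under X86_64 plus ACPI_SLEEP they expand to nothing (the trampoline must survive past init for resume), otherwise to __cpuinit so the memory can be reclaimed. A sketch of the trick, with a GCC section attribute standing in for __cpuinit (GCC-specific, illustrative only):

#include <stdio.h>

#define KEEP_AFTER_INIT 1	/* plays the role of CONFIG_ACPI_SLEEP */

#if KEEP_AFTER_INIT
#define __trampinit		/* expands to nothing: stays in .text */
#else
#define __trampinit __attribute__((section(".init.text")))
#endif

static unsigned long __trampinit setup_trampoline(void)
{
	return 0x9f000;		/* pretend physical address */
}

int main(void)
{
	printf("%#lx\n", setup_trampoline());
	return 0;
}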
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
index 596d54c660a5..3af2dff58b21 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/kernel/trampoline_64.S
@@ -32,8 +32,12 @@
 #include <asm/segment.h>
 #include <asm/processor-flags.h>
 
+#ifdef CONFIG_ACPI_SLEEP
+	.section .rodata, "a", @progbits
+#else
 /* We can free up the trampoline after bootup if cpu hotplug is not supported. */
 __CPUINITRODATA
+#endif
 .code16
 
 ENTRY(trampoline_data)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a665c71352b8..7e37dcee0cc3 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -72,11 +72,9 @@ char ignore_fpu_irq;
 
 /*
  * The IDT has to be page-aligned to simplify the Pentium
- * F0 0F bug workaround.. We have a special link segment
- * for this.
+ * F0 0F bug workaround.
  */
-gate_desc idt_table[NR_VECTORS]
-	__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
+gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
 #endif
 
 DECLARE_BITMAP(used_vectors, NR_VECTORS);
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 027b5b498993..f37930954d15 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -114,7 +114,7 @@ void __cpuinit check_tsc_sync_source(int cpu)
 		return;
 
 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
-		pr_info("Skipping synchronization checks as TSC is reliable.\n");
+		printk_once(KERN_INFO "Skipping synchronization checks as TSC is reliable.\n");
 		return;
 	}
 
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 31e6f6cfe53e..d430e4c30193 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -648,7 +648,7 @@ static inline int __init activate_vmi(void)
 
 	pv_info.paravirt_enabled = 1;
 	pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
-	pv_info.name = "vmi";
+	pv_info.name = "vmi [deprecated]";
 
 	pv_init_ops.patch = vmi_patch;
 
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index a46acccec38a..3c68fe2d46cf 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
@@ -65,17 +65,11 @@ SECTIONS | |||
65 | #endif | 65 | #endif |
66 | 66 | ||
67 | /* Text and read-only data */ | 67 | /* Text and read-only data */ |
68 | |||
69 | /* bootstrapping code */ | ||
70 | .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) { | ||
71 | _text = .; | ||
72 | *(.text.head) | ||
73 | } :text = 0x9090 | ||
74 | |||
75 | /* The rest of the text */ | ||
76 | .text : AT(ADDR(.text) - LOAD_OFFSET) { | 68 | .text : AT(ADDR(.text) - LOAD_OFFSET) { |
69 | _text = .; | ||
70 | /* bootstrapping code */ | ||
71 | HEAD_TEXT | ||
77 | #ifdef CONFIG_X86_32 | 72 | #ifdef CONFIG_X86_32 |
78 | /* not really needed, already page aligned */ | ||
79 | . = ALIGN(PAGE_SIZE); | 73 | . = ALIGN(PAGE_SIZE); |
80 | *(.text.page_aligned) | 74 | *(.text.page_aligned) |
81 | #endif | 75 | #endif |
@@ -94,13 +88,7 @@ SECTIONS | |||
94 | 88 | ||
95 | NOTES :text :note | 89 | NOTES :text :note |
96 | 90 | ||
97 | /* Exception table */ | 91 | EXCEPTION_TABLE(16) :text = 0x9090 |
98 | . = ALIGN(16); | ||
99 | __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { | ||
100 | __start___ex_table = .; | ||
101 | *(__ex_table) | ||
102 | __stop___ex_table = .; | ||
103 | } :text = 0x9090 | ||
104 | 92 | ||
105 | RO_DATA(PAGE_SIZE) | 93 | RO_DATA(PAGE_SIZE) |
106 | 94 | ||
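The open-coded exception-table block removed here is replaced one-for-one by the generic EXCEPTION_TABLE() helper from <asm-generic/vmlinux.lds.h>, which emits approximately the same output (sketch from memory; the in-tree version wraps the symbols in VMLINUX_SYMBOL()):

    /* Approximate shape of the generic helper. */
    #define EXCEPTION_TABLE(align)					\
    	. = ALIGN(align);					\
    	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {	\
    		VMLINUX_SYMBOL(__start___ex_table) = .;		\
    		*(__ex_table)					\
    		VMLINUX_SYMBOL(__stop___ex_table) = .;		\
    	}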
@@ -118,7 +106,6 @@ SECTIONS | |||
118 | #endif | 106 | #endif |
119 | 107 | ||
120 | PAGE_ALIGNED_DATA(PAGE_SIZE) | 108 | PAGE_ALIGNED_DATA(PAGE_SIZE) |
121 | *(.data.idt) | ||
122 | 109 | ||
123 | CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES) | 110 | CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES) |
124 | 111 | ||
@@ -135,24 +122,21 @@ SECTIONS | |||
135 | #ifdef CONFIG_X86_64 | 122 | #ifdef CONFIG_X86_64 |
136 | 123 | ||
137 | #define VSYSCALL_ADDR (-10*1024*1024) | 124 | #define VSYSCALL_ADDR (-10*1024*1024) |
138 | #define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + SIZEOF(.data) + \ | ||
139 | PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) | ||
140 | #define VSYSCALL_VIRT_ADDR ((ADDR(.data) + SIZEOF(.data) + \ | ||
141 | PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) | ||
142 | 125 | ||
143 | #define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR) | 126 | #define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET) |
144 | #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET) | 127 | #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET) |
145 | 128 | ||
146 | #define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR) | 129 | #define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0) |
147 | #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET) | 130 | #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET) |
148 | 131 | ||
132 | . = ALIGN(4096); | ||
133 | __vsyscall_0 = .; | ||
134 | |||
149 | . = VSYSCALL_ADDR; | 135 | . = VSYSCALL_ADDR; |
150 | .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { | 136 | .vsyscall_0 : AT(VLOAD(.vsyscall_0)) { |
151 | *(.vsyscall_0) | 137 | *(.vsyscall_0) |
152 | } :user | 138 | } :user |
153 | 139 | ||
154 | __vsyscall_0 = VSYSCALL_VIRT_ADDR; | ||
155 | |||
156 | . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); | 140 | . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); |
157 | .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) { | 141 | .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) { |
158 | *(.vsyscall_fn) | 142 | *(.vsyscall_fn) |
@@ -192,11 +176,9 @@ SECTIONS | |||
192 | *(.vsyscall_3) | 176 | *(.vsyscall_3) |
193 | } | 177 | } |
194 | 178 | ||
195 | . = VSYSCALL_VIRT_ADDR + PAGE_SIZE; | 179 | . = __vsyscall_0 + PAGE_SIZE; |
196 | 180 | ||
197 | #undef VSYSCALL_ADDR | 181 | #undef VSYSCALL_ADDR |
198 | #undef VSYSCALL_PHYS_ADDR | ||
199 | #undef VSYSCALL_VIRT_ADDR | ||
200 | #undef VLOAD_OFFSET | 182 | #undef VLOAD_OFFSET |
201 | #undef VLOAD | 183 | #undef VLOAD |
202 | #undef VVIRT_OFFSET | 184 | #undef VVIRT_OFFSET |
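The vsyscall address bookkeeping above is easier to check with concrete numbers. __vsyscall_0 is now defined directly inside the image (page-aligned after .data) instead of being computed from LOADADDR()/SIZEOF() of .data, and VSYSCALL_ADDR stays the fixed user-visible address at -10 MB. A hypothetical worked example (all addresses invented for illustration, x86_64 conventions assumed):

    /*
     * Assume, purely for illustration:
     *   LOAD_OFFSET   = 0xffffffff80000000  (image virtual - load address)
     *   __vsyscall_0  = 0xffffffff805a7000  (page-aligned end of .data)
     *   VSYSCALL_ADDR = 0xffffffffff600000  (-10 MB, fixed ABI address)
     *
     * VVIRT(x) = ADDR(x) - (VSYSCALL_ADDR - __vsyscall_0)
     *   maps a section linked at the fixed vsyscall address back to its
     *   position inside the kernel image, e.g.
     *   VVIRT(.vsyscall_0) = 0xffffffff805a7000 = __vsyscall_0.
     *
     * VLOAD(x) = ADDR(x) - (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
     *   shifts the same mapping down to the load address for AT(), e.g.
     *   VLOAD(.vsyscall_0) = 0xffffffff805a7000 - 0xffffffff80000000
     *                      = 0x5a7000.
     */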
@@ -219,36 +201,12 @@ SECTIONS | |||
219 | PERCPU_VADDR(0, :percpu) | 201 | PERCPU_VADDR(0, :percpu) |
220 | #endif | 202 | #endif |
221 | 203 | ||
222 | .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { | 204 | INIT_TEXT_SECTION(PAGE_SIZE) |
223 | _sinittext = .; | ||
224 | INIT_TEXT | ||
225 | _einittext = .; | ||
226 | } | ||
227 | #ifdef CONFIG_X86_64 | 205 | #ifdef CONFIG_X86_64 |
228 | :init | 206 | :init |
229 | #endif | 207 | #endif |
230 | 208 | ||
231 | .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { | 209 | INIT_DATA_SECTION(16) |
232 | INIT_DATA | ||
233 | } | ||
234 | |||
235 | . = ALIGN(16); | ||
236 | .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { | ||
237 | __setup_start = .; | ||
238 | *(.init.setup) | ||
239 | __setup_end = .; | ||
240 | } | ||
241 | .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) { | ||
242 | __initcall_start = .; | ||
243 | INITCALLS | ||
244 | __initcall_end = .; | ||
245 | } | ||
246 | |||
247 | .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) { | ||
248 | __con_initcall_start = .; | ||
249 | *(.con_initcall.init) | ||
250 | __con_initcall_end = .; | ||
251 | } | ||
252 | 210 | ||
253 | .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { | 211 | .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { |
254 | __x86_cpu_dev_start = .; | 212 | __x86_cpu_dev_start = .; |
@@ -256,8 +214,6 @@ SECTIONS | |||
256 | __x86_cpu_dev_end = .; | 214 | __x86_cpu_dev_end = .; |
257 | } | 215 | } |
258 | 216 | ||
259 | SECURITY_INIT | ||
260 | |||
261 | . = ALIGN(8); | 217 | . = ALIGN(8); |
262 | .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { | 218 | .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { |
263 | __parainstructions = .; | 219 | __parainstructions = .; |
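INIT_TEXT_SECTION() and INIT_DATA_SECTION() come from <asm-generic/vmlinux.lds.h> and bundle everything the deleted open-coded blocks provided: the init text with its _sinittext/_einittext markers, INIT_DATA, the .init.setup and initcall/console-initcall tables, and (in the data variant) the security initcalls and the initramfs payload, which is why the stand-alone SECURITY_INIT here and the CONFIG_BLK_DEV_INITRD block further down disappear as well. The text half looks roughly like this (sketch; the in-tree version wraps symbols in VMLINUX_SYMBOL()):

    /* Approximate shape of the generic init-text helper. */
    #define INIT_TEXT_SECTION(inittext_align)			\
    	. = ALIGN(inittext_align);				\
    	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {	\
    		_sinittext = .;					\
    		INIT_TEXT					\
    		_einittext = .;					\
    	}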
@@ -288,15 +244,6 @@ SECTIONS | |||
288 | EXIT_DATA | 244 | EXIT_DATA |
289 | } | 245 | } |
290 | 246 | ||
291 | #ifdef CONFIG_BLK_DEV_INITRD | ||
292 | . = ALIGN(PAGE_SIZE); | ||
293 | .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { | ||
294 | __initramfs_start = .; | ||
295 | *(.init.ramfs) | ||
296 | __initramfs_end = .; | ||
297 | } | ||
298 | #endif | ||
299 | |||
300 | #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) | 247 | #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) |
301 | PERCPU(PAGE_SIZE) | 248 | PERCPU(PAGE_SIZE) |
302 | #endif | 249 | #endif |
@@ -358,6 +305,9 @@ SECTIONS | |||
358 | 305 | ||
359 | 306 | ||
360 | #ifdef CONFIG_X86_32 | 307 | #ifdef CONFIG_X86_32 |
308 | /* | ||
309 | * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility: | ||
310 | */ | ||
361 | . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), | 311 | . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), |
362 | "kernel image bigger than KERNEL_IMAGE_SIZE"); | 312 | "kernel image bigger than KERNEL_IMAGE_SIZE"); |
363 | #else | 313 | #else |