Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--   arch/x86/kernel/acpi/cstate.c             4
-rw-r--r--   arch/x86/kernel/apic/apic.c               3
-rw-r--r--   arch/x86/kernel/cpu/mcheck/mce.c         18
-rw-r--r--   arch/x86/kernel/cpu/mcheck/mce_intel.c   19
-rw-r--r--   arch/x86/kernel/early-quirks.c          211
-rw-r--r--   arch/x86/kernel/irq.c                     2
-rw-r--r--   arch/x86/kernel/ldt.c                    11
-rw-r--r--   arch/x86/kernel/pci-calgary_64.c         31
8 files changed, 253 insertions, 46 deletions
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index e69182fd01cf..4b28159e0421 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -87,7 +87,9 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
         num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
 
         retval = 0;
-        if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) {
+        /* If the HW does not support any sub-states in this C-state */
+        if (num_cstate_subtype == 0) {
+                pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n", cx->address, edx_part);
                 retval = -1;
                 goto out;
         }
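For reference, cx->address holds the MWAIT hint from the ACPI _CST entry and CPUID leaf 5 EDX packs a 4-bit sub-state count per C-state; the new check above only rejects the entry when that count is zero. A minimal sketch of the lookup, assuming the standard hint encoding (bits 7:4 select the C-state, bits 3:0 the sub-state) rather than quoting the kernel helpers:

/* Illustration only, not kernel code. */
static unsigned int mwait_substates_for_hint(unsigned int edx, unsigned int hint)
{
        unsigned int cstate = (hint >> 4) & 0xf;        /* hint bits 7:4 */

        return (edx >> ((cstate + 1) * 4)) & 0xf;       /* 4-bit count per C-state */
}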
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 481ae38f6a44..ad28db7e6bde 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1996,7 +1996,8 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
         };
 
         /* First tickle the hardware, only then report what went on. -- REW */
-        apic_write(APIC_ESR, 0);
+        if (lapic_get_maxlvt() > 3)     /* Due to the Pentium erratum 3AP. */
+                apic_write(APIC_ESR, 0);
         v = apic_read(APIC_ESR);
         ack_APIC_irq();
         atomic_inc(&irq_err_count);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 9b7734b1f975..eeee23ff75ef 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -89,6 +89,9 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
 static DEFINE_PER_CPU(struct mce, mces_seen);
 static int cpu_missing;
 
+/* CMCI storm detection filter */
+static DEFINE_PER_CPU(unsigned long, mce_polled_error);
+
 /*
  * MCA banks polled by the period polling timer for corrected events.
  * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
@@ -595,6 +598,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 {
         struct mce m;
         int i;
+        unsigned long *v;
 
         this_cpu_inc(mce_poll_count);
 
@@ -614,6 +618,8 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
                 if (!(m.status & MCI_STATUS_VAL))
                         continue;
 
+                v = &get_cpu_var(mce_polled_error);
+                set_bit(0, v);
                 /*
                  * Uncorrected or signalled events are handled by the exception
                  * handler when it is enabled, so don't process those here.
@@ -1278,10 +1284,18 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
 static unsigned long (*mce_adjust_timer)(unsigned long interval) =
         mce_adjust_timer_default;
 
+static int cmc_error_seen(void)
+{
+        unsigned long *v = &__get_cpu_var(mce_polled_error);
+
+        return test_and_clear_bit(0, v);
+}
+
 static void mce_timer_fn(unsigned long data)
 {
         struct timer_list *t = &__get_cpu_var(mce_timer);
         unsigned long iv;
+        int notify;
 
         WARN_ON(smp_processor_id() != data);
 
@@ -1296,7 +1310,9 @@ static void mce_timer_fn(unsigned long data)
          * polling interval, otherwise increase the polling interval.
          */
         iv = __this_cpu_read(mce_next_interval);
-        if (mce_notify_irq()) {
+        notify = mce_notify_irq();
+        notify |= cmc_error_seen();
+        if (notify) {
                 iv = max(iv / 2, (unsigned long) HZ/100);
         } else {
                 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
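The net effect of the mce.c changes: the poll timer now speeds up when the polling path itself finds a corrected error, not only when mce_notify_irq() reports one. A standalone sketch of the interval policy visible in mce_timer_fn() above (plain C; HZ and the maximum interval are passed in here purely for illustration):

/* Halve the interval (but not below ~10 ms) while errors keep appearing,
 * double it (but not above the configured check interval) once quiet. */
static unsigned long adjust_poll_interval(unsigned long iv, int saw_error,
                                          unsigned long hz, unsigned long max_iv)
{
        if (saw_error)
                return (iv / 2 > hz / 100) ? iv / 2 : hz / 100;

        return (iv * 2 < max_iv) ? iv * 2 : max_iv;
}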
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index fb6156fee6f7..3bdb95ae8c43 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -9,6 +9,7 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
+#include <linux/cpumask.h>
 #include <asm/apic.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
@@ -137,6 +138,22 @@ unsigned long mce_intel_adjust_timer(unsigned long interval)
         }
 }
 
+static void cmci_storm_disable_banks(void)
+{
+        unsigned long flags, *owned;
+        int bank;
+        u64 val;
+
+        raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+        owned = __get_cpu_var(mce_banks_owned);
+        for_each_set_bit(bank, owned, MAX_NR_BANKS) {
+                rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+                val &= ~MCI_CTL2_CMCI_EN;
+                wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+        }
+        raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+}
+
 static bool cmci_storm_detect(void)
 {
         unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
@@ -158,7 +175,7 @@ static bool cmci_storm_detect(void)
         if (cnt <= CMCI_STORM_THRESHOLD)
                 return false;
 
-        cmci_clear();
+        cmci_storm_disable_banks();
         __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
         r = atomic_add_return(1, &cmci_storm_on_cpus);
         mce_timer_kick(CMCI_POLL_INTERVAL);
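The point of cmci_storm_disable_banks() over the old cmci_clear() is that it only drops the CMCI enable bit while keeping bank ownership, so the owned banks stay covered by the poll timer for the duration of the storm. The storm itself is declared by counting interrupts inside a one-second window; a rough standalone sketch of that shape (the threshold value is illustrative, see CMCI_STORM_THRESHOLD in the file):

#include <stdbool.h>

#define STORM_THRESHOLD 15              /* illustrative stand-in */

struct storm_counter {
        unsigned long window_start;     /* start of the current window */
        unsigned int count;             /* interrupts seen in that window */
};

/* True once more than STORM_THRESHOLD events land within one window of
 * 'hz' ticks, mirroring the structure of cmci_storm_detect(). */
static bool storm_detect(struct storm_counter *s, unsigned long now, unsigned long hz)
{
        if (now - s->window_start >= hz) {      /* window expired: start over */
                s->window_start = now;
                s->count = 0;
        }

        return ++s->count > STORM_THRESHOLD;
}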
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 6d7d5a1260a6..b0cc3809723d 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -225,7 +225,7 @@ static void __init intel_remapping_check(int num, int slot, int func)
  *
  * And yes, so far on current devices the base addr is always under 4G.
  */
-static u32 __init intel_stolen_base(int num, int slot, int func)
+static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
 {
         u32 base;
 
@@ -244,6 +244,114 @@ static u32 __init intel_stolen_base(int num, int slot, int func)
 #define MB(x) (KB (KB (x)))
 #define GB(x) (MB (KB (x)))
 
+static size_t __init i830_tseg_size(void)
+{
+        u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
+
+        if (!(tmp & TSEG_ENABLE))
+                return 0;
+
+        if (tmp & I830_TSEG_SIZE_1M)
+                return MB(1);
+        else
+                return KB(512);
+}
+
+static size_t __init i845_tseg_size(void)
+{
+        u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
+
+        if (!(tmp & TSEG_ENABLE))
+                return 0;
+
+        switch (tmp & I845_TSEG_SIZE_MASK) {
+        case I845_TSEG_SIZE_512K:
+                return KB(512);
+        case I845_TSEG_SIZE_1M:
+                return MB(1);
+        default:
+                WARN_ON(1);
+                return 0;
+        }
+}
+
+static size_t __init i85x_tseg_size(void)
+{
+        u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
+
+        if (!(tmp & TSEG_ENABLE))
+                return 0;
+
+        return MB(1);
+}
+
+static size_t __init i830_mem_size(void)
+{
+        return read_pci_config_byte(0, 0, 0, I830_DRB3) * MB(32);
+}
+
+static size_t __init i85x_mem_size(void)
+{
+        return read_pci_config_byte(0, 0, 1, I85X_DRB3) * MB(32);
+}
+
+/*
+ * On 830/845/85x the stolen memory base isn't available in any
+ * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
+ */
+static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size)
+{
+        return i830_mem_size() - i830_tseg_size() - stolen_size;
+}
+
+static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size)
+{
+        return i830_mem_size() - i845_tseg_size() - stolen_size;
+}
+
+static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size)
+{
+        return i85x_mem_size() - i85x_tseg_size() - stolen_size;
+}
+
+static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size)
+{
+        /*
+         * FIXME is the graphics stolen memory region
+         * always at TOUD? Ie. is it always the last
+         * one to be allocated by the BIOS?
+         */
+        return read_pci_config_16(0, 0, 0, I865_TOUD) << 16;
+}
+
+static size_t __init i830_stolen_size(int num, int slot, int func)
+{
+        size_t stolen_size;
+        u16 gmch_ctrl;
+
+        gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
+
+        switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+        case I830_GMCH_GMS_STOLEN_512:
+                stolen_size = KB(512);
+                break;
+        case I830_GMCH_GMS_STOLEN_1024:
+                stolen_size = MB(1);
+                break;
+        case I830_GMCH_GMS_STOLEN_8192:
+                stolen_size = MB(8);
+                break;
+        case I830_GMCH_GMS_LOCAL:
+                /* local memory isn't part of the normal address space */
+                stolen_size = 0;
+                break;
+        default:
+                return 0;
+        }
+
+        return stolen_size;
+}
+
 static size_t __init gen3_stolen_size(int num, int slot, int func)
 {
         size_t stolen_size;
@@ -310,7 +418,7 @@ static size_t __init gen6_stolen_size(int num, int slot, int func)
         return gmch_ctrl << 25; /* 32 MB units */
 }
 
-static inline size_t gen8_stolen_size(int num, int slot, int func)
+static size_t gen8_stolen_size(int num, int slot, int func)
 {
         u16 gmch_ctrl;
 
@@ -320,31 +428,74 @@ static inline size_t gen8_stolen_size(int num, int slot, int func)
         return gmch_ctrl << 25; /* 32 MB units */
 }
 
-typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+
+struct intel_stolen_funcs {
+        size_t (*size)(int num, int slot, int func);
+        u32 (*base)(int num, int slot, int func, size_t size);
+};
+
+static const struct intel_stolen_funcs i830_stolen_funcs = {
+        .base = i830_stolen_base,
+        .size = i830_stolen_size,
+};
+
+static const struct intel_stolen_funcs i845_stolen_funcs = {
+        .base = i845_stolen_base,
+        .size = i830_stolen_size,
+};
+
+static const struct intel_stolen_funcs i85x_stolen_funcs = {
+        .base = i85x_stolen_base,
+        .size = gen3_stolen_size,
+};
+
+static const struct intel_stolen_funcs i865_stolen_funcs = {
+        .base = i865_stolen_base,
+        .size = gen3_stolen_size,
+};
+
+static const struct intel_stolen_funcs gen3_stolen_funcs = {
+        .base = intel_stolen_base,
+        .size = gen3_stolen_size,
+};
+
+static const struct intel_stolen_funcs gen6_stolen_funcs = {
+        .base = intel_stolen_base,
+        .size = gen6_stolen_size,
+};
+
+static const struct intel_stolen_funcs gen8_stolen_funcs = {
+        .base = intel_stolen_base,
+        .size = gen8_stolen_size,
+};
 
 static struct pci_device_id intel_stolen_ids[] __initdata = {
-        INTEL_I915G_IDS(gen3_stolen_size),
-        INTEL_I915GM_IDS(gen3_stolen_size),
-        INTEL_I945G_IDS(gen3_stolen_size),
-        INTEL_I945GM_IDS(gen3_stolen_size),
-        INTEL_VLV_M_IDS(gen6_stolen_size),
-        INTEL_VLV_D_IDS(gen6_stolen_size),
-        INTEL_PINEVIEW_IDS(gen3_stolen_size),
-        INTEL_I965G_IDS(gen3_stolen_size),
-        INTEL_G33_IDS(gen3_stolen_size),
-        INTEL_I965GM_IDS(gen3_stolen_size),
-        INTEL_GM45_IDS(gen3_stolen_size),
-        INTEL_G45_IDS(gen3_stolen_size),
-        INTEL_IRONLAKE_D_IDS(gen3_stolen_size),
-        INTEL_IRONLAKE_M_IDS(gen3_stolen_size),
-        INTEL_SNB_D_IDS(gen6_stolen_size),
-        INTEL_SNB_M_IDS(gen6_stolen_size),
-        INTEL_IVB_M_IDS(gen6_stolen_size),
-        INTEL_IVB_D_IDS(gen6_stolen_size),
-        INTEL_HSW_D_IDS(gen6_stolen_size),
-        INTEL_HSW_M_IDS(gen6_stolen_size),
-        INTEL_BDW_M_IDS(gen8_stolen_size),
-        INTEL_BDW_D_IDS(gen8_stolen_size)
+        INTEL_I830_IDS(&i830_stolen_funcs),
+        INTEL_I845G_IDS(&i845_stolen_funcs),
+        INTEL_I85X_IDS(&i85x_stolen_funcs),
+        INTEL_I865G_IDS(&i865_stolen_funcs),
+        INTEL_I915G_IDS(&gen3_stolen_funcs),
+        INTEL_I915GM_IDS(&gen3_stolen_funcs),
+        INTEL_I945G_IDS(&gen3_stolen_funcs),
+        INTEL_I945GM_IDS(&gen3_stolen_funcs),
+        INTEL_VLV_M_IDS(&gen6_stolen_funcs),
+        INTEL_VLV_D_IDS(&gen6_stolen_funcs),
+        INTEL_PINEVIEW_IDS(&gen3_stolen_funcs),
+        INTEL_I965G_IDS(&gen3_stolen_funcs),
+        INTEL_G33_IDS(&gen3_stolen_funcs),
+        INTEL_I965GM_IDS(&gen3_stolen_funcs),
+        INTEL_GM45_IDS(&gen3_stolen_funcs),
+        INTEL_G45_IDS(&gen3_stolen_funcs),
+        INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs),
+        INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs),
+        INTEL_SNB_D_IDS(&gen6_stolen_funcs),
+        INTEL_SNB_M_IDS(&gen6_stolen_funcs),
+        INTEL_IVB_M_IDS(&gen6_stolen_funcs),
+        INTEL_IVB_D_IDS(&gen6_stolen_funcs),
+        INTEL_HSW_D_IDS(&gen6_stolen_funcs),
+        INTEL_HSW_M_IDS(&gen6_stolen_funcs),
+        INTEL_BDW_M_IDS(&gen8_stolen_funcs),
+        INTEL_BDW_D_IDS(&gen8_stolen_funcs)
 };
 
 static void __init intel_graphics_stolen(int num, int slot, int func)
@@ -361,11 +512,13 @@ static void __init intel_graphics_stolen(int num, int slot, int func)
 
         for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
                 if (intel_stolen_ids[i].device == device) {
-                        stolen_size_fn stolen_size =
-                                (stolen_size_fn)intel_stolen_ids[i].driver_data;
-                        size = stolen_size(num, slot, func);
-                        start = intel_stolen_base(num, slot, func);
+                        const struct intel_stolen_funcs *stolen_funcs =
+                                (const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data;
+                        size = stolen_funcs->size(num, slot, func);
+                        start = stolen_funcs->base(num, slot, func, size);
                         if (size && start) {
+                                printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n",
+                                       start, start + (u32)size - 1);
                                 /* Mark this space as reserved */
                                 e820_add_region(start, size, E820_RESERVED);
                                 sanitize_e820_map(e820.map,
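The arithmetic behind the new gen2 base helpers is the one spelled out in their comment: the stolen region sits directly below the TSEG, which sits directly below the top of memory, so base = TOM - TSEG size - stolen size. A worked example with made-up register values (not taken from real hardware):

/* Hypothetical numbers: DRB3 reporting 256 MB of DRAM, a 1 MB TSEG and
 * 8 MB of stolen graphics memory put the stolen base at
 * 256 MB - 1 MB - 8 MB = 0x0f700000. */
static unsigned int stolen_base_example(void)
{
        unsigned int tom         = 256u << 20; /* top of memory (DRB3 * 32 MB)   */
        unsigned int tseg_size   = 1u << 20;   /* TSEG carved out just below TOM */
        unsigned int stolen_size = 8u << 20;   /* graphics stolen memory         */

        return tom - tseg_size - stolen_size;  /* 0x0f700000 */
}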
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 42805fac0092..283a76a9cc40 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -125,7 +125,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
                 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
         seq_printf(p, " Machine check polls\n");
 #endif
-#if defined(CONFIG_HYPERV) || defined(CONFIG_XEN)
+#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
         seq_printf(p, "%*s: ", prec, "THR");
         for_each_online_cpu(j)
                 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index ebc987398923..af1d14a9ebda 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -229,6 +229,17 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
                 }
         }
 
+        /*
+         * On x86-64 we do not support 16-bit segments due to
+         * IRET leaking the high bits of the kernel stack address.
+         */
+#ifdef CONFIG_X86_64
+        if (!ldt_info.seg_32bit) {
+                error = -EINVAL;
+                goto out_unlock;
+        }
+#endif
+
         fill_ldt(&ldt, &ldt_info);
         if (oldmode)
                 ldt.avl = 0;
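From userspace the new check shows up as modify_ldt() refusing 16-bit descriptors on 64-bit kernels. A minimal sketch of a call that now fails with EINVAL there (the descriptor values are illustrative):

#include <asm/ldt.h>            /* struct user_desc */
#include <sys/syscall.h>
#include <unistd.h>

/* Tries to install a 16-bit code segment in the LDT; with the hunk above
 * applied, an x86-64 kernel rejects this with EINVAL. */
static int install_16bit_code_segment(void)
{
        struct user_desc desc = {
                .entry_number   = 0,
                .base_addr      = 0,
                .limit          = 0xffff,
                .seg_32bit      = 0,    /* 16-bit segment: refused on x86-64 */
                .contents       = 2,    /* code segment */
                .limit_in_pages = 0,
        };

        return syscall(SYS_modify_ldt, 1, &desc, sizeof(desc));
}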
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 299d49302e7d..0497f719977d 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -1207,23 +1207,31 @@ error:
         return ret;
 }
 
-static inline int __init determine_tce_table_size(u64 ram)
+static inline int __init determine_tce_table_size(void)
 {
         int ret;
 
         if (specified_table_size != TCE_TABLE_SIZE_UNSPECIFIED)
                 return specified_table_size;
 
-        /*
-         * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
-         * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
-         * larger table size has twice as many entries, so shift the
-         * max ram address by 13 to divide by 8K and then look at the
-         * order of the result to choose between 0-7.
-         */
-        ret = get_order(ram >> 13);
-        if (ret > TCE_TABLE_SIZE_8M)
+        if (is_kdump_kernel() && saved_max_pfn) {
+                /*
+                 * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
+                 * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
+                 * larger table size has twice as many entries, so shift the
+                 * max ram address by 13 to divide by 8K and then look at the
+                 * order of the result to choose between 0-7.
+                 */
+                ret = get_order((saved_max_pfn * PAGE_SIZE) >> 13);
+                if (ret > TCE_TABLE_SIZE_8M)
+                        ret = TCE_TABLE_SIZE_8M;
+        } else {
+                /*
+                 * Use 8M by default (suggested by Muli) if it's not
+                 * kdump kernel and saved_max_pfn isn't set.
+                 */
                 ret = TCE_TABLE_SIZE_8M;
+        }
 
         return ret;
 }
@@ -1418,8 +1426,7 @@ int __init detect_calgary(void)
                 return -ENOMEM;
         }
 
-        specified_table_size = determine_tce_table_size((is_kdump_kernel() ?
-                                        saved_max_pfn : max_pfn) * PAGE_SIZE);
+        specified_table_size = determine_tce_table_size();
 
         for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
                 struct calgary_bus_info *info = &bus_info[bus];
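The sizing comment kept in the kdump branch encodes a simple heuristic: table size 0 holds 8K entries and every step doubles that, so dividing the top of RAM by 8K and taking the page order picks a size between 0 and 7. A standalone model of it, assuming 4 KB pages for the get_order() stand-in:

/* E.g. 2 GB of RAM: (2 GB >> 13) = 256 KB, order 6; 4 GB gives order 7,
 * and anything larger is clamped to TCE_TABLE_SIZE_8M (7). */
static int tce_table_size_for(unsigned long long ram_bytes)
{
        unsigned long long target = ram_bytes >> 13;    /* divide by 8K */
        int order = 0;

        while ((4096ULL << order) < target)             /* get_order() stand-in */
                order++;

        return order > 7 ? 7 : order;                   /* clamp to TCE_TABLE_SIZE_8M */
}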