Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c |  1 -
-rw-r--r--  arch/x86/kernel/tsc.c                     |  9 +++++----
-rw-r--r--  arch/x86/lguest/boot.c                    | 21 ++++++++++++++-------
-rw-r--r--  arch/x86/mm/kmmio.c                       | 15 ++++++++-------
-rw-r--r--  arch/x86/mm/pageattr.c                    | 11 +++++++++++
5 files changed, 38 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index b585e04cbc9e..3178c3acd97e 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -277,7 +277,6 @@ static struct cpufreq_driver p4clockmod_driver = {
 	.name		= "p4-clockmod",
 	.owner		= THIS_MODULE,
 	.attr		= p4clockmod_attr,
-	.hide_interface	= 1,
 };
 
 
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 599e58168631..9c8b71531ca8 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -17,20 +17,21 @@
 #include <asm/delay.h>
 #include <asm/hypervisor.h>
 
-unsigned int cpu_khz;	/* TSC clocks / usec, not used here */
+unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
-unsigned int tsc_khz;
+
+unsigned int __read_mostly tsc_khz;
 EXPORT_SYMBOL(tsc_khz);
 
 /*
  * TSC can be unstable due to cpufreq or due to unsynced TSCs
  */
-static int tsc_unstable;
+static int __read_mostly tsc_unstable;
 
 /* native_sched_clock() is called before tsc_init(), so
    we must start with the TSC soft disabled to prevent
    erroneous rdtsc usage on !cpu_has_tsc processors */
-static int tsc_disabled = -1;
+static int __read_mostly tsc_disabled = -1;
 
 static int tsc_clocksource_reliable;
 /*
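
Note: __read_mostly places a variable in a dedicated data section so that values written rarely but read constantly do not share cache lines with frequently-dirtied data. A minimal sketch of the mechanism, assuming the section name used by kernels of this era (it has since changed):

/* Sketch only: roughly how arch/x86 defined __read_mostly around
 * 2.6.29.  The linker script groups everything placed in
 * .data.read_mostly together, so these variables sit on cache lines
 * that are almost never written and stay clean across CPUs. */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))

/* Typical use: written once during boot calibration, then read on
 * every sched_clock() call. */
unsigned int __read_mostly example_khz;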
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 92f1c6f3e19d..960a8d9c049c 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -343,6 +343,11 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
 		 * flush_tlb_user() for both user and kernel mappings unless
 		 * the Page Global Enable (PGE) feature bit is set. */
 		*dx |= 0x00002000;
+		/* We also lie, and say we're family id 5.  6 or greater
+		 * leads to a rdmsr in early_init_intel which we can't handle.
+		 * Family ID is returned as bits 8-12 in ax. */
+		*ax &= 0xFFFFF0FF;
+		*ax |= 0x00000500;
 		break;
 	case 0x80000000:
 		/* Futureproof this a little: if they ask how much extended
@@ -589,19 +594,21 @@ static void __init lguest_init_IRQ(void)
 		/* Some systems map "vectors" to interrupts weirdly.  Lguest has
 		 * a straightforward 1 to 1 mapping, so force that here. */
 		__get_cpu_var(vector_irq)[vector] = i;
-		if (vector != SYSCALL_VECTOR) {
-			set_intr_gate(vector,
-				      interrupt[vector-FIRST_EXTERNAL_VECTOR]);
-			set_irq_chip_and_handler_name(i, &lguest_irq_controller,
-						      handle_level_irq,
-						      "level");
-		}
+		if (vector != SYSCALL_VECTOR)
+			set_intr_gate(vector, interrupt[i]);
 	}
 	/* This call is required to set up for 4k stacks, where we have
 	 * separate stacks for hard and soft interrupts. */
 	irq_ctx_init(smp_processor_id());
 }
 
+void lguest_setup_irq(unsigned int irq)
+{
+	irq_to_desc_alloc_cpu(irq, 0);
+	set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
+				      handle_level_irq, "level");
+}
+
 /*
  * Time.
  *
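
Note: CPUID leaf 1 returns the base family ID in bits 8-11 of EAX (the comment's "bits 8-12" counts the field loosely). The mask 0xFFFFF0FF clears exactly that field and 0x00000500 substitutes family 5, so early_init_intel() never reaches the family >= 6 rdmsr path the guest cannot service. A standalone sketch of the same bit manipulation, using a made-up EAX value for a family-6 part:

#include <stdio.h>

int main(void)
{
	unsigned int ax = 0x000006f1;	/* hypothetical leaf-1 EAX, family 6 */

	ax &= 0xFFFFF0FF;		/* clear base family, bits 8-11 */
	ax |= 0x00000500;		/* claim family 5 instead */

	printf("family %u\n", (ax >> 8) & 0xf);	/* prints: family 5 */
	return 0;
}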
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 9f205030d9aa..6a518dd08a36 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -451,23 +451,24 @@ static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
 
 static void remove_kmmio_fault_pages(struct rcu_head *head)
 {
-	struct kmmio_delayed_release *dr = container_of(
-						head,
-						struct kmmio_delayed_release,
-						rcu);
+	struct kmmio_delayed_release *dr =
+		container_of(head, struct kmmio_delayed_release, rcu);
 	struct kmmio_fault_page *p = dr->release_list;
 	struct kmmio_fault_page **prevp = &dr->release_list;
 	unsigned long flags;
+
 	spin_lock_irqsave(&kmmio_lock, flags);
 	while (p) {
-		if (!p->count)
+		if (!p->count) {
 			list_del_rcu(&p->list);
-		else
+			prevp = &p->release_next;
+		} else {
 			*prevp = p->release_next;
-		prevp = &p->release_next;
+		}
 		p = p->release_next;
 	}
 	spin_unlock_irqrestore(&kmmio_lock, flags);
+
 	/* This is the real RCU destroy call. */
 	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
 }
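
Note: this hunk restores the classic pointer-to-pointer removal idiom. The buggy version advanced prevp unconditionally, so after splicing a page out of the release list prevp pointed into the node that had just been unlinked, and the next removal patched a dead link instead of the live list. The rule the fix enforces: advance prevp only past nodes that stay. A self-contained sketch of the idiom, with illustrative names rather than the kernel's:

struct node {
	int keep;		/* plays the role of the !p->count test */
	struct node *next;	/* plays the role of release_next */
};

static void filter_list(struct node **head)
{
	struct node **prevp = head;	/* always the link to patch next */
	struct node *p = *head;

	while (p) {
		if (p->keep)
			prevp = &p->next;	/* node stays: step past it */
		else
			*prevp = p->next;	/* node goes: unlink, prevp stays */
		p = p->next;
	}
}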
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 7be47d1a97e4..7233bd7e357b 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -515,6 +515,17 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	 * primary protection behavior:
 	 */
 	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
+
+	/*
+	 * Intel Atom errata AAH41 workaround.
+	 *
+	 * The real fix should be in hw or in a microcode update, but
+	 * we also probabilistically try to reduce the window of having
+	 * a large TLB mixed with 4K TLBs while instruction fetches are
+	 * going on.
+	 */
+	__flush_tlb_all();
+
 	base = NULL;
 
 out_unlock:
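
Note: __flush_tlb_all() evicts every TLB entry on the local CPU, global entries included, which is what shrinks the window in which large and 4K translations for the same range coexist. A rough sketch of how kernels of this era implemented it (paraphrased from arch/x86 headers; the real code sits behind paravirt hooks and disables interrupts around the CR4 writes):

static inline void flush_tlb_all_sketch(void)
{
	unsigned long cr4 = read_cr4();

	if (cr4 & X86_CR4_PGE) {
		/* Toggling CR4.PGE flushes all entries, global included. */
		write_cr4(cr4 & ~X86_CR4_PGE);
		write_cr4(cr4);
	} else {
		/* Without PGE, reloading CR3 flushes non-global entries. */
		write_cr3(read_cr3());
	}
}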