author     Shaohua Li <shaohua.li@intel.com>       2005-07-07 20:56:38 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2005-07-07 21:23:42 -0400
commit     3b520b238e018ef0e9d11c9115d5e7d9419c4ef9 (patch)
tree       8b9bf3ccf2dd13dbbbcb4a3ff5028a351817b657 /arch
parent     01d299367fe868851a632cfbdb606845f57682aa (diff)
[PATCH] MTRR suspend/resume cleanup
There has been some discussion about fixing the SMP MTRR suspend/resume
breakage, but I didn't find a patch for it, so this is an attempt at one.
The basic idea is to move MTRR initialization into identify_cpu() for all
APs (so it also works for CPU hotplug). For the BP, restore_processor_state()
is responsible for restoring the MTRRs.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Acked-by: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
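Illustrative sketch (not part of the commit): the small C program below models the call
flow this patch ends up with, so the diff is easier to follow. It is a hypothetical
userspace stand-in with stubbed functions and a dummy state struct; the real kernel
routines (mtrr_bp_init, mtrr_ap_init, identify_cpu, __restore_processor_state) do far
more and run in kernel context.

	/*
	 * Simplified model of the MTRR init/resume dispatch introduced by this
	 * patch. Compile with any C compiler and run to see the call order.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct saved_mtrr { int dummy; };     /* stand-in for the saved MTRR layout */
	static struct saved_mtrr saved_state;

	static void mtrr_bp_init(void)        /* boot CPU: probe and record MTRRs */
	{
		saved_state.dummy = 1;
		printf("BP: discovered MTRR layout, state saved\n");
	}

	static void mtrr_ap_init(void)        /* AP or resuming BP: replay saved state */
	{
		/* in the kernel this runs with local interrupts disabled */
		printf("AP/resume: reapplied saved MTRR state (%d)\n", saved_state.dummy);
	}

	static void identify_cpu(bool is_boot_cpu)
	{
		if (is_boot_cpu)
			mtrr_bp_init();
		else
			mtrr_ap_init();
	}

	static void restore_processor_state(void)
	{
		/* resume path: the BP restores its MTRRs the same way an AP would */
		mtrr_ap_init();
	}

	int main(void)
	{
		identify_cpu(true);           /* boot CPU at boot */
		identify_cpu(false);          /* AP brought up later or hot-added */
		restore_processor_state();    /* BP after suspend/resume */
		return 0;
	}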
Diffstat (limited to 'arch')
 arch/i386/kernel/cpu/common.c       |  5
 arch/i386/kernel/cpu/mtrr/generic.c | 22
 arch/i386/kernel/cpu/mtrr/main.c    | 76
 arch/i386/kernel/cpu/mtrr/mtrr.h    |  1
 arch/i386/power/cpu.c               |  1
 arch/x86_64/kernel/setup.c          |  4
 arch/x86_64/kernel/suspend.c        |  1
 7 files changed, 75 insertions(+), 35 deletions(-)
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 2203a9d20212..4553ffd94b1f 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -435,6 +435,11 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
 	if (c == &boot_cpu_data)
 		sysenter_setup();
 	enable_sep_cpu();
+
+	if (c == &boot_cpu_data)
+		mtrr_bp_init();
+	else
+		mtrr_ap_init();
 }
 
 #ifdef CONFIG_X86_HT
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
index 64d91f73a0a4..169ac8e0db68 100644
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -67,13 +67,6 @@ void __init get_mtrr_state(void)
 	mtrr_state.enabled = (lo & 0xc00) >> 10;
 }
 
-/* Free resources associated with a struct mtrr_state */
-void __init finalize_mtrr_state(void)
-{
-	kfree(mtrr_state.var_ranges);
-	mtrr_state.var_ranges = NULL;
-}
-
 /* Some BIOS's are fucked and don't set all MTRRs the same! */
 void __init mtrr_state_warn(void)
 {
@@ -334,6 +327,9 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
 */
 {
 	unsigned long flags;
+	struct mtrr_var_range *vr;
+
+	vr = &mtrr_state.var_ranges[reg];
 
 	local_irq_save(flags);
 	prepare_set();
@@ -342,11 +338,15 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
 		/* The invalid bit is kept in the mask, so we simply clear the
 		   relevant mask register to disable a range. */
 		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
+		memset(vr, 0, sizeof(struct mtrr_var_range));
 	} else {
-		mtrr_wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
-			   (base & size_and_mask) >> (32 - PAGE_SHIFT));
-		mtrr_wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
-			   (-size & size_and_mask) >> (32 - PAGE_SHIFT));
+		vr->base_lo = base << PAGE_SHIFT | type;
+		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
+		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
+		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
+
+		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
+		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
 	}
 
 	post_set();
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index d66b09e0c820..764cac64e211 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -332,6 +332,8 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 
 	error = -EINVAL;
 
+	/* No CPU hotplug when we change MTRR entries */
+	lock_cpu_hotplug();
 	/* Search for existing MTRR */
 	down(&main_lock);
 	for (i = 0; i < num_var_ranges; ++i) {
@@ -372,6 +374,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 	error = i;
  out:
 	up(&main_lock);
+	unlock_cpu_hotplug();
 	return error;
 }
 
@@ -461,6 +464,8 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 		return -ENXIO;
 
 	max = num_var_ranges;
+	/* No CPU hotplug when we change MTRR entries */
+	lock_cpu_hotplug();
 	down(&main_lock);
 	if (reg < 0) {
 		/* Search for existing MTRR */
@@ -501,6 +506,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 	error = reg;
  out:
 	up(&main_lock);
+	unlock_cpu_hotplug();
 	return error;
 }
 /**
@@ -544,21 +550,9 @@ static void __init init_ifs(void)
 	centaur_init_mtrr();
 }
 
-static void __init init_other_cpus(void)
-{
-	if (use_intel())
-		get_mtrr_state();
-
-	/* bring up the other processors */
-	set_mtrr(~0U,0,0,0);
-
-	if (use_intel()) {
-		finalize_mtrr_state();
-		mtrr_state_warn();
-	}
-}
-
-
+/* The suspend/resume methods are only for CPU without MTRR. CPU using generic
+ * MTRR driver doesn't require this
+ */
 struct mtrr_value {
 	mtrr_type	ltype;
 	unsigned long	lbase;
@@ -611,13 +605,13 @@ static struct sysdev_driver mtrr_sysdev_driver = {
 
 
 /**
- * mtrr_init - initialize mtrrs on the boot CPU
+ * mtrr_bp_init - initialize mtrrs on the boot CPU
  *
  * This needs to be called early; before any of the other CPUs are
  * initialized (i.e. before smp_init()).
  *
  */
-static int __init mtrr_init(void)
+void __init mtrr_bp_init(void)
 {
 	init_ifs();
 
@@ -674,12 +668,48 @@ static int __init mtrr_init(void)
 	if (mtrr_if) {
 		set_num_var_ranges();
 		init_table();
-		init_other_cpus();
-
-		return sysdev_driver_register(&cpu_sysdev_class,
-					      &mtrr_sysdev_driver);
+		if (use_intel())
+			get_mtrr_state();
 	}
-	return -ENXIO;
 }
 
-subsys_initcall(mtrr_init);
+void mtrr_ap_init(void)
+{
+	unsigned long flags;
+
+	if (!mtrr_if || !use_intel())
+		return;
+	/*
+	 * Ideally we should hold main_lock here to avoid mtrr entries changed,
+	 * but this routine will be called in cpu boot time, holding the lock
+	 * breaks it. This routine is called in two cases: 1.very earily time
+	 * of software resume, when there absolutely isn't mtrr entry changes;
+	 * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to
+	 * prevent mtrr entry changes
+	 */
+	local_irq_save(flags);
+
+	mtrr_if->set_all();
+
+	local_irq_restore(flags);
+}
+
+static int __init mtrr_init_finialize(void)
+{
+	if (!mtrr_if)
+		return 0;
+	if (use_intel())
+		mtrr_state_warn();
+	else {
+		/* The CPUs haven't MTRR and seemes not support SMP. They have
+		 * specific drivers, we use a tricky method to support
+		 * suspend/resume for them.
+		 * TBD: is there any system with such CPU which supports
+		 * suspend/resume? if no, we should remove the code.
+		 */
+		sysdev_driver_register(&cpu_sysdev_class,
+			&mtrr_sysdev_driver);
+	}
+	return 0;
+}
+subsys_initcall(mtrr_init_finialize);
diff --git a/arch/i386/kernel/cpu/mtrr/mtrr.h b/arch/i386/kernel/cpu/mtrr/mtrr.h
index de1351245599..99c9f2682041 100644
--- a/arch/i386/kernel/cpu/mtrr/mtrr.h
+++ b/arch/i386/kernel/cpu/mtrr/mtrr.h
@@ -91,7 +91,6 @@ extern struct mtrr_ops * mtrr_if;
 
 extern unsigned int num_var_ranges;
 
-void finalize_mtrr_state(void);
 void mtrr_state_warn(void);
 char *mtrr_attrib_to_str(int x);
 void mtrr_wrmsr(unsigned, unsigned, unsigned);
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index 0e6b45b61251..c547c1af6fa1 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -137,6 +137,7 @@ void __restore_processor_state(struct saved_context *ctxt)
 
 	fix_processor_context();
 	do_fpu_end();
+	mtrr_ap_init();
 }
 
 void restore_processor_state(void)
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index b02d921da4f7..5fd03225058a 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -1076,6 +1076,10 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_MCE
 	mcheck_init(c);
 #endif
+	if (c == &boot_cpu_data)
+		mtrr_bp_init();
+	else
+		mtrr_ap_init();
 #ifdef CONFIG_NUMA
 	if (c != &boot_cpu_data)
 		numa_add_cpu(c - cpu_data);
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index 6c0f402e3a88..0612640d91b1 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -119,6 +119,7 @@ void __restore_processor_state(struct saved_context *ctxt)
 	fix_processor_context();
 
 	do_fpu_end();
+	mtrr_ap_init();
 }
 
 void restore_processor_state(void)