author    Yinghai Lu <yhlu.kernel.send@gmail.com>  2008-04-29 06:52:33 -0400
committer Thomas Gleixner <tglx@linutronix.de>     2008-05-25 04:55:09 -0400
commit    95ffa2438d0e9c48779f0106b1c0eb36165e759c (patch)
tree      63829c04435fd99fb655a0d1b94fbd2094549ceb /arch/x86/kernel/cpu
parent    0dbfafa5fcd4dd189e2adc7b6ed9e0405e846d79 (diff)
x86: mtrr cleanup for converting continuous to discrete layout, v8
Some BIOSes like to use a continuous MTRR layout, and the X driver can not add WB entries for graphics cards when 4G or more RAM is installed. This patch changes the MTRR layout to discrete.

mtrr_chunk_size= can be used to request smaller continuous blocks to hold holes; the default is 256M, and it can be set according to the size of the graphics card memory. mtrr_gran_size= can be used to set the smallest MTRR block size, to avoid running out of MTRRs.

v2: fix -1 for UC checking
v3: default to disabled; enable_mtrr_cleanup is needed to enable this feature; skip the var state change warning; remove next_basek in range_to_mtrr()
v4: correct warning mask
v5: CONFIG_MTRR_SANITIZER
v6: fix 1g, 2g, 512 alignment with extra hole
v7: gran_sizek to prevent running out of MTRRs
v8: fix hole_basek calculation caused by removing next_basek; use gran_sizek when basek is 0

"[PATCH] x86: fix trimming e820 with MTRR holes" needs to be applied right after this one.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
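[Editor's note: for illustration only -- the sizes below are hypothetical example values, not recommendations from this patch -- a boot command line exercising the new options could look like:

    enable_mtrr_cleanup mtrr_chunk_size=128M mtrr_gran_size=64M

Both size parameters go through memparse(), so the usual K/M/G suffixes are accepted.]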
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c |  32
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c    | 467
-rw-r--r--  arch/x86/kernel/cpu/mtrr/mtrr.h    |   3
3 files changed, 488 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 0625d4158e58..5aae648600bf 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -37,7 +37,7 @@ static struct fixed_range_block fixed_range_blocks[] = {
 static unsigned long smp_changes_mask;
 static struct mtrr_state mtrr_state = {};
 static int mtrr_state_set;
-static u64 tom2;
+u64 mtrr_tom2;
 
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "mtrr."
@@ -139,8 +139,8 @@ u8 mtrr_type_lookup(u64 start, u64 end)
 		}
 	}
 
-	if (tom2) {
-		if (start >= (1ULL<<32) && (end < tom2))
+	if (mtrr_tom2) {
+		if (start >= (1ULL<<32) && (end < mtrr_tom2))
 			return MTRR_TYPE_WRBACK;
 	}
 
@@ -158,6 +158,20 @@ get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
 	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
 }
 
+/* fill the MSR pair relating to a var range */
+void fill_mtrr_var_range(unsigned int index,
+		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
+{
+	struct mtrr_var_range *vr;
+
+	vr = mtrr_state.var_ranges;
+
+	vr[index].base_lo = base_lo;
+	vr[index].base_hi = base_hi;
+	vr[index].mask_lo = mask_lo;
+	vr[index].mask_hi = mask_hi;
+}
+
 static void
 get_fixed_ranges(mtrr_type * frs)
 {
@@ -216,10 +230,10 @@ void __init get_mtrr_state(void)
 		unsigned low, high;
 		/* TOP_MEM2 */
 		rdmsr(MSR_K8_TOP_MEM2, low, high);
-		tom2 = high;
-		tom2 <<= 32;
-		tom2 |= low;
-		tom2 &= 0xffffff8000000ULL;
+		mtrr_tom2 = high;
+		mtrr_tom2 <<= 32;
+		mtrr_tom2 |= low;
+		mtrr_tom2 &= 0xffffff8000000ULL;
 	}
 	if (mtrr_show) {
 		int high_width;
@@ -251,9 +265,9 @@ void __init get_mtrr_state(void)
 			else
 				printk(KERN_INFO "MTRR %u disabled\n", i);
 		}
-		if (tom2) {
+		if (mtrr_tom2) {
 			printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
-				tom2, tom2>>20);
+				mtrr_tom2, mtrr_tom2>>20);
 		}
 	}
 	mtrr_state_set = 1;
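[Editor's note: the new fill_mtrr_var_range() above only stores an already-encoded MSR pair; the encoding itself is done by set_var_mtrr() in main.c below. A minimal standalone C sketch of that encoding follows, assuming example values (36-bit physical address width, a 2 GiB write-back range at base 0) that are not taken from the patch:

    #include <stdio.h>

    /* Userspace sketch, not kernel code: reproduce the PHYSBASE/PHYSMASK
     * encoding that set_var_mtrr() hands to fill_mtrr_var_range(). */
    int main(void)
    {
        unsigned long basek = 0;                  /* base, in KiB */
        unsigned long sizek = 2UL * 1024 * 1024;  /* 2 GiB, in KiB */
        unsigned address_bits = 36;               /* assumed address width */
        unsigned char type = 6;                   /* MTRR_TYPE_WRBACK */
        unsigned address_mask_high = (1u << (address_bits - 32u)) - 1u;
        unsigned base_hi, base_lo, mask_hi, mask_lo;

        base_hi = basek >> 22;                    /* base bits above bit 31 */
        base_lo = (basek << 10) | type;           /* low 32 bits plus type */

        if (sizek < 4UL * 1024 * 1024) {          /* size below 4 GiB */
            mask_hi = address_mask_high;
            mask_lo = ~((sizek << 10) - 1);
        } else {
            mask_hi = address_mask_high & ~((sizek >> 22) - 1);
            mask_lo = 0;
        }
        mask_lo |= 0x800;                         /* the valid bit */

        /* prints PHYSBASE=0000000000000006 PHYSMASK=0000000f80000800 */
        printf("PHYSBASE=%08x%08x PHYSMASK=%08x%08x\n",
               base_hi, base_lo, mask_hi, mask_lo);
        return 0;
    }]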
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 6a1e278d9323..8a6f68b45e3e 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -37,6 +37,7 @@
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/mutex.h>
+#include <linux/sort.h>
 
 #include <asm/e820.h>
 #include <asm/mtrr.h>
@@ -609,6 +610,452 @@ static struct sysdev_driver mtrr_sysdev_driver = {
 	.resume		= mtrr_restore,
 };
 
+#ifdef CONFIG_MTRR_SANITIZER
+
+#ifdef CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT
+static int enable_mtrr_cleanup __initdata = 1;
+#else
+static int enable_mtrr_cleanup __initdata;
+#endif
+
+#else
+
+static int enable_mtrr_cleanup __initdata = -1;
+
+#endif
+
+static int __init disable_mtrr_cleanup_setup(char *str)
+{
+	if (enable_mtrr_cleanup != -1)
+		enable_mtrr_cleanup = 0;
+	return 0;
+}
+early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup);
+
+static int __init enable_mtrr_cleanup_setup(char *str)
+{
+	if (enable_mtrr_cleanup != -1)
+		enable_mtrr_cleanup = 1;
+	return 0;
+}
+early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);
+
+#define RANGE_NUM 256
+
+struct res_range {
+	unsigned long start;
+	unsigned long end;
+};
+
+static int __init add_range(struct res_range *range, int nr_range,
+			    unsigned long start, unsigned long end, int merge)
+{
+	int i;
+
+	if (!merge)
+		goto addit;
+
+	/* try to merge it with an existing range */
+	for (i = 0; i < nr_range; i++) {
+		unsigned long final_start, final_end;
+		unsigned long common_start, common_end;
+
+		if (!range[i].end)
+			continue;
+
+		common_start = max(range[i].start, start);
+		common_end = min(range[i].end, end);
+		if (common_start > common_end + 1)
+			continue;
+
+		final_start = min(range[i].start, start);
+		final_end = max(range[i].end, end);
+
+		range[i].start = final_start;
+		range[i].end = final_end;
+		return nr_range;
+	}
+
+addit:
+	/* need to add it */
+	if (nr_range >= RANGE_NUM)
+		return nr_range;
+
+	range[nr_range].start = start;
+	range[nr_range].end = end;
+
+	nr_range++;
+
+	return nr_range;
+}
+
+static void __init subtract_range(struct res_range *range,
+				  unsigned long start, unsigned long end)
+{
+	int i;
+	int j;
+
+	for (j = 0; j < RANGE_NUM; j++) {
+		if (!range[j].end)
+			continue;
+
+		if (start <= range[j].start && end >= range[j].end) {
+			range[j].start = 0;
+			range[j].end = 0;
+			continue;
+		}
+
+		if (start <= range[j].start && end < range[j].end &&
+		    range[j].start < end + 1) {
+			range[j].start = end + 1;
+			continue;
+		}
+
+		if (start > range[j].start && end >= range[j].end &&
+		    range[j].end > start - 1) {
+			range[j].end = start - 1;
+			continue;
+		}
+
+		if (start > range[j].start && end < range[j].end) {
+			/* find a spare slot for the new upper part */
+			for (i = 0; i < RANGE_NUM; i++) {
+				if (range[i].end == 0)
+					break;
+			}
+			if (i < RANGE_NUM) {
+				range[i].end = range[j].end;
+				range[i].start = end + 1;
+			} else {
+				printk(KERN_ERR "run out of slot in ranges\n");
+			}
+			range[j].end = start - 1;
+			continue;
+		}
+	}
+}
+
+static int __init cmp_range(const void *x1, const void *x2)
+{
+	const struct res_range *r1 = x1;
+	const struct res_range *r2 = x2;
+	long start1, start2;
+
+	start1 = r1->start;
+	start2 = r2->start;
+
+	return start1 - start2;
+}
+
+struct var_mtrr_state {
+	unsigned long range_startk, range_sizek;
+	unsigned long chunk_sizek;
+	unsigned long gran_sizek;
+	unsigned int reg;
+	unsigned address_bits;
+};
+
+static void __init set_var_mtrr(unsigned int reg, unsigned long basek,
+				unsigned long sizek, unsigned char type,
+				unsigned address_bits)
+{
+	u32 base_lo, base_hi, mask_lo, mask_hi;
+	unsigned address_mask_high;
+
+	if (!sizek) {
+		fill_mtrr_var_range(reg, 0, 0, 0, 0);
+		return;
+	}
+
+	address_mask_high = ((1u << (address_bits - 32u)) - 1u);
+
+	base_hi = basek >> 22;
+	base_lo = basek << 10;
+
+	if (sizek < 4*1024*1024) {
+		mask_hi = address_mask_high;
+		mask_lo = ~((sizek << 10) - 1);
+	} else {
+		mask_hi = address_mask_high & (~((sizek >> 22) - 1));
+		mask_lo = 0;
+	}
+
+	base_lo |= type;
+	mask_lo |= 0x800;
+	fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi);
+}
+
+static unsigned int __init range_to_mtrr(unsigned int reg,
+					 unsigned long range_startk,
+					 unsigned long range_sizek,
+					 unsigned char type,
+					 unsigned address_bits)
+{
+	if (!range_sizek || (reg >= num_var_ranges))
+		return reg;
+
+	while (range_sizek) {
+		unsigned long max_align, align;
+		unsigned long sizek;
+		/* compute the maximum size we can make a range */
+		if (range_startk)
+			max_align = ffs(range_startk) - 1;
+		else
+			max_align = 32;
+		align = fls(range_sizek) - 1;
+		if (align > max_align)
+			align = max_align;
+
+		sizek = 1 << align;
+		printk(KERN_INFO "Setting variable MTRR %d, base: %ldMB, range: %ldMB, type %s\n",
+			reg, range_startk >> 10, sizek >> 10,
+			(type == MTRR_TYPE_UNCACHABLE) ? "UC" :
+			((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"));
+		set_var_mtrr(reg++, range_startk, sizek, type, address_bits);
+		range_startk += sizek;
+		range_sizek -= sizek;
+		if (reg >= num_var_ranges)
+			break;
+	}
+	return reg;
+}
+
+static void __init
+range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek)
+{
+	unsigned long hole_basek, hole_sizek;
+	unsigned long range0_basek, range0_sizek;
+	unsigned long range_basek, range_sizek;
+	unsigned long chunk_sizek;
+	unsigned long gran_sizek;
+
+	hole_basek = 0;
+	hole_sizek = 0;
+	chunk_sizek = state->chunk_sizek;
+	gran_sizek = state->gran_sizek;
+
+	/* align with gran size, to prevent small blocks using up MTRRs */
+	range_basek = ALIGN(state->range_startk, gran_sizek);
+	if ((range_basek > basek) && basek)
+		return;
+	range_sizek = ALIGN(state->range_sizek -
+			    (range_basek - state->range_startk), gran_sizek);
+
+	while (range_basek + range_sizek >
+	       (state->range_startk + state->range_sizek)) {
+		range_sizek -= gran_sizek;
+		if (!range_sizek)
+			return;
+	}
+	state->range_startk = range_basek;
+	state->range_sizek = range_sizek;
+
+	/* try to append some small hole */
+	range0_basek = state->range_startk;
+	range0_sizek = ALIGN(state->range_sizek, chunk_sizek);
+	if ((range0_sizek == state->range_sizek) ||
+	    ((range0_basek + range0_sizek - chunk_sizek > basek) && basek)) {
+		printk(KERN_INFO "rangeX: %016lx - %016lx\n",
+			range0_basek<<10,
+			(range0_basek + state->range_sizek)<<10);
+		state->reg = range_to_mtrr(state->reg, range0_basek,
+				state->range_sizek, MTRR_TYPE_WRBACK,
+				state->address_bits);
+		return;
+	}
+
+	range0_sizek -= chunk_sizek;
+	printk(KERN_INFO "range0: %016lx - %016lx\n",
+		range0_basek<<10, (range0_basek + range0_sizek)<<10);
+	state->reg = range_to_mtrr(state->reg, range0_basek,
+			range0_sizek, MTRR_TYPE_WRBACK, state->address_bits);
+
+	range_basek = range0_basek + range0_sizek;
+	range_sizek = chunk_sizek;
+	if (range_sizek - (state->range_sizek - range0_sizek) <
+	    (chunk_sizek >> 1)) {
+		hole_sizek = range_sizek - (state->range_sizek - range0_sizek);
+		hole_basek = range_basek + range_sizek - hole_sizek;
+	} else
+		range_sizek = state->range_sizek - range0_sizek;
+
+	printk(KERN_INFO "range: %016lx - %016lx\n",
+		range_basek<<10, (range_basek + range_sizek)<<10);
+	state->reg = range_to_mtrr(state->reg, range_basek,
+			range_sizek, MTRR_TYPE_WRBACK, state->address_bits);
+	if (hole_sizek) {
+		printk(KERN_INFO "hole: %016lx - %016lx\n",
+			hole_basek<<10, (hole_basek + hole_sizek)<<10);
+		state->reg = range_to_mtrr(state->reg, hole_basek,
+				hole_sizek, MTRR_TYPE_UNCACHABLE,
+				state->address_bits);
+	}
+}
+
+static void __init
+set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
+		   unsigned long size_pfn)
+{
+	unsigned long basek, sizek;
+
+	if (state->reg >= num_var_ranges)
+		return;
+
+	basek = base_pfn << (PAGE_SHIFT - 10);
+	sizek = size_pfn << (PAGE_SHIFT - 10);
+
+	/* see if we can merge with the last range */
+	if ((basek <= 1024) ||
+	    (state->range_startk + state->range_sizek == basek)) {
+		unsigned long endk = basek + sizek;
+		state->range_sizek = endk - state->range_startk;
+		return;
+	}
+	/* write the range MTRRs */
+	if (state->range_sizek != 0) {
+		range_to_mtrr_with_hole(state, basek);
+
+		state->range_startk = 0;
+		state->range_sizek = 0;
+	}
+	/* allocate an MSR */
+	state->range_startk = basek;
+	state->range_sizek = sizek;
+}
+
+/* minimum size of an mtrr block that can take a hole */
+static u64 mtrr_chunk_size __initdata = (256ULL<<20);
+
+static int __init parse_mtrr_chunk_size_opt(char *p)
+{
+	if (!p)
+		return -EINVAL;
+	mtrr_chunk_size = memparse(p, &p);
+	return 0;
+}
+early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt);
+
+/* granularity of an mtrr block */
+static u64 mtrr_gran_size __initdata = (64ULL<<20);
+
+static int __init parse_mtrr_gran_size_opt(char *p)
+{
+	if (!p)
+		return -EINVAL;
+	mtrr_gran_size = memparse(p, &p);
+	return 0;
+}
+early_param("mtrr_gran_size", parse_mtrr_gran_size_opt);
+
+static void __init
+x86_setup_var_mtrrs(struct res_range *range, int nr_range,
+		    unsigned address_bits)
+{
+	struct var_mtrr_state var_state;
+	int i;
+
+	var_state.range_startk = 0;
+	var_state.range_sizek = 0;
+	var_state.reg = 0;
+	var_state.address_bits = address_bits;
+	var_state.chunk_sizek = mtrr_chunk_size >> 10;
+	var_state.gran_sizek = mtrr_gran_size >> 10;
+
+	/* write the ranges */
+	for (i = 0; i < nr_range; i++)
+		set_var_mtrr_range(&var_state, range[i].start,
+				   range[i].end - range[i].start + 1);
+
+	/* write the last range */
+	range_to_mtrr_with_hole(&var_state, 0);
+	printk(KERN_INFO "DONE variable MTRRs\n");
+	/* clear out the extra MTRRs */
+	while (var_state.reg < num_var_ranges)
+		set_var_mtrr(var_state.reg++, 0, 0, 0, var_state.address_bits);
+}
+
+static int __init
+x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
+		       unsigned long extra_remove_base,
+		       unsigned long extra_remove_size)
+{
+	unsigned long i, base, size;
+	mtrr_type type;
+
+	for (i = 0; i < num_var_ranges; i++) {
+		mtrr_if->get(i, &base, &size, &type);
+		if (type != MTRR_TYPE_WRBACK)
+			continue;
+		nr_range = add_range(range, nr_range, base, base + size - 1, 1);
+	}
+	printk(KERN_INFO "After WB checking\n");
+	for (i = 0; i < nr_range; i++)
+		printk(KERN_INFO "MTRR MAP PFN: %016lx - %016lx\n",
+			range[i].start, range[i].end + 1);
+
+	/* take out UC ranges */
+	for (i = 0; i < num_var_ranges; i++) {
+		mtrr_if->get(i, &base, &size, &type);
+		if (type != MTRR_TYPE_UNCACHABLE)
+			continue;
+		if (!size)
+			continue;
+		subtract_range(range, base, base + size - 1);
+	}
+	if (extra_remove_size)
+		subtract_range(range, extra_remove_base,
+			       extra_remove_base + extra_remove_size - 1);
+
+	/* get the new number of ranges */
+	nr_range = 0;
+	for (i = 0; i < RANGE_NUM; i++) {
+		if (!range[i].end)
+			continue;
+		nr_range++;
+	}
+	printk(KERN_INFO "After UC checking\n");
+	for (i = 0; i < nr_range; i++)
+		printk(KERN_INFO "MTRR MAP PFN: %016lx - %016lx\n",
+			range[i].start, range[i].end + 1);
+
+	/* sort the ranges */
+	sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
+	printk(KERN_INFO "After sorting\n");
+	for (i = 0; i < nr_range; i++)
+		printk(KERN_INFO "MTRR MAP PFN: %016lx - %016lx\n",
+			range[i].start, range[i].end + 1);
+
+	return nr_range;
+}
+
+static int __init mtrr_cleanup(unsigned address_bits)
+{
+	unsigned long i, base, size, def, dummy;
+	mtrr_type type;
+	struct res_range range[RANGE_NUM];
+	int nr_range;
+	unsigned long extra_remove_base, extra_remove_size;
+
+	/* extra one for all zero */
+	int num[MTRR_NUM_TYPES + 1];
+
+	if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
+		return 0;
+	rdmsr(MTRRdefType_MSR, def, dummy);
+	def &= 0xff;
+	if (def != MTRR_TYPE_UNCACHABLE)
+		return 0;
+
+	/* check the number of entries */
+	memset(num, 0, sizeof(num));
+	for (i = 0; i < num_var_ranges; i++) {
+		mtrr_if->get(i, &base, &size, &type);
+		if (type >= MTRR_NUM_TYPES)
+			continue;
+		if (!size)
+			type = MTRR_NUM_TYPES;
+		num[type]++;
+	}
+
+	/* check if we got UC entries */
+	if (!num[MTRR_TYPE_UNCACHABLE])
+		return 0;
+
+	/* check if we only had WB and UC */
+	if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
+	    num_var_ranges - num[MTRR_NUM_TYPES])
+		return 0;
+
+	memset(range, 0, sizeof(range));
+	extra_remove_size = 0;
+	extra_remove_base = 1 << (32 - PAGE_SHIFT);
+	if (mtrr_tom2)
+		extra_remove_size =
+			(mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
+	nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
+					  extra_remove_size);
+
+	/* convert ranges to var ranges state */
+	x86_setup_var_mtrrs(range, nr_range, address_bits);
+
+	return 1;
+}
+
 static int disable_mtrr_trim;
 
 static int __init disable_mtrr_trim_setup(char *str)
@@ -729,18 +1176,21 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
  */
 void __init mtrr_bp_init(void)
 {
+	u32 phys_addr;
 	init_ifs();
 
+	phys_addr = 32;
+
 	if (cpu_has_mtrr) {
 		mtrr_if = &generic_mtrr_ops;
 		size_or_mask = 0xff000000;	/* 36 bits */
 		size_and_mask = 0x00f00000;
+		phys_addr = 36;
 
 		/* This is an AMD specific MSR, but we assume(hope?) that
 		   Intel will implement it to when they extend the address
 		   bus of the Xeon. */
 		if (cpuid_eax(0x80000000) >= 0x80000008) {
-			u32 phys_addr;
 			phys_addr = cpuid_eax(0x80000008) & 0xff;
 			/* CPUID workaround for Intel 0F33/0F34 CPU */
 			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
@@ -758,6 +1208,7 @@ void __init mtrr_bp_init(void)
 			   don't support PAE */
 			size_or_mask = 0xfff00000;	/* 32 bits */
 			size_and_mask = 0;
+			phys_addr = 32;
 		}
 	} else {
 		switch (boot_cpu_data.x86_vendor) {
@@ -791,8 +1242,13 @@ void __init mtrr_bp_init(void)
 	if (mtrr_if) {
 		set_num_var_ranges();
 		init_table();
-		if (use_intel())
+		if (use_intel()) {
 			get_mtrr_state();
+
+			if (mtrr_cleanup(phys_addr))
+				mtrr_if->set_all();
+
+		}
 	}
 }
 
@@ -829,9 +1285,10 @@ static int __init mtrr_init_finialize(void)
 {
 	if (!mtrr_if)
 		return 0;
-	if (use_intel())
-		mtrr_state_warn();
-	else {
+	if (use_intel()) {
+		if (enable_mtrr_cleanup < 1)
+			mtrr_state_warn();
+	} else {
 		/* The CPUs haven't MTRR and seem to not support SMP. They have
 		 * specific drivers, we use a tricky method to support
 		 * suspend/resume for them.
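[Editor's note: to make the splitting behaviour above concrete, range_to_mtrr() carves a range into naturally aligned power-of-two blocks, so a 1792 MiB write-back range at base 0 consumes three variable MTRRs (1024M + 512M + 256M). A standalone sketch of that loop, with GCC/Clang builtins standing in for the kernel's ffs()/fls() and a 64-bit long assumed:

    #include <stdio.h>

    /* Userspace sketch of the block-splitting loop in range_to_mtrr(). */
    int main(void)
    {
        unsigned long startk = 0, sizek = 1792UL * 1024;  /* KiB */
        int reg = 0;

        while (sizek) {
            /* largest alignment the base allows (ffs() - 1 in the kernel) */
            unsigned long max_align = startk ? __builtin_ctzl(startk) : 32;
            /* largest power of two that fits (fls() - 1 in the kernel) */
            unsigned long align = 63 - __builtin_clzl(sizek);
            unsigned long blockk;

            if (align > max_align)
                align = max_align;
            blockk = 1UL << align;
            /* prints 0M/1024M, then 1024M/512M, then 1536M/256M */
            printf("MTRR %d: base %luM size %luM\n", reg++,
                   startk >> 10, blockk >> 10);
            startk += blockk;
            sizek -= blockk;
        }
        return 0;
    }]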
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 2cc77eb6fea3..2dc4ec656b23 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -81,6 +81,8 @@ void set_mtrr_done(struct set_mtrr_context *ctxt);
 void set_mtrr_cache_disable(struct set_mtrr_context *ctxt);
 void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
 
+void fill_mtrr_var_range(unsigned int index,
+		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
 void get_mtrr_state(void);
 
 extern void set_mtrr_ops(struct mtrr_ops * ops);
@@ -92,6 +94,7 @@ extern struct mtrr_ops * mtrr_if;
 #define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)
 
 extern unsigned int num_var_ranges;
+extern u64 mtrr_tom2;
 
 void mtrr_state_warn(void);
 const char *mtrr_attrib_to_str(int x);
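[Editor's note: finally, an illustration of the trickiest range-bookkeeping case handled by subtract_range() in main.c: punching a UC hole into the middle of a WB range must split it into two entries, with the upper part moved to a spare slot. A standalone sketch of just that branch, using hypothetical PFN values; end == 0 marks a free slot, as in the patch:

    #include <stdio.h>
    #include <string.h>

    #define RANGE_NUM 8

    struct res_range { unsigned long start, end; };

    /* Userspace sketch of the "middle hole" branch of subtract_range();
     * ranges are inclusive [start, end]. */
    static void subtract_middle(struct res_range *range,
                                unsigned long start, unsigned long end)
    {
        int i, j;

        for (j = 0; j < RANGE_NUM; j++) {
            if (!range[j].end)
                continue;
            if (start > range[j].start && end < range[j].end) {
                for (i = 0; i < RANGE_NUM && range[i].end; i++)
                    ;                       /* find a spare slot */
                if (i < RANGE_NUM) {        /* upper part goes there */
                    range[i].start = end + 1;
                    range[i].end = range[j].end;
                }
                range[j].end = start - 1;   /* keep the lower part */
            }
        }
    }

    int main(void)
    {
        struct res_range range[RANGE_NUM];
        int i;

        memset(range, 0, sizeof(range));
        range[0].start = 0x00000;
        range[0].end = 0xfffff;             /* 0 - 4 GiB, in PFNs */

        subtract_middle(range, 0xc0000, 0xdffff);  /* 3G - 3.5G UC hole */

        /* prints "range: 00000 - bffff" and "range: e0000 - fffff" */
        for (i = 0; i < RANGE_NUM; i++)
            if (range[i].end)
                printf("range: %05lx - %05lx\n",
                       range[i].start, range[i].end);
        return 0;
    }]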