author		Ingo Molnar <mingo@elte.hu>	2009-09-15 06:18:15 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-15 06:18:15 -0400
commit		dca2d6ac09d9ef59ff46820d4f0c94b08a671202 (patch)
tree		fdec753b842dad09e3a4151954fab3eb5c43500d /arch/x86/kernel
parent		d6a65dffb30d8636b1e5d4c201564ef401a246cf (diff)
parent		18240904960a39e582ced8ba8ececb10b8c22dd3 (diff)
Merge branch 'linus' into tracing/hw-breakpoints
Conflicts:
arch/x86/kernel/process_64.c
Semantic conflict fixed in:
arch/x86/kvm/x86.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
57 files changed, 2367 insertions, 1674 deletions
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 6b8ca3a0285d..67e929b89875 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -833,106 +833,6 @@ static int __init acpi_parse_madt_lapic_entries(void) | |||
833 | extern int es7000_plat; | 833 | extern int es7000_plat; |
834 | #endif | 834 | #endif |
835 | 835 | ||
836 | static struct { | ||
837 | int gsi_base; | ||
838 | int gsi_end; | ||
839 | } mp_ioapic_routing[MAX_IO_APICS]; | ||
840 | |||
841 | int mp_find_ioapic(int gsi) | ||
842 | { | ||
843 | int i = 0; | ||
844 | |||
845 | /* Find the IOAPIC that manages this GSI. */ | ||
846 | for (i = 0; i < nr_ioapics; i++) { | ||
847 | if ((gsi >= mp_ioapic_routing[i].gsi_base) | ||
848 | && (gsi <= mp_ioapic_routing[i].gsi_end)) | ||
849 | return i; | ||
850 | } | ||
851 | |||
852 | printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); | ||
853 | return -1; | ||
854 | } | ||
855 | |||
856 | int mp_find_ioapic_pin(int ioapic, int gsi) | ||
857 | { | ||
858 | if (WARN_ON(ioapic == -1)) | ||
859 | return -1; | ||
860 | if (WARN_ON(gsi > mp_ioapic_routing[ioapic].gsi_end)) | ||
861 | return -1; | ||
862 | |||
863 | return gsi - mp_ioapic_routing[ioapic].gsi_base; | ||
864 | } | ||
865 | |||
866 | static u8 __init uniq_ioapic_id(u8 id) | ||
867 | { | ||
868 | #ifdef CONFIG_X86_32 | ||
869 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | ||
870 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
871 | return io_apic_get_unique_id(nr_ioapics, id); | ||
872 | else | ||
873 | return id; | ||
874 | #else | ||
875 | int i; | ||
876 | DECLARE_BITMAP(used, 256); | ||
877 | bitmap_zero(used, 256); | ||
878 | for (i = 0; i < nr_ioapics; i++) { | ||
879 | struct mpc_ioapic *ia = &mp_ioapics[i]; | ||
880 | __set_bit(ia->apicid, used); | ||
881 | } | ||
882 | if (!test_bit(id, used)) | ||
883 | return id; | ||
884 | return find_first_zero_bit(used, 256); | ||
885 | #endif | ||
886 | } | ||
887 | |||
888 | static int bad_ioapic(unsigned long address) | ||
889 | { | ||
890 | if (nr_ioapics >= MAX_IO_APICS) { | ||
891 | printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " | ||
892 | "(found %d)\n", MAX_IO_APICS, nr_ioapics); | ||
893 | panic("Recompile kernel with bigger MAX_IO_APICS!\n"); | ||
894 | } | ||
895 | if (!address) { | ||
896 | printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" | ||
897 | " found in table, skipping!\n"); | ||
898 | return 1; | ||
899 | } | ||
900 | return 0; | ||
901 | } | ||
902 | |||
903 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | ||
904 | { | ||
905 | int idx = 0; | ||
906 | |||
907 | if (bad_ioapic(address)) | ||
908 | return; | ||
909 | |||
910 | idx = nr_ioapics; | ||
911 | |||
912 | mp_ioapics[idx].type = MP_IOAPIC; | ||
913 | mp_ioapics[idx].flags = MPC_APIC_USABLE; | ||
914 | mp_ioapics[idx].apicaddr = address; | ||
915 | |||
916 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); | ||
917 | mp_ioapics[idx].apicid = uniq_ioapic_id(id); | ||
918 | mp_ioapics[idx].apicver = io_apic_get_version(idx); | ||
919 | |||
920 | /* | ||
921 | * Build basic GSI lookup table to facilitate gsi->io_apic lookups | ||
922 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). | ||
923 | */ | ||
924 | mp_ioapic_routing[idx].gsi_base = gsi_base; | ||
925 | mp_ioapic_routing[idx].gsi_end = gsi_base + | ||
926 | io_apic_get_redir_entries(idx); | ||
927 | |||
928 | printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " | ||
929 | "GSI %d-%d\n", idx, mp_ioapics[idx].apicid, | ||
930 | mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr, | ||
931 | mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end); | ||
932 | |||
933 | nr_ioapics++; | ||
934 | } | ||
935 | |||
936 | int __init acpi_probe_gsi(void) | 836 | int __init acpi_probe_gsi(void) |
937 | { | 837 | { |
938 | int idx; | 838 | int idx; |
@@ -947,7 +847,7 @@ int __init acpi_probe_gsi(void) | |||
947 | 847 | ||
948 | max_gsi = 0; | 848 | max_gsi = 0; |
949 | for (idx = 0; idx < nr_ioapics; idx++) { | 849 | for (idx = 0; idx < nr_ioapics; idx++) { |
950 | gsi = mp_ioapic_routing[idx].gsi_end; | 850 | gsi = mp_gsi_routing[idx].gsi_end; |
951 | 851 | ||
952 | if (gsi > max_gsi) | 852 | if (gsi > max_gsi) |
953 | max_gsi = gsi; | 853 | max_gsi = gsi; |
@@ -1179,9 +1079,8 @@ static int __init acpi_parse_madt_ioapic_entries(void) | |||
1179 | * If MPS is present, it will handle them, | 1079 | * If MPS is present, it will handle them, |
1180 | * otherwise the system will stay in PIC mode | 1080 | * otherwise the system will stay in PIC mode |
1181 | */ | 1081 | */ |
1182 | if (acpi_disabled || acpi_noirq) { | 1082 | if (acpi_disabled || acpi_noirq) |
1183 | return -ENODEV; | 1083 | return -ENODEV; |
1184 | } | ||
1185 | 1084 | ||
1186 | if (!cpu_has_apic) | 1085 | if (!cpu_has_apic) |
1187 | return -ENODEV; | 1086 | return -ENODEV; |
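The block removed above is a plain range lookup: each IOAPIC owns a contiguous window of global system interrupts, so mp_find_ioapic() scans for the window containing a GSI and mp_find_ioapic_pin() turns the GSI into a pin offset inside that window. A minimal userspace sketch of that lookup follows; the table and names are illustrative stand-ins for the kernel's mp_gsi_routing[] (the name the acpi_probe_gsi() hunk above now references).

#include <stdio.h>

#define MAX_IO_APICS 128

struct gsi_range {
	int gsi_base;
	int gsi_end;
};

static struct gsi_range routing[MAX_IO_APICS]; /* stand-in for mp_gsi_routing[] */
static int nr_ioapics;

/* Return the index of the IOAPIC whose GSI window covers 'gsi', or -1. */
static int find_ioapic(int gsi)
{
	int i;

	for (i = 0; i < nr_ioapics; i++) {
		if (gsi >= routing[i].gsi_base && gsi <= routing[i].gsi_end)
			return i;
	}
	return -1;
}

/* The pin number is just the offset of the GSI inside the owning window. */
static int find_ioapic_pin(int ioapic, int gsi)
{
	if (ioapic < 0 || gsi > routing[ioapic].gsi_end)
		return -1;
	return gsi - routing[ioapic].gsi_base;
}

int main(void)
{
	routing[0] = (struct gsi_range){ .gsi_base = 0,  .gsi_end = 23 };
	routing[1] = (struct gsi_range){ .gsi_base = 24, .gsi_end = 55 };
	nr_ioapics = 2;

	/* GSI 30 lands in the second IOAPIC's window, at pin 6. */
	printf("ioapic=%d pin=%d\n", find_ioapic(30),
	       find_ioapic_pin(find_ioapic(30), 30));
	return 0;
}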
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index f57658702571..de7353c0ce9c 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -2,6 +2,7 @@ | |||
2 | #include <linux/sched.h> | 2 | #include <linux/sched.h> |
3 | #include <linux/mutex.h> | 3 | #include <linux/mutex.h> |
4 | #include <linux/list.h> | 4 | #include <linux/list.h> |
5 | #include <linux/stringify.h> | ||
5 | #include <linux/kprobes.h> | 6 | #include <linux/kprobes.h> |
6 | #include <linux/mm.h> | 7 | #include <linux/mm.h> |
7 | #include <linux/vmalloc.h> | 8 | #include <linux/vmalloc.h> |
@@ -32,7 +33,7 @@ __setup("smp-alt-boot", bootonly); | |||
32 | #define smp_alt_once 1 | 33 | #define smp_alt_once 1 |
33 | #endif | 34 | #endif |
34 | 35 | ||
35 | static int debug_alternative; | 36 | static int __initdata_or_module debug_alternative; |
36 | 37 | ||
37 | static int __init debug_alt(char *str) | 38 | static int __init debug_alt(char *str) |
38 | { | 39 | { |
@@ -51,7 +52,7 @@ static int __init setup_noreplace_smp(char *str) | |||
51 | __setup("noreplace-smp", setup_noreplace_smp); | 52 | __setup("noreplace-smp", setup_noreplace_smp); |
52 | 53 | ||
53 | #ifdef CONFIG_PARAVIRT | 54 | #ifdef CONFIG_PARAVIRT |
54 | static int noreplace_paravirt = 0; | 55 | static int __initdata_or_module noreplace_paravirt = 0; |
55 | 56 | ||
56 | static int __init setup_noreplace_paravirt(char *str) | 57 | static int __init setup_noreplace_paravirt(char *str) |
57 | { | 58 | { |
@@ -64,16 +65,17 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt); | |||
64 | #define DPRINTK(fmt, args...) if (debug_alternative) \ | 65 | #define DPRINTK(fmt, args...) if (debug_alternative) \ |
65 | printk(KERN_DEBUG fmt, args) | 66 | printk(KERN_DEBUG fmt, args) |
66 | 67 | ||
67 | #ifdef GENERIC_NOP1 | 68 | #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64) |
68 | /* Use inline assembly to define this because the nops are defined | 69 | /* Use inline assembly to define this because the nops are defined |
69 | as inline assembly strings in the include files and we cannot | 70 | as inline assembly strings in the include files and we cannot |
70 | get them easily into strings. */ | 71 | get them easily into strings. */ |
71 | asm("\t.section .rodata, \"a\"\nintelnops: " | 72 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: " |
72 | GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 | 73 | GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 |
73 | GENERIC_NOP7 GENERIC_NOP8 | 74 | GENERIC_NOP7 GENERIC_NOP8 |
74 | "\t.previous"); | 75 | "\t.previous"); |
75 | extern const unsigned char intelnops[]; | 76 | extern const unsigned char intelnops[]; |
76 | static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = { | 77 | static const unsigned char *const __initconst_or_module |
78 | intel_nops[ASM_NOP_MAX+1] = { | ||
77 | NULL, | 79 | NULL, |
78 | intelnops, | 80 | intelnops, |
79 | intelnops + 1, | 81 | intelnops + 1, |
@@ -87,12 +89,13 @@ static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = { | |||
87 | #endif | 89 | #endif |
88 | 90 | ||
89 | #ifdef K8_NOP1 | 91 | #ifdef K8_NOP1 |
90 | asm("\t.section .rodata, \"a\"\nk8nops: " | 92 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: " |
91 | K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 | 93 | K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 |
92 | K8_NOP7 K8_NOP8 | 94 | K8_NOP7 K8_NOP8 |
93 | "\t.previous"); | 95 | "\t.previous"); |
94 | extern const unsigned char k8nops[]; | 96 | extern const unsigned char k8nops[]; |
95 | static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = { | 97 | static const unsigned char *const __initconst_or_module |
98 | k8_nops[ASM_NOP_MAX+1] = { | ||
96 | NULL, | 99 | NULL, |
97 | k8nops, | 100 | k8nops, |
98 | k8nops + 1, | 101 | k8nops + 1, |
@@ -105,13 +108,14 @@ static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = { | |||
105 | }; | 108 | }; |
106 | #endif | 109 | #endif |
107 | 110 | ||
108 | #ifdef K7_NOP1 | 111 | #if defined(K7_NOP1) && !defined(CONFIG_X86_64) |
109 | asm("\t.section .rodata, \"a\"\nk7nops: " | 112 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: " |
110 | K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 | 113 | K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 |
111 | K7_NOP7 K7_NOP8 | 114 | K7_NOP7 K7_NOP8 |
112 | "\t.previous"); | 115 | "\t.previous"); |
113 | extern const unsigned char k7nops[]; | 116 | extern const unsigned char k7nops[]; |
114 | static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = { | 117 | static const unsigned char *const __initconst_or_module |
118 | k7_nops[ASM_NOP_MAX+1] = { | ||
115 | NULL, | 119 | NULL, |
116 | k7nops, | 120 | k7nops, |
117 | k7nops + 1, | 121 | k7nops + 1, |
@@ -125,12 +129,13 @@ static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = { | |||
125 | #endif | 129 | #endif |
126 | 130 | ||
127 | #ifdef P6_NOP1 | 131 | #ifdef P6_NOP1 |
128 | asm("\t.section .rodata, \"a\"\np6nops: " | 132 | asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: " |
129 | P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6 | 133 | P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6 |
130 | P6_NOP7 P6_NOP8 | 134 | P6_NOP7 P6_NOP8 |
131 | "\t.previous"); | 135 | "\t.previous"); |
132 | extern const unsigned char p6nops[]; | 136 | extern const unsigned char p6nops[]; |
133 | static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = { | 137 | static const unsigned char *const __initconst_or_module |
138 | p6_nops[ASM_NOP_MAX+1] = { | ||
134 | NULL, | 139 | NULL, |
135 | p6nops, | 140 | p6nops, |
136 | p6nops + 1, | 141 | p6nops + 1, |
@@ -146,7 +151,7 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = { | |||
146 | #ifdef CONFIG_X86_64 | 151 | #ifdef CONFIG_X86_64 |
147 | 152 | ||
148 | extern char __vsyscall_0; | 153 | extern char __vsyscall_0; |
149 | const unsigned char *const *find_nop_table(void) | 154 | static const unsigned char *const *__init_or_module find_nop_table(void) |
150 | { | 155 | { |
151 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | 156 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && |
152 | boot_cpu_has(X86_FEATURE_NOPL)) | 157 | boot_cpu_has(X86_FEATURE_NOPL)) |
@@ -157,7 +162,7 @@ const unsigned char *const *find_nop_table(void) | |||
157 | 162 | ||
158 | #else /* CONFIG_X86_64 */ | 163 | #else /* CONFIG_X86_64 */ |
159 | 164 | ||
160 | const unsigned char *const *find_nop_table(void) | 165 | static const unsigned char *const *__init_or_module find_nop_table(void) |
161 | { | 166 | { |
162 | if (boot_cpu_has(X86_FEATURE_K8)) | 167 | if (boot_cpu_has(X86_FEATURE_K8)) |
163 | return k8_nops; | 168 | return k8_nops; |
@@ -172,7 +177,7 @@ const unsigned char *const *find_nop_table(void) | |||
172 | #endif /* CONFIG_X86_64 */ | 177 | #endif /* CONFIG_X86_64 */ |
173 | 178 | ||
174 | /* Use this to add nops to a buffer, then text_poke the whole buffer. */ | 179 | /* Use this to add nops to a buffer, then text_poke the whole buffer. */ |
175 | void add_nops(void *insns, unsigned int len) | 180 | static void __init_or_module add_nops(void *insns, unsigned int len) |
176 | { | 181 | { |
177 | const unsigned char *const *noptable = find_nop_table(); | 182 | const unsigned char *const *noptable = find_nop_table(); |
178 | 183 | ||
@@ -185,10 +190,10 @@ void add_nops(void *insns, unsigned int len) | |||
185 | len -= noplen; | 190 | len -= noplen; |
186 | } | 191 | } |
187 | } | 192 | } |
188 | EXPORT_SYMBOL_GPL(add_nops); | ||
189 | 193 | ||
190 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; | 194 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; |
191 | extern u8 *__smp_locks[], *__smp_locks_end[]; | 195 | extern u8 *__smp_locks[], *__smp_locks_end[]; |
196 | static void *text_poke_early(void *addr, const void *opcode, size_t len); | ||
192 | 197 | ||
193 | /* Replace instructions with better alternatives for this CPU type. | 198 | /* Replace instructions with better alternatives for this CPU type. |
194 | This runs before SMP is initialized to avoid SMP problems with | 199 | This runs before SMP is initialized to avoid SMP problems with |
@@ -196,7 +201,8 @@ extern u8 *__smp_locks[], *__smp_locks_end[]; | |||
196 | APs have less capabilities than the boot processor are not handled. | 201 | APs have less capabilities than the boot processor are not handled. |
197 | Tough. Make sure you disable such features by hand. */ | 202 | Tough. Make sure you disable such features by hand. */ |
198 | 203 | ||
199 | void apply_alternatives(struct alt_instr *start, struct alt_instr *end) | 204 | void __init_or_module apply_alternatives(struct alt_instr *start, |
205 | struct alt_instr *end) | ||
200 | { | 206 | { |
201 | struct alt_instr *a; | 207 | struct alt_instr *a; |
202 | char insnbuf[MAX_PATCH_LEN]; | 208 | char insnbuf[MAX_PATCH_LEN]; |
@@ -279,9 +285,10 @@ static LIST_HEAD(smp_alt_modules); | |||
279 | static DEFINE_MUTEX(smp_alt); | 285 | static DEFINE_MUTEX(smp_alt); |
280 | static int smp_mode = 1; /* protected by smp_alt */ | 286 | static int smp_mode = 1; /* protected by smp_alt */ |
281 | 287 | ||
282 | void alternatives_smp_module_add(struct module *mod, char *name, | 288 | void __init_or_module alternatives_smp_module_add(struct module *mod, |
283 | void *locks, void *locks_end, | 289 | char *name, |
284 | void *text, void *text_end) | 290 | void *locks, void *locks_end, |
291 | void *text, void *text_end) | ||
285 | { | 292 | { |
286 | struct smp_alt_module *smp; | 293 | struct smp_alt_module *smp; |
287 | 294 | ||
@@ -317,7 +324,7 @@ void alternatives_smp_module_add(struct module *mod, char *name, | |||
317 | mutex_unlock(&smp_alt); | 324 | mutex_unlock(&smp_alt); |
318 | } | 325 | } |
319 | 326 | ||
320 | void alternatives_smp_module_del(struct module *mod) | 327 | void __init_or_module alternatives_smp_module_del(struct module *mod) |
321 | { | 328 | { |
322 | struct smp_alt_module *item; | 329 | struct smp_alt_module *item; |
323 | 330 | ||
@@ -386,8 +393,8 @@ void alternatives_smp_switch(int smp) | |||
386 | #endif | 393 | #endif |
387 | 394 | ||
388 | #ifdef CONFIG_PARAVIRT | 395 | #ifdef CONFIG_PARAVIRT |
389 | void apply_paravirt(struct paravirt_patch_site *start, | 396 | void __init_or_module apply_paravirt(struct paravirt_patch_site *start, |
390 | struct paravirt_patch_site *end) | 397 | struct paravirt_patch_site *end) |
391 | { | 398 | { |
392 | struct paravirt_patch_site *p; | 399 | struct paravirt_patch_site *p; |
393 | char insnbuf[MAX_PATCH_LEN]; | 400 | char insnbuf[MAX_PATCH_LEN]; |
@@ -485,13 +492,14 @@ void __init alternative_instructions(void) | |||
485 | * instructions. And on the local CPU you need to be protected against NMI or MCE | 492 | * instructions. And on the local CPU you need to be protected against NMI or MCE |
486 | * handlers seeing an inconsistent instruction while you patch. | 493 | * handlers seeing an inconsistent instruction while you patch. |
487 | */ | 494 | */ |
488 | void *text_poke_early(void *addr, const void *opcode, size_t len) | 495 | static void *__init_or_module text_poke_early(void *addr, const void *opcode, |
496 | size_t len) | ||
489 | { | 497 | { |
490 | unsigned long flags; | 498 | unsigned long flags; |
491 | local_irq_save(flags); | 499 | local_irq_save(flags); |
492 | memcpy(addr, opcode, len); | 500 | memcpy(addr, opcode, len); |
493 | local_irq_restore(flags); | ||
494 | sync_core(); | 501 | sync_core(); |
502 | local_irq_restore(flags); | ||
495 | /* Could also do a CLFLUSH here to speed up CPU recovery; but | 503 | /* Could also do a CLFLUSH here to speed up CPU recovery; but |
496 | that causes hangs on some VIA CPUs. */ | 504 | that causes hangs on some VIA CPUs. */ |
497 | return addr; | 505 | return addr; |
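Two things change in alternative.c: the patching machinery moves into __init_or_module sections (so it can be discarded after boot when CONFIG_MODULES is off), and text_poke_early() now calls sync_core() before local_irq_restore(), serializing the CPU before any interrupt can execute the freshly written bytes. A userspace-flavoured sketch of that ordering; the irq/serialize helpers are no-op stand-ins for the kernel primitives, not the real implementations.

#include <stdio.h>
#include <string.h>

/* No-op stand-ins for local_irq_save()/local_irq_restore()/sync_core(). */
static unsigned long irq_save(void) { return 0; }
static void irq_restore(unsigned long flags) { (void)flags; }
static void serialize_cpu(void) { __asm__ volatile("" ::: "memory"); }

static void *poke_early(void *addr, const void *opcode, size_t len)
{
	unsigned long flags = irq_save();

	memcpy(addr, opcode, len);
	/*
	 * Serialize *before* restoring interrupts (the ordering the hunk
	 * above fixes): otherwise an interrupt taken between memcpy()
	 * and the serializing instruction could run half-patched code.
	 */
	serialize_cpu();
	irq_restore(flags);
	return addr;
}

int main(void)
{
	unsigned char buf[2] = { 0, 0 };
	const unsigned char nop2[] = { 0x66, 0x90 }; /* x86 two-byte NOP */

	poke_early(buf, nop2, sizeof(nop2));
	printf("%02x %02x\n", buf[0], buf[1]);
	return 0;
}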
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 6c99f5037801..98f230f6a28d 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -41,9 +41,13 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock); | |||
41 | static LIST_HEAD(iommu_pd_list); | 41 | static LIST_HEAD(iommu_pd_list); |
42 | static DEFINE_SPINLOCK(iommu_pd_list_lock); | 42 | static DEFINE_SPINLOCK(iommu_pd_list_lock); |
43 | 43 | ||
44 | #ifdef CONFIG_IOMMU_API | 44 | /* |
45 | * Domain for untranslated devices - only allocated | ||
46 | * if iommu=pt passed on kernel cmd line. | ||
47 | */ | ||
48 | static struct protection_domain *pt_domain; | ||
49 | |||
45 | static struct iommu_ops amd_iommu_ops; | 50 | static struct iommu_ops amd_iommu_ops; |
46 | #endif | ||
47 | 51 | ||
48 | /* | 52 | /* |
49 | * general struct to manage commands send to an IOMMU | 53 | * general struct to manage commands send to an IOMMU |
@@ -55,16 +59,16 @@ struct iommu_cmd { | |||
55 | static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | 59 | static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, |
56 | struct unity_map_entry *e); | 60 | struct unity_map_entry *e); |
57 | static struct dma_ops_domain *find_protection_domain(u16 devid); | 61 | static struct dma_ops_domain *find_protection_domain(u16 devid); |
58 | static u64* alloc_pte(struct protection_domain *dom, | 62 | static u64 *alloc_pte(struct protection_domain *domain, |
59 | unsigned long address, u64 | 63 | unsigned long address, int end_lvl, |
60 | **pte_page, gfp_t gfp); | 64 | u64 **pte_page, gfp_t gfp); |
61 | static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, | 65 | static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, |
62 | unsigned long start_page, | 66 | unsigned long start_page, |
63 | unsigned int pages); | 67 | unsigned int pages); |
64 | 68 | static void reset_iommu_command_buffer(struct amd_iommu *iommu); | |
65 | #ifndef BUS_NOTIFY_UNBOUND_DRIVER | 69 | static u64 *fetch_pte(struct protection_domain *domain, |
66 | #define BUS_NOTIFY_UNBOUND_DRIVER 0x0005 | 70 | unsigned long address, int map_size); |
67 | #endif | 71 | static void update_domain(struct protection_domain *domain); |
68 | 72 | ||
69 | #ifdef CONFIG_AMD_IOMMU_STATS | 73 | #ifdef CONFIG_AMD_IOMMU_STATS |
70 | 74 | ||
@@ -138,7 +142,25 @@ static int iommu_has_npcache(struct amd_iommu *iommu) | |||
138 | * | 142 | * |
139 | ****************************************************************************/ | 143 | ****************************************************************************/ |
140 | 144 | ||
141 | static void iommu_print_event(void *__evt) | 145 | static void dump_dte_entry(u16 devid) |
146 | { | ||
147 | int i; | ||
148 | |||
149 | for (i = 0; i < 8; ++i) | ||
150 | pr_err("AMD-Vi: DTE[%d]: %08x\n", i, | ||
151 | amd_iommu_dev_table[devid].data[i]); | ||
152 | } | ||
153 | |||
154 | static void dump_command(unsigned long phys_addr) | ||
155 | { | ||
156 | struct iommu_cmd *cmd = phys_to_virt(phys_addr); | ||
157 | int i; | ||
158 | |||
159 | for (i = 0; i < 4; ++i) | ||
160 | pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]); | ||
161 | } | ||
162 | |||
163 | static void iommu_print_event(struct amd_iommu *iommu, void *__evt) | ||
142 | { | 164 | { |
143 | u32 *event = __evt; | 165 | u32 *event = __evt; |
144 | int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; | 166 | int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; |
@@ -147,7 +169,7 @@ static void iommu_print_event(void *__evt) | |||
147 | int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; | 169 | int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; |
148 | u64 address = (u64)(((u64)event[3]) << 32) | event[2]; | 170 | u64 address = (u64)(((u64)event[3]) << 32) | event[2]; |
149 | 171 | ||
150 | printk(KERN_ERR "AMD IOMMU: Event logged ["); | 172 | printk(KERN_ERR "AMD-Vi: Event logged ["); |
151 | 173 | ||
152 | switch (type) { | 174 | switch (type) { |
153 | case EVENT_TYPE_ILL_DEV: | 175 | case EVENT_TYPE_ILL_DEV: |
@@ -155,6 +177,7 @@ static void iommu_print_event(void *__evt) | |||
155 | "address=0x%016llx flags=0x%04x]\n", | 177 | "address=0x%016llx flags=0x%04x]\n", |
156 | PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), | 178 | PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), |
157 | address, flags); | 179 | address, flags); |
180 | dump_dte_entry(devid); | ||
158 | break; | 181 | break; |
159 | case EVENT_TYPE_IO_FAULT: | 182 | case EVENT_TYPE_IO_FAULT: |
160 | printk("IO_PAGE_FAULT device=%02x:%02x.%x " | 183 | printk("IO_PAGE_FAULT device=%02x:%02x.%x " |
@@ -176,6 +199,8 @@ static void iommu_print_event(void *__evt) | |||
176 | break; | 199 | break; |
177 | case EVENT_TYPE_ILL_CMD: | 200 | case EVENT_TYPE_ILL_CMD: |
178 | printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); | 201 | printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); |
202 | reset_iommu_command_buffer(iommu); | ||
203 | dump_command(address); | ||
179 | break; | 204 | break; |
180 | case EVENT_TYPE_CMD_HARD_ERR: | 205 | case EVENT_TYPE_CMD_HARD_ERR: |
181 | printk("COMMAND_HARDWARE_ERROR address=0x%016llx " | 206 | printk("COMMAND_HARDWARE_ERROR address=0x%016llx " |
@@ -209,7 +234,7 @@ static void iommu_poll_events(struct amd_iommu *iommu) | |||
209 | tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); | 234 | tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); |
210 | 235 | ||
211 | while (head != tail) { | 236 | while (head != tail) { |
212 | iommu_print_event(iommu->evt_buf + head); | 237 | iommu_print_event(iommu, iommu->evt_buf + head); |
213 | head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size; | 238 | head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size; |
214 | } | 239 | } |
215 | 240 | ||
@@ -296,8 +321,11 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu) | |||
296 | status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; | 321 | status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; |
297 | writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); | 322 | writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); |
298 | 323 | ||
299 | if (unlikely(i == EXIT_LOOP_COUNT)) | 324 | if (unlikely(i == EXIT_LOOP_COUNT)) { |
300 | panic("AMD IOMMU: Completion wait loop failed\n"); | 325 | spin_unlock(&iommu->lock); |
326 | reset_iommu_command_buffer(iommu); | ||
327 | spin_lock(&iommu->lock); | ||
328 | } | ||
301 | } | 329 | } |
302 | 330 | ||
303 | /* | 331 | /* |
@@ -445,47 +473,78 @@ static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid) | |||
445 | } | 473 | } |
446 | 474 | ||
447 | /* | 475 | /* |
476 | * This function flushes one domain on one IOMMU | ||
477 | */ | ||
478 | static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid) | ||
479 | { | ||
480 | struct iommu_cmd cmd; | ||
481 | unsigned long flags; | ||
482 | |||
483 | __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, | ||
484 | domid, 1, 1); | ||
485 | |||
486 | spin_lock_irqsave(&iommu->lock, flags); | ||
487 | __iommu_queue_command(iommu, &cmd); | ||
488 | __iommu_completion_wait(iommu); | ||
489 | __iommu_wait_for_completion(iommu); | ||
490 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
491 | } | ||
492 | |||
493 | static void flush_all_domains_on_iommu(struct amd_iommu *iommu) | ||
494 | { | ||
495 | int i; | ||
496 | |||
497 | for (i = 1; i < MAX_DOMAIN_ID; ++i) { | ||
498 | if (!test_bit(i, amd_iommu_pd_alloc_bitmap)) | ||
499 | continue; | ||
500 | flush_domain_on_iommu(iommu, i); | ||
501 | } | ||
502 | |||
503 | } | ||
504 | |||
505 | /* | ||
448 | * This function is used to flush the IO/TLB for a given protection domain | 506 | * This function is used to flush the IO/TLB for a given protection domain |
449 | * on every IOMMU in the system | 507 | * on every IOMMU in the system |
450 | */ | 508 | */ |
451 | static void iommu_flush_domain(u16 domid) | 509 | static void iommu_flush_domain(u16 domid) |
452 | { | 510 | { |
453 | unsigned long flags; | ||
454 | struct amd_iommu *iommu; | 511 | struct amd_iommu *iommu; |
455 | struct iommu_cmd cmd; | ||
456 | 512 | ||
457 | INC_STATS_COUNTER(domain_flush_all); | 513 | INC_STATS_COUNTER(domain_flush_all); |
458 | 514 | ||
459 | __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, | 515 | for_each_iommu(iommu) |
460 | domid, 1, 1); | 516 | flush_domain_on_iommu(iommu, domid); |
461 | |||
462 | for_each_iommu(iommu) { | ||
463 | spin_lock_irqsave(&iommu->lock, flags); | ||
464 | __iommu_queue_command(iommu, &cmd); | ||
465 | __iommu_completion_wait(iommu); | ||
466 | __iommu_wait_for_completion(iommu); | ||
467 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
468 | } | ||
469 | } | 517 | } |
470 | 518 | ||
471 | void amd_iommu_flush_all_domains(void) | 519 | void amd_iommu_flush_all_domains(void) |
472 | { | 520 | { |
521 | struct amd_iommu *iommu; | ||
522 | |||
523 | for_each_iommu(iommu) | ||
524 | flush_all_domains_on_iommu(iommu); | ||
525 | } | ||
526 | |||
527 | static void flush_all_devices_for_iommu(struct amd_iommu *iommu) | ||
528 | { | ||
473 | int i; | 529 | int i; |
474 | 530 | ||
475 | for (i = 1; i < MAX_DOMAIN_ID; ++i) { | 531 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { |
476 | if (!test_bit(i, amd_iommu_pd_alloc_bitmap)) | 532 | if (iommu != amd_iommu_rlookup_table[i]) |
477 | continue; | 533 | continue; |
478 | iommu_flush_domain(i); | 534 | |
535 | iommu_queue_inv_dev_entry(iommu, i); | ||
536 | iommu_completion_wait(iommu); | ||
479 | } | 537 | } |
480 | } | 538 | } |
481 | 539 | ||
482 | void amd_iommu_flush_all_devices(void) | 540 | static void flush_devices_by_domain(struct protection_domain *domain) |
483 | { | 541 | { |
484 | struct amd_iommu *iommu; | 542 | struct amd_iommu *iommu; |
485 | int i; | 543 | int i; |
486 | 544 | ||
487 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { | 545 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { |
488 | if (amd_iommu_pd_table[i] == NULL) | 546 | if ((domain == NULL && amd_iommu_pd_table[i] == NULL) || |
547 | (amd_iommu_pd_table[i] != domain)) | ||
489 | continue; | 548 | continue; |
490 | 549 | ||
491 | iommu = amd_iommu_rlookup_table[i]; | 550 | iommu = amd_iommu_rlookup_table[i]; |
@@ -497,6 +556,27 @@ void amd_iommu_flush_all_devices(void) | |||
497 | } | 556 | } |
498 | } | 557 | } |
499 | 558 | ||
559 | static void reset_iommu_command_buffer(struct amd_iommu *iommu) | ||
560 | { | ||
561 | pr_err("AMD-Vi: Resetting IOMMU command buffer\n"); | ||
562 | |||
563 | if (iommu->reset_in_progress) | ||
564 | panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n"); | ||
565 | |||
566 | iommu->reset_in_progress = true; | ||
567 | |||
568 | amd_iommu_reset_cmd_buffer(iommu); | ||
569 | flush_all_devices_for_iommu(iommu); | ||
570 | flush_all_domains_on_iommu(iommu); | ||
571 | |||
572 | iommu->reset_in_progress = false; | ||
573 | } | ||
574 | |||
575 | void amd_iommu_flush_all_devices(void) | ||
576 | { | ||
577 | flush_devices_by_domain(NULL); | ||
578 | } | ||
579 | |||
500 | /**************************************************************************** | 580 | /**************************************************************************** |
501 | * | 581 | * |
502 | * The functions below are used to create the page table mappings for | 582 | * The functions below are used to create the page table mappings for |
@@ -514,18 +594,21 @@ void amd_iommu_flush_all_devices(void) | |||
514 | static int iommu_map_page(struct protection_domain *dom, | 594 | static int iommu_map_page(struct protection_domain *dom, |
515 | unsigned long bus_addr, | 595 | unsigned long bus_addr, |
516 | unsigned long phys_addr, | 596 | unsigned long phys_addr, |
517 | int prot) | 597 | int prot, |
598 | int map_size) | ||
518 | { | 599 | { |
519 | u64 __pte, *pte; | 600 | u64 __pte, *pte; |
520 | 601 | ||
521 | bus_addr = PAGE_ALIGN(bus_addr); | 602 | bus_addr = PAGE_ALIGN(bus_addr); |
522 | phys_addr = PAGE_ALIGN(phys_addr); | 603 | phys_addr = PAGE_ALIGN(phys_addr); |
523 | 604 | ||
524 | /* only support 512GB address spaces for now */ | 605 | BUG_ON(!PM_ALIGNED(map_size, bus_addr)); |
525 | if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK)) | 606 | BUG_ON(!PM_ALIGNED(map_size, phys_addr)); |
607 | |||
608 | if (!(prot & IOMMU_PROT_MASK)) | ||
526 | return -EINVAL; | 609 | return -EINVAL; |
527 | 610 | ||
528 | pte = alloc_pte(dom, bus_addr, NULL, GFP_KERNEL); | 611 | pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL); |
529 | 612 | ||
530 | if (IOMMU_PTE_PRESENT(*pte)) | 613 | if (IOMMU_PTE_PRESENT(*pte)) |
531 | return -EBUSY; | 614 | return -EBUSY; |
@@ -538,29 +621,18 @@ static int iommu_map_page(struct protection_domain *dom, | |||
538 | 621 | ||
539 | *pte = __pte; | 622 | *pte = __pte; |
540 | 623 | ||
624 | update_domain(dom); | ||
625 | |||
541 | return 0; | 626 | return 0; |
542 | } | 627 | } |
543 | 628 | ||
544 | static void iommu_unmap_page(struct protection_domain *dom, | 629 | static void iommu_unmap_page(struct protection_domain *dom, |
545 | unsigned long bus_addr) | 630 | unsigned long bus_addr, int map_size) |
546 | { | 631 | { |
547 | u64 *pte; | 632 | u64 *pte = fetch_pte(dom, bus_addr, map_size); |
548 | |||
549 | pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)]; | ||
550 | |||
551 | if (!IOMMU_PTE_PRESENT(*pte)) | ||
552 | return; | ||
553 | |||
554 | pte = IOMMU_PTE_PAGE(*pte); | ||
555 | pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)]; | ||
556 | 633 | ||
557 | if (!IOMMU_PTE_PRESENT(*pte)) | 634 | if (pte) |
558 | return; | 635 | *pte = 0; |
559 | |||
560 | pte = IOMMU_PTE_PAGE(*pte); | ||
561 | pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)]; | ||
562 | |||
563 | *pte = 0; | ||
564 | } | 636 | } |
565 | 637 | ||
566 | /* | 638 | /* |
@@ -615,7 +687,8 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | |||
615 | 687 | ||
616 | for (addr = e->address_start; addr < e->address_end; | 688 | for (addr = e->address_start; addr < e->address_end; |
617 | addr += PAGE_SIZE) { | 689 | addr += PAGE_SIZE) { |
618 | ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot); | 690 | ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, |
691 | PM_MAP_4k); | ||
619 | if (ret) | 692 | if (ret) |
620 | return ret; | 693 | return ret; |
621 | /* | 694 | /* |
@@ -670,24 +743,29 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, | |||
670 | * This function checks if there is a PTE for a given dma address. If | 743 | * This function checks if there is a PTE for a given dma address. If |
671 | * there is one, it returns the pointer to it. | 744 | * there is one, it returns the pointer to it. |
672 | */ | 745 | */ |
673 | static u64* fetch_pte(struct protection_domain *domain, | 746 | static u64 *fetch_pte(struct protection_domain *domain, |
674 | unsigned long address) | 747 | unsigned long address, int map_size) |
675 | { | 748 | { |
749 | int level; | ||
676 | u64 *pte; | 750 | u64 *pte; |
677 | 751 | ||
678 | pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(address)]; | 752 | level = domain->mode - 1; |
753 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; | ||
679 | 754 | ||
680 | if (!IOMMU_PTE_PRESENT(*pte)) | 755 | while (level > map_size) { |
681 | return NULL; | 756 | if (!IOMMU_PTE_PRESENT(*pte)) |
757 | return NULL; | ||
682 | 758 | ||
683 | pte = IOMMU_PTE_PAGE(*pte); | 759 | level -= 1; |
684 | pte = &pte[IOMMU_PTE_L1_INDEX(address)]; | ||
685 | 760 | ||
686 | if (!IOMMU_PTE_PRESENT(*pte)) | 761 | pte = IOMMU_PTE_PAGE(*pte); |
687 | return NULL; | 762 | pte = &pte[PM_LEVEL_INDEX(level, address)]; |
688 | 763 | ||
689 | pte = IOMMU_PTE_PAGE(*pte); | 764 | if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) { |
690 | pte = &pte[IOMMU_PTE_L0_INDEX(address)]; | 765 | pte = NULL; |
766 | break; | ||
767 | } | ||
768 | } | ||
691 | 769 | ||
692 | return pte; | 770 | return pte; |
693 | } | 771 | } |
@@ -727,7 +805,7 @@ static int alloc_new_range(struct amd_iommu *iommu, | |||
727 | u64 *pte, *pte_page; | 805 | u64 *pte, *pte_page; |
728 | 806 | ||
729 | for (i = 0; i < num_ptes; ++i) { | 807 | for (i = 0; i < num_ptes; ++i) { |
730 | pte = alloc_pte(&dma_dom->domain, address, | 808 | pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k, |
731 | &pte_page, gfp); | 809 | &pte_page, gfp); |
732 | if (!pte) | 810 | if (!pte) |
733 | goto out_free; | 811 | goto out_free; |
@@ -760,16 +838,20 @@ static int alloc_new_range(struct amd_iommu *iommu, | |||
760 | for (i = dma_dom->aperture[index]->offset; | 838 | for (i = dma_dom->aperture[index]->offset; |
761 | i < dma_dom->aperture_size; | 839 | i < dma_dom->aperture_size; |
762 | i += PAGE_SIZE) { | 840 | i += PAGE_SIZE) { |
763 | u64 *pte = fetch_pte(&dma_dom->domain, i); | 841 | u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k); |
764 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) | 842 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
765 | continue; | 843 | continue; |
766 | 844 | ||
767 | dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1); | 845 | dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1); |
768 | } | 846 | } |
769 | 847 | ||
848 | update_domain(&dma_dom->domain); | ||
849 | |||
770 | return 0; | 850 | return 0; |
771 | 851 | ||
772 | out_free: | 852 | out_free: |
853 | update_domain(&dma_dom->domain); | ||
854 | |||
773 | free_page((unsigned long)dma_dom->aperture[index]->bitmap); | 855 | free_page((unsigned long)dma_dom->aperture[index]->bitmap); |
774 | 856 | ||
775 | kfree(dma_dom->aperture[index]); | 857 | kfree(dma_dom->aperture[index]); |
@@ -1009,7 +1091,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu) | |||
1009 | dma_dom->domain.id = domain_id_alloc(); | 1091 | dma_dom->domain.id = domain_id_alloc(); |
1010 | if (dma_dom->domain.id == 0) | 1092 | if (dma_dom->domain.id == 0) |
1011 | goto free_dma_dom; | 1093 | goto free_dma_dom; |
1012 | dma_dom->domain.mode = PAGE_MODE_3_LEVEL; | 1094 | dma_dom->domain.mode = PAGE_MODE_2_LEVEL; |
1013 | dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); | 1095 | dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); |
1014 | dma_dom->domain.flags = PD_DMA_OPS_MASK; | 1096 | dma_dom->domain.flags = PD_DMA_OPS_MASK; |
1015 | dma_dom->domain.priv = dma_dom; | 1097 | dma_dom->domain.priv = dma_dom; |
@@ -1063,6 +1145,41 @@ static struct protection_domain *domain_for_device(u16 devid) | |||
1063 | return dom; | 1145 | return dom; |
1064 | } | 1146 | } |
1065 | 1147 | ||
1148 | static void set_dte_entry(u16 devid, struct protection_domain *domain) | ||
1149 | { | ||
1150 | u64 pte_root = virt_to_phys(domain->pt_root); | ||
1151 | |||
1152 | pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) | ||
1153 | << DEV_ENTRY_MODE_SHIFT; | ||
1154 | pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; | ||
1155 | |||
1156 | amd_iommu_dev_table[devid].data[2] = domain->id; | ||
1157 | amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); | ||
1158 | amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); | ||
1159 | |||
1160 | amd_iommu_pd_table[devid] = domain; | ||
1161 | } | ||
1162 | |||
1163 | /* | ||
1164 | * If a device is not yet associated with a domain, this function | ||
1165 | * assigns it to the domain and makes it visible to the hardware | ||
1166 | */ | ||
1167 | static void __attach_device(struct amd_iommu *iommu, | ||
1168 | struct protection_domain *domain, | ||
1169 | u16 devid) | ||
1170 | { | ||
1171 | /* lock domain */ | ||
1172 | spin_lock(&domain->lock); | ||
1173 | |||
1174 | /* update DTE entry */ | ||
1175 | set_dte_entry(devid, domain); | ||
1176 | |||
1177 | domain->dev_cnt += 1; | ||
1178 | |||
1179 | /* ready */ | ||
1180 | spin_unlock(&domain->lock); | ||
1181 | } | ||
1182 | |||
1066 | /* | 1183 | /* |
1067 | * If a device is not yet associated with a domain, this function | 1184 | * If a device is not yet associated with a domain, this function |
1068 | * assigns it to the domain and makes it visible to the hardware | 1185 | * assigns it to the domain and makes it visible to the hardware |
@@ -1072,27 +1189,16 @@ static void attach_device(struct amd_iommu *iommu, | |||
1072 | u16 devid) | 1189 | u16 devid) |
1073 | { | 1190 | { |
1074 | unsigned long flags; | 1191 | unsigned long flags; |
1075 | u64 pte_root = virt_to_phys(domain->pt_root); | ||
1076 | |||
1077 | domain->dev_cnt += 1; | ||
1078 | |||
1079 | pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) | ||
1080 | << DEV_ENTRY_MODE_SHIFT; | ||
1081 | pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; | ||
1082 | 1192 | ||
1083 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | 1193 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); |
1084 | amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); | 1194 | __attach_device(iommu, domain, devid); |
1085 | amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); | ||
1086 | amd_iommu_dev_table[devid].data[2] = domain->id; | ||
1087 | |||
1088 | amd_iommu_pd_table[devid] = domain; | ||
1089 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 1195 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1090 | 1196 | ||
1091 | /* | 1197 | /* |
1092 | * We might boot into a crash-kernel here. The crashed kernel | 1198 | * We might boot into a crash-kernel here. The crashed kernel |
1093 | * left the caches in the IOMMU dirty. So we have to flush | 1199 | * left the caches in the IOMMU dirty. So we have to flush |
1094 | * here to evict all dirty stuff. | 1200 | * here to evict all dirty stuff. |
1095 | */ | 1201 | */ |
1096 | iommu_queue_inv_dev_entry(iommu, devid); | 1202 | iommu_queue_inv_dev_entry(iommu, devid); |
1097 | iommu_flush_tlb_pde(iommu, domain->id); | 1203 | iommu_flush_tlb_pde(iommu, domain->id); |
1098 | } | 1204 | } |
@@ -1119,6 +1225,15 @@ static void __detach_device(struct protection_domain *domain, u16 devid) | |||
1119 | 1225 | ||
1120 | /* ready */ | 1226 | /* ready */ |
1121 | spin_unlock(&domain->lock); | 1227 | spin_unlock(&domain->lock); |
1228 | |||
1229 | /* | ||
1230 | * If we run in passthrough mode the device must be assigned to the | ||
1231 | * passthrough domain if it is detached from any other domain | ||
1232 | */ | ||
1233 | if (iommu_pass_through) { | ||
1234 | struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; | ||
1235 | __attach_device(iommu, pt_domain, devid); | ||
1236 | } | ||
1122 | } | 1237 | } |
1123 | 1238 | ||
1124 | /* | 1239 | /* |
@@ -1164,6 +1279,8 @@ static int device_change_notifier(struct notifier_block *nb, | |||
1164 | case BUS_NOTIFY_UNBOUND_DRIVER: | 1279 | case BUS_NOTIFY_UNBOUND_DRIVER: |
1165 | if (!domain) | 1280 | if (!domain) |
1166 | goto out; | 1281 | goto out; |
1282 | if (iommu_pass_through) | ||
1283 | break; | ||
1167 | detach_device(domain, devid); | 1284 | detach_device(domain, devid); |
1168 | break; | 1285 | break; |
1169 | case BUS_NOTIFY_ADD_DEVICE: | 1286 | case BUS_NOTIFY_ADD_DEVICE: |
@@ -1292,39 +1409,91 @@ static int get_device_resources(struct device *dev, | |||
1292 | return 1; | 1409 | return 1; |
1293 | } | 1410 | } |
1294 | 1411 | ||
1412 | static void update_device_table(struct protection_domain *domain) | ||
1413 | { | ||
1414 | unsigned long flags; | ||
1415 | int i; | ||
1416 | |||
1417 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { | ||
1418 | if (amd_iommu_pd_table[i] != domain) | ||
1419 | continue; | ||
1420 | write_lock_irqsave(&amd_iommu_devtable_lock, flags); | ||
1421 | set_dte_entry(i, domain); | ||
1422 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | ||
1423 | } | ||
1424 | } | ||
1425 | |||
1426 | static void update_domain(struct protection_domain *domain) | ||
1427 | { | ||
1428 | if (!domain->updated) | ||
1429 | return; | ||
1430 | |||
1431 | update_device_table(domain); | ||
1432 | flush_devices_by_domain(domain); | ||
1433 | iommu_flush_domain(domain->id); | ||
1434 | |||
1435 | domain->updated = false; | ||
1436 | } | ||
1437 | |||
1295 | /* | 1438 | /* |
1296 | * If the pte_page is not yet allocated this function is called | 1439 | * This function is used to add another level to an IO page table. Adding |
1440 | * another level increases the size of the address space by 9 bits to a size up | ||
1441 | * to 64 bits. | ||
1297 | */ | 1442 | */ |
1298 | static u64* alloc_pte(struct protection_domain *dom, | 1443 | static bool increase_address_space(struct protection_domain *domain, |
1299 | unsigned long address, u64 **pte_page, gfp_t gfp) | 1444 | gfp_t gfp) |
1445 | { | ||
1446 | u64 *pte; | ||
1447 | |||
1448 | if (domain->mode == PAGE_MODE_6_LEVEL) | ||
1449 | /* address space already 64 bit large */ | ||
1450 | return false; | ||
1451 | |||
1452 | pte = (void *)get_zeroed_page(gfp); | ||
1453 | if (!pte) | ||
1454 | return false; | ||
1455 | |||
1456 | *pte = PM_LEVEL_PDE(domain->mode, | ||
1457 | virt_to_phys(domain->pt_root)); | ||
1458 | domain->pt_root = pte; | ||
1459 | domain->mode += 1; | ||
1460 | domain->updated = true; | ||
1461 | |||
1462 | return true; | ||
1463 | } | ||
1464 | |||
1465 | static u64 *alloc_pte(struct protection_domain *domain, | ||
1466 | unsigned long address, | ||
1467 | int end_lvl, | ||
1468 | u64 **pte_page, | ||
1469 | gfp_t gfp) | ||
1300 | { | 1470 | { |
1301 | u64 *pte, *page; | 1471 | u64 *pte, *page; |
1472 | int level; | ||
1302 | 1473 | ||
1303 | pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)]; | 1474 | while (address > PM_LEVEL_SIZE(domain->mode)) |
1475 | increase_address_space(domain, gfp); | ||
1304 | 1476 | ||
1305 | if (!IOMMU_PTE_PRESENT(*pte)) { | 1477 | level = domain->mode - 1; |
1306 | page = (u64 *)get_zeroed_page(gfp); | 1478 | pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; |
1307 | if (!page) | ||
1308 | return NULL; | ||
1309 | *pte = IOMMU_L2_PDE(virt_to_phys(page)); | ||
1310 | } | ||
1311 | 1479 | ||
1312 | pte = IOMMU_PTE_PAGE(*pte); | 1480 | while (level > end_lvl) { |
1313 | pte = &pte[IOMMU_PTE_L1_INDEX(address)]; | 1481 | if (!IOMMU_PTE_PRESENT(*pte)) { |
1482 | page = (u64 *)get_zeroed_page(gfp); | ||
1483 | if (!page) | ||
1484 | return NULL; | ||
1485 | *pte = PM_LEVEL_PDE(level, virt_to_phys(page)); | ||
1486 | } | ||
1314 | 1487 | ||
1315 | if (!IOMMU_PTE_PRESENT(*pte)) { | 1488 | level -= 1; |
1316 | page = (u64 *)get_zeroed_page(gfp); | ||
1317 | if (!page) | ||
1318 | return NULL; | ||
1319 | *pte = IOMMU_L1_PDE(virt_to_phys(page)); | ||
1320 | } | ||
1321 | 1489 | ||
1322 | pte = IOMMU_PTE_PAGE(*pte); | 1490 | pte = IOMMU_PTE_PAGE(*pte); |
1323 | 1491 | ||
1324 | if (pte_page) | 1492 | if (pte_page && level == end_lvl) |
1325 | *pte_page = pte; | 1493 | *pte_page = pte; |
1326 | 1494 | ||
1327 | pte = &pte[IOMMU_PTE_L0_INDEX(address)]; | 1495 | pte = &pte[PM_LEVEL_INDEX(level, address)]; |
1496 | } | ||
1328 | 1497 | ||
1329 | return pte; | 1498 | return pte; |
1330 | } | 1499 | } |
@@ -1344,10 +1513,13 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom, | |||
1344 | 1513 | ||
1345 | pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; | 1514 | pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; |
1346 | if (!pte) { | 1515 | if (!pte) { |
1347 | pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC); | 1516 | pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page, |
1517 | GFP_ATOMIC); | ||
1348 | aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; | 1518 | aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; |
1349 | } else | 1519 | } else |
1350 | pte += IOMMU_PTE_L0_INDEX(address); | 1520 | pte += PM_LEVEL_INDEX(0, address); |
1521 | |||
1522 | update_domain(&dom->domain); | ||
1351 | 1523 | ||
1352 | return pte; | 1524 | return pte; |
1353 | } | 1525 | } |
@@ -1409,7 +1581,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu, | |||
1409 | if (!pte) | 1581 | if (!pte) |
1410 | return; | 1582 | return; |
1411 | 1583 | ||
1412 | pte += IOMMU_PTE_L0_INDEX(address); | 1584 | pte += PM_LEVEL_INDEX(0, address); |
1413 | 1585 | ||
1414 | WARN_ON(!*pte); | 1586 | WARN_ON(!*pte); |
1415 | 1587 | ||
@@ -1988,19 +2160,47 @@ static void cleanup_domain(struct protection_domain *domain) | |||
1988 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); | 2160 | write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); |
1989 | } | 2161 | } |
1990 | 2162 | ||
1991 | static int amd_iommu_domain_init(struct iommu_domain *dom) | 2163 | static void protection_domain_free(struct protection_domain *domain) |
2164 | { | ||
2165 | if (!domain) | ||
2166 | return; | ||
2167 | |||
2168 | if (domain->id) | ||
2169 | domain_id_free(domain->id); | ||
2170 | |||
2171 | kfree(domain); | ||
2172 | } | ||
2173 | |||
2174 | static struct protection_domain *protection_domain_alloc(void) | ||
1992 | { | 2175 | { |
1993 | struct protection_domain *domain; | 2176 | struct protection_domain *domain; |
1994 | 2177 | ||
1995 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); | 2178 | domain = kzalloc(sizeof(*domain), GFP_KERNEL); |
1996 | if (!domain) | 2179 | if (!domain) |
1997 | return -ENOMEM; | 2180 | return NULL; |
1998 | 2181 | ||
1999 | spin_lock_init(&domain->lock); | 2182 | spin_lock_init(&domain->lock); |
2000 | domain->mode = PAGE_MODE_3_LEVEL; | ||
2001 | domain->id = domain_id_alloc(); | 2183 | domain->id = domain_id_alloc(); |
2002 | if (!domain->id) | 2184 | if (!domain->id) |
2185 | goto out_err; | ||
2186 | |||
2187 | return domain; | ||
2188 | |||
2189 | out_err: | ||
2190 | kfree(domain); | ||
2191 | |||
2192 | return NULL; | ||
2193 | } | ||
2194 | |||
2195 | static int amd_iommu_domain_init(struct iommu_domain *dom) | ||
2196 | { | ||
2197 | struct protection_domain *domain; | ||
2198 | |||
2199 | domain = protection_domain_alloc(); | ||
2200 | if (!domain) | ||
2003 | goto out_free; | 2201 | goto out_free; |
2202 | |||
2203 | domain->mode = PAGE_MODE_3_LEVEL; | ||
2004 | domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); | 2204 | domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); |
2005 | if (!domain->pt_root) | 2205 | if (!domain->pt_root) |
2006 | goto out_free; | 2206 | goto out_free; |
@@ -2010,7 +2210,7 @@ static int amd_iommu_domain_init(struct iommu_domain *dom) | |||
2010 | return 0; | 2210 | return 0; |
2011 | 2211 | ||
2012 | out_free: | 2212 | out_free: |
2013 | kfree(domain); | 2213 | protection_domain_free(domain); |
2014 | 2214 | ||
2015 | return -ENOMEM; | 2215 | return -ENOMEM; |
2016 | } | 2216 | } |
@@ -2115,7 +2315,7 @@ static int amd_iommu_map_range(struct iommu_domain *dom, | |||
2115 | paddr &= PAGE_MASK; | 2315 | paddr &= PAGE_MASK; |
2116 | 2316 | ||
2117 | for (i = 0; i < npages; ++i) { | 2317 | for (i = 0; i < npages; ++i) { |
2118 | ret = iommu_map_page(domain, iova, paddr, prot); | 2318 | ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k); |
2119 | if (ret) | 2319 | if (ret) |
2120 | return ret; | 2320 | return ret; |
2121 | 2321 | ||
@@ -2136,7 +2336,7 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom, | |||
2136 | iova &= PAGE_MASK; | 2336 | iova &= PAGE_MASK; |
2137 | 2337 | ||
2138 | for (i = 0; i < npages; ++i) { | 2338 | for (i = 0; i < npages; ++i) { |
2139 | iommu_unmap_page(domain, iova); | 2339 | iommu_unmap_page(domain, iova, PM_MAP_4k); |
2140 | iova += PAGE_SIZE; | 2340 | iova += PAGE_SIZE; |
2141 | } | 2341 | } |
2142 | 2342 | ||
@@ -2151,21 +2351,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, | |||
2151 | phys_addr_t paddr; | 2351 | phys_addr_t paddr; |
2152 | u64 *pte; | 2352 | u64 *pte; |
2153 | 2353 | ||
2154 | pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(iova)]; | 2354 | pte = fetch_pte(domain, iova, PM_MAP_4k); |
2155 | |||
2156 | if (!IOMMU_PTE_PRESENT(*pte)) | ||
2157 | return 0; | ||
2158 | |||
2159 | pte = IOMMU_PTE_PAGE(*pte); | ||
2160 | pte = &pte[IOMMU_PTE_L1_INDEX(iova)]; | ||
2161 | |||
2162 | if (!IOMMU_PTE_PRESENT(*pte)) | ||
2163 | return 0; | ||
2164 | |||
2165 | pte = IOMMU_PTE_PAGE(*pte); | ||
2166 | pte = &pte[IOMMU_PTE_L0_INDEX(iova)]; | ||
2167 | 2355 | ||
2168 | if (!IOMMU_PTE_PRESENT(*pte)) | 2356 | if (!pte || !IOMMU_PTE_PRESENT(*pte)) |
2169 | return 0; | 2357 | return 0; |
2170 | 2358 | ||
2171 | paddr = *pte & IOMMU_PAGE_MASK; | 2359 | paddr = *pte & IOMMU_PAGE_MASK; |
@@ -2191,3 +2379,46 @@ static struct iommu_ops amd_iommu_ops = { | |||
2191 | .domain_has_cap = amd_iommu_domain_has_cap, | 2379 | .domain_has_cap = amd_iommu_domain_has_cap, |
2192 | }; | 2380 | }; |
2193 | 2381 | ||
2382 | /***************************************************************************** | ||
2383 | * | ||
2384 | * The next functions do a basic initialization of IOMMU for pass through | ||
2385 | * mode | ||
2386 | * | ||
2387 | * In passthrough mode the IOMMU is initialized and enabled but not used for | ||
2388 | * DMA-API translation. | ||
2389 | * | ||
2390 | *****************************************************************************/ | ||
2391 | |||
2392 | int __init amd_iommu_init_passthrough(void) | ||
2393 | { | ||
2394 | struct pci_dev *dev = NULL; | ||
2395 | u16 devid, devid2; | ||
2396 | |||
2397 | /* allocate passthrough domain */ | ||
2398 | pt_domain = protection_domain_alloc(); | ||
2399 | if (!pt_domain) | ||
2400 | return -ENOMEM; | ||
2401 | |||
2402 | pt_domain->mode |= PAGE_MODE_NONE; | ||
2403 | |||
2404 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | ||
2405 | struct amd_iommu *iommu; | ||
2406 | |||
2407 | devid = calc_devid(dev->bus->number, dev->devfn); | ||
2408 | if (devid > amd_iommu_last_bdf) | ||
2409 | continue; | ||
2410 | |||
2411 | devid2 = amd_iommu_alias_table[devid]; | ||
2412 | |||
2413 | iommu = amd_iommu_rlookup_table[devid2]; | ||
2414 | if (!iommu) | ||
2415 | continue; | ||
2416 | |||
2417 | __attach_device(iommu, pt_domain, devid); | ||
2418 | __attach_device(iommu, pt_domain, devid2); | ||
2419 | } | ||
2420 | |||
2421 | pr_info("AMD-Vi: Initialized for Passthrough Mode\n"); | ||
2422 | |||
2423 | return 0; | ||
2424 | } | ||
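The page-table rework above replaces the fixed three-level walk with a generic one: every level decodes 9 address bits, domain->mode records how many levels the table currently has, and increase_address_space() grows the tree one level at a time (up to PAGE_MODE_6_LEVEL, enough for a full 64-bit space). A rough sketch of the indexing arithmetic; the constants and helpers are simplified stand-ins for the PM_LEVEL_* macros, not the kernel's exact definitions.

#include <stdio.h>

#define BITS_PER_LEVEL 9
#define PAGE_SHIFT     12

/* Index into the page table at 'level' for a given IO virtual address. */
static unsigned long level_index(int level, unsigned long address)
{
	return (address >> (PAGE_SHIFT + BITS_PER_LEVEL * level)) & 0x1ffUL;
}

/* Total address-space size covered by a table 'levels' deep. */
static unsigned long long level_size(int levels)
{
	return 1ULL << (PAGE_SHIFT + BITS_PER_LEVEL * levels);
}

int main(void)
{
	unsigned long addr = 0x12345678UL;
	int level;

	/* A 3-level table (PAGE_MODE_3_LEVEL) spans 512 GB. */
	printf("3-level span: %llu GB\n", level_size(3) >> 30);

	/* fetch_pte()/alloc_pte() walk from mode-1 down toward level 0. */
	for (level = 2; level >= 0; level--)
		printf("level %d index: %lu\n", level, level_index(level, addr));
	return 0;
}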
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index c1b17e97252e..b4b61d462dcc 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -252,7 +252,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit) | |||
252 | /* Function to enable the hardware */ | 252 | /* Function to enable the hardware */ |
253 | static void iommu_enable(struct amd_iommu *iommu) | 253 | static void iommu_enable(struct amd_iommu *iommu) |
254 | { | 254 | { |
255 | printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n", | 255 | printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n", |
256 | dev_name(&iommu->dev->dev), iommu->cap_ptr); | 256 | dev_name(&iommu->dev->dev), iommu->cap_ptr); |
257 | 257 | ||
258 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); | 258 | iommu_feature_enable(iommu, CONTROL_IOMMU_EN); |
@@ -435,6 +435,20 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) | |||
435 | } | 435 | } |
436 | 436 | ||
437 | /* | 437 | /* |
438 | * This function resets the command buffer if the IOMMU stopped fetching | ||
439 | * commands from it. | ||
440 | */ | ||
441 | void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) | ||
442 | { | ||
443 | iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); | ||
444 | |||
445 | writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); | ||
446 | writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | ||
447 | |||
448 | iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); | ||
449 | } | ||
450 | |||
451 | /* | ||
438 | * This function writes the command buffer address to the hardware and | 452 | * This function writes the command buffer address to the hardware and |
439 | * enables it. | 453 | * enables it. |
440 | */ | 454 | */ |
@@ -450,11 +464,7 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu) | |||
450 | memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, | 464 | memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, |
451 | &entry, sizeof(entry)); | 465 | &entry, sizeof(entry)); |
452 | 466 | ||
453 | /* set head and tail to zero manually */ | 467 | amd_iommu_reset_cmd_buffer(iommu); |
454 | writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); | ||
455 | writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); | ||
456 | |||
457 | iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); | ||
458 | } | 468 | } |
459 | 469 | ||
460 | static void __init free_command_buffer(struct amd_iommu *iommu) | 470 | static void __init free_command_buffer(struct amd_iommu *iommu) |
@@ -858,7 +868,7 @@ static int __init init_iommu_all(struct acpi_table_header *table) | |||
858 | switch (*p) { | 868 | switch (*p) { |
859 | case ACPI_IVHD_TYPE: | 869 | case ACPI_IVHD_TYPE: |
860 | 870 | ||
861 | DUMP_printk("IOMMU: device: %02x:%02x.%01x cap: %04x " | 871 | DUMP_printk("device: %02x:%02x.%01x cap: %04x " |
862 | "seg: %d flags: %01x info %04x\n", | 872 | "seg: %d flags: %01x info %04x\n", |
863 | PCI_BUS(h->devid), PCI_SLOT(h->devid), | 873 | PCI_BUS(h->devid), PCI_SLOT(h->devid), |
864 | PCI_FUNC(h->devid), h->cap_ptr, | 874 | PCI_FUNC(h->devid), h->cap_ptr, |
@@ -902,7 +912,7 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu) | |||
902 | 912 | ||
903 | r = request_irq(iommu->dev->irq, amd_iommu_int_handler, | 913 | r = request_irq(iommu->dev->irq, amd_iommu_int_handler, |
904 | IRQF_SAMPLE_RANDOM, | 914 | IRQF_SAMPLE_RANDOM, |
905 | "AMD IOMMU", | 915 | "AMD-Vi", |
906 | NULL); | 916 | NULL); |
907 | 917 | ||
908 | if (r) { | 918 | if (r) { |
@@ -1150,7 +1160,7 @@ int __init amd_iommu_init(void) | |||
1150 | 1160 | ||
1151 | 1161 | ||
1152 | if (no_iommu) { | 1162 | if (no_iommu) { |
1153 | printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n"); | 1163 | printk(KERN_INFO "AMD-Vi disabled by kernel command line\n"); |
1154 | return 0; | 1164 | return 0; |
1155 | } | 1165 | } |
1156 | 1166 | ||
@@ -1242,22 +1252,28 @@ int __init amd_iommu_init(void) | |||
1242 | if (ret) | 1252 | if (ret) |
1243 | goto free; | 1253 | goto free; |
1244 | 1254 | ||
1245 | ret = amd_iommu_init_dma_ops(); | 1255 | if (iommu_pass_through) |
1256 | ret = amd_iommu_init_passthrough(); | ||
1257 | else | ||
1258 | ret = amd_iommu_init_dma_ops(); | ||
1246 | if (ret) | 1259 | if (ret) |
1247 | goto free; | 1260 | goto free; |
1248 | 1261 | ||
1249 | enable_iommus(); | 1262 | enable_iommus(); |
1250 | 1263 | ||
1251 | printk(KERN_INFO "AMD IOMMU: device isolation "); | 1264 | if (iommu_pass_through) |
1265 | goto out; | ||
1266 | |||
1267 | printk(KERN_INFO "AMD-Vi: device isolation "); | ||
1252 | if (amd_iommu_isolate) | 1268 | if (amd_iommu_isolate) |
1253 | printk("enabled\n"); | 1269 | printk("enabled\n"); |
1254 | else | 1270 | else |
1255 | printk("disabled\n"); | 1271 | printk("disabled\n"); |
1256 | 1272 | ||
1257 | if (amd_iommu_unmap_flush) | 1273 | if (amd_iommu_unmap_flush) |
1258 | printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n"); | 1274 | printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); |
1259 | else | 1275 | else |
1260 | printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n"); | 1276 | printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); |
1261 | 1277 | ||
1262 | out: | 1278 | out: |
1263 | return ret; | 1279 | return ret; |
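amd_iommu_reset_cmd_buffer() factors the disable/zero/re-enable sequence out of iommu_enable_command_buffer() so the ILLEGAL_COMMAND_ERROR path in amd_iommu.c can restart a wedged command ring. A toy model of the sequence; the register indices and enable bit are illustrative stand-ins for the MMIO_CMD_*/CONTROL_CMDBUF_EN constants, and the "MMIO" is an ordinary array so the sketch runs in userspace.

#include <stdio.h>

#define REG_CTRL       0 /* stand-in for the IOMMU control register */
#define REG_CMD_HEAD   1 /* stand-in for MMIO_CMD_HEAD_OFFSET        */
#define REG_CMD_TAIL   2 /* stand-in for MMIO_CMD_TAIL_OFFSET        */
#define CTRL_CMDBUF_EN (1u << 12) /* illustrative enable bit         */

static unsigned int mmio[3]; /* ordinary memory standing in for MMIO */

static void reset_cmd_buffer(void)
{
	mmio[REG_CTRL] &= ~CTRL_CMDBUF_EN; /* stop command fetching    */
	mmio[REG_CMD_HEAD] = 0;            /* hardware read pointer    */
	mmio[REG_CMD_TAIL] = 0;            /* driver write pointer     */
	mmio[REG_CTRL] |= CTRL_CMDBUF_EN;  /* resume from a clean ring */
}

int main(void)
{
	mmio[REG_CMD_HEAD] = 0x40; /* pretend the ring wedged mid-buffer */
	mmio[REG_CMD_TAIL] = 0x80;
	reset_cmd_buffer();
	printf("head=%u tail=%u enabled=%d\n", mmio[REG_CMD_HEAD],
	       mmio[REG_CMD_TAIL], !!(mmio[REG_CTRL] & CTRL_CMDBUF_EN));
	return 0;
}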
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 676debfc1702..128111d8ffe0 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
21 | #include <linux/ioport.h> | 21 | #include <linux/ioport.h> |
22 | #include <linux/suspend.h> | 22 | #include <linux/suspend.h> |
23 | #include <linux/kmemleak.h> | ||
23 | #include <asm/e820.h> | 24 | #include <asm/e820.h> |
24 | #include <asm/io.h> | 25 | #include <asm/io.h> |
25 | #include <asm/iommu.h> | 26 | #include <asm/iommu.h> |
@@ -94,6 +95,11 @@ static u32 __init allocate_aperture(void) | |||
94 | * code for safe | 95 | * code for safe |
95 | */ | 96 | */ |
96 | p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); | 97 | p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); |
98 | /* | ||
99 | * Kmemleak should not scan this block as it may not be mapped via the | ||
100 | * kernel direct mapping. | ||
101 | */ | ||
102 | kmemleak_ignore(p); | ||
97 | if (!p || __pa(p)+aper_size > 0xffffffff) { | 103 | if (!p || __pa(p)+aper_size > 0xffffffff) { |
98 | printk(KERN_ERR | 104 | printk(KERN_ERR |
99 | "Cannot allocate aperture memory hole (%p,%uK)\n", | 105 | "Cannot allocate aperture memory hole (%p,%uK)\n", |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 0a1c2830ec66..159740decc41 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <asm/mtrr.h> | 49 | #include <asm/mtrr.h> |
50 | #include <asm/smp.h> | 50 | #include <asm/smp.h> |
51 | #include <asm/mce.h> | 51 | #include <asm/mce.h> |
52 | #include <asm/kvm_para.h> | ||
52 | 53 | ||
53 | unsigned int num_processors; | 54 | unsigned int num_processors; |
54 | 55 | ||
@@ -1361,52 +1362,80 @@ void enable_x2apic(void) | |||
1361 | } | 1362 | } |
1362 | #endif /* CONFIG_X86_X2APIC */ | 1363 | #endif /* CONFIG_X86_X2APIC */ |
1363 | 1364 | ||
1364 | void __init enable_IR_x2apic(void) | 1365 | int __init enable_IR(void) |
1365 | { | 1366 | { |
1366 | #ifdef CONFIG_INTR_REMAP | 1367 | #ifdef CONFIG_INTR_REMAP |
1367 | int ret; | ||
1368 | unsigned long flags; | ||
1369 | struct IO_APIC_route_entry **ioapic_entries = NULL; | ||
1370 | |||
1371 | ret = dmar_table_init(); | ||
1372 | if (ret) { | ||
1373 | pr_debug("dmar_table_init() failed with %d:\n", ret); | ||
1374 | goto ir_failed; | ||
1375 | } | ||
1376 | |||
1377 | if (!intr_remapping_supported()) { | 1368 | if (!intr_remapping_supported()) { |
1378 | pr_debug("intr-remapping not supported\n"); | 1369 | pr_debug("intr-remapping not supported\n"); |
1379 | goto ir_failed; | 1370 | return 0; |
1380 | } | 1371 | } |
1381 | 1372 | ||
1382 | |||
1383 | if (!x2apic_preenabled && skip_ioapic_setup) { | 1373 | if (!x2apic_preenabled && skip_ioapic_setup) { |
1384 | pr_info("Skipped enabling intr-remap because of skipping " | 1374 | pr_info("Skipped enabling intr-remap because of skipping " |
1385 | "io-apic setup\n"); | 1375 | "io-apic setup\n"); |
1386 | return; | 1376 | return 0; |
1387 | } | 1377 | } |
1388 | 1378 | ||
1379 | if (enable_intr_remapping(x2apic_supported())) | ||
1380 | return 0; | ||
1381 | |||
1382 | pr_info("Enabled Interrupt-remapping\n"); | ||
1383 | |||
1384 | return 1; | ||
1385 | |||
1386 | #endif | ||
1387 | return 0; | ||
1388 | } | ||
1389 | |||
1390 | void __init enable_IR_x2apic(void) | ||
1391 | { | ||
1392 | unsigned long flags; | ||
1393 | struct IO_APIC_route_entry **ioapic_entries = NULL; | ||
1394 | int ret, x2apic_enabled = 0; | ||
1395 | int dmar_table_init_ret = 0; | ||
1396 | |||
1397 | #ifdef CONFIG_INTR_REMAP | ||
1398 | dmar_table_init_ret = dmar_table_init(); | ||
1399 | if (dmar_table_init_ret) | ||
1400 | pr_debug("dmar_table_init() failed with %d:\n", | ||
1401 | dmar_table_init_ret); | ||
1402 | #endif | ||
1403 | |||
1389 | ioapic_entries = alloc_ioapic_entries(); | 1404 | ioapic_entries = alloc_ioapic_entries(); |
1390 | if (!ioapic_entries) { | 1405 | if (!ioapic_entries) { |
1391 | pr_info("Allocate ioapic_entries failed: %d\n", ret); | 1406 | pr_err("Allocate ioapic_entries failed\n"); |
1392 | goto end; | 1407 | goto out; |
1393 | } | 1408 | } |
1394 | 1409 | ||
1395 | ret = save_IO_APIC_setup(ioapic_entries); | 1410 | ret = save_IO_APIC_setup(ioapic_entries); |
1396 | if (ret) { | 1411 | if (ret) { |
1397 | pr_info("Saving IO-APIC state failed: %d\n", ret); | 1412 | pr_info("Saving IO-APIC state failed: %d\n", ret); |
1398 | goto end; | 1413 | goto out; |
1399 | } | 1414 | } |
1400 | 1415 | ||
1401 | local_irq_save(flags); | 1416 | local_irq_save(flags); |
1402 | mask_IO_APIC_setup(ioapic_entries); | ||
1403 | mask_8259A(); | 1417 | mask_8259A(); |
1418 | mask_IO_APIC_setup(ioapic_entries); | ||
1404 | 1419 | ||
1405 | ret = enable_intr_remapping(x2apic_supported()); | 1420 | if (dmar_table_init_ret) |
1406 | if (ret) | 1421 | ret = 0; |
1407 | goto end_restore; | 1422 | else |
1423 | ret = enable_IR(); | ||
1408 | 1424 | ||
1409 | pr_info("Enabled Interrupt-remapping\n"); | 1425 | if (!ret) { |
1426 | /* IR is required if there is APIC ID > 255 even when running | ||
1427 | * under KVM | ||
1428 | */ | ||
1429 | if (max_physical_apicid > 255 || !kvm_para_available()) | ||
1430 | goto nox2apic; | ||
1431 | /* | ||
1432 | * without IR all CPUs can be addressed by IOAPIC/MSI | ||
1433 | * only in physical mode | ||
1434 | */ | ||
1435 | x2apic_force_phys(); | ||
1436 | } | ||
1437 | |||
1438 | x2apic_enabled = 1; | ||
1410 | 1439 | ||
1411 | if (x2apic_supported() && !x2apic_mode) { | 1440 | if (x2apic_supported() && !x2apic_mode) { |
1412 | x2apic_mode = 1; | 1441 | x2apic_mode = 1; |
@@ -1414,41 +1443,25 @@ void __init enable_IR_x2apic(void) | |||
1414 | pr_info("Enabled x2apic\n"); | 1443 | pr_info("Enabled x2apic\n"); |
1415 | } | 1444 | } |
1416 | 1445 | ||
1417 | end_restore: | 1446 | nox2apic: |
1418 | if (ret) | 1447 | if (!ret) /* IR enabling failed */ |
1419 | /* | ||
1420 | * IR enabling failed | ||
1421 | */ | ||
1422 | restore_IO_APIC_setup(ioapic_entries); | 1448 | restore_IO_APIC_setup(ioapic_entries); |
1423 | |||
1424 | unmask_8259A(); | 1449 | unmask_8259A(); |
1425 | local_irq_restore(flags); | 1450 | local_irq_restore(flags); |
1426 | 1451 | ||
1427 | end: | 1452 | out: |
1428 | if (ioapic_entries) | 1453 | if (ioapic_entries) |
1429 | free_ioapic_entries(ioapic_entries); | 1454 | free_ioapic_entries(ioapic_entries); |
1430 | 1455 | ||
1431 | if (!ret) | 1456 | if (x2apic_enabled) |
1432 | return; | 1457 | return; |
1433 | 1458 | ||
1434 | ir_failed: | ||
1435 | if (x2apic_preenabled) | 1459 | if (x2apic_preenabled) |
1436 | panic("x2apic enabled by bios. But IR enabling failed"); | 1460 | panic("x2apic: enabled by BIOS but kernel init failed."); |
1437 | else if (cpu_has_x2apic) | 1461 | else if (cpu_has_x2apic) |
1438 | pr_info("Not enabling x2apic,Intr-remapping\n"); | 1462 | pr_info("Not enabling x2apic, Intr-remapping init failed.\n"); |
1439 | #else | ||
1440 | if (!cpu_has_x2apic) | ||
1441 | return; | ||
1442 | |||
1443 | if (x2apic_preenabled) | ||
1444 | panic("x2apic enabled prior OS handover," | ||
1445 | " enable CONFIG_X86_X2APIC, CONFIG_INTR_REMAP"); | ||
1446 | #endif | ||
1447 | |||
1448 | return; | ||
1449 | } | 1463 | } |
1450 | 1464 | ||
1451 | |||
1452 | #ifdef CONFIG_X86_64 | 1465 | #ifdef CONFIG_X86_64 |
1453 | /* | 1466 | /* |
1454 | * Detect and enable local APICs on non-SMP boards. | 1467 | * Detect and enable local APICs on non-SMP boards. |
@@ -1549,8 +1562,6 @@ no_apic: | |||
1549 | #ifdef CONFIG_X86_64 | 1562 | #ifdef CONFIG_X86_64 |
1550 | void __init early_init_lapic_mapping(void) | 1563 | void __init early_init_lapic_mapping(void) |
1551 | { | 1564 | { |
1552 | unsigned long phys_addr; | ||
1553 | |||
1554 | /* | 1565 | /* |
1555 | * If no local APIC can be found then go out | 1566 | * If no local APIC can be found then go out |
1556 | * : it means there is no mpatable and MADT | 1567 | * : it means there is no mpatable and MADT |
@@ -1558,11 +1569,9 @@ void __init early_init_lapic_mapping(void) | |||
1558 | if (!smp_found_config) | 1569 | if (!smp_found_config) |
1559 | return; | 1570 | return; |
1560 | 1571 | ||
1561 | phys_addr = mp_lapic_addr; | 1572 | set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); |
1562 | |||
1563 | set_fixmap_nocache(FIX_APIC_BASE, phys_addr); | ||
1564 | apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", | 1573 | apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", |
1565 | APIC_BASE, phys_addr); | 1574 | APIC_BASE, mp_lapic_addr); |
1566 | 1575 | ||
1567 | /* | 1576 | /* |
1568 | * Fetch the APIC ID of the BSP in case we have a | 1577 | * Fetch the APIC ID of the BSP in case we have a |
@@ -1651,7 +1660,6 @@ int __init APIC_init_uniprocessor(void) | |||
1651 | APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { | 1660 | APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { |
1652 | pr_err("BIOS bug, local APIC 0x%x not detected!...\n", | 1661 | pr_err("BIOS bug, local APIC 0x%x not detected!...\n", |
1653 | boot_cpu_physical_apicid); | 1662 | boot_cpu_physical_apicid); |
1654 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC); | ||
1655 | return -1; | 1663 | return -1; |
1656 | } | 1664 | } |
1657 | #endif | 1665 | #endif |
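
The reworked code above separates "can interrupt remapping be enabled" (enable_IR()) from "should x2apic be used anyway". Without IR, x2apic can only address CPUs in physical mode, and only when every APIC ID fits in 8 bits or a KVM paravirt interface is available. A userspace sketch of just that decision table, assuming the policy exactly as written in the hunk (names are illustrative):

#include <assert.h>
#include <stdbool.h>

static bool use_x2apic(bool ir_enabled, int max_apicid, bool kvm_guest)
{
	if (ir_enabled)
		return true;			/* cluster or physical mode both work */
	if (max_apicid > 255 || !kvm_guest)
		return false;			/* IOAPIC/MSI could not target every CPU */
	return true;				/* allowed, but forced to physical mode */
}

int main(void)
{
	assert(use_x2apic(true, 300, false));	/* IR copes with wide APIC IDs */
	assert(!use_x2apic(false, 300, true));	/* IDs > 255 need IR, even under KVM */
	assert(use_x2apic(false, 16, true));	/* small IDs + KVM: physical mode */
	assert(!use_x2apic(false, 16, false));	/* bare metal without IR: keep xAPIC */
	return 0;
}
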
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 8952a5890281..89174f847b49 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c | |||
@@ -167,7 +167,7 @@ static int es7000_apic_is_cluster(void) | |||
167 | { | 167 | { |
168 | /* MPENTIUMIII */ | 168 | /* MPENTIUMIII */ |
169 | if (boot_cpu_data.x86 == 6 && | 169 | if (boot_cpu_data.x86 == 6 && |
170 | (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) | 170 | (boot_cpu_data.x86_model >= 7 && boot_cpu_data.x86_model <= 11)) |
171 | return 1; | 171 | return 1; |
172 | 172 | ||
173 | return 0; | 173 | return 0; |
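
The one-character es7000 fix above deserves spelling out: with ||, the model test is a tautology, since every integer is either >= 7 or <= 11, so the cluster check matched all family-6 models. A compilable illustration of the before/after predicates:

#include <assert.h>

static int is_cluster_buggy(int model) { return model >= 7 || model <= 11; }
static int is_cluster_fixed(int model) { return model >= 7 && model <= 11; }

int main(void)
{
	assert(is_cluster_buggy(3));	/* model 3 wrongly matched before the fix */
	assert(!is_cluster_fixed(3));
	assert(is_cluster_fixed(9));	/* the MPENTIUMIII range still matches */
	assert(!is_cluster_fixed(12));
	return 0;
}
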
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index d2ed6c5ddc80..3c8f9e75d038 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -66,6 +66,8 @@ | |||
66 | #include <asm/apic.h> | 66 | #include <asm/apic.h> |
67 | 67 | ||
68 | #define __apicdebuginit(type) static type __init | 68 | #define __apicdebuginit(type) static type __init |
69 | #define for_each_irq_pin(entry, head) \ | ||
70 | for (entry = head; entry; entry = entry->next) | ||
69 | 71 | ||
70 | /* | 72 | /* |
71 | * Is the SiS APIC rmw bug present ? | 73 | * Is the SiS APIC rmw bug present ? |
@@ -85,6 +87,9 @@ int nr_ioapic_registers[MAX_IO_APICS]; | |||
85 | struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; | 87 | struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; |
86 | int nr_ioapics; | 88 | int nr_ioapics; |
87 | 89 | ||
90 | /* IO APIC gsi routing info */ | ||
91 | struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS]; | ||
92 | |||
88 | /* MP IRQ source entries */ | 93 | /* MP IRQ source entries */ |
89 | struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; | 94 | struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; |
90 | 95 | ||
@@ -116,15 +121,6 @@ static int __init parse_noapic(char *str) | |||
116 | } | 121 | } |
117 | early_param("noapic", parse_noapic); | 122 | early_param("noapic", parse_noapic); |
118 | 123 | ||
119 | struct irq_pin_list; | ||
120 | |||
121 | /* | ||
122 | * This is performance-critical, we want to do it O(1) | ||
123 | * | ||
124 | * the indexing order of this array favors 1:1 mappings | ||
125 | * between pins and IRQs. | ||
126 | */ | ||
127 | |||
128 | struct irq_pin_list { | 124 | struct irq_pin_list { |
129 | int apic, pin; | 125 | int apic, pin; |
130 | struct irq_pin_list *next; | 126 | struct irq_pin_list *next; |
@@ -139,6 +135,11 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int node) | |||
139 | return pin; | 135 | return pin; |
140 | } | 136 | } |
141 | 137 | ||
138 | /* | ||
139 | * This is performance-critical, we want to do it O(1) | ||
140 | * | ||
141 | * Most irqs are mapped 1:1 with pins. | ||
142 | */ | ||
142 | struct irq_cfg { | 143 | struct irq_cfg { |
143 | struct irq_pin_list *irq_2_pin; | 144 | struct irq_pin_list *irq_2_pin; |
144 | cpumask_var_t domain; | 145 | cpumask_var_t domain; |
@@ -414,13 +415,10 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg) | |||
414 | unsigned long flags; | 415 | unsigned long flags; |
415 | 416 | ||
416 | spin_lock_irqsave(&ioapic_lock, flags); | 417 | spin_lock_irqsave(&ioapic_lock, flags); |
417 | entry = cfg->irq_2_pin; | 418 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
418 | for (;;) { | ||
419 | unsigned int reg; | 419 | unsigned int reg; |
420 | int pin; | 420 | int pin; |
421 | 421 | ||
422 | if (!entry) | ||
423 | break; | ||
424 | pin = entry->pin; | 422 | pin = entry->pin; |
425 | reg = io_apic_read(entry->apic, 0x10 + pin*2); | 423 | reg = io_apic_read(entry->apic, 0x10 + pin*2); |
426 | /* Is the remote IRR bit set? */ | 424 | /* Is the remote IRR bit set? */ |
@@ -428,9 +426,6 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg) | |||
428 | spin_unlock_irqrestore(&ioapic_lock, flags); | 426 | spin_unlock_irqrestore(&ioapic_lock, flags); |
429 | return true; | 427 | return true; |
430 | } | 428 | } |
431 | if (!entry->next) | ||
432 | break; | ||
433 | entry = entry->next; | ||
434 | } | 429 | } |
435 | spin_unlock_irqrestore(&ioapic_lock, flags); | 430 | spin_unlock_irqrestore(&ioapic_lock, flags); |
436 | 431 | ||
@@ -498,72 +493,68 @@ static void ioapic_mask_entry(int apic, int pin) | |||
498 | * shared ISA-space IRQs, so we have to support them. We are super | 493 | * shared ISA-space IRQs, so we have to support them. We are super |
499 | * fast in the common case, and fast for shared ISA-space IRQs. | 494 | * fast in the common case, and fast for shared ISA-space IRQs. |
500 | */ | 495 | */ |
501 | static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) | 496 | static int |
497 | add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) | ||
502 | { | 498 | { |
503 | struct irq_pin_list *entry; | 499 | struct irq_pin_list **last, *entry; |
504 | 500 | ||
505 | entry = cfg->irq_2_pin; | 501 | /* don't allow duplicates */ |
506 | if (!entry) { | 502 | last = &cfg->irq_2_pin; |
507 | entry = get_one_free_irq_2_pin(node); | 503 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
508 | if (!entry) { | ||
509 | printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n", | ||
510 | apic, pin); | ||
511 | return; | ||
512 | } | ||
513 | cfg->irq_2_pin = entry; | ||
514 | entry->apic = apic; | ||
515 | entry->pin = pin; | ||
516 | return; | ||
517 | } | ||
518 | |||
519 | while (entry->next) { | ||
520 | /* not again, please */ | ||
521 | if (entry->apic == apic && entry->pin == pin) | 504 | if (entry->apic == apic && entry->pin == pin) |
522 | return; | 505 | return 0; |
523 | 506 | last = &entry->next; | |
524 | entry = entry->next; | ||
525 | } | 507 | } |
526 | 508 | ||
527 | entry->next = get_one_free_irq_2_pin(node); | 509 | entry = get_one_free_irq_2_pin(node); |
528 | entry = entry->next; | 510 | if (!entry) { |
511 | printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", | ||
512 | node, apic, pin); | ||
513 | return -ENOMEM; | ||
514 | } | ||
529 | entry->apic = apic; | 515 | entry->apic = apic; |
530 | entry->pin = pin; | 516 | entry->pin = pin; |
517 | |||
518 | *last = entry; | ||
519 | return 0; | ||
520 | } | ||
521 | |||
522 | static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) | ||
523 | { | ||
524 | if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin)) | ||
525 | panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); | ||
531 | } | 526 | } |
532 | 527 | ||
533 | /* | 528 | /* |
534 | * Reroute an IRQ to a different pin. | 529 | * Reroute an IRQ to a different pin. |
535 | */ | 530 | */ |
536 | static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node, | 531 | static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node, |
537 | int oldapic, int oldpin, | 532 | int oldapic, int oldpin, |
538 | int newapic, int newpin) | 533 | int newapic, int newpin) |
539 | { | 534 | { |
540 | struct irq_pin_list *entry = cfg->irq_2_pin; | 535 | struct irq_pin_list *entry; |
541 | int replaced = 0; | ||
542 | 536 | ||
543 | while (entry) { | 537 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
544 | if (entry->apic == oldapic && entry->pin == oldpin) { | 538 | if (entry->apic == oldapic && entry->pin == oldpin) { |
545 | entry->apic = newapic; | 539 | entry->apic = newapic; |
546 | entry->pin = newpin; | 540 | entry->pin = newpin; |
547 | replaced = 1; | ||
548 | /* every one is different, right? */ | 541 | /* every one is different, right? */ |
549 | break; | 542 | return; |
550 | } | 543 | } |
551 | entry = entry->next; | ||
552 | } | 544 | } |
553 | 545 | ||
554 | /* why? call replace before add? */ | 546 | /* old apic/pin didn't exist, so just add new ones */ |
555 | if (!replaced) | 547 | add_pin_to_irq_node(cfg, node, newapic, newpin); |
556 | add_pin_to_irq_node(cfg, node, newapic, newpin); | ||
557 | } | 548 | } |
558 | 549 | ||
559 | static inline void io_apic_modify_irq(struct irq_cfg *cfg, | 550 | static void io_apic_modify_irq(struct irq_cfg *cfg, |
560 | int mask_and, int mask_or, | 551 | int mask_and, int mask_or, |
561 | void (*final)(struct irq_pin_list *entry)) | 552 | void (*final)(struct irq_pin_list *entry)) |
562 | { | 553 | { |
563 | int pin; | 554 | int pin; |
564 | struct irq_pin_list *entry; | 555 | struct irq_pin_list *entry; |
565 | 556 | ||
566 | for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) { | 557 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
567 | unsigned int reg; | 558 | unsigned int reg; |
568 | pin = entry->pin; | 559 | pin = entry->pin; |
569 | reg = io_apic_read(entry->apic, 0x10 + pin * 2); | 560 | reg = io_apic_read(entry->apic, 0x10 + pin * 2); |
@@ -580,7 +571,6 @@ static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) | |||
580 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); | 571 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); |
581 | } | 572 | } |
582 | 573 | ||
583 | #ifdef CONFIG_X86_64 | ||
584 | static void io_apic_sync(struct irq_pin_list *entry) | 574 | static void io_apic_sync(struct irq_pin_list *entry) |
585 | { | 575 | { |
586 | /* | 576 | /* |
@@ -596,11 +586,6 @@ static void __mask_IO_APIC_irq(struct irq_cfg *cfg) | |||
596 | { | 586 | { |
597 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); | 587 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); |
598 | } | 588 | } |
599 | #else /* CONFIG_X86_32 */ | ||
600 | static void __mask_IO_APIC_irq(struct irq_cfg *cfg) | ||
601 | { | ||
602 | io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL); | ||
603 | } | ||
604 | 589 | ||
605 | static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg) | 590 | static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg) |
606 | { | 591 | { |
@@ -613,7 +598,6 @@ static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg) | |||
613 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, | 598 | io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, |
614 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); | 599 | IO_APIC_REDIR_LEVEL_TRIGGER, NULL); |
615 | } | 600 | } |
616 | #endif /* CONFIG_X86_32 */ | ||
617 | 601 | ||
618 | static void mask_IO_APIC_irq_desc(struct irq_desc *desc) | 602 | static void mask_IO_APIC_irq_desc(struct irq_desc *desc) |
619 | { | 603 | { |
@@ -1702,12 +1686,8 @@ __apicdebuginit(void) print_IO_APIC(void) | |||
1702 | if (!entry) | 1686 | if (!entry) |
1703 | continue; | 1687 | continue; |
1704 | printk(KERN_DEBUG "IRQ%d ", irq); | 1688 | printk(KERN_DEBUG "IRQ%d ", irq); |
1705 | for (;;) { | 1689 | for_each_irq_pin(entry, cfg->irq_2_pin) |
1706 | printk("-> %d:%d", entry->apic, entry->pin); | 1690 | printk("-> %d:%d", entry->apic, entry->pin); |
1707 | if (!entry->next) | ||
1708 | break; | ||
1709 | entry = entry->next; | ||
1710 | } | ||
1711 | printk("\n"); | 1691 | printk("\n"); |
1712 | } | 1692 | } |
1713 | 1693 | ||
@@ -2211,7 +2191,6 @@ static unsigned int startup_ioapic_irq(unsigned int irq) | |||
2211 | return was_pending; | 2191 | return was_pending; |
2212 | } | 2192 | } |
2213 | 2193 | ||
2214 | #ifdef CONFIG_X86_64 | ||
2215 | static int ioapic_retrigger_irq(unsigned int irq) | 2194 | static int ioapic_retrigger_irq(unsigned int irq) |
2216 | { | 2195 | { |
2217 | 2196 | ||
@@ -2224,14 +2203,6 @@ static int ioapic_retrigger_irq(unsigned int irq) | |||
2224 | 2203 | ||
2225 | return 1; | 2204 | return 1; |
2226 | } | 2205 | } |
2227 | #else | ||
2228 | static int ioapic_retrigger_irq(unsigned int irq) | ||
2229 | { | ||
2230 | apic->send_IPI_self(irq_cfg(irq)->vector); | ||
2231 | |||
2232 | return 1; | ||
2233 | } | ||
2234 | #endif | ||
2235 | 2206 | ||
2236 | /* | 2207 | /* |
2237 | * Level and edge triggered IO-APIC interrupts need different handling, | 2208 | * Level and edge triggered IO-APIC interrupts need different handling, |
@@ -2269,13 +2240,9 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
2269 | struct irq_pin_list *entry; | 2240 | struct irq_pin_list *entry; |
2270 | u8 vector = cfg->vector; | 2241 | u8 vector = cfg->vector; |
2271 | 2242 | ||
2272 | entry = cfg->irq_2_pin; | 2243 | for_each_irq_pin(entry, cfg->irq_2_pin) { |
2273 | for (;;) { | ||
2274 | unsigned int reg; | 2244 | unsigned int reg; |
2275 | 2245 | ||
2276 | if (!entry) | ||
2277 | break; | ||
2278 | |||
2279 | apic = entry->apic; | 2246 | apic = entry->apic; |
2280 | pin = entry->pin; | 2247 | pin = entry->pin; |
2281 | /* | 2248 | /* |
@@ -2288,9 +2255,6 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
2288 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; | 2255 | reg &= ~IO_APIC_REDIR_VECTOR_MASK; |
2289 | reg |= vector; | 2256 | reg |= vector; |
2290 | io_apic_modify(apic, 0x10 + pin*2, reg); | 2257 | io_apic_modify(apic, 0x10 + pin*2, reg); |
2291 | if (!entry->next) | ||
2292 | break; | ||
2293 | entry = entry->next; | ||
2294 | } | 2258 | } |
2295 | } | 2259 | } |
2296 | 2260 | ||
@@ -2515,11 +2479,8 @@ atomic_t irq_mis_count; | |||
2515 | static void ack_apic_level(unsigned int irq) | 2479 | static void ack_apic_level(unsigned int irq) |
2516 | { | 2480 | { |
2517 | struct irq_desc *desc = irq_to_desc(irq); | 2481 | struct irq_desc *desc = irq_to_desc(irq); |
2518 | |||
2519 | #ifdef CONFIG_X86_32 | ||
2520 | unsigned long v; | 2482 | unsigned long v; |
2521 | int i; | 2483 | int i; |
2522 | #endif | ||
2523 | struct irq_cfg *cfg; | 2484 | struct irq_cfg *cfg; |
2524 | int do_unmask_irq = 0; | 2485 | int do_unmask_irq = 0; |
2525 | 2486 | ||
@@ -2532,31 +2493,28 @@ static void ack_apic_level(unsigned int irq) | |||
2532 | } | 2493 | } |
2533 | #endif | 2494 | #endif |
2534 | 2495 | ||
2535 | #ifdef CONFIG_X86_32 | ||
2536 | /* | 2496 | /* |
2537 | * It appears there is an erratum which affects at least version 0x11 | 2497 | * It appears there is an erratum which affects at least version 0x11 |
2538 | * of I/O APIC (that's the 82093AA and cores integrated into various | 2498 | * of I/O APIC (that's the 82093AA and cores integrated into various |
2539 | * chipsets). Under certain conditions a level-triggered interrupt is | 2499 | * chipsets). Under certain conditions a level-triggered interrupt is |
2540 | * erroneously delivered as edge-triggered one but the respective IRR | 2500 | * erroneously delivered as edge-triggered one but the respective IRR |
2541 | * bit gets set nevertheless. As a result the I/O unit expects an EOI | 2501 | * bit gets set nevertheless. As a result the I/O unit expects an EOI |
2542 | * message but it will never arrive and further interrupts are blocked | 2502 | * message but it will never arrive and further interrupts are blocked |
2543 | * from the source. The exact reason is so far unknown, but the | 2503 | * from the source. The exact reason is so far unknown, but the |
2544 | * phenomenon was observed when two consecutive interrupt requests | 2504 | * phenomenon was observed when two consecutive interrupt requests |
2545 | * from a given source get delivered to the same CPU and the source is | 2505 | * from a given source get delivered to the same CPU and the source is |
2546 | * temporarily disabled in between. | 2506 | * temporarily disabled in between. |
2547 | * | 2507 | * |
2548 | * A workaround is to simulate an EOI message manually. We achieve it | 2508 | * A workaround is to simulate an EOI message manually. We achieve it |
2549 | * by setting the trigger mode to edge and then to level when the edge | 2509 | * by setting the trigger mode to edge and then to level when the edge |
2550 | * trigger mode gets detected in the TMR of a local APIC for a | 2510 | * trigger mode gets detected in the TMR of a local APIC for a |
2551 | * level-triggered interrupt. We mask the source for the time of the | 2511 | * level-triggered interrupt. We mask the source for the time of the |
2552 | * operation to prevent an edge-triggered interrupt escaping meanwhile. | 2512 | * operation to prevent an edge-triggered interrupt escaping meanwhile. |
2553 | * The idea is from Manfred Spraul. --macro | 2513 | * The idea is from Manfred Spraul. --macro |
2554 | */ | 2514 | */ |
2555 | cfg = desc->chip_data; | 2515 | cfg = desc->chip_data; |
2556 | i = cfg->vector; | 2516 | i = cfg->vector; |
2557 | |||
2558 | v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); | 2517 | v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); |
2559 | #endif | ||
2560 | 2518 | ||
2561 | /* | 2519 | /* |
2562 | * We must acknowledge the irq before we move it or the acknowledge will | 2520 | * We must acknowledge the irq before we move it or the acknowledge will |
@@ -2598,7 +2556,7 @@ static void ack_apic_level(unsigned int irq) | |||
2598 | unmask_IO_APIC_irq_desc(desc); | 2556 | unmask_IO_APIC_irq_desc(desc); |
2599 | } | 2557 | } |
2600 | 2558 | ||
2601 | #ifdef CONFIG_X86_32 | 2559 | /* Tail end of version 0x11 I/O APIC bug workaround */ |
2602 | if (!(v & (1 << (i & 0x1f)))) { | 2560 | if (!(v & (1 << (i & 0x1f)))) { |
2603 | atomic_inc(&irq_mis_count); | 2561 | atomic_inc(&irq_mis_count); |
2604 | spin_lock(&ioapic_lock); | 2562 | spin_lock(&ioapic_lock); |
@@ -2606,26 +2564,15 @@ static void ack_apic_level(unsigned int irq) | |||
2606 | __unmask_and_level_IO_APIC_irq(cfg); | 2564 | __unmask_and_level_IO_APIC_irq(cfg); |
2607 | spin_unlock(&ioapic_lock); | 2565 | spin_unlock(&ioapic_lock); |
2608 | } | 2566 | } |
2609 | #endif | ||
2610 | } | 2567 | } |
2611 | 2568 | ||
2612 | #ifdef CONFIG_INTR_REMAP | 2569 | #ifdef CONFIG_INTR_REMAP |
2613 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) | 2570 | static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) |
2614 | { | 2571 | { |
2615 | int apic, pin; | ||
2616 | struct irq_pin_list *entry; | 2572 | struct irq_pin_list *entry; |
2617 | 2573 | ||
2618 | entry = cfg->irq_2_pin; | 2574 | for_each_irq_pin(entry, cfg->irq_2_pin) |
2619 | for (;;) { | 2575 | io_apic_eoi(entry->apic, entry->pin); |
2620 | |||
2621 | if (!entry) | ||
2622 | break; | ||
2623 | |||
2624 | apic = entry->apic; | ||
2625 | pin = entry->pin; | ||
2626 | io_apic_eoi(apic, pin); | ||
2627 | entry = entry->next; | ||
2628 | } | ||
2629 | } | 2576 | } |
2630 | 2577 | ||
2631 | static void | 2578 | static void |
@@ -3241,8 +3188,7 @@ void destroy_irq(unsigned int irq) | |||
3241 | cfg = desc->chip_data; | 3188 | cfg = desc->chip_data; |
3242 | dynamic_irq_cleanup(irq); | 3189 | dynamic_irq_cleanup(irq); |
3243 | /* connect back irq_cfg */ | 3190 | /* connect back irq_cfg */ |
3244 | if (desc) | 3191 | desc->chip_data = cfg; |
3245 | desc->chip_data = cfg; | ||
3246 | 3192 | ||
3247 | free_irte(irq); | 3193 | free_irte(irq); |
3248 | spin_lock_irqsave(&vector_lock, flags); | 3194 | spin_lock_irqsave(&vector_lock, flags); |
@@ -3912,7 +3858,11 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq, | |||
3912 | */ | 3858 | */ |
3913 | if (irq >= NR_IRQS_LEGACY) { | 3859 | if (irq >= NR_IRQS_LEGACY) { |
3914 | cfg = desc->chip_data; | 3860 | cfg = desc->chip_data; |
3915 | add_pin_to_irq_node(cfg, node, ioapic, pin); | 3861 | if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) { |
3862 | printk(KERN_INFO "can not add pin %d for irq %d\n", | ||
3863 | pin, irq); | ||
3864 | return 0; | ||
3865 | } | ||
3916 | } | 3866 | } |
3917 | 3867 | ||
3918 | setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); | 3868 | setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); |
@@ -3941,11 +3891,28 @@ int io_apic_set_pci_routing(struct device *dev, int irq, | |||
3941 | return __io_apic_set_pci_routing(dev, irq, irq_attr); | 3891 | return __io_apic_set_pci_routing(dev, irq, irq_attr); |
3942 | } | 3892 | } |
3943 | 3893 | ||
3944 | /* -------------------------------------------------------------------------- | 3894 | u8 __init io_apic_unique_id(u8 id) |
3945 | ACPI-based IOAPIC Configuration | 3895 | { |
3946 | -------------------------------------------------------------------------- */ | 3896 | #ifdef CONFIG_X86_32 |
3897 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && | ||
3898 | !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) | ||
3899 | return io_apic_get_unique_id(nr_ioapics, id); | ||
3900 | else | ||
3901 | return id; | ||
3902 | #else | ||
3903 | int i; | ||
3904 | DECLARE_BITMAP(used, 256); | ||
3947 | 3905 | ||
3948 | #ifdef CONFIG_ACPI | 3906 | bitmap_zero(used, 256); |
3907 | for (i = 0; i < nr_ioapics; i++) { | ||
3908 | struct mpc_ioapic *ia = &mp_ioapics[i]; | ||
3909 | __set_bit(ia->apicid, used); | ||
3910 | } | ||
3911 | if (!test_bit(id, used)) | ||
3912 | return id; | ||
3913 | return find_first_zero_bit(used, 256); | ||
3914 | #endif | ||
3915 | } | ||
3949 | 3916 | ||
3950 | #ifdef CONFIG_X86_32 | 3917 | #ifdef CONFIG_X86_32 |
3951 | int __init io_apic_get_unique_id(int ioapic, int apic_id) | 3918 | int __init io_apic_get_unique_id(int ioapic, int apic_id) |
@@ -4054,8 +4021,6 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity) | |||
4054 | return 0; | 4021 | return 0; |
4055 | } | 4022 | } |
4056 | 4023 | ||
4057 | #endif /* CONFIG_ACPI */ | ||
4058 | |||
4059 | /* | 4024 | /* |
4060 | * This function currently is only a helper for the i386 smp boot process where | 4025 | * This function currently is only a helper for the i386 smp boot process where |
4061 | * we need to reprogram the ioredtbls to cater for the cpus which have come online | 4026 | * we need to reprogram the ioredtbls to cater for the cpus which have come online |
@@ -4109,7 +4074,7 @@ void __init setup_ioapic_dest(void) | |||
4109 | 4074 | ||
4110 | static struct resource *ioapic_resources; | 4075 | static struct resource *ioapic_resources; |
4111 | 4076 | ||
4112 | static struct resource * __init ioapic_setup_resources(void) | 4077 | static struct resource * __init ioapic_setup_resources(int nr_ioapics) |
4113 | { | 4078 | { |
4114 | unsigned long n; | 4079 | unsigned long n; |
4115 | struct resource *res; | 4080 | struct resource *res; |
@@ -4125,15 +4090,13 @@ static struct resource * __init ioapic_setup_resources(void) | |||
4125 | mem = alloc_bootmem(n); | 4090 | mem = alloc_bootmem(n); |
4126 | res = (void *)mem; | 4091 | res = (void *)mem; |
4127 | 4092 | ||
4128 | if (mem != NULL) { | 4093 | mem += sizeof(struct resource) * nr_ioapics; |
4129 | mem += sizeof(struct resource) * nr_ioapics; | ||
4130 | 4094 | ||
4131 | for (i = 0; i < nr_ioapics; i++) { | 4095 | for (i = 0; i < nr_ioapics; i++) { |
4132 | res[i].name = mem; | 4096 | res[i].name = mem; |
4133 | res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 4097 | res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
4134 | sprintf(mem, "IOAPIC %u", i); | 4098 | sprintf(mem, "IOAPIC %u", i); |
4135 | mem += IOAPIC_RESOURCE_NAME_SIZE; | 4099 | mem += IOAPIC_RESOURCE_NAME_SIZE; |
4136 | } | ||
4137 | } | 4100 | } |
4138 | 4101 | ||
4139 | ioapic_resources = res; | 4102 | ioapic_resources = res; |
@@ -4147,7 +4110,7 @@ void __init ioapic_init_mappings(void) | |||
4147 | struct resource *ioapic_res; | 4110 | struct resource *ioapic_res; |
4148 | int i; | 4111 | int i; |
4149 | 4112 | ||
4150 | ioapic_res = ioapic_setup_resources(); | 4113 | ioapic_res = ioapic_setup_resources(nr_ioapics); |
4151 | for (i = 0; i < nr_ioapics; i++) { | 4114 | for (i = 0; i < nr_ioapics; i++) { |
4152 | if (smp_found_config) { | 4115 | if (smp_found_config) { |
4153 | ioapic_phys = mp_ioapics[i].apicaddr; | 4116 | ioapic_phys = mp_ioapics[i].apicaddr; |
@@ -4176,11 +4139,9 @@ fake_ioapic_page: | |||
4176 | __fix_to_virt(idx), ioapic_phys); | 4139 | __fix_to_virt(idx), ioapic_phys); |
4177 | idx++; | 4140 | idx++; |
4178 | 4141 | ||
4179 | if (ioapic_res != NULL) { | 4142 | ioapic_res->start = ioapic_phys; |
4180 | ioapic_res->start = ioapic_phys; | 4143 | ioapic_res->end = ioapic_phys + (4 * 1024) - 1; |
4181 | ioapic_res->end = ioapic_phys + (4 * 1024) - 1; | 4144 | ioapic_res++; |
4182 | ioapic_res++; | ||
4183 | } | ||
4184 | } | 4145 | } |
4185 | } | 4146 | } |
4186 | 4147 | ||
@@ -4201,3 +4162,76 @@ void __init ioapic_insert_resources(void) | |||
4201 | r++; | 4162 | r++; |
4202 | } | 4163 | } |
4203 | } | 4164 | } |
4165 | |||
4166 | int mp_find_ioapic(int gsi) | ||
4167 | { | ||
4168 | int i = 0; | ||
4169 | |||
4170 | /* Find the IOAPIC that manages this GSI. */ | ||
4171 | for (i = 0; i < nr_ioapics; i++) { | ||
4172 | if ((gsi >= mp_gsi_routing[i].gsi_base) | ||
4173 | && (gsi <= mp_gsi_routing[i].gsi_end)) | ||
4174 | return i; | ||
4175 | } | ||
4176 | |||
4177 | printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); | ||
4178 | return -1; | ||
4179 | } | ||
4180 | |||
4181 | int mp_find_ioapic_pin(int ioapic, int gsi) | ||
4182 | { | ||
4183 | if (WARN_ON(ioapic == -1)) | ||
4184 | return -1; | ||
4185 | if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end)) | ||
4186 | return -1; | ||
4187 | |||
4188 | return gsi - mp_gsi_routing[ioapic].gsi_base; | ||
4189 | } | ||
4190 | |||
4191 | static int bad_ioapic(unsigned long address) | ||
4192 | { | ||
4193 | if (nr_ioapics >= MAX_IO_APICS) { | ||
4194 | printk(KERN_WARNING "WARING: Max # of I/O APICs (%d) exceeded " | ||
4195 | "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics); | ||
4196 | return 1; | ||
4197 | } | ||
4198 | if (!address) { | ||
4199 | printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address" | ||
4200 | " found in table, skipping!\n"); | ||
4201 | return 1; | ||
4202 | } | ||
4203 | return 0; | ||
4204 | } | ||
4205 | |||
4206 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) | ||
4207 | { | ||
4208 | int idx = 0; | ||
4209 | |||
4210 | if (bad_ioapic(address)) | ||
4211 | return; | ||
4212 | |||
4213 | idx = nr_ioapics; | ||
4214 | |||
4215 | mp_ioapics[idx].type = MP_IOAPIC; | ||
4216 | mp_ioapics[idx].flags = MPC_APIC_USABLE; | ||
4217 | mp_ioapics[idx].apicaddr = address; | ||
4218 | |||
4219 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); | ||
4220 | mp_ioapics[idx].apicid = io_apic_unique_id(id); | ||
4221 | mp_ioapics[idx].apicver = io_apic_get_version(idx); | ||
4222 | |||
4223 | /* | ||
4224 | * Build basic GSI lookup table to facilitate gsi->io_apic lookups | ||
4225 | * and to prevent reprogramming of IOAPIC pins (PCI GSIs). | ||
4226 | */ | ||
4227 | mp_gsi_routing[idx].gsi_base = gsi_base; | ||
4228 | mp_gsi_routing[idx].gsi_end = gsi_base + | ||
4229 | io_apic_get_redir_entries(idx); | ||
4230 | |||
4231 | printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " | ||
4232 | "GSI %d-%d\n", idx, mp_ioapics[idx].apicid, | ||
4233 | mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr, | ||
4234 | mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end); | ||
4235 | |||
4236 | nr_ioapics++; | ||
4237 | } | ||
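
Most of the io_apic.c churn above replaces open-coded walks of the singly linked irq_2_pin list with the new for_each_irq_pin() iterator, which also makes the empty-list case fall out naturally; the old for (;;) loops needed explicit !entry and !entry->next breaks. A standalone sketch of the macro and one walk (the list contents are made up):

#include <stdio.h>

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)

int main(void)
{
	struct irq_pin_list c = { 1, 4, NULL };
	struct irq_pin_list b = { 0, 9, &c };
	struct irq_pin_list a = { 0, 2, &b };
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, &a)		/* handles the empty list too */
		printf("-> %d:%d", entry->apic, entry->pin);
	printf("\n");
	return 0;
}
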
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index 6ef00ba4c886..08385e090a6f 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c | |||
@@ -153,7 +153,7 @@ int safe_smp_processor_id(void) | |||
153 | { | 153 | { |
154 | int apicid, cpuid; | 154 | int apicid, cpuid; |
155 | 155 | ||
156 | if (!boot_cpu_has(X86_FEATURE_APIC)) | 156 | if (!cpu_has_apic) |
157 | return 0; | 157 | return 0; |
158 | 158 | ||
159 | apicid = hard_smp_processor_id(); | 159 | apicid = hard_smp_processor_id(); |
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c index b3025b43b63a..db7220220d09 100644 --- a/arch/x86/kernel/apic/nmi.c +++ b/arch/x86/kernel/apic/nmi.c | |||
@@ -39,7 +39,7 @@ | |||
39 | int unknown_nmi_panic; | 39 | int unknown_nmi_panic; |
40 | int nmi_watchdog_enabled; | 40 | int nmi_watchdog_enabled; |
41 | 41 | ||
42 | static cpumask_var_t backtrace_mask; | 42 | static cpumask_t backtrace_mask __read_mostly; |
43 | 43 | ||
44 | /* nmi_active: | 44 | /* nmi_active: |
45 | * >0: the lapic NMI watchdog is active, but can be disabled | 45 | * >0: the lapic NMI watchdog is active, but can be disabled |
@@ -138,7 +138,6 @@ int __init check_nmi_watchdog(void) | |||
138 | if (!prev_nmi_count) | 138 | if (!prev_nmi_count) |
139 | goto error; | 139 | goto error; |
140 | 140 | ||
141 | alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO); | ||
142 | printk(KERN_INFO "Testing NMI watchdog ... "); | 141 | printk(KERN_INFO "Testing NMI watchdog ... "); |
143 | 142 | ||
144 | #ifdef CONFIG_SMP | 143 | #ifdef CONFIG_SMP |
@@ -415,14 +414,17 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason) | |||
415 | } | 414 | } |
416 | 415 | ||
417 | /* We can be called before check_nmi_watchdog, hence NULL check. */ | 416 | /* We can be called before check_nmi_watchdog, hence NULL check. */ |
418 | if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) { | 417 | if (cpumask_test_cpu(cpu, &backtrace_mask)) { |
419 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ | 418 | static DEFINE_SPINLOCK(lock); /* Serialise the printks */ |
420 | 419 | ||
421 | spin_lock(&lock); | 420 | spin_lock(&lock); |
422 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); | 421 | printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); |
422 | show_regs(regs); | ||
423 | dump_stack(); | 423 | dump_stack(); |
424 | spin_unlock(&lock); | 424 | spin_unlock(&lock); |
425 | cpumask_clear_cpu(cpu, backtrace_mask); | 425 | cpumask_clear_cpu(cpu, &backtrace_mask); |
426 | |||
427 | rc = 1; | ||
426 | } | 428 | } |
427 | 429 | ||
428 | /* Could check oops_in_progress here too, but it's safer not to */ | 430 | /* Could check oops_in_progress here too, but it's safer not to */ |
@@ -552,14 +554,18 @@ int do_nmi_callback(struct pt_regs *regs, int cpu) | |||
552 | return 0; | 554 | return 0; |
553 | } | 555 | } |
554 | 556 | ||
555 | void __trigger_all_cpu_backtrace(void) | 557 | void arch_trigger_all_cpu_backtrace(void) |
556 | { | 558 | { |
557 | int i; | 559 | int i; |
558 | 560 | ||
559 | cpumask_copy(backtrace_mask, cpu_online_mask); | 561 | cpumask_copy(&backtrace_mask, cpu_online_mask); |
562 | |||
563 | printk(KERN_INFO "sending NMI to all CPUs:\n"); | ||
564 | apic->send_IPI_all(NMI_VECTOR); | ||
565 | |||
560 | /* Wait for up to 10 seconds for all CPUs to do the backtrace */ | 566 | /* Wait for up to 10 seconds for all CPUs to do the backtrace */ |
561 | for (i = 0; i < 10 * 1000; i++) { | 567 | for (i = 0; i < 10 * 1000; i++) { |
562 | if (cpumask_empty(backtrace_mask)) | 568 | if (cpumask_empty(&backtrace_mask)) |
563 | break; | 569 | break; |
564 | mdelay(1); | 570 | mdelay(1); |
565 | } | 571 | } |
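
The nmi.c change above drops the dynamically allocated cpumask, and with it the could-be-NULL window before check_nmi_watchdog(), in favor of a static mask, and folds the NMI IPI send into arch_trigger_all_cpu_backtrace() itself. The handshake is: set one bit per online CPU, send the NMI, then poll for up to roughly 10 seconds while each handler clears its own bit. A single-threaded userspace sketch of that handshake, with NMI delivery faked by direct calls (everything here is illustrative):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong backtrace_mask;	/* one bit per CPU awaiting a backtrace */

static void nmi_handler(int cpu)	/* runs on each CPU from the NMI */
{
	if (atomic_load(&backtrace_mask) & (1UL << cpu)) {
		printf("NMI backtrace for cpu %d\n", cpu);
		atomic_fetch_and(&backtrace_mask, ~(1UL << cpu));
	}
}

static int trigger_all_cpu_backtrace(unsigned long online, int ncpus)
{
	int i, cpu;

	atomic_store(&backtrace_mask, online);
	/* apic->send_IPI_all(NMI_VECTOR) goes here in the kernel ... */
	for (cpu = 0; cpu < ncpus; cpu++)
		nmi_handler(cpu);	/* ... we fake the delivery inline */

	for (i = 0; i < 10 * 1000; i++) {	/* kernel: mdelay(1) per step */
		if (atomic_load(&backtrace_mask) == 0)
			return 1;
	}
	return 0;			/* some CPU never responded */
}

int main(void)
{
	assert(trigger_all_cpu_backtrace(0x3, 2));
	return 0;
}
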
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index fcec2f1d34a1..65edc180fc82 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c | |||
@@ -55,11 +55,11 @@ static int apicid_phys_pkg_id(int initial_apic_id, int index_msb) | |||
55 | void __init default_setup_apic_routing(void) | 55 | void __init default_setup_apic_routing(void) |
56 | { | 56 | { |
57 | #ifdef CONFIG_X86_X2APIC | 57 | #ifdef CONFIG_X86_X2APIC |
58 | if (x2apic_mode && (apic != &apic_x2apic_phys && | 58 | if (x2apic_mode |
59 | #ifdef CONFIG_X86_UV | 59 | #ifdef CONFIG_X86_UV |
60 | apic != &apic_x2apic_uv_x && | 60 | && apic != &apic_x2apic_uv_x |
61 | #endif | 61 | #endif |
62 | apic != &apic_x2apic_cluster)) { | 62 | ) { |
63 | if (x2apic_phys) | 63 | if (x2apic_phys) |
64 | apic = &apic_x2apic_phys; | 64 | apic = &apic_x2apic_phys; |
65 | else | 65 | else |
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 442b5508893f..151ace69a5aa 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -403,7 +403,15 @@ static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue); | |||
403 | static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue); | 403 | static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue); |
404 | static struct apm_user *user_list; | 404 | static struct apm_user *user_list; |
405 | static DEFINE_SPINLOCK(user_list_lock); | 405 | static DEFINE_SPINLOCK(user_list_lock); |
406 | static const struct desc_struct bad_bios_desc = { { { 0, 0x00409200 } } }; | 406 | |
407 | /* | ||
408 | * Set up a segment that references the real mode segment 0x40 | ||
409 | * that extends up to the end of page zero (that we have reserved). | ||
410 | * This is for buggy BIOS's that refer to (real mode) segment 0x40 | ||
411 | * even though they are called in protected mode. | ||
412 | */ | ||
413 | static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, | ||
414 | (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); | ||
407 | 415 | ||
408 | static const char driver_version[] = "1.16ac"; /* no spaces */ | 416 | static const char driver_version[] = "1.16ac"; /* no spaces */ |
409 | 417 | ||
@@ -2332,15 +2340,6 @@ static int __init apm_init(void) | |||
2332 | pm_flags |= PM_APM; | 2340 | pm_flags |= PM_APM; |
2333 | 2341 | ||
2334 | /* | 2342 | /* |
2335 | * Set up a segment that references the real mode segment 0x40 | ||
2336 | * that extends up to the end of page zero (that we have reserved). | ||
2337 | * This is for buggy BIOS's that refer to (real mode) segment 0x40 | ||
2338 | * even though they are called in protected mode. | ||
2339 | */ | ||
2340 | set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); | ||
2341 | _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); | ||
2342 | |||
2343 | /* | ||
2344 | * Set up the long jump entry point to the APM BIOS, which is called | 2343 | * Set up the long jump entry point to the APM BIOS, which is called |
2345 | * from inline assembly. | 2344 | * from inline assembly. |
2346 | */ | 2345 | */ |
@@ -2358,12 +2357,12 @@ static int __init apm_init(void) | |||
2358 | * code to that CPU. | 2357 | * code to that CPU. |
2359 | */ | 2358 | */ |
2360 | gdt = get_cpu_gdt_table(0); | 2359 | gdt = get_cpu_gdt_table(0); |
2361 | set_base(gdt[APM_CS >> 3], | 2360 | set_desc_base(&gdt[APM_CS >> 3], |
2362 | __va((unsigned long)apm_info.bios.cseg << 4)); | 2361 | (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4)); |
2363 | set_base(gdt[APM_CS_16 >> 3], | 2362 | set_desc_base(&gdt[APM_CS_16 >> 3], |
2364 | __va((unsigned long)apm_info.bios.cseg_16 << 4)); | 2363 | (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4)); |
2365 | set_base(gdt[APM_DS >> 3], | 2364 | set_desc_base(&gdt[APM_DS >> 3], |
2366 | __va((unsigned long)apm_info.bios.dseg << 4)); | 2365 | (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); |
2367 | 2366 | ||
2368 | proc_create("apm", 0, NULL, &apm_file_ops); | 2367 | proc_create("apm", 0, NULL, &apm_file_ops); |
2369 | 2368 | ||
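
GDT_ENTRY_INIT(flags, base, limit) replaces both the raw two-word initializers and the runtime set_base()/_set_limit() fixups. A standalone sketch of the packing it performs, assuming the standard x86 descriptor layout; it reproduces the { 0x0000ffff, 0x00cf9a00 } pair that the common.c hunk below deletes (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Pack an 8-byte x86 segment descriptor the way GDT_ENTRY_INIT() does. */
static uint64_t gdt_entry(uint16_t flags, uint32_t base, uint32_t limit)
{
	uint64_t d;

	d  = limit & 0xffffULL;				/* limit 15:0   */
	d |= (uint64_t)(base  & 0x00ffffff) << 16;	/* base 23:0    */
	d |= (uint64_t)(flags & 0xf0ff)     << 40;	/* type + gran. */
	d |= (uint64_t)(limit & 0x000f0000) << 32;	/* limit 19:16  */
	d |= (uint64_t)(base  & 0xff000000) << 32;	/* base 31:24   */
	return d;
}

int main(void)
{
	/* kernel code segment: flags 0xc09a, base 0, limit 0xfffff, page granular */
	printf("%016llx\n", (unsigned long long)gdt_entry(0xc09a, 0, 0xfffff));
	/* prints 00cf9a000000ffff, i.e. the old { 0x0000ffff, 0x00cf9a00 } words */
	return 0;
}
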
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 63fddcd082cd..22a47c82f3c0 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -2,7 +2,7 @@ | |||
2 | #include <linux/bitops.h> | 2 | #include <linux/bitops.h> |
3 | #include <linux/mm.h> | 3 | #include <linux/mm.h> |
4 | 4 | ||
5 | #include <asm/io.h> | 5 | #include <linux/io.h> |
6 | #include <asm/processor.h> | 6 | #include <asm/processor.h> |
7 | #include <asm/apic.h> | 7 | #include <asm/apic.h> |
8 | #include <asm/cpu.h> | 8 | #include <asm/cpu.h> |
@@ -45,8 +45,8 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c) | |||
45 | #define CBAR_ENB (0x80000000) | 45 | #define CBAR_ENB (0x80000000) |
46 | #define CBAR_KEY (0X000000CB) | 46 | #define CBAR_KEY (0X000000CB) |
47 | if (c->x86_model == 9 || c->x86_model == 10) { | 47 | if (c->x86_model == 9 || c->x86_model == 10) { |
48 | if (inl (CBAR) & CBAR_ENB) | 48 | if (inl(CBAR) & CBAR_ENB) |
49 | outl (0 | CBAR_KEY, CBAR); | 49 | outl(0 | CBAR_KEY, CBAR); |
50 | } | 50 | } |
51 | } | 51 | } |
52 | 52 | ||
@@ -87,9 +87,10 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) | |||
87 | d = d2-d; | 87 | d = d2-d; |
88 | 88 | ||
89 | if (d > 20*K6_BUG_LOOP) | 89 | if (d > 20*K6_BUG_LOOP) |
90 | printk("system stability may be impaired when more than 32 MB are used.\n"); | 90 | printk(KERN_CONT |
91 | "system stability may be impaired when more than 32 MB are used.\n"); | ||
91 | else | 92 | else |
92 | printk("probably OK (after B9730xxxx).\n"); | 93 | printk(KERN_CONT "probably OK (after B9730xxxx).\n"); |
93 | printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); | 94 | printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); |
94 | } | 95 | } |
95 | 96 | ||
@@ -219,8 +220,9 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) | |||
219 | if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { | 220 | if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { |
220 | rdmsr(MSR_K7_CLK_CTL, l, h); | 221 | rdmsr(MSR_K7_CLK_CTL, l, h); |
221 | if ((l & 0xfff00000) != 0x20000000) { | 222 | if ((l & 0xfff00000) != 0x20000000) { |
222 | printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l, | 223 | printk(KERN_INFO |
223 | ((l & 0x000fffff)|0x20000000)); | 224 | "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", |
225 | l, ((l & 0x000fffff)|0x20000000)); | ||
224 | wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); | 226 | wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); |
225 | } | 227 | } |
226 | } | 228 | } |
@@ -251,6 +253,64 @@ static int __cpuinit nearby_node(int apicid) | |||
251 | #endif | 253 | #endif |
252 | 254 | ||
253 | /* | 255 | /* |
256 | * Fixup core topology information for AMD multi-node processors. | ||
257 | * Assumption 1: Number of cores in each internal node is the same. | ||
258 | * Assumption 2: Mixed systems with both single-node and dual-node | ||
259 | * processors are not supported. | ||
260 | */ | ||
261 | #ifdef CONFIG_X86_HT | ||
262 | static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c) | ||
263 | { | ||
264 | #ifdef CONFIG_PCI | ||
265 | u32 t, cpn; | ||
266 | u8 n, n_id; | ||
267 | int cpu = smp_processor_id(); | ||
268 | |||
269 | /* fixup topology information only once for a core */ | ||
270 | if (cpu_has(c, X86_FEATURE_AMD_DCM)) | ||
271 | return; | ||
272 | |||
273 | /* check for multi-node processor on boot cpu */ | ||
274 | t = read_pci_config(0, 24, 3, 0xe8); | ||
275 | if (!(t & (1 << 29))) | ||
276 | return; | ||
277 | |||
278 | set_cpu_cap(c, X86_FEATURE_AMD_DCM); | ||
279 | |||
280 | /* cores per node: each internal node has half the number of cores */ | ||
281 | cpn = c->x86_max_cores >> 1; | ||
282 | |||
283 | /* even-numbered NB_id of this dual-node processor */ | ||
284 | n = c->phys_proc_id << 1; | ||
285 | |||
286 | /* | ||
287 | * determine internal node id and assign cores fifty-fifty to | ||
288 | * each node of the dual-node processor | ||
289 | */ | ||
290 | t = read_pci_config(0, 24 + n, 3, 0xe8); | ||
291 | n = (t>>30) & 0x3; | ||
292 | if (n == 0) { | ||
293 | if (c->cpu_core_id < cpn) | ||
294 | n_id = 0; | ||
295 | else | ||
296 | n_id = 1; | ||
297 | } else { | ||
298 | if (c->cpu_core_id < cpn) | ||
299 | n_id = 1; | ||
300 | else | ||
301 | n_id = 0; | ||
302 | } | ||
303 | |||
304 | /* compute entire NodeID, use llc_shared_map to store sibling info */ | ||
305 | per_cpu(cpu_llc_id, cpu) = (c->phys_proc_id << 1) + n_id; | ||
306 | |||
307 | /* fixup core id to be in range from 0 to cpn */ | ||
308 | c->cpu_core_id = c->cpu_core_id % cpn; | ||
309 | #endif | ||
310 | } | ||
311 | #endif | ||
312 | |||
313 | /* | ||
254 | * On an AMD dual core setup the lower bits of the APIC id distinguish the cores. | 314 |
255 | * Assumes number of cores is a power of two. | 315 | * Assumes number of cores is a power of two. |
256 | */ | 316 | */ |
@@ -267,6 +327,9 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) | |||
267 | c->phys_proc_id = c->initial_apicid >> bits; | 327 | c->phys_proc_id = c->initial_apicid >> bits; |
268 | /* use socket ID also for last level cache */ | 328 | /* use socket ID also for last level cache */ |
269 | per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; | 329 | per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; |
330 | /* fixup topology information on multi-node processors */ | ||
331 | if ((c->x86 == 0x10) && (c->x86_model == 9)) | ||
332 | amd_fixup_dcm(c); | ||
270 | #endif | 333 | #endif |
271 | } | 334 | } |
272 | 335 | ||
@@ -275,9 +338,10 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | |||
275 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) | 338 | #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) |
276 | int cpu = smp_processor_id(); | 339 | int cpu = smp_processor_id(); |
277 | int node; | 340 | int node; |
278 | unsigned apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid; | 341 | unsigned apicid = c->apicid; |
342 | |||
343 | node = per_cpu(cpu_llc_id, cpu); | ||
279 | 344 | ||
280 | node = c->phys_proc_id; | ||
281 | if (apicid_to_node[apicid] != NUMA_NO_NODE) | 345 | if (apicid_to_node[apicid] != NUMA_NO_NODE) |
282 | node = apicid_to_node[apicid]; | 346 | node = apicid_to_node[apicid]; |
283 | if (!node_online(node)) { | 347 | if (!node_online(node)) { |
@@ -398,18 +462,30 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
398 | u32 level; | 462 | u32 level; |
399 | 463 | ||
400 | level = cpuid_eax(1); | 464 | level = cpuid_eax(1); |
401 | if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) | 465 | if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) |
402 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | 466 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
403 | 467 | ||
404 | /* | 468 | /* |
405 | * Some BIOSes incorrectly force this feature, but only K8 | 469 | * Some BIOSes incorrectly force this feature, but only K8 |
406 | * revision D (model = 0x14) and later actually support it. | 470 | * revision D (model = 0x14) and later actually support it. |
471 | * (AMD Erratum #110, docId: 25759). | ||
407 | */ | 472 | */ |
408 | if (c->x86_model < 0x14) | 473 | if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) { |
474 | u64 val; | ||
475 | |||
409 | clear_cpu_cap(c, X86_FEATURE_LAHF_LM); | 476 | clear_cpu_cap(c, X86_FEATURE_LAHF_LM); |
477 | if (!rdmsrl_amd_safe(0xc001100d, &val)) { | ||
478 | val &= ~(1ULL << 32); | ||
479 | wrmsrl_amd_safe(0xc001100d, val); | ||
480 | } | ||
481 | } | ||
482 | |||
410 | } | 483 | } |
411 | if (c->x86 == 0x10 || c->x86 == 0x11) | 484 | if (c->x86 == 0x10 || c->x86 == 0x11) |
412 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | 485 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
486 | |||
487 | /* get apicid instead of initial apic id from cpuid */ | ||
488 | c->apicid = hard_smp_processor_id(); | ||
413 | #else | 489 | #else |
414 | 490 | ||
415 | /* | 491 | /* |
@@ -494,27 +570,30 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
494 | * benefit in doing so. | 570 | * benefit in doing so. |
495 | */ | 571 | */ |
496 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { | 572 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { |
497 | printk(KERN_DEBUG "tseg: %010llx\n", tseg); | 573 | printk(KERN_DEBUG "tseg: %010llx\n", tseg); |
498 | if ((tseg>>PMD_SHIFT) < | 574 | if ((tseg>>PMD_SHIFT) < |
499 | (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) || | 575 | (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) || |
500 | ((tseg>>PMD_SHIFT) < | 576 | ((tseg>>PMD_SHIFT) < |
501 | (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) && | 577 | (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) && |
502 | (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT)))) | 578 | (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT)))) |
503 | set_memory_4k((unsigned long)__va(tseg), 1); | 579 | set_memory_4k((unsigned long)__va(tseg), 1); |
504 | } | 580 | } |
505 | } | 581 | } |
506 | #endif | 582 | #endif |
507 | } | 583 | } |
508 | 584 | ||
509 | #ifdef CONFIG_X86_32 | 585 | #ifdef CONFIG_X86_32 |
510 | static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) | 586 | static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, |
587 | unsigned int size) | ||
511 | { | 588 | { |
512 | /* AMD errata T13 (order #21922) */ | 589 | /* AMD errata T13 (order #21922) */ |
513 | if ((c->x86 == 6)) { | 590 | if ((c->x86 == 6)) { |
514 | if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */ | 591 | /* Duron Rev A0 */ |
592 | if (c->x86_model == 3 && c->x86_mask == 0) | ||
515 | size = 64; | 593 | size = 64; |
594 | /* Tbird rev A1/A2 */ | ||
516 | if (c->x86_model == 4 && | 595 | if (c->x86_model == 4 && |
517 | (c->x86_mask == 0 || c->x86_mask == 1)) /* Tbird rev A1/A2 */ | 596 | (c->x86_mask == 0 || c->x86_mask == 1)) |
518 | size = 256; | 597 | size = 256; |
519 | } | 598 | } |
520 | return size; | 599 | return size; |
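
The dual-node fixup above is mostly arithmetic: a package with x86_max_cores cores has two internal nodes of cpn = x86_max_cores/2 cores each, the LLC id becomes (phys_proc_id << 1) plus the internal node, and cpu_core_id is renumbered modulo cpn. A userspace sketch of that arithmetic, where node_order stands in for the (t >> 30) & 0x3 bits read from northbridge PCI config space:

#include <stdio.h>

static void fixup_dcm(int phys_proc_id, int core_id, int max_cores,
		      int node_order, int *llc_id, int *new_core_id)
{
	int cpn = max_cores >> 1;		/* cores per internal node */
	int n_id = (core_id < cpn) ? 0 : 1;

	if (node_order)				/* PCI bits say the halves are swapped */
		n_id ^= 1;

	*llc_id = (phys_proc_id << 1) + n_id;	/* entire NodeID */
	*new_core_id = core_id % cpn;		/* back into the range 0..cpn-1 */
}

int main(void)
{
	int llc, core;

	fixup_dcm(0, 5, 8, 0, &llc, &core);	/* core 5 of an 8-core dual-node chip */
	printf("llc_id=%d core_id=%d\n", llc, core);	/* llc_id=1 core_id=1 */
	return 0;
}
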
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c8e315f1aa83..01a265212395 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -81,7 +81,7 @@ static void __init check_fpu(void) | |||
81 | 81 | ||
82 | boot_cpu_data.fdiv_bug = fdiv_bug; | 82 | boot_cpu_data.fdiv_bug = fdiv_bug; |
83 | if (boot_cpu_data.fdiv_bug) | 83 | if (boot_cpu_data.fdiv_bug) |
84 | printk("Hmm, FPU with FDIV bug.\n"); | 84 | printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n"); |
85 | } | 85 | } |
86 | 86 | ||
87 | static void __init check_hlt(void) | 87 | static void __init check_hlt(void) |
@@ -98,7 +98,7 @@ static void __init check_hlt(void) | |||
98 | halt(); | 98 | halt(); |
99 | halt(); | 99 | halt(); |
100 | halt(); | 100 | halt(); |
101 | printk("OK.\n"); | 101 | printk(KERN_CONT "OK.\n"); |
102 | } | 102 | } |
103 | 103 | ||
104 | /* | 104 | /* |
@@ -122,9 +122,9 @@ static void __init check_popad(void) | |||
122 | * CPU hard. Too bad. | 122 | * CPU hard. Too bad. |
123 | */ | 123 | */ |
124 | if (res != 12345678) | 124 | if (res != 12345678) |
125 | printk("Buggy.\n"); | 125 | printk(KERN_CONT "Buggy.\n"); |
126 | else | 126 | else |
127 | printk("OK.\n"); | 127 | printk(KERN_CONT "OK.\n"); |
128 | #endif | 128 | #endif |
129 | } | 129 | } |
130 | 130 | ||
@@ -156,7 +156,7 @@ void __init check_bugs(void) | |||
156 | { | 156 | { |
157 | identify_boot_cpu(); | 157 | identify_boot_cpu(); |
158 | #ifndef CONFIG_SMP | 158 | #ifndef CONFIG_SMP |
159 | printk("CPU: "); | 159 | printk(KERN_INFO "CPU: "); |
160 | print_cpu_info(&boot_cpu_data); | 160 | print_cpu_info(&boot_cpu_data); |
161 | #endif | 161 | #endif |
162 | check_config(); | 162 | check_config(); |
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c index 9a3ed0649d4e..04f0fe5af83e 100644 --- a/arch/x86/kernel/cpu/bugs_64.c +++ b/arch/x86/kernel/cpu/bugs_64.c | |||
@@ -15,7 +15,7 @@ void __init check_bugs(void) | |||
15 | { | 15 | { |
16 | identify_boot_cpu(); | 16 | identify_boot_cpu(); |
17 | #if !defined(CONFIG_SMP) | 17 | #if !defined(CONFIG_SMP) |
18 | printk("CPU: "); | 18 | printk(KERN_INFO "CPU: "); |
19 | print_cpu_info(&boot_cpu_data); | 19 | print_cpu_info(&boot_cpu_data); |
20 | #endif | 20 | #endif |
21 | alternative_instructions(); | 21 | alternative_instructions(); |
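
The printk changes in bugs.c and bugs_64.c above all enforce one rule: a console line assembled from several printk() calls must open with an explicit level and continue with KERN_CONT, otherwise each fragment starts a new default-level line. Condensed to the pattern (a kernel-style sketch, not code from this patch):

#include <linux/kernel.h>

static void report_selftest(int ok)
{
	printk(KERN_INFO "CPU: self-test ");	/* opens the line with a level */
	if (ok)
		printk(KERN_CONT "OK.\n");	/* appended to the same line */
	else
		printk(KERN_CONT "Buggy.\n");
}
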
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 5ce60a88027b..2055fc2b2e6b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -18,8 +18,8 @@ | |||
18 | #include <asm/hypervisor.h> | 18 | #include <asm/hypervisor.h> |
19 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
20 | #include <asm/sections.h> | 20 | #include <asm/sections.h> |
21 | #include <asm/topology.h> | 21 | #include <linux/topology.h> |
22 | #include <asm/cpumask.h> | 22 | #include <linux/cpumask.h> |
23 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
24 | #include <asm/atomic.h> | 24 | #include <asm/atomic.h> |
25 | #include <asm/proto.h> | 25 | #include <asm/proto.h> |
@@ -28,13 +28,13 @@ | |||
28 | #include <asm/desc.h> | 28 | #include <asm/desc.h> |
29 | #include <asm/i387.h> | 29 | #include <asm/i387.h> |
30 | #include <asm/mtrr.h> | 30 | #include <asm/mtrr.h> |
31 | #include <asm/numa.h> | 31 | #include <linux/numa.h> |
32 | #include <asm/asm.h> | 32 | #include <asm/asm.h> |
33 | #include <asm/cpu.h> | 33 | #include <asm/cpu.h> |
34 | #include <asm/mce.h> | 34 | #include <asm/mce.h> |
35 | #include <asm/msr.h> | 35 | #include <asm/msr.h> |
36 | #include <asm/pat.h> | 36 | #include <asm/pat.h> |
37 | #include <asm/smp.h> | 37 | #include <linux/smp.h> |
38 | 38 | ||
39 | #ifdef CONFIG_X86_LOCAL_APIC | 39 | #ifdef CONFIG_X86_LOCAL_APIC |
40 | #include <asm/uv/uv.h> | 40 | #include <asm/uv/uv.h> |
@@ -94,45 +94,45 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | |||
94 | * TLS descriptors are currently at a different place compared to i386. | 94 | * TLS descriptors are currently at a different place compared to i386. |
95 | * Hopefully nobody expects them at a fixed place (Wine?) | 95 | * Hopefully nobody expects them at a fixed place (Wine?) |
96 | */ | 96 | */ |
97 | [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } }, | 97 | [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), |
98 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } }, | 98 | [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), |
99 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } }, | 99 | [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), |
100 | [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } }, | 100 | [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), |
101 | [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } }, | 101 | [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), |
102 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } }, | 102 | [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), |
103 | #else | 103 | #else |
104 | [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, | 104 | [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), |
105 | [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, | 105 | [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
106 | [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, | 106 | [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), |
107 | [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } }, | 107 | [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), |
108 | /* | 108 | /* |
109 | * Segments used for calling PnP BIOS have byte granularity. | 109 | * Segments used for calling PnP BIOS have byte granularity. |
110 | * The code segments and data segments have fixed 64k limits, | 110 | * The code segments and data segments have fixed 64k limits, |
111 | * the transfer segment sizes are set at run time. | 111 | * the transfer segment sizes are set at run time. |
112 | */ | 112 | */ |
113 | /* 32-bit code */ | 113 | /* 32-bit code */ |
114 | [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } }, | 114 | [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), |
115 | /* 16-bit code */ | 115 | /* 16-bit code */ |
116 | [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } }, | 116 | [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), |
117 | /* 16-bit data */ | 117 | /* 16-bit data */ |
118 | [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } }, | 118 | [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), |
119 | /* 16-bit data */ | 119 | /* 16-bit data */ |
120 | [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } }, | 120 | [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), |
121 | /* 16-bit data */ | 121 | /* 16-bit data */ |
122 | [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } }, | 122 | [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), |
123 | /* | 123 | /* |
124 | * The APM segments have byte granularity and their bases | 124 | * The APM segments have byte granularity and their bases |
125 | * are set at run time. All have 64k limits. | 125 | * are set at run time. All have 64k limits. |
126 | */ | 126 | */ |
127 | /* 32-bit code */ | 127 | /* 32-bit code */ |
128 | [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } }, | 128 | [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), |
129 | /* 16-bit code */ | 129 | /* 16-bit code */ |
130 | [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } }, | 130 | [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), |
131 | /* data */ | 131 | /* data */ |
132 | [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } }, | 132 | [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), |
133 | 133 | ||
134 | [GDT_ENTRY_ESPFIX_SS] = { { { 0x0000ffff, 0x00cf9200 } } }, | 134 | [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
135 | [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } }, | 135 | [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), |
136 | GDT_STACK_CANARY_INIT | 136 | GDT_STACK_CANARY_INIT |
137 | #endif | 137 | #endif |
138 | } }; | 138 | } }; |
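The common.c hunk above swaps hand-encoded descriptor dwords for GDT_ENTRY_INIT(flags, base, limit). The real macro lives in the kernel headers (asm/desc_defs.h at the time); the standalone sketch below only re-derives the packing to show the two forms are equivalent, reproducing 0x0000ffff/0x00cf9b00 from GDT_ENTRY_INIT(0xc09b, 0, 0xfffff):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative re-derivation of the descriptor packing; not the kernel macro. */
    static void gdt_entry_words(uint16_t flags, uint32_t base, uint32_t limit,
                                uint32_t *lo, uint32_t *hi)
    {
        *lo = (limit & 0xffff) | ((base & 0xffff) << 16);
        *hi = ((base >> 16) & 0xff) | ((uint32_t)(flags & 0xf0ff) << 8) |
              (limit & 0xf0000) | (base & 0xff000000);
    }

    int main(void)
    {
        uint32_t lo, hi;

        gdt_entry_words(0xc09b, 0, 0xfffff, &lo, &hi);
        printf("%#010x %#010x\n", lo, hi); /* prints 0x0000ffff 0x00cf9b00 */
        return 0;
    }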
@@ -982,18 +982,26 @@ static __init int setup_disablecpuid(char *arg) | |||
982 | __setup("clearcpuid=", setup_disablecpuid); | 982 | __setup("clearcpuid=", setup_disablecpuid); |
983 | 983 | ||
984 | #ifdef CONFIG_X86_64 | 984 | #ifdef CONFIG_X86_64 |
985 | struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; | 985 | struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; |
986 | 986 | ||
987 | DEFINE_PER_CPU_FIRST(union irq_stack_union, | 987 | DEFINE_PER_CPU_FIRST(union irq_stack_union, |
988 | irq_stack_union) __aligned(PAGE_SIZE); | 988 | irq_stack_union) __aligned(PAGE_SIZE); |
989 | 989 | ||
990 | DEFINE_PER_CPU(char *, irq_stack_ptr) = | 990 | /* |
991 | init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; | 991 | * The following four percpu variables are hot. Align current_task to |
992 | * cacheline size such that all four fall in the same cacheline. | ||
993 | */ | ||
994 | DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = | ||
995 | &init_task; | ||
996 | EXPORT_PER_CPU_SYMBOL(current_task); | ||
992 | 997 | ||
993 | DEFINE_PER_CPU(unsigned long, kernel_stack) = | 998 | DEFINE_PER_CPU(unsigned long, kernel_stack) = |
994 | (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; | 999 | (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; |
995 | EXPORT_PER_CPU_SYMBOL(kernel_stack); | 1000 | EXPORT_PER_CPU_SYMBOL(kernel_stack); |
996 | 1001 | ||
1002 | DEFINE_PER_CPU(char *, irq_stack_ptr) = | ||
1003 | init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; | ||
1004 | |||
997 | DEFINE_PER_CPU(unsigned int, irq_count) = -1; | 1005 | DEFINE_PER_CPU(unsigned int, irq_count) = -1; |
998 | 1006 | ||
999 | /* | 1007 | /* |
@@ -1008,8 +1016,7 @@ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { | |||
1008 | }; | 1016 | }; |
1009 | 1017 | ||
1010 | static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks | 1018 | static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks |
1011 | [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]) | 1019 | [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); |
1012 | __aligned(PAGE_SIZE); | ||
1013 | 1020 | ||
1014 | /* May not be marked __init: used by software suspend */ | 1021 | /* May not be marked __init: used by software suspend */ |
1015 | void syscall_init(void) | 1022 | void syscall_init(void) |
@@ -1042,8 +1049,11 @@ DEFINE_PER_CPU(struct orig_ist, orig_ist); | |||
1042 | 1049 | ||
1043 | #else /* CONFIG_X86_64 */ | 1050 | #else /* CONFIG_X86_64 */ |
1044 | 1051 | ||
1052 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; | ||
1053 | EXPORT_PER_CPU_SYMBOL(current_task); | ||
1054 | |||
1045 | #ifdef CONFIG_CC_STACKPROTECTOR | 1055 | #ifdef CONFIG_CC_STACKPROTECTOR |
1046 | DEFINE_PER_CPU(unsigned long, stack_canary); | 1056 | DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); |
1047 | #endif | 1057 | #endif |
1048 | 1058 | ||
1049 | /* Make sure %fs and %gs are initialized properly in idle threads */ | 1059 | /* Make sure %fs and %gs are initialized properly in idle threads */ |
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 593171e967ef..19807b89f058 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
@@ -3,10 +3,10 @@ | |||
3 | #include <linux/delay.h> | 3 | #include <linux/delay.h> |
4 | #include <linux/pci.h> | 4 | #include <linux/pci.h> |
5 | #include <asm/dma.h> | 5 | #include <asm/dma.h> |
6 | #include <asm/io.h> | 6 | #include <linux/io.h> |
7 | #include <asm/processor-cyrix.h> | 7 | #include <asm/processor-cyrix.h> |
8 | #include <asm/processor-flags.h> | 8 | #include <asm/processor-flags.h> |
9 | #include <asm/timer.h> | 9 | #include <linux/timer.h> |
10 | #include <asm/pci-direct.h> | 10 | #include <asm/pci-direct.h> |
11 | #include <asm/tsc.h> | 11 | #include <asm/tsc.h> |
12 | 12 | ||
@@ -282,7 +282,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
282 | * The 5510/5520 companion chips have a funky PIT. | 282 | * The 5510/5520 companion chips have a funky PIT. |
283 | */ | 283 | */ |
284 | if (vendor == PCI_VENDOR_ID_CYRIX && | 284 | if (vendor == PCI_VENDOR_ID_CYRIX && |
285 | (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520)) | 285 | (device == PCI_DEVICE_ID_CYRIX_5510 || |
286 | device == PCI_DEVICE_ID_CYRIX_5520)) | ||
286 | mark_tsc_unstable("cyrix 5510/5520 detected"); | 287 | mark_tsc_unstable("cyrix 5510/5520 detected"); |
287 | } | 288 | } |
288 | #endif | 289 | #endif |
@@ -299,7 +300,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
299 | * ? : 0x7x | 300 | * ? : 0x7x |
300 | * GX1 : 0x8x GX1 datasheet 56 | 301 | * GX1 : 0x8x GX1 datasheet 56 |
301 | */ | 302 | */ |
302 | if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f)) | 303 | if ((0x30 <= dir1 && dir1 <= 0x6f) || |
304 | (0x80 <= dir1 && dir1 <= 0x8f)) | ||
303 | geode_configure(); | 305 | geode_configure(); |
304 | return; | 306 | return; |
305 | } else { /* MediaGX */ | 307 | } else { /* MediaGX */ |
@@ -427,9 +429,12 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | |||
427 | printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n"); | 429 | printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n"); |
428 | local_irq_save(flags); | 430 | local_irq_save(flags); |
429 | ccr3 = getCx86(CX86_CCR3); | 431 | ccr3 = getCx86(CX86_CCR3); |
430 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 432 | /* enable MAPEN */ |
431 | setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid */ | 433 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); |
432 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 434 | /* enable cpuid */ |
435 | setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); | ||
436 | /* disable MAPEN */ | ||
437 | setCx86(CX86_CCR3, ccr3); | ||
433 | local_irq_restore(flags); | 438 | local_irq_restore(flags); |
434 | } | 439 | } |
435 | } | 440 | } |
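The cyrix.c hunk above only rewraps long lines, but the sequence it rewraps is easier to follow in one piece: CCR4 is reachable only while the MAPEN bit is set in CCR3, so the code saves CCR3, enables MAPEN, flips the CPUID-enable bit in CCR4, then restores CCR3, all with interrupts off. Condensed (same helpers as the file above):

    local_irq_save(flags);
    ccr3 = getCx86(CX86_CCR3);                /* save CCR3 */
    setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* keep low nibble, set MAPEN */
    setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* CPUID enable */
    setCx86(CX86_CCR3, ccr3);                 /* restore CCR3: MAPEN off again */
    local_irq_restore(flags);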
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index fb5b86af0b01..93ba8eeb100a 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c | |||
@@ -28,11 +28,10 @@ | |||
28 | static inline void __cpuinit | 28 | static inline void __cpuinit |
29 | detect_hypervisor_vendor(struct cpuinfo_x86 *c) | 29 | detect_hypervisor_vendor(struct cpuinfo_x86 *c) |
30 | { | 30 | { |
31 | if (vmware_platform()) { | 31 | if (vmware_platform()) |
32 | c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE; | 32 | c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE; |
33 | } else { | 33 | else |
34 | c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE; | 34 | c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE; |
35 | } | ||
36 | } | 35 | } |
37 | 36 | ||
38 | unsigned long get_hypervisor_tsc_freq(void) | 37 | unsigned long get_hypervisor_tsc_freq(void) |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 3260ab044996..80a722a071b5 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -7,17 +7,17 @@ | |||
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/thread_info.h> | 8 | #include <linux/thread_info.h> |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/uaccess.h> | ||
10 | 11 | ||
11 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
12 | #include <asm/pgtable.h> | 13 | #include <asm/pgtable.h> |
13 | #include <asm/msr.h> | 14 | #include <asm/msr.h> |
14 | #include <asm/uaccess.h> | ||
15 | #include <asm/ds.h> | 15 | #include <asm/ds.h> |
16 | #include <asm/bugs.h> | 16 | #include <asm/bugs.h> |
17 | #include <asm/cpu.h> | 17 | #include <asm/cpu.h> |
18 | 18 | ||
19 | #ifdef CONFIG_X86_64 | 19 | #ifdef CONFIG_X86_64 |
20 | #include <asm/topology.h> | 20 | #include <linux/topology.h> |
21 | #include <asm/numa_64.h> | 21 | #include <asm/numa_64.h> |
22 | #endif | 22 | #endif |
23 | 23 | ||
@@ -174,7 +174,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
174 | #ifdef CONFIG_X86_F00F_BUG | 174 | #ifdef CONFIG_X86_F00F_BUG |
175 | /* | 175 | /* |
176 | * All current models of Pentium and Pentium with MMX technology CPUs | 176 | * All current models of Pentium and Pentium with MMX technology CPUs |
177 | * have the F0 0F bug, which lets nonprivileged users lock up the system. | 177 | * have the F0 0F bug, which lets nonprivileged users lock up the |
178 | * system. | ||
178 | * Note that the workaround only should be initialized once... | 179 | * Note that the workaround only should be initialized once... |
179 | */ | 180 | */ |
180 | c->f00f_bug = 0; | 181 | c->f00f_bug = 0; |
@@ -207,7 +208,7 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
207 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); | 208 | printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); |
208 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); | 209 | printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); |
209 | lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; | 210 | lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; |
210 | wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); | 211 | wrmsr(MSR_IA32_MISC_ENABLE, lo, hi); |
211 | } | 212 | } |
212 | } | 213 | } |
213 | 214 | ||
@@ -283,7 +284,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) | |||
283 | /* Intel has a non-standard dependency on %ecx for this CPUID level. */ | 284 | /* Intel has a non-standard dependency on %ecx for this CPUID level. */ |
284 | cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); | 285 | cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); |
285 | if (eax & 0x1f) | 286 | if (eax & 0x1f) |
286 | return ((eax >> 26) + 1); | 287 | return (eax >> 26) + 1; |
287 | else | 288 | else |
288 | return 1; | 289 | return 1; |
289 | } | 290 | } |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 789efe217e1a..804c40e2bc3e 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Changes: | 4 | * Changes: |
5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) | 5 | * Venkatesh Pallipadi : Adding cache identification through cpuid(4) |
6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. | 6 | * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. |
7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. | 7 | * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. |
8 | */ | 8 | */ |
9 | 9 | ||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/pci.h> | 16 | #include <linux/pci.h> |
17 | 17 | ||
18 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
19 | #include <asm/smp.h> | 19 | #include <linux/smp.h> |
20 | #include <asm/k8.h> | 20 | #include <asm/k8.h> |
21 | 21 | ||
22 | #define LVL_1_INST 1 | 22 | #define LVL_1_INST 1 |
@@ -25,14 +25,15 @@ | |||
25 | #define LVL_3 4 | 25 | #define LVL_3 4 |
26 | #define LVL_TRACE 5 | 26 | #define LVL_TRACE 5 |
27 | 27 | ||
28 | struct _cache_table | 28 | struct _cache_table { |
29 | { | ||
30 | unsigned char descriptor; | 29 | unsigned char descriptor; |
31 | char cache_type; | 30 | char cache_type; |
32 | short size; | 31 | short size; |
33 | }; | 32 | }; |
34 | 33 | ||
35 | /* all the cache descriptor types we care about (no TLB or trace cache entries) */ | 34 | /* All the cache descriptor types we care about (no TLB or |
35 | trace cache entries) */ | ||
36 | |||
36 | static const struct _cache_table __cpuinitconst cache_table[] = | 37 | static const struct _cache_table __cpuinitconst cache_table[] = |
37 | { | 38 | { |
38 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ | 39 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ |
@@ -105,8 +106,7 @@ static const struct _cache_table __cpuinitconst cache_table[] = | |||
105 | }; | 106 | }; |
106 | 107 | ||
107 | 108 | ||
108 | enum _cache_type | 109 | enum _cache_type { |
109 | { | ||
110 | CACHE_TYPE_NULL = 0, | 110 | CACHE_TYPE_NULL = 0, |
111 | CACHE_TYPE_DATA = 1, | 111 | CACHE_TYPE_DATA = 1, |
112 | CACHE_TYPE_INST = 2, | 112 | CACHE_TYPE_INST = 2, |
@@ -170,31 +170,31 @@ unsigned short num_cache_leaves; | |||
170 | Maybe later */ | 170 | Maybe later */ |
171 | union l1_cache { | 171 | union l1_cache { |
172 | struct { | 172 | struct { |
173 | unsigned line_size : 8; | 173 | unsigned line_size:8; |
174 | unsigned lines_per_tag : 8; | 174 | unsigned lines_per_tag:8; |
175 | unsigned assoc : 8; | 175 | unsigned assoc:8; |
176 | unsigned size_in_kb : 8; | 176 | unsigned size_in_kb:8; |
177 | }; | 177 | }; |
178 | unsigned val; | 178 | unsigned val; |
179 | }; | 179 | }; |
180 | 180 | ||
181 | union l2_cache { | 181 | union l2_cache { |
182 | struct { | 182 | struct { |
183 | unsigned line_size : 8; | 183 | unsigned line_size:8; |
184 | unsigned lines_per_tag : 4; | 184 | unsigned lines_per_tag:4; |
185 | unsigned assoc : 4; | 185 | unsigned assoc:4; |
186 | unsigned size_in_kb : 16; | 186 | unsigned size_in_kb:16; |
187 | }; | 187 | }; |
188 | unsigned val; | 188 | unsigned val; |
189 | }; | 189 | }; |
190 | 190 | ||
191 | union l3_cache { | 191 | union l3_cache { |
192 | struct { | 192 | struct { |
193 | unsigned line_size : 8; | 193 | unsigned line_size:8; |
194 | unsigned lines_per_tag : 4; | 194 | unsigned lines_per_tag:4; |
195 | unsigned assoc : 4; | 195 | unsigned assoc:4; |
196 | unsigned res : 2; | 196 | unsigned res:2; |
197 | unsigned size_encoded : 14; | 197 | unsigned size_encoded:14; |
198 | }; | 198 | }; |
199 | unsigned val; | 199 | unsigned val; |
200 | }; | 200 | }; |
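The bitfield reformatting above (checkpatch prefers no spaces around ':') leaves the layout untouched: each union overlays a decoded view on one 32-bit register that amd_cpuid4() reads via CPUID 0x80000005/0x80000006. A standalone sketch with an illustrative raw value (field widths copied from the file; the value itself is made up):

    #include <stdio.h>

    /* Mirror of the kernel's union l2_cache above. */
    union l2_cache {
        struct {
            unsigned line_size:8;
            unsigned lines_per_tag:4;
            unsigned assoc:4;
            unsigned size_in_kb:16;
        };
        unsigned val;
    };

    int main(void)
    {
        union l2_cache l2;

        l2.val = 0x02004140; /* illustrative ECX from CPUID 0x80000006 */
        printf("L2: %u KB, assoc code %u, %u-byte lines\n",
               l2.size_in_kb, l2.assoc, l2.line_size);
        /* prints: L2: 512 KB, assoc code 4, 64-byte lines */
        return 0;
    }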
@@ -241,7 +241,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
241 | case 0: | 241 | case 0: |
242 | if (!l1->val) | 242 | if (!l1->val) |
243 | return; | 243 | return; |
244 | assoc = l1->assoc; | 244 | assoc = assocs[l1->assoc]; |
245 | line_size = l1->line_size; | 245 | line_size = l1->line_size; |
246 | lines_per_tag = l1->lines_per_tag; | 246 | lines_per_tag = l1->lines_per_tag; |
247 | size_in_kb = l1->size_in_kb; | 247 | size_in_kb = l1->size_in_kb; |
@@ -249,7 +249,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
249 | case 2: | 249 | case 2: |
250 | if (!l2.val) | 250 | if (!l2.val) |
251 | return; | 251 | return; |
252 | assoc = l2.assoc; | 252 | assoc = assocs[l2.assoc]; |
253 | line_size = l2.line_size; | 253 | line_size = l2.line_size; |
254 | lines_per_tag = l2.lines_per_tag; | 254 | lines_per_tag = l2.lines_per_tag; |
255 | /* cpu_data has errata corrections for K7 applied */ | 255 | /* cpu_data has errata corrections for K7 applied */ |
@@ -258,10 +258,14 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
258 | case 3: | 258 | case 3: |
259 | if (!l3.val) | 259 | if (!l3.val) |
260 | return; | 260 | return; |
261 | assoc = l3.assoc; | 261 | assoc = assocs[l3.assoc]; |
262 | line_size = l3.line_size; | 262 | line_size = l3.line_size; |
263 | lines_per_tag = l3.lines_per_tag; | 263 | lines_per_tag = l3.lines_per_tag; |
264 | size_in_kb = l3.size_encoded * 512; | 264 | size_in_kb = l3.size_encoded * 512; |
265 | if (boot_cpu_has(X86_FEATURE_AMD_DCM)) { | ||
266 | size_in_kb = size_in_kb >> 1; | ||
267 | assoc = assoc >> 1; | ||
268 | } | ||
265 | break; | 269 | break; |
266 | default: | 270 | default: |
267 | return; | 271 | return; |
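The new X86_FEATURE_AMD_DCM branch handles multi-node (dual-die) processors, where CPUID 0x80000006 describes the package's combined L3 while this code wants the per-node share, so both the size and the decoded associativity are halved. Illustrative numbers, not from the patch:

    /* A dual-node package reporting 12 MB of L3 for both nodes:
     *   size_in_kb = 12288 >> 1 = 6144 KB per node
     * and assoc is halved the same way, since each node contributes
     * half of the ways.
     */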
@@ -270,18 +274,14 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | |||
270 | eax->split.is_self_initializing = 1; | 274 | eax->split.is_self_initializing = 1; |
271 | eax->split.type = types[leaf]; | 275 | eax->split.type = types[leaf]; |
272 | eax->split.level = levels[leaf]; | 276 | eax->split.level = levels[leaf]; |
273 | if (leaf == 3) | 277 | eax->split.num_threads_sharing = 0; |
274 | eax->split.num_threads_sharing = | ||
275 | current_cpu_data.x86_max_cores - 1; | ||
276 | else | ||
277 | eax->split.num_threads_sharing = 0; | ||
278 | eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1; | 278 | eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1; |
279 | 279 | ||
280 | 280 | ||
281 | if (assoc == 0xf) | 281 | if (assoc == 0xffff) |
282 | eax->split.is_fully_associative = 1; | 282 | eax->split.is_fully_associative = 1; |
283 | ebx->split.coherency_line_size = line_size - 1; | 283 | ebx->split.coherency_line_size = line_size - 1; |
284 | ebx->split.ways_of_associativity = assocs[assoc] - 1; | 284 | ebx->split.ways_of_associativity = assoc - 1; |
285 | ebx->split.physical_line_partition = lines_per_tag - 1; | 285 | ebx->split.physical_line_partition = lines_per_tag - 1; |
286 | ecx->split.number_of_sets = (size_in_kb * 1024) / line_size / | 286 | ecx->split.number_of_sets = (size_in_kb * 1024) / line_size / |
287 | (ebx->split.ways_of_associativity + 1) - 1; | 287 | (ebx->split.ways_of_associativity + 1) - 1; |
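Two consequences of routing assoc through assocs[] at the case sites above: the fully-associative test now compares against the table's decoded marker value (0xffff) rather than the raw CPUID nibble (0xf), and ways_of_associativity takes the real way count directly. The number_of_sets arithmetic, worked with illustrative values:

    /* Illustrative: 512 KB cache, 64-byte lines, 16 ways:
     *   sets = (512 * 1024) / 64 / 16 = 512
     * CPUID4 fields store "value - 1", so number_of_sets = 511,
     * ways_of_associativity = 15, coherency_line_size = 63.
     */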
@@ -350,7 +350,8 @@ static int __cpuinit find_num_cache_leaves(void) | |||
350 | 350 | ||
351 | unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | 351 | unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) |
352 | { | 352 | { |
353 | unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ | 353 | /* Cache sizes */ |
354 | unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; | ||
354 | unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ | 355 | unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ |
355 | unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ | 356 | unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ |
356 | unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; | 357 | unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; |
@@ -377,8 +378,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
377 | 378 | ||
378 | retval = cpuid4_cache_lookup_regs(i, &this_leaf); | 379 | retval = cpuid4_cache_lookup_regs(i, &this_leaf); |
379 | if (retval >= 0) { | 380 | if (retval >= 0) { |
380 | switch(this_leaf.eax.split.level) { | 381 | switch (this_leaf.eax.split.level) { |
381 | case 1: | 382 | case 1: |
382 | if (this_leaf.eax.split.type == | 383 | if (this_leaf.eax.split.type == |
383 | CACHE_TYPE_DATA) | 384 | CACHE_TYPE_DATA) |
384 | new_l1d = this_leaf.size/1024; | 385 | new_l1d = this_leaf.size/1024; |
@@ -386,19 +387,20 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
386 | CACHE_TYPE_INST) | 387 | CACHE_TYPE_INST) |
387 | new_l1i = this_leaf.size/1024; | 388 | new_l1i = this_leaf.size/1024; |
388 | break; | 389 | break; |
389 | case 2: | 390 | case 2: |
390 | new_l2 = this_leaf.size/1024; | 391 | new_l2 = this_leaf.size/1024; |
391 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; | 392 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; |
392 | index_msb = get_count_order(num_threads_sharing); | 393 | index_msb = get_count_order(num_threads_sharing); |
393 | l2_id = c->apicid >> index_msb; | 394 | l2_id = c->apicid >> index_msb; |
394 | break; | 395 | break; |
395 | case 3: | 396 | case 3: |
396 | new_l3 = this_leaf.size/1024; | 397 | new_l3 = this_leaf.size/1024; |
397 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; | 398 | num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; |
398 | index_msb = get_count_order(num_threads_sharing); | 399 | index_msb = get_count_order( |
400 | num_threads_sharing); | ||
399 | l3_id = c->apicid >> index_msb; | 401 | l3_id = c->apicid >> index_msb; |
400 | break; | 402 | break; |
401 | default: | 403 | default: |
402 | break; | 404 | break; |
403 | } | 405 | } |
404 | } | 406 | } |
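The l2_id/l3_id computation in these cases strips the low APIC-ID bits that distinguish CPUs sharing one cache, so every sharer lands on the same ID. Worked through with illustrative numbers:

    /* Say 4 threads share the L3, so the CPUID4 field (stored minus
     * one) is 3:
     *   num_threads_sharing = 1 + 3 = 4
     *   index_msb = get_count_order(4) = 2
     *   l3_id = c->apicid >> 2
     * APIC IDs 0..3 map to l3_id 0, IDs 4..7 to 1, and so on.
     */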
@@ -421,22 +423,21 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
421 | /* Number of times to iterate */ | 423 | /* Number of times to iterate */ |
422 | n = cpuid_eax(2) & 0xFF; | 424 | n = cpuid_eax(2) & 0xFF; |
423 | 425 | ||
424 | for ( i = 0 ; i < n ; i++ ) { | 426 | for (i = 0 ; i < n ; i++) { |
425 | cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]); | 427 | cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]); |
426 | 428 | ||
427 | /* If bit 31 is set, this is an unknown format */ | 429 | /* If bit 31 is set, this is an unknown format */ |
428 | for ( j = 0 ; j < 3 ; j++ ) { | 430 | for (j = 0 ; j < 3 ; j++) |
429 | if (regs[j] & (1 << 31)) regs[j] = 0; | 431 | if (regs[j] & (1 << 31)) |
430 | } | 432 | regs[j] = 0; |
431 | 433 | ||
432 | /* Byte 0 is level count, not a descriptor */ | 434 | /* Byte 0 is level count, not a descriptor */ |
433 | for ( j = 1 ; j < 16 ; j++ ) { | 435 | for (j = 1 ; j < 16 ; j++) { |
434 | unsigned char des = dp[j]; | 436 | unsigned char des = dp[j]; |
435 | unsigned char k = 0; | 437 | unsigned char k = 0; |
436 | 438 | ||
437 | /* look up this descriptor in the table */ | 439 | /* look up this descriptor in the table */ |
438 | while (cache_table[k].descriptor != 0) | 440 | while (cache_table[k].descriptor != 0) { |
439 | { | ||
440 | if (cache_table[k].descriptor == des) { | 441 | if (cache_table[k].descriptor == des) { |
441 | if (only_trace && cache_table[k].cache_type != LVL_TRACE) | 442 | if (only_trace && cache_table[k].cache_type != LVL_TRACE) |
442 | break; | 443 | break; |
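The loop being tidied here walks CPUID leaf 2's descriptor bytes: each register carries four one-byte descriptors (a register with bit 31 set carries none, and byte 0 of EAX is the iteration count), and each byte is looked up in cache_table[]. An illustrative lookup (descriptor 0x2c is documented by Intel as a 32 KB L1 data cache; the matching table entry sits above the quoted context):

    /* One conceptual iteration of the lookup above: */
    unsigned char des = 0x2c; /* illustrative descriptor byte */
    /* a cache_table entry { 0x2c, LVL_1_DATA, 32 } matches, so l1d = 32 */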
@@ -488,14 +489,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
488 | } | 489 | } |
489 | 490 | ||
490 | if (trace) | 491 | if (trace) |
491 | printk (KERN_INFO "CPU: Trace cache: %dK uops", trace); | 492 | printk(KERN_INFO "CPU: Trace cache: %dK uops", trace); |
492 | else if ( l1i ) | 493 | else if (l1i) |
493 | printk (KERN_INFO "CPU: L1 I cache: %dK", l1i); | 494 | printk(KERN_INFO "CPU: L1 I cache: %dK", l1i); |
494 | 495 | ||
495 | if (l1d) | 496 | if (l1d) |
496 | printk(", L1 D cache: %dK\n", l1d); | 497 | printk(KERN_CONT ", L1 D cache: %dK\n", l1d); |
497 | else | 498 | else |
498 | printk("\n"); | 499 | printk(KERN_CONT "\n"); |
499 | 500 | ||
500 | if (l2) | 501 | if (l2) |
501 | printk(KERN_INFO "CPU: L2 cache: %dK\n", l2); | 502 | printk(KERN_INFO "CPU: L2 cache: %dK\n", l2); |
@@ -522,6 +523,18 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | |||
522 | int index_msb, i; | 523 | int index_msb, i; |
523 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 524 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
524 | 525 | ||
526 | if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { | ||
527 | struct cpuinfo_x86 *d; | ||
528 | for_each_online_cpu(i) { | ||
529 | if (!per_cpu(cpuid4_info, i)) | ||
530 | continue; | ||
531 | d = &cpu_data(i); | ||
532 | this_leaf = CPUID4_INFO_IDX(i, index); | ||
533 | cpumask_copy(to_cpumask(this_leaf->shared_cpu_map), | ||
534 | d->llc_shared_map); | ||
535 | } | ||
536 | return; | ||
537 | } | ||
525 | this_leaf = CPUID4_INFO_IDX(cpu, index); | 538 | this_leaf = CPUID4_INFO_IDX(cpu, index); |
526 | num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; | 539 | num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; |
527 | 540 | ||
@@ -558,8 +571,13 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | |||
558 | } | 571 | } |
559 | } | 572 | } |
560 | #else | 573 | #else |
561 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {} | 574 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) |
562 | static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {} | 575 | { |
576 | } | ||
577 | |||
578 | static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | ||
579 | { | ||
580 | } | ||
563 | #endif | 581 | #endif |
564 | 582 | ||
565 | static void __cpuinit free_cache_attributes(unsigned int cpu) | 583 | static void __cpuinit free_cache_attributes(unsigned int cpu) |
@@ -645,7 +663,7 @@ static DEFINE_PER_CPU(struct _index_kobject *, index_kobject); | |||
645 | static ssize_t show_##file_name \ | 663 | static ssize_t show_##file_name \ |
646 | (struct _cpuid4_info *this_leaf, char *buf) \ | 664 | (struct _cpuid4_info *this_leaf, char *buf) \ |
647 | { \ | 665 | { \ |
648 | return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \ | 666 | return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \ |
649 | } | 667 | } |
650 | 668 | ||
651 | show_one_plus(level, eax.split.level, 0); | 669 | show_one_plus(level, eax.split.level, 0); |
@@ -656,7 +674,7 @@ show_one_plus(number_of_sets, ecx.split.number_of_sets, 1); | |||
656 | 674 | ||
657 | static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf) | 675 | static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf) |
658 | { | 676 | { |
659 | return sprintf (buf, "%luK\n", this_leaf->size / 1024); | 677 | return sprintf(buf, "%luK\n", this_leaf->size / 1024); |
660 | } | 678 | } |
661 | 679 | ||
662 | static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, | 680 | static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, |
@@ -669,7 +687,7 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, | |||
669 | const struct cpumask *mask; | 687 | const struct cpumask *mask; |
670 | 688 | ||
671 | mask = to_cpumask(this_leaf->shared_cpu_map); | 689 | mask = to_cpumask(this_leaf->shared_cpu_map); |
672 | n = type? | 690 | n = type ? |
673 | cpulist_scnprintf(buf, len-2, mask) : | 691 | cpulist_scnprintf(buf, len-2, mask) : |
674 | cpumask_scnprintf(buf, len-2, mask); | 692 | cpumask_scnprintf(buf, len-2, mask); |
675 | buf[n++] = '\n'; | 693 | buf[n++] = '\n'; |
@@ -800,7 +818,7 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, | |||
800 | static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, | 818 | static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, |
801 | show_cache_disable_1, store_cache_disable_1); | 819 | show_cache_disable_1, store_cache_disable_1); |
802 | 820 | ||
803 | static struct attribute * default_attrs[] = { | 821 | static struct attribute *default_attrs[] = { |
804 | &type.attr, | 822 | &type.attr, |
805 | &level.attr, | 823 | &level.attr, |
806 | &coherency_line_size.attr, | 824 | &coherency_line_size.attr, |
@@ -815,7 +833,7 @@ static struct attribute * default_attrs[] = { | |||
815 | NULL | 833 | NULL |
816 | }; | 834 | }; |
817 | 835 | ||
818 | static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) | 836 | static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) |
819 | { | 837 | { |
820 | struct _cache_attr *fattr = to_attr(attr); | 838 | struct _cache_attr *fattr = to_attr(attr); |
821 | struct _index_kobject *this_leaf = to_object(kobj); | 839 | struct _index_kobject *this_leaf = to_object(kobj); |
@@ -828,8 +846,8 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) | |||
828 | return ret; | 846 | return ret; |
829 | } | 847 | } |
830 | 848 | ||
831 | static ssize_t store(struct kobject * kobj, struct attribute * attr, | 849 | static ssize_t store(struct kobject *kobj, struct attribute *attr, |
832 | const char * buf, size_t count) | 850 | const char *buf, size_t count) |
833 | { | 851 | { |
834 | struct _cache_attr *fattr = to_attr(attr); | 852 | struct _cache_attr *fattr = to_attr(attr); |
835 | struct _index_kobject *this_leaf = to_object(kobj); | 853 | struct _index_kobject *this_leaf = to_object(kobj); |
@@ -883,7 +901,7 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) | |||
883 | goto err_out; | 901 | goto err_out; |
884 | 902 | ||
885 | per_cpu(index_kobject, cpu) = kzalloc( | 903 | per_cpu(index_kobject, cpu) = kzalloc( |
886 | sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL); | 904 | sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); |
887 | if (unlikely(per_cpu(index_kobject, cpu) == NULL)) | 905 | if (unlikely(per_cpu(index_kobject, cpu) == NULL)) |
888 | goto err_out; | 906 | goto err_out; |
889 | 907 | ||
@@ -917,7 +935,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
917 | } | 935 | } |
918 | 936 | ||
919 | for (i = 0; i < num_cache_leaves; i++) { | 937 | for (i = 0; i < num_cache_leaves; i++) { |
920 | this_object = INDEX_KOBJECT_PTR(cpu,i); | 938 | this_object = INDEX_KOBJECT_PTR(cpu, i); |
921 | this_object->cpu = cpu; | 939 | this_object->cpu = cpu; |
922 | this_object->index = i; | 940 | this_object->index = i; |
923 | retval = kobject_init_and_add(&(this_object->kobj), | 941 | retval = kobject_init_and_add(&(this_object->kobj), |
@@ -925,9 +943,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) | |||
925 | per_cpu(cache_kobject, cpu), | 943 | per_cpu(cache_kobject, cpu), |
926 | "index%1lu", i); | 944 | "index%1lu", i); |
927 | if (unlikely(retval)) { | 945 | if (unlikely(retval)) { |
928 | for (j = 0; j < i; j++) { | 946 | for (j = 0; j < i; j++) |
929 | kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj)); | 947 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); |
930 | } | ||
931 | kobject_put(per_cpu(cache_kobject, cpu)); | 948 | kobject_put(per_cpu(cache_kobject, cpu)); |
932 | cpuid4_cache_sysfs_exit(cpu); | 949 | cpuid4_cache_sysfs_exit(cpu); |
933 | return retval; | 950 | return retval; |
@@ -952,7 +969,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) | |||
952 | cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map)); | 969 | cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map)); |
953 | 970 | ||
954 | for (i = 0; i < num_cache_leaves; i++) | 971 | for (i = 0; i < num_cache_leaves; i++) |
955 | kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); | 972 | kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); |
956 | kobject_put(per_cpu(cache_kobject, cpu)); | 973 | kobject_put(per_cpu(cache_kobject, cpu)); |
957 | cpuid4_cache_sysfs_exit(cpu); | 974 | cpuid4_cache_sysfs_exit(cpu); |
958 | } | 975 | } |
@@ -977,8 +994,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, | |||
977 | return NOTIFY_OK; | 994 | return NOTIFY_OK; |
978 | } | 995 | } |
979 | 996 | ||
980 | static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = | 997 | static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { |
981 | { | ||
982 | .notifier_call = cacheinfo_cpu_callback, | 998 | .notifier_call = cacheinfo_cpu_callback, |
983 | }; | 999 | }; |
984 | 1000 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 01213048f62f..9bfe9d2ea615 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -183,6 +183,11 @@ void mce_log(struct mce *mce) | |||
183 | set_bit(0, &mce_need_notify); | 183 | set_bit(0, &mce_need_notify); |
184 | } | 184 | } |
185 | 185 | ||
186 | void __weak decode_mce(struct mce *m) | ||
187 | { | ||
188 | return; | ||
189 | } | ||
190 | |||
186 | static void print_mce(struct mce *m) | 191 | static void print_mce(struct mce *m) |
187 | { | 192 | { |
188 | printk(KERN_EMERG | 193 | printk(KERN_EMERG |
@@ -205,6 +210,8 @@ static void print_mce(struct mce *m) | |||
205 | printk(KERN_EMERG "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n", | 210 | printk(KERN_EMERG "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n", |
206 | m->cpuvendor, m->cpuid, m->time, m->socketid, | 211 | m->cpuvendor, m->cpuid, m->time, m->socketid, |
207 | m->apicid); | 212 | m->apicid); |
213 | |||
214 | decode_mce(m); | ||
208 | } | 215 | } |
209 | 216 | ||
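decode_mce() is declared __weak, so the generic MCE code always has a callable stub; any object linked into the same kernel image that provides a strong definition replaces it without this file changing (the intended consumer at the time was AMD-specific EDAC decoding, which is also why the print_mce_tail() hunk below makes the "run mcelog" advice conditional). A hypothetical strong override, showing only the linkage mechanics:

    /* In another object file built into the same kernel image: */
    #include <linux/kernel.h>
    #include <asm/mce.h>

    void decode_mce(struct mce *m) /* strong definition replaces the __weak stub */
    {
        printk(KERN_EMERG "MCi_STATUS 0x%016llx: vendor-specific decode here\n",
               (unsigned long long)m->status);
    }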
210 | static void print_mce_head(void) | 217 | static void print_mce_head(void) |
@@ -215,7 +222,10 @@ static void print_mce_head(void) | |||
215 | static void print_mce_tail(void) | 222 | static void print_mce_tail(void) |
216 | { | 223 | { |
217 | printk(KERN_EMERG "This is not a software problem!\n" | 224 | printk(KERN_EMERG "This is not a software problem!\n" |
218 | "Run through mcelog --ascii to decode and contact your hardware vendor\n"); | 225 | #if (!defined(CONFIG_EDAC) || !defined(CONFIG_CPU_SUP_AMD)) |
226 | "Run through mcelog --ascii to decode and contact your hardware vendor\n" | ||
227 | #endif | ||
228 | ); | ||
219 | } | 229 | } |
220 | 230 | ||
221 | #define PANIC_TIMEOUT 5 /* 5 seconds */ | 231 | #define PANIC_TIMEOUT 5 /* 5 seconds */ |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index ddae21620bda..1fecba404fd8 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -489,12 +489,14 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
489 | int i, err = 0; | 489 | int i, err = 0; |
490 | struct threshold_bank *b = NULL; | 490 | struct threshold_bank *b = NULL; |
491 | char name[32]; | 491 | char name[32]; |
492 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
493 | |||
492 | 494 | ||
493 | sprintf(name, "threshold_bank%i", bank); | 495 | sprintf(name, "threshold_bank%i", bank); |
494 | 496 | ||
495 | #ifdef CONFIG_SMP | 497 | #ifdef CONFIG_SMP |
496 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ | 498 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ |
497 | i = cpumask_first(cpu_core_mask(cpu)); | 499 | i = cpumask_first(c->llc_shared_map); |
498 | 500 | ||
499 | /* first core not up yet */ | 501 | /* first core not up yet */ |
500 | if (cpu_data(i).cpu_core_id) | 502 | if (cpu_data(i).cpu_core_id) |
@@ -514,7 +516,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
514 | if (err) | 516 | if (err) |
515 | goto out; | 517 | goto out; |
516 | 518 | ||
517 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); | 519 | cpumask_copy(b->cpus, c->llc_shared_map); |
518 | per_cpu(threshold_banks, cpu)[bank] = b; | 520 | per_cpu(threshold_banks, cpu)[bank] = b; |
519 | 521 | ||
520 | goto out; | 522 | goto out; |
@@ -539,7 +541,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
539 | #ifndef CONFIG_SMP | 541 | #ifndef CONFIG_SMP |
540 | cpumask_setall(b->cpus); | 542 | cpumask_setall(b->cpus); |
541 | #else | 543 | #else |
542 | cpumask_copy(b->cpus, cpu_core_mask(cpu)); | 544 | cpumask_copy(b->cpus, c->llc_shared_map); |
543 | #endif | 545 | #endif |
544 | 546 | ||
545 | per_cpu(threshold_banks, cpu)[bank] = b; | 547 | per_cpu(threshold_banks, cpu)[bank] = b; |
diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c index ee2331b0e58f..33af14110dfd 100644 --- a/arch/x86/kernel/cpu/mtrr/amd.c +++ b/arch/x86/kernel/cpu/mtrr/amd.c | |||
@@ -7,15 +7,15 @@ | |||
7 | 7 | ||
8 | static void | 8 | static void |
9 | amd_get_mtrr(unsigned int reg, unsigned long *base, | 9 | amd_get_mtrr(unsigned int reg, unsigned long *base, |
10 | unsigned long *size, mtrr_type * type) | 10 | unsigned long *size, mtrr_type *type) |
11 | { | 11 | { |
12 | unsigned long low, high; | 12 | unsigned long low, high; |
13 | 13 | ||
14 | rdmsr(MSR_K6_UWCCR, low, high); | 14 | rdmsr(MSR_K6_UWCCR, low, high); |
15 | /* Upper dword is region 1, lower is region 0 */ | 15 | /* Upper dword is region 1, lower is region 0 */ |
16 | if (reg == 1) | 16 | if (reg == 1) |
17 | low = high; | 17 | low = high; |
18 | /* The base masks off on the right alignment */ | 18 | /* The base masks off on the right alignment */ |
19 | *base = (low & 0xFFFE0000) >> PAGE_SHIFT; | 19 | *base = (low & 0xFFFE0000) >> PAGE_SHIFT; |
20 | *type = 0; | 20 | *type = 0; |
21 | if (low & 1) | 21 | if (low & 1) |
@@ -27,74 +27,81 @@ amd_get_mtrr(unsigned int reg, unsigned long *base, | |||
27 | return; | 27 | return; |
28 | } | 28 | } |
29 | /* | 29 | /* |
30 | * This needs a little explaining. The size is stored as an | 30 | * This needs a little explaining. The size is stored as an |
31 | * inverted mask of bits of 128K granularity 15 bits long offset | 31 | * inverted mask of bits of 128K granularity 15 bits long offset |
32 | * 2 bits | 32 | * 2 bits. |
33 | * | 33 | * |
34 | * So to get a size we do invert the mask and add 1 to the lowest | 34 | * So to get a size we do invert the mask and add 1 to the lowest |
35 | mask bit (4 as it's 2 bits in). This gives us a size we then shift | 35 | * mask bit (4 as it's 2 bits in). This gives us a size we then shift |
36 | * to turn into 128K blocks | 36 | * to turn into 128K blocks. |
37 | * | 37 | * |
38 | * eg 111 1111 1111 1100 is 512K | 38 | * eg 111 1111 1111 1100 is 512K |
39 | * | 39 | * |
40 | * invert 000 0000 0000 0011 | 40 | * invert 000 0000 0000 0011 |
41 | * +1 000 0000 0000 0100 | 41 | * +1 000 0000 0000 0100 |
42 | * *128K ... | 42 | * *128K ... |
43 | */ | 43 | */ |
44 | low = (~low) & 0x1FFFC; | 44 | low = (~low) & 0x1FFFC; |
45 | *size = (low + 4) << (15 - PAGE_SHIFT); | 45 | *size = (low + 4) << (15 - PAGE_SHIFT); |
46 | return; | ||
47 | } | 46 | } |
48 | 47 | ||
49 | static void amd_set_mtrr(unsigned int reg, unsigned long base, | 48 | /** |
50 | unsigned long size, mtrr_type type) | 49 | * amd_set_mtrr - Set variable MTRR register on the local CPU. |
51 | /* [SUMMARY] Set variable MTRR register on the local CPU. | 50 | * |
52 | <reg> The register to set. | 51 | * @reg The register to set. |
53 | <base> The base address of the region. | 52 | * @base The base address of the region. |
54 | <size> The size of the region. If this is 0 the region is disabled. | 53 | * @size The size of the region. If this is 0 the region is disabled. |
55 | <type> The type of the region. | 54 | * @type The type of the region. |
56 | [RETURNS] Nothing. | 55 | * |
57 | */ | 56 | * Returns nothing. |
57 | */ | ||
58 | static void | ||
59 | amd_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) | ||
58 | { | 60 | { |
59 | u32 regs[2]; | 61 | u32 regs[2]; |
60 | 62 | ||
61 | /* | 63 | /* |
62 | * Low is MTRR0 , High MTRR 1 | 64 | * Low is MTRR0, High MTRR 1 |
63 | */ | 65 | */ |
64 | rdmsr(MSR_K6_UWCCR, regs[0], regs[1]); | 66 | rdmsr(MSR_K6_UWCCR, regs[0], regs[1]); |
65 | /* | 67 | /* |
66 | * Blank to disable | 68 | * Blank to disable |
67 | */ | 69 | */ |
68 | if (size == 0) | 70 | if (size == 0) { |
69 | regs[reg] = 0; | 71 | regs[reg] = 0; |
70 | else | 72 | } else { |
71 | /* Set the register to the base, the type (off by one) and an | 73 | /* |
72 | inverted bitmask of the size. The size is the only odd | 74 | * Set the register to the base, the type (off by one) and an |
73 | bit. We are fed, say, 512K. We invert this and we get 111 1111 | 75 | * inverted bitmask of the size. The size is the only odd |
74 | 1111 1011 but if you subtract one and invert you get the | 76 | * bit. We are fed, say, 512K. We invert this and we get 111 1111 |
75 | desired 111 1111 1111 1100 mask | 77 | * 1111 1011 but if you subtract one and invert you get the |
76 | 78 | * desired 111 1111 1111 1100 mask | |
77 | But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! */ | 79 | * |
80 | * But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! | ||
81 | */ | ||
78 | regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC) | 82 | regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC) |
79 | | (base << PAGE_SHIFT) | (type + 1); | 83 | | (base << PAGE_SHIFT) | (type + 1); |
84 | } | ||
80 | 85 | ||
81 | /* | 86 | /* |
82 | * The writeback rule is quite specific. See the manual. It's: | 87 | * The writeback rule is quite specific. See the manual. It's: |
83 | * disable local interrupts, write back the cache, set the mtrr | 88 | * disable local interrupts, write back the cache, set the mtrr |
84 | */ | 89 | */ |
85 | wbinvd(); | 90 | wbinvd(); |
86 | wrmsr(MSR_K6_UWCCR, regs[0], regs[1]); | 91 | wrmsr(MSR_K6_UWCCR, regs[0], regs[1]); |
87 | } | 92 | } |
88 | 93 | ||
89 | static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type) | 94 | static int |
95 | amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type) | ||
90 | { | 96 | { |
91 | /* Apply the K6 block alignment and size rules | 97 | /* |
92 | In order | 98 | * Apply the K6 block alignment and size rules |
93 | o Uncached or gathering only | 99 | * In order |
94 | o 128K or bigger block | 100 | * o Uncached or gathering only |
95 | o Power of 2 block | 101 | * o 128K or bigger block |
96 | o base suitably aligned to the power | 102 | * o Power of 2 block |
97 | */ | 103 | * o base suitably aligned to the power |
104 | */ | ||
98 | if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT)) | 105 | if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT)) |
99 | || (size & ~(size - 1)) - size || (base & (size - 1))) | 106 | || (size & ~(size - 1)) - size || (base & (size - 1))) |
100 | return -EINVAL; | 107 | return -EINVAL; |
@@ -115,5 +122,3 @@ int __init amd_init_mtrr(void) | |||
115 | set_mtrr_ops(&amd_mtrr_ops); | 122 | set_mtrr_ops(&amd_mtrr_ops); |
116 | return 0; | 123 | return 0; |
117 | } | 124 | } |
118 | |||
119 | //arch_initcall(amd_mtrr_init); | ||
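The comment blocks above describe the K6 size encoding twice (decode in amd_get_mtrr(), encode in amd_set_mtrr()). Both directions in one standalone program, using the 512K example from the comment (illustration only; the kernel itself works in page units):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t size_bytes = 512 * 1024;
        uint32_t blocks = size_bytes >> 17;       /* 128K granularity: 4 */

        /* Encode: ~(x - 1) == -x, kept in bits 2..16 of the low word. */
        uint32_t mask = (-blocks << 2) & 0x1FFFC; /* 0x1fff0 */

        /* Decode: invert, add the lowest mask bit (4), shift back. */
        uint32_t decoded = (((~mask) & 0x1FFFC) + 4) >> 2;

        printf("mask=%#x decoded=%u blocks\n", mask, decoded);
        /* prints: mask=0x1fff0 decoded=4 blocks (4 * 128K = 512K) */
        return 0;
    }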
diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c index cb9aa3a7a7ab..de89f14eff3a 100644 --- a/arch/x86/kernel/cpu/mtrr/centaur.c +++ b/arch/x86/kernel/cpu/mtrr/centaur.c | |||
@@ -1,7 +1,9 @@ | |||
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/mm.h> | 2 | #include <linux/mm.h> |
3 | |||
3 | #include <asm/mtrr.h> | 4 | #include <asm/mtrr.h> |
4 | #include <asm/msr.h> | 5 | #include <asm/msr.h> |
6 | |||
5 | #include "mtrr.h" | 7 | #include "mtrr.h" |
6 | 8 | ||
7 | static struct { | 9 | static struct { |
@@ -12,25 +14,25 @@ static struct { | |||
12 | static u8 centaur_mcr_reserved; | 14 | static u8 centaur_mcr_reserved; |
13 | static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */ | 15 | static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */ |
14 | 16 | ||
15 | /* | 17 | /** |
16 | * Report boot time MCR setups | 18 | * centaur_get_free_region - Get a free MTRR. |
19 | * | ||
20 | * @base: The starting (base) address of the region. | ||
21 | * @size: The size (in bytes) of the region. | ||
22 | * | ||
23 | * Returns: the index of the region on success, else -1 on error. | ||
17 | */ | 24 | */ |
18 | |||
19 | static int | 25 | static int |
20 | centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) | 26 | centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) |
21 | /* [SUMMARY] Get a free MTRR. | ||
22 | <base> The starting (base) address of the region. | ||
23 | <size> The size (in bytes) of the region. | ||
24 | [RETURNS] The index of the region on success, else -1 on error. | ||
25 | */ | ||
26 | { | 27 | { |
27 | int i, max; | ||
28 | mtrr_type ltype; | ||
29 | unsigned long lbase, lsize; | 28 | unsigned long lbase, lsize; |
29 | mtrr_type ltype; | ||
30 | int i, max; | ||
30 | 31 | ||
31 | max = num_var_ranges; | 32 | max = num_var_ranges; |
32 | if (replace_reg >= 0 && replace_reg < max) | 33 | if (replace_reg >= 0 && replace_reg < max) |
33 | return replace_reg; | 34 | return replace_reg; |
35 | |||
34 | for (i = 0; i < max; ++i) { | 36 | for (i = 0; i < max; ++i) { |
35 | if (centaur_mcr_reserved & (1 << i)) | 37 | if (centaur_mcr_reserved & (1 << i)) |
36 | continue; | 38 | continue; |
@@ -38,11 +40,14 @@ centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) | |||
38 | if (lsize == 0) | 40 | if (lsize == 0) |
39 | return i; | 41 | return i; |
40 | } | 42 | } |
43 | |||
41 | return -ENOSPC; | 44 | return -ENOSPC; |
42 | } | 45 | } |
43 | 46 | ||
44 | void | 47 | /* |
45 | mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) | 48 | * Report boot time MCR setups |
49 | */ | ||
50 | void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) | ||
46 | { | 51 | { |
47 | centaur_mcr[mcr].low = lo; | 52 | centaur_mcr[mcr].low = lo; |
48 | centaur_mcr[mcr].high = hi; | 53 | centaur_mcr[mcr].high = hi; |
@@ -54,33 +59,35 @@ centaur_get_mcr(unsigned int reg, unsigned long *base, | |||
54 | { | 59 | { |
55 | *base = centaur_mcr[reg].high >> PAGE_SHIFT; | 60 | *base = centaur_mcr[reg].high >> PAGE_SHIFT; |
56 | *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; | 61 | *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; |
57 | *type = MTRR_TYPE_WRCOMB; /* If it is there, it is write-combining */ | 62 | *type = MTRR_TYPE_WRCOMB; /* write-combining */ |
63 | |||
58 | if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2)) | 64 | if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2)) |
59 | *type = MTRR_TYPE_UNCACHABLE; | 65 | *type = MTRR_TYPE_UNCACHABLE; |
60 | if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25) | 66 | if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25) |
61 | *type = MTRR_TYPE_WRBACK; | 67 | *type = MTRR_TYPE_WRBACK; |
62 | if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31) | 68 | if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31) |
63 | *type = MTRR_TYPE_WRBACK; | 69 | *type = MTRR_TYPE_WRBACK; |
64 | |||
65 | } | 70 | } |
66 | 71 | ||
67 | static void centaur_set_mcr(unsigned int reg, unsigned long base, | 72 | static void |
68 | unsigned long size, mtrr_type type) | 73 | centaur_set_mcr(unsigned int reg, unsigned long base, |
74 | unsigned long size, mtrr_type type) | ||
69 | { | 75 | { |
70 | unsigned long low, high; | 76 | unsigned long low, high; |
71 | 77 | ||
72 | if (size == 0) { | 78 | if (size == 0) { |
73 | /* Disable */ | 79 | /* Disable */ |
74 | high = low = 0; | 80 | high = low = 0; |
75 | } else { | 81 | } else { |
76 | high = base << PAGE_SHIFT; | 82 | high = base << PAGE_SHIFT; |
77 | if (centaur_mcr_type == 0) | 83 | if (centaur_mcr_type == 0) { |
78 | low = -size << PAGE_SHIFT | 0x1f; /* only support write-combining... */ | 84 | /* Only support write-combining... */ |
79 | else { | 85 | low = -size << PAGE_SHIFT | 0x1f; |
86 | } else { | ||
80 | if (type == MTRR_TYPE_UNCACHABLE) | 87 | if (type == MTRR_TYPE_UNCACHABLE) |
81 | low = -size << PAGE_SHIFT | 0x02; /* NC */ | 88 | low = -size << PAGE_SHIFT | 0x02; /* NC */ |
82 | else | 89 | else |
83 | low = -size << PAGE_SHIFT | 0x09; /* WWO,WC */ | 90 | low = -size << PAGE_SHIFT | 0x09; /* WWO, WC */ |
84 | } | 91 | } |
85 | } | 92 | } |
86 | centaur_mcr[reg].high = high; | 93 | centaur_mcr[reg].high = high; |
@@ -88,118 +95,16 @@ static void centaur_set_mcr(unsigned int reg, unsigned long base, | |||
88 | wrmsr(MSR_IDT_MCR0 + reg, low, high); | 95 | wrmsr(MSR_IDT_MCR0 + reg, low, high); |
89 | } | 96 | } |
90 | 97 | ||
91 | #if 0 | 98 | static int |
92 | /* | 99 | centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int type) |
93 | * Initialise the later (saner) Winchip MCR variant. In this version | ||
94 | * the BIOS can pass us the registers it has used (but not their values) | ||
95 | * and the control register is read/write | ||
96 | */ | ||
97 | |||
98 | static void __init | ||
99 | centaur_mcr1_init(void) | ||
100 | { | ||
101 | unsigned i; | ||
102 | u32 lo, hi; | ||
103 | |||
104 | /* Unfortunately, MCR's are read-only, so there is no way to | ||
105 | * find out what the bios might have done. | ||
106 | */ | ||
107 | |||
108 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
109 | if (((lo >> 17) & 7) == 1) { /* Type 1 Winchip2 MCR */ | ||
110 | lo &= ~0x1C0; /* clear key */ | ||
111 | lo |= 0x040; /* set key to 1 */ | ||
112 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); /* unlock MCR */ | ||
113 | } | ||
114 | |||
115 | centaur_mcr_type = 1; | ||
116 | |||
117 | /* | ||
118 | * Clear any unconfigured MCR's. | ||
119 | */ | ||
120 | |||
121 | for (i = 0; i < 8; ++i) { | ||
122 | if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) { | ||
123 | if (!(lo & (1 << (9 + i)))) | ||
124 | wrmsr(MSR_IDT_MCR0 + i, 0, 0); | ||
125 | else | ||
126 | /* | ||
127 | * If the BIOS set up an MCR we cannot see it | ||
128 | * but we don't wish to obliterate it | ||
129 | */ | ||
130 | centaur_mcr_reserved |= (1 << i); | ||
131 | } | ||
132 | } | ||
133 | /* | ||
134 | * Throw the main write-combining switch... | ||
135 | * However if OOSTORE is enabled then people have already done far | ||
136 | * cleverer things and we should behave. | ||
137 | */ | ||
138 | |||
139 | lo |= 15; /* Write combine enables */ | ||
140 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * Initialise the original winchip with read only MCR registers | ||
145 | * no used bitmask for the BIOS to pass on and write only control | ||
146 | */ | ||
147 | |||
148 | static void __init | ||
149 | centaur_mcr0_init(void) | ||
150 | { | ||
151 | unsigned i; | ||
152 | |||
153 | /* Unfortunately, MCR's are read-only, so there is no way to | ||
154 | * find out what the bios might have done. | ||
155 | */ | ||
156 | |||
157 | /* Clear any unconfigured MCR's. | ||
158 | * This way we are sure that the centaur_mcr array contains the actual | ||
159 | * values. The disadvantage is that any BIOS tweaks are thus undone. | ||
160 | * | ||
161 | */ | ||
162 | for (i = 0; i < 8; ++i) { | ||
163 | if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) | ||
164 | wrmsr(MSR_IDT_MCR0 + i, 0, 0); | ||
165 | } | ||
166 | |||
167 | wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); /* Write only */ | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * Initialise Winchip series MCR registers | ||
172 | */ | ||
173 | |||
174 | static void __init | ||
175 | centaur_mcr_init(void) | ||
176 | { | ||
177 | struct set_mtrr_context ctxt; | ||
178 | |||
179 | set_mtrr_prepare_save(&ctxt); | ||
180 | set_mtrr_cache_disable(&ctxt); | ||
181 | |||
182 | if (boot_cpu_data.x86_model == 4) | ||
183 | centaur_mcr0_init(); | ||
184 | else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9) | ||
185 | centaur_mcr1_init(); | ||
186 | |||
187 | set_mtrr_done(&ctxt); | ||
188 | } | ||
189 | #endif | ||
190 | |||
191 | static int centaur_validate_add_page(unsigned long base, | ||
192 | unsigned long size, unsigned int type) | ||
193 | { | 100 | { |
194 | /* | 101 | /* |
195 | * FIXME: Winchip2 supports uncached | 102 | * FIXME: Winchip2 supports uncached |
196 | */ | 103 | */ |
197 | if (type != MTRR_TYPE_WRCOMB && | 104 | if (type != MTRR_TYPE_WRCOMB && |
198 | (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { | 105 | (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { |
199 | printk(KERN_WARNING | 106 | pr_warning("mtrr: only write-combining%s supported\n", |
200 | "mtrr: only write-combining%s supported\n", | 107 | centaur_mcr_type ? " and uncacheable are" : " is"); |
201 | centaur_mcr_type ? " and uncacheable are" | ||
202 | : " is"); | ||
203 | return -EINVAL; | 108 | return -EINVAL; |
204 | } | 109 | } |
205 | return 0; | 110 | return 0; |
@@ -207,7 +112,6 @@ static int centaur_validate_add_page(unsigned long base, | |||
207 | 112 | ||
208 | static struct mtrr_ops centaur_mtrr_ops = { | 113 | static struct mtrr_ops centaur_mtrr_ops = { |
209 | .vendor = X86_VENDOR_CENTAUR, | 114 | .vendor = X86_VENDOR_CENTAUR, |
210 | // .init = centaur_mcr_init, | ||
211 | .set = centaur_set_mcr, | 115 | .set = centaur_set_mcr, |
212 | .get = centaur_get_mcr, | 116 | .get = centaur_get_mcr, |
213 | .get_free_region = centaur_get_free_region, | 117 | .get_free_region = centaur_get_free_region, |
@@ -220,5 +124,3 @@ int __init centaur_init_mtrr(void) | |||
220 | set_mtrr_ops(¢aur_mtrr_ops); | 124 | set_mtrr_ops(¢aur_mtrr_ops); |
221 | return 0; | 125 | return 0; |
222 | } | 126 | } |
223 | |||
224 | //arch_initcall(centaur_init_mtrr); | ||
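centaur_set_mcr() above packs an MCR pair as a page-aligned physical base in the high word and a negated size plus flag bits in the low word; because the size is a power of two, -size << PAGE_SHIFT is exactly the address mask. Worked with illustrative numbers:

    /* A 64 KB write-combining region at 0xd0000000, with 4 KB pages:
     *   base = 0xd0000000 >> 12 = 0xd0000, size = 16 pages
     *   high = base << 12                 = 0xd0000000
     *   low  = (-size << 12) | 0x1f       = 0xffff001f
     * centaur_get_mcr() reverses it:
     *   -(low & 0xfffff000) >> 12         = 16 pages again
     */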
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index 1d584a18a50d..315738c74aad 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c | |||
@@ -1,51 +1,75 @@ | |||
1 | /* MTRR (Memory Type Range Register) cleanup | 1 | /* |
2 | 2 | * MTRR (Memory Type Range Register) cleanup | |
3 | Copyright (C) 2009 Yinghai Lu | 3 | * |
4 | 4 | * Copyright (C) 2009 Yinghai Lu | |
5 | This library is free software; you can redistribute it and/or | 5 | * |
6 | modify it under the terms of the GNU Library General Public | 6 | * This library is free software; you can redistribute it and/or |
7 | License as published by the Free Software Foundation; either | 7 | * modify it under the terms of the GNU Library General Public |
8 | version 2 of the License, or (at your option) any later version. | 8 | * License as published by the Free Software Foundation; either |
9 | 9 | * version 2 of the License, or (at your option) any later version. | |
10 | This library is distributed in the hope that it will be useful, | 10 | * |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * This library is distributed in the hope that it will be useful, |
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | Library General Public License for more details. | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | 14 | * Library General Public License for more details. | |
15 | You should have received a copy of the GNU Library General Public | 15 | * |
16 | License along with this library; if not, write to the Free | 16 | * You should have received a copy of the GNU Library General Public |
17 | Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 17 | * License along with this library; if not, write to the Free |
18 | */ | 18 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
19 | 19 | */ | |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/pci.h> | 22 | #include <linux/pci.h> |
23 | #include <linux/smp.h> | 23 | #include <linux/smp.h> |
24 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
25 | #include <linux/mutex.h> | ||
26 | #include <linux/sort.h> | 25 | #include <linux/sort.h> |
26 | #include <linux/mutex.h> | ||
27 | #include <linux/uaccess.h> | ||
28 | #include <linux/kvm_para.h> | ||
27 | 29 | ||
30 | #include <asm/processor.h> | ||
28 | #include <asm/e820.h> | 31 | #include <asm/e820.h> |
29 | #include <asm/mtrr.h> | 32 | #include <asm/mtrr.h> |
30 | #include <asm/uaccess.h> | ||
31 | #include <asm/processor.h> | ||
32 | #include <asm/msr.h> | 33 | #include <asm/msr.h> |
33 | #include <asm/kvm_para.h> | ||
34 | #include "mtrr.h" | ||
35 | 34 | ||
36 | /* should be related to MTRR_VAR_RANGES nums */ | 35 | #include "mtrr.h" |
37 | #define RANGE_NUM 256 | ||
38 | 36 | ||
39 | struct res_range { | 37 | struct res_range { |
40 | unsigned long start; | 38 | unsigned long start; |
41 | unsigned long end; | 39 | unsigned long end; |
40 | }; | ||
41 | |||
42 | struct var_mtrr_range_state { | ||
43 | unsigned long base_pfn; | ||
44 | unsigned long size_pfn; | ||
45 | mtrr_type type; | ||
46 | }; | ||
47 | |||
48 | struct var_mtrr_state { | ||
49 | unsigned long range_startk; | ||
50 | unsigned long range_sizek; | ||
51 | unsigned long chunk_sizek; | ||
52 | unsigned long gran_sizek; | ||
53 | unsigned int reg; | ||
42 | }; | 54 | }; |
43 | 55 | ||
56 | /* Should be related to MTRR_VAR_RANGES nums */ | ||
57 | #define RANGE_NUM 256 | ||
58 | |||
59 | static struct res_range __initdata range[RANGE_NUM]; | ||
60 | static int __initdata nr_range; | ||
61 | |||
62 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | ||
63 | |||
64 | static int __initdata debug_print; | ||
65 | #define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0) | ||
66 | |||
67 | |||
44 | static int __init | 68 | static int __init |
45 | add_range(struct res_range *range, int nr_range, unsigned long start, | 69 | add_range(struct res_range *range, int nr_range, |
46 | unsigned long end) | 70 | unsigned long start, unsigned long end) |
47 | { | 71 | { |
48 | /* out of slots */ | 72 | /* Out of slots: */ |
49 | if (nr_range >= RANGE_NUM) | 73 | if (nr_range >= RANGE_NUM) |
50 | return nr_range; | 74 | return nr_range; |
51 | 75 | ||
@@ -58,12 +82,12 @@ add_range(struct res_range *range, int nr_range, unsigned long start, | |||
58 | } | 82 | } |
59 | 83 | ||
60 | static int __init | 84 | static int __init |
61 | add_range_with_merge(struct res_range *range, int nr_range, unsigned long start, | 85 | add_range_with_merge(struct res_range *range, int nr_range, |
62 | unsigned long end) | 86 | unsigned long start, unsigned long end) |
63 | { | 87 | { |
64 | int i; | 88 | int i; |
65 | 89 | ||
66 | /* try to merge it with old one */ | 90 | /* Try to merge it with old one: */ |
67 | for (i = 0; i < nr_range; i++) { | 91 | for (i = 0; i < nr_range; i++) { |
68 | unsigned long final_start, final_end; | 92 | unsigned long final_start, final_end; |
69 | unsigned long common_start, common_end; | 93 | unsigned long common_start, common_end; |
@@ -84,7 +108,7 @@ add_range_with_merge(struct res_range *range, int nr_range, unsigned long start, | |||
84 | return nr_range; | 108 | return nr_range; |
85 | } | 109 | } |
86 | 110 | ||
87 | /* need to add that */ | 111 | /* Need to add it: */ |
88 | return add_range(range, nr_range, start, end); | 112 | return add_range(range, nr_range, start, end); |
89 | } | 113 | } |
90 | 114 | ||
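
add_range() and add_range_with_merge() together implement a simple inclusive-range table: fold the new range into an existing overlapping or adjacent entry if one exists, otherwise append. A stand-alone sketch of the same logic, with struct res_range and RANGE_NUM as defined earlier in this file:

	#include <stdio.h>

	#define RANGE_NUM 256

	struct res_range {
		unsigned long start;
		unsigned long end;	/* inclusive */
	};

	static int add_range_with_merge(struct res_range *range, int nr_range,
					unsigned long start, unsigned long end)
	{
		int i;

		/* Try to merge with an existing entry first: */
		for (i = 0; i < nr_range; i++) {
			unsigned long common_start, common_end;

			if (!range[i].end)
				continue;
			common_start = range[i].start > start ? range[i].start : start;
			common_end = range[i].end < end ? range[i].end : end;
			if (common_start > common_end + 1)
				continue;	/* neither overlapping nor adjacent */
			if (start < range[i].start)
				range[i].start = start;
			if (end > range[i].end)
				range[i].end = end;
			return nr_range;
		}

		/* Out of slots: */
		if (nr_range >= RANGE_NUM)
			return nr_range;

		range[nr_range].start = start;
		range[nr_range].end = end;
		return nr_range + 1;
	}

	int main(void)
	{
		static struct res_range r[RANGE_NUM];
		int n = 0;

		n = add_range_with_merge(r, n, 0x000, 0x0ff);
		n = add_range_with_merge(r, n, 0x100, 0x1ff);	/* adjacent: merges */
		printf("%d entries, first = %#lx-%#lx\n", n, r[0].start, r[0].end);
		return 0;
	}
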
@@ -117,7 +141,7 @@ subtract_range(struct res_range *range, unsigned long start, unsigned long end) | |||
117 | } | 141 | } |
118 | 142 | ||
119 | if (start > range[j].start && end < range[j].end) { | 143 | if (start > range[j].start && end < range[j].end) { |
120 | /* find the new spare */ | 144 | /* Find the new spare: */ |
121 | for (i = 0; i < RANGE_NUM; i++) { | 145 | for (i = 0; i < RANGE_NUM; i++) { |
122 | if (range[i].end == 0) | 146 | if (range[i].end == 0) |
123 | break; | 147 | break; |
@@ -146,14 +170,8 @@ static int __init cmp_range(const void *x1, const void *x2) | |||
146 | return start1 - start2; | 170 | return start1 - start2; |
147 | } | 171 | } |
148 | 172 | ||
149 | struct var_mtrr_range_state { | 173 | #define BIOS_BUG_MSG KERN_WARNING \ |
150 | unsigned long base_pfn; | 174 | "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n" |
151 | unsigned long size_pfn; | ||
152 | mtrr_type type; | ||
153 | }; | ||
154 | |||
155 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | ||
156 | static int __initdata debug_print; | ||
157 | 175 | ||
158 | static int __init | 176 | static int __init |
159 | x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | 177 | x86_get_mtrr_mem_range(struct res_range *range, int nr_range, |
@@ -180,7 +198,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
180 | range[i].start, range[i].end + 1); | 198 | range[i].start, range[i].end + 1); |
181 | } | 199 | } |
182 | 200 | ||
183 | /* take out UC ranges */ | 201 | /* Take out UC ranges: */ |
184 | for (i = 0; i < num_var_ranges; i++) { | 202 | for (i = 0; i < num_var_ranges; i++) { |
185 | type = range_state[i].type; | 203 | type = range_state[i].type; |
186 | if (type != MTRR_TYPE_UNCACHABLE && | 204 | if (type != MTRR_TYPE_UNCACHABLE && |
@@ -193,9 +211,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
193 | if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed && | 211 | if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed && |
194 | (mtrr_state.enabled & 1)) { | 212 | (mtrr_state.enabled & 1)) { |
195 | /* Var MTRR contains UC entry below 1M? Skip it: */ | 213 | /* Var MTRR contains UC entry below 1M? Skip it: */ |
196 | printk(KERN_WARNING "WARNING: BIOS bug: VAR MTRR %d " | 214 | printk(BIOS_BUG_MSG, i); |
197 | "contains strange UC entry under 1M, check " | ||
198 | "with your system vendor!\n", i); | ||
199 | if (base + size <= (1<<(20-PAGE_SHIFT))) | 215 | if (base + size <= (1<<(20-PAGE_SHIFT))) |
200 | continue; | 216 | continue; |
201 | size -= (1<<(20-PAGE_SHIFT)) - base; | 217 | size -= (1<<(20-PAGE_SHIFT)) - base; |
@@ -237,17 +253,13 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
237 | return nr_range; | 253 | return nr_range; |
238 | } | 254 | } |
239 | 255 | ||
240 | static struct res_range __initdata range[RANGE_NUM]; | ||
241 | static int __initdata nr_range; | ||
242 | |||
243 | #ifdef CONFIG_MTRR_SANITIZER | 256 | #ifdef CONFIG_MTRR_SANITIZER |
244 | 257 | ||
245 | static unsigned long __init sum_ranges(struct res_range *range, int nr_range) | 258 | static unsigned long __init sum_ranges(struct res_range *range, int nr_range) |
246 | { | 259 | { |
247 | unsigned long sum; | 260 | unsigned long sum = 0; |
248 | int i; | 261 | int i; |
249 | 262 | ||
250 | sum = 0; | ||
251 | for (i = 0; i < nr_range; i++) | 263 | for (i = 0; i < nr_range; i++) |
252 | sum += range[i].end + 1 - range[i].start; | 264 | sum += range[i].end + 1 - range[i].start; |
253 | 265 | ||
@@ -278,17 +290,9 @@ static int __init mtrr_cleanup_debug_setup(char *str) | |||
278 | } | 290 | } |
279 | early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup); | 291 | early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup); |
280 | 292 | ||
281 | struct var_mtrr_state { | ||
282 | unsigned long range_startk; | ||
283 | unsigned long range_sizek; | ||
284 | unsigned long chunk_sizek; | ||
285 | unsigned long gran_sizek; | ||
286 | unsigned int reg; | ||
287 | }; | ||
288 | |||
289 | static void __init | 293 | static void __init |
290 | set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | 294 | set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, |
291 | unsigned char type, unsigned int address_bits) | 295 | unsigned char type, unsigned int address_bits) |
292 | { | 296 | { |
293 | u32 base_lo, base_hi, mask_lo, mask_hi; | 297 | u32 base_lo, base_hi, mask_lo, mask_hi; |
294 | u64 base, mask; | 298 | u64 base, mask; |
@@ -301,7 +305,7 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | |||
301 | mask = (1ULL << address_bits) - 1; | 305 | mask = (1ULL << address_bits) - 1; |
302 | mask &= ~((((u64)sizek) << 10) - 1); | 306 | mask &= ~((((u64)sizek) << 10) - 1); |
303 | 307 | ||
304 | base = ((u64)basek) << 10; | 308 | base = ((u64)basek) << 10; |
305 | 309 | ||
306 | base |= type; | 310 | base |= type; |
307 | mask |= 0x800; | 311 | mask |= 0x800; |
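
The base/mask arithmetic above is the whole trick of a variable-range MTRR: the mask keeps the high address bits that select a power-of-two block, the low byte of the base register carries the memory type, and bit 11 of the mask (the 0x800) is the valid bit. A host-side sketch of the same encoding, with basek/sizek in KiB as in set_var_mtrr(); encode_var_mtrr() is our illustration name:

	#include <stdint.h>
	#include <stdio.h>

	static void encode_var_mtrr(unsigned long basek, unsigned long sizek,
				    unsigned int type, unsigned int address_bits)
	{
		uint64_t mask = (1ULL << address_bits) - 1;
		uint64_t base = (uint64_t)basek << 10;

		mask &= ~(((uint64_t)sizek << 10) - 1);	/* keep block-select bits */
		base |= type;				/* type in the low byte */
		mask |= 0x800;				/* valid bit */

		printf("base=%#llx mask=%#llx\n",
		       (unsigned long long)base, (unsigned long long)mask);
	}

	int main(void)
	{
		/* 1GB write-back (type 6) at physical 0, 36 address bits */
		encode_var_mtrr(0, 1024 * 1024, 6, 36);
		return 0;
	}
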
@@ -317,15 +321,14 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | |||
317 | 321 | ||
318 | static void __init | 322 | static void __init |
319 | save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | 323 | save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, |
320 | unsigned char type) | 324 | unsigned char type) |
321 | { | 325 | { |
322 | range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); | 326 | range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); |
323 | range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); | 327 | range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); |
324 | range_state[reg].type = type; | 328 | range_state[reg].type = type; |
325 | } | 329 | } |
326 | 330 | ||
327 | static void __init | 331 | static void __init set_var_mtrr_all(unsigned int address_bits) |
328 | set_var_mtrr_all(unsigned int address_bits) | ||
329 | { | 332 | { |
330 | unsigned long basek, sizek; | 333 | unsigned long basek, sizek; |
331 | unsigned char type; | 334 | unsigned char type; |
@@ -342,11 +345,11 @@ set_var_mtrr_all(unsigned int address_bits) | |||
342 | 345 | ||
343 | static unsigned long to_size_factor(unsigned long sizek, char *factorp) | 346 | static unsigned long to_size_factor(unsigned long sizek, char *factorp) |
344 | { | 347 | { |
345 | char factor; | ||
346 | unsigned long base = sizek; | 348 | unsigned long base = sizek; |
349 | char factor; | ||
347 | 350 | ||
348 | if (base & ((1<<10) - 1)) { | 351 | if (base & ((1<<10) - 1)) { |
349 | /* not MB alignment */ | 352 | /* Not MB-aligned: */ |
350 | factor = 'K'; | 353 | factor = 'K'; |
351 | } else if (base & ((1<<20) - 1)) { | 354 | } else if (base & ((1<<20) - 1)) { |
352 | factor = 'M'; | 355 | factor = 'M'; |
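
to_size_factor() just picks the largest unit that evenly divides the size, for printing. A complete host-side version consistent with the hunk; note the 'G' branch falls outside the visible context and is an assumption here:

	#include <stdio.h>

	/* sizek is in KiB; returns the scaled value, unit letter via *factorp. */
	static unsigned long to_size_factor(unsigned long sizek, char *factorp)
	{
		unsigned long base = sizek;
		char factor;

		if (base & ((1UL << 10) - 1)) {
			factor = 'K';		/* not MiB-aligned */
		} else if (base & ((1UL << 20) - 1)) {
			factor = 'M';
			base >>= 10;
		} else {
			factor = 'G';		/* assumed, per the pattern above */
			base >>= 20;
		}

		*factorp = factor;
		return base;
	}

	int main(void)
	{
		char f;
		unsigned long v = to_size_factor(256 * 1024, &f);

		printf("%lu%c\n", v, f);	/* 256M */
		return 0;
	}
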
@@ -372,11 +375,12 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk, | |||
372 | unsigned long max_align, align; | 375 | unsigned long max_align, align; |
373 | unsigned long sizek; | 376 | unsigned long sizek; |
374 | 377 | ||
375 | /* Compute the maximum size I can make a range */ | 378 | /* Compute the maximum size with which we can make a range: */ |
376 | if (range_startk) | 379 | if (range_startk) |
377 | max_align = ffs(range_startk) - 1; | 380 | max_align = ffs(range_startk) - 1; |
378 | else | 381 | else |
379 | max_align = 32; | 382 | max_align = 32; |
383 | |||
380 | align = fls(range_sizek) - 1; | 384 | align = fls(range_sizek) - 1; |
381 | if (align > max_align) | 385 | if (align > max_align) |
382 | align = max_align; | 386 | align = max_align; |
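
The two limits computed above follow from the hardware rule that a variable MTRR must cover a power-of-two-sized, size-aligned block: the base's lowest set bit (ffs) caps the alignment, and the remaining size's highest set bit (fls) caps the block. A sketch using GCC builtins in place of the kernel's ffs()/fls(); max_mtrr_chunk_k() is our name for illustration:

	#include <stdio.h>

	/* Largest single MTRR chunk (in KiB) usable at range_startk for a
	 * range of range_sizek KiB; range_sizek must be non-zero. */
	static unsigned long max_mtrr_chunk_k(unsigned long range_startk,
					      unsigned long range_sizek)
	{
		unsigned long max_align, align;

		/* kernel ffs(): 1 + index of the lowest set bit, 0 for 0 */
		max_align = range_startk ? __builtin_ffsl(range_startk) - 1 : 32;
		/* kernel fls() - 1: index of the highest set bit */
		align = 63 - __builtin_clzll(range_sizek);
		if (align > max_align)
			align = max_align;
		return 1UL << align;
	}

	int main(void)
	{
		/* base 3MB limits us to 1MB chunks even for a 5MB range */
		printf("%lu KiB\n", max_mtrr_chunk_k(3 * 1024, 5 * 1024));
		return 0;
	}
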
@@ -386,11 +390,10 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk, | |||
386 | char start_factor = 'K', size_factor = 'K'; | 390 | char start_factor = 'K', size_factor = 'K'; |
387 | unsigned long start_base, size_base; | 391 | unsigned long start_base, size_base; |
388 | 392 | ||
389 | start_base = to_size_factor(range_startk, | 393 | start_base = to_size_factor(range_startk, &start_factor); |
390 | &start_factor), | 394 | size_base = to_size_factor(sizek, &size_factor); |
391 | size_base = to_size_factor(sizek, &size_factor), | ||
392 | 395 | ||
393 | printk(KERN_DEBUG "Setting variable MTRR %d, " | 396 | Dprintk("Setting variable MTRR %d, " |
394 | "base: %ld%cB, range: %ld%cB, type %s\n", | 397 | "base: %ld%cB, range: %ld%cB, type %s\n", |
395 | reg, start_base, start_factor, | 398 | reg, start_base, start_factor, |
396 | size_base, size_factor, | 399 | size_base, size_factor, |
@@ -425,10 +428,11 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
425 | chunk_sizek = state->chunk_sizek; | 428 | chunk_sizek = state->chunk_sizek; |
426 | gran_sizek = state->gran_sizek; | 429 | gran_sizek = state->gran_sizek; |
427 | 430 | ||
428 | /* align with gran size, prevent small block used up MTRRs */ | 431 | /* Align with gran size, prevent small block used up MTRRs: */ |
429 | range_basek = ALIGN(state->range_startk, gran_sizek); | 432 | range_basek = ALIGN(state->range_startk, gran_sizek); |
430 | if ((range_basek > basek) && basek) | 433 | if ((range_basek > basek) && basek) |
431 | return second_sizek; | 434 | return second_sizek; |
435 | |||
432 | state->range_sizek -= (range_basek - state->range_startk); | 436 | state->range_sizek -= (range_basek - state->range_startk); |
433 | range_sizek = ALIGN(state->range_sizek, gran_sizek); | 437 | range_sizek = ALIGN(state->range_sizek, gran_sizek); |
434 | 438 | ||
@@ -439,22 +443,21 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
439 | } | 443 | } |
440 | state->range_sizek = range_sizek; | 444 | state->range_sizek = range_sizek; |
441 | 445 | ||
442 | /* try to append some small hole */ | 446 | /* Try to append some small hole: */ |
443 | range0_basek = state->range_startk; | 447 | range0_basek = state->range_startk; |
444 | range0_sizek = ALIGN(state->range_sizek, chunk_sizek); | 448 | range0_sizek = ALIGN(state->range_sizek, chunk_sizek); |
445 | 449 | ||
446 | /* no increase */ | 450 | /* No increase: */ |
447 | if (range0_sizek == state->range_sizek) { | 451 | if (range0_sizek == state->range_sizek) { |
448 | if (debug_print) | 452 | Dprintk("rangeX: %016lx - %016lx\n", |
449 | printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", | 453 | range0_basek<<10, |
450 | range0_basek<<10, | 454 | (range0_basek + state->range_sizek)<<10); |
451 | (range0_basek + state->range_sizek)<<10); | ||
452 | state->reg = range_to_mtrr(state->reg, range0_basek, | 455 | state->reg = range_to_mtrr(state->reg, range0_basek, |
453 | state->range_sizek, MTRR_TYPE_WRBACK); | 456 | state->range_sizek, MTRR_TYPE_WRBACK); |
454 | return 0; | 457 | return 0; |
455 | } | 458 | } |
456 | 459 | ||
457 | /* only cut back, when it is not the last */ | 460 | /* Only cut back when it is not the last: */ |
458 | if (sizek) { | 461 | if (sizek) { |
459 | while (range0_basek + range0_sizek > (basek + sizek)) { | 462 | while (range0_basek + range0_sizek > (basek + sizek)) { |
460 | if (range0_sizek >= chunk_sizek) | 463 | if (range0_sizek >= chunk_sizek) |
@@ -470,16 +473,16 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
470 | second_try: | 473 | second_try: |
471 | range_basek = range0_basek + range0_sizek; | 474 | range_basek = range0_basek + range0_sizek; |
472 | 475 | ||
473 | /* one hole in the middle */ | 476 | /* One hole in the middle: */ |
474 | if (range_basek > basek && range_basek <= (basek + sizek)) | 477 | if (range_basek > basek && range_basek <= (basek + sizek)) |
475 | second_sizek = range_basek - basek; | 478 | second_sizek = range_basek - basek; |
476 | 479 | ||
477 | if (range0_sizek > state->range_sizek) { | 480 | if (range0_sizek > state->range_sizek) { |
478 | 481 | ||
479 | /* one hole in middle or at end */ | 482 | /* One hole in middle or at the end: */ |
480 | hole_sizek = range0_sizek - state->range_sizek - second_sizek; | 483 | hole_sizek = range0_sizek - state->range_sizek - second_sizek; |
481 | 484 | ||
482 | /* hole size should be less than half of range0 size */ | 485 | /* Hole size should be less than half of range0 size: */ |
483 | if (hole_sizek >= (range0_sizek >> 1) && | 486 | if (hole_sizek >= (range0_sizek >> 1) && |
484 | range0_sizek >= chunk_sizek) { | 487 | range0_sizek >= chunk_sizek) { |
485 | range0_sizek -= chunk_sizek; | 488 | range0_sizek -= chunk_sizek; |
@@ -491,32 +494,30 @@ second_try: | |||
491 | } | 494 | } |
492 | 495 | ||
493 | if (range0_sizek) { | 496 | if (range0_sizek) { |
494 | if (debug_print) | 497 | Dprintk("range0: %016lx - %016lx\n", |
495 | printk(KERN_DEBUG "range0: %016lx - %016lx\n", | 498 | range0_basek<<10, |
496 | range0_basek<<10, | 499 | (range0_basek + range0_sizek)<<10); |
497 | (range0_basek + range0_sizek)<<10); | ||
498 | state->reg = range_to_mtrr(state->reg, range0_basek, | 500 | state->reg = range_to_mtrr(state->reg, range0_basek, |
499 | range0_sizek, MTRR_TYPE_WRBACK); | 501 | range0_sizek, MTRR_TYPE_WRBACK); |
500 | } | 502 | } |
501 | 503 | ||
502 | if (range0_sizek < state->range_sizek) { | 504 | if (range0_sizek < state->range_sizek) { |
503 | /* need to handle left over */ | 505 | /* Need to handle left over range: */ |
504 | range_sizek = state->range_sizek - range0_sizek; | 506 | range_sizek = state->range_sizek - range0_sizek; |
505 | 507 | ||
506 | if (debug_print) | 508 | Dprintk("range: %016lx - %016lx\n", |
507 | printk(KERN_DEBUG "range: %016lx - %016lx\n", | 509 | range_basek<<10, |
508 | range_basek<<10, | 510 | (range_basek + range_sizek)<<10); |
509 | (range_basek + range_sizek)<<10); | 511 | |
510 | state->reg = range_to_mtrr(state->reg, range_basek, | 512 | state->reg = range_to_mtrr(state->reg, range_basek, |
511 | range_sizek, MTRR_TYPE_WRBACK); | 513 | range_sizek, MTRR_TYPE_WRBACK); |
512 | } | 514 | } |
513 | 515 | ||
514 | if (hole_sizek) { | 516 | if (hole_sizek) { |
515 | hole_basek = range_basek - hole_sizek - second_sizek; | 517 | hole_basek = range_basek - hole_sizek - second_sizek; |
516 | if (debug_print) | 518 | Dprintk("hole: %016lx - %016lx\n", |
517 | printk(KERN_DEBUG "hole: %016lx - %016lx\n", | 519 | hole_basek<<10, |
518 | hole_basek<<10, | 520 | (hole_basek + hole_sizek)<<10); |
519 | (hole_basek + hole_sizek)<<10); | ||
520 | state->reg = range_to_mtrr(state->reg, hole_basek, | 521 | state->reg = range_to_mtrr(state->reg, hole_basek, |
521 | hole_sizek, MTRR_TYPE_UNCACHABLE); | 522 | hole_sizek, MTRR_TYPE_UNCACHABLE); |
522 | } | 523 | } |
@@ -537,23 +538,23 @@ set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn, | |||
537 | basek = base_pfn << (PAGE_SHIFT - 10); | 538 | basek = base_pfn << (PAGE_SHIFT - 10); |
538 | sizek = size_pfn << (PAGE_SHIFT - 10); | 539 | sizek = size_pfn << (PAGE_SHIFT - 10); |
539 | 540 | ||
540 | /* See if I can merge with the last range */ | 541 | /* See if I can merge with the last range: */ |
541 | if ((basek <= 1024) || | 542 | if ((basek <= 1024) || |
542 | (state->range_startk + state->range_sizek == basek)) { | 543 | (state->range_startk + state->range_sizek == basek)) { |
543 | unsigned long endk = basek + sizek; | 544 | unsigned long endk = basek + sizek; |
544 | state->range_sizek = endk - state->range_startk; | 545 | state->range_sizek = endk - state->range_startk; |
545 | return; | 546 | return; |
546 | } | 547 | } |
547 | /* Write the range mtrrs */ | 548 | /* Write the range mtrrs: */ |
548 | if (state->range_sizek != 0) | 549 | if (state->range_sizek != 0) |
549 | second_sizek = range_to_mtrr_with_hole(state, basek, sizek); | 550 | second_sizek = range_to_mtrr_with_hole(state, basek, sizek); |
550 | 551 | ||
551 | /* Allocate an msr */ | 552 | /* Allocate an msr: */ |
552 | state->range_startk = basek + second_sizek; | 553 | state->range_startk = basek + second_sizek; |
553 | state->range_sizek = sizek - second_sizek; | 554 | state->range_sizek = sizek - second_sizek; |
554 | } | 555 | } |
555 | 556 | ||
556 | /* mininum size of mtrr block that can take hole */ | 557 | /* Minimum size of mtrr block that can take hole: */ |
557 | static u64 mtrr_chunk_size __initdata = (256ULL<<20); | 558 | static u64 mtrr_chunk_size __initdata = (256ULL<<20); |
558 | 559 | ||
559 | static int __init parse_mtrr_chunk_size_opt(char *p) | 560 | static int __init parse_mtrr_chunk_size_opt(char *p) |
@@ -565,7 +566,7 @@ static int __init parse_mtrr_chunk_size_opt(char *p) | |||
565 | } | 566 | } |
566 | early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt); | 567 | early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt); |
567 | 568 | ||
568 | /* granity of mtrr of block */ | 569 | /* Granularity of an mtrr block: */ |
569 | static u64 mtrr_gran_size __initdata; | 570 | static u64 mtrr_gran_size __initdata; |
570 | 571 | ||
571 | static int __init parse_mtrr_gran_size_opt(char *p) | 572 | static int __init parse_mtrr_gran_size_opt(char *p) |
@@ -577,7 +578,7 @@ static int __init parse_mtrr_gran_size_opt(char *p) | |||
577 | } | 578 | } |
578 | early_param("mtrr_gran_size", parse_mtrr_gran_size_opt); | 579 | early_param("mtrr_gran_size", parse_mtrr_gran_size_opt); |
579 | 580 | ||
580 | static int nr_mtrr_spare_reg __initdata = | 581 | static unsigned long nr_mtrr_spare_reg __initdata = |
581 | CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT; | 582 | CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT; |
582 | 583 | ||
583 | static int __init parse_mtrr_spare_reg(char *arg) | 584 | static int __init parse_mtrr_spare_reg(char *arg) |
@@ -586,7 +587,6 @@ static int __init parse_mtrr_spare_reg(char *arg) | |||
586 | nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0); | 587 | nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0); |
587 | return 0; | 588 | return 0; |
588 | } | 589 | } |
589 | |||
590 | early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg); | 590 | early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg); |
591 | 591 | ||
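
All three mtrr_* boot options accept human-readable sizes; their bodies (partly elided by the hunks) hand the string to the kernel's memparse(). A user-space sketch of that suffix convention — parse_size() is a hypothetical stand-in, not a kernel function:

	#include <stdio.h>
	#include <stdlib.h>

	/* A number with an optional K/M/G suffix, each step scaling by 1024,
	 * in the style of memparse(). */
	static unsigned long long parse_size(const char *s)
	{
		char *end;
		unsigned long long v = strtoull(s, &end, 0);

		switch (*end) {
		case 'G': case 'g':
			v <<= 10;	/* fall through */
		case 'M': case 'm':
			v <<= 10;	/* fall through */
		case 'K': case 'k':
			v <<= 10;
		}
		return v;
	}

	int main(void)
	{
		/* e.g. mtrr_chunk_size=256M on the kernel command line */
		printf("%llu\n", parse_size("256M"));	/* 268435456 */
		return 0;
	}
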
592 | static int __init | 592 | static int __init |
@@ -594,8 +594,8 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range, | |||
594 | u64 chunk_size, u64 gran_size) | 594 | u64 chunk_size, u64 gran_size) |
595 | { | 595 | { |
596 | struct var_mtrr_state var_state; | 596 | struct var_mtrr_state var_state; |
597 | int i; | ||
598 | int num_reg; | 597 | int num_reg; |
598 | int i; | ||
599 | 599 | ||
600 | var_state.range_startk = 0; | 600 | var_state.range_startk = 0; |
601 | var_state.range_sizek = 0; | 601 | var_state.range_sizek = 0; |
@@ -605,17 +605,18 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range, | |||
605 | 605 | ||
606 | memset(range_state, 0, sizeof(range_state)); | 606 | memset(range_state, 0, sizeof(range_state)); |
607 | 607 | ||
608 | /* Write the range etc */ | 608 | /* Write the range: */ |
609 | for (i = 0; i < nr_range; i++) | 609 | for (i = 0; i < nr_range; i++) { |
610 | set_var_mtrr_range(&var_state, range[i].start, | 610 | set_var_mtrr_range(&var_state, range[i].start, |
611 | range[i].end - range[i].start + 1); | 611 | range[i].end - range[i].start + 1); |
612 | } | ||
612 | 613 | ||
613 | /* Write the last range */ | 614 | /* Write the last range: */ |
614 | if (var_state.range_sizek != 0) | 615 | if (var_state.range_sizek != 0) |
615 | range_to_mtrr_with_hole(&var_state, 0, 0); | 616 | range_to_mtrr_with_hole(&var_state, 0, 0); |
616 | 617 | ||
617 | num_reg = var_state.reg; | 618 | num_reg = var_state.reg; |
618 | /* Clear out the extra MTRR's */ | 619 | /* Clear out the extra MTRRs: */ |
619 | while (var_state.reg < num_var_ranges) { | 620 | while (var_state.reg < num_var_ranges) { |
620 | save_var_mtrr(var_state.reg, 0, 0, 0); | 621 | save_var_mtrr(var_state.reg, 0, 0, 0); |
621 | var_state.reg++; | 622 | var_state.reg++; |
@@ -625,11 +626,11 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range, | |||
625 | } | 626 | } |
626 | 627 | ||
627 | struct mtrr_cleanup_result { | 628 | struct mtrr_cleanup_result { |
628 | unsigned long gran_sizek; | 629 | unsigned long gran_sizek; |
629 | unsigned long chunk_sizek; | 630 | unsigned long chunk_sizek; |
630 | unsigned long lose_cover_sizek; | 631 | unsigned long lose_cover_sizek; |
631 | unsigned int num_reg; | 632 | unsigned int num_reg; |
632 | int bad; | 633 | int bad; |
633 | }; | 634 | }; |
634 | 635 | ||
635 | /* | 636 | /* |
@@ -645,10 +646,10 @@ static unsigned long __initdata min_loss_pfn[RANGE_NUM]; | |||
645 | 646 | ||
646 | static void __init print_out_mtrr_range_state(void) | 647 | static void __init print_out_mtrr_range_state(void) |
647 | { | 648 | { |
648 | int i; | ||
649 | char start_factor = 'K', size_factor = 'K'; | 649 | char start_factor = 'K', size_factor = 'K'; |
650 | unsigned long start_base, size_base; | 650 | unsigned long start_base, size_base; |
651 | mtrr_type type; | 651 | mtrr_type type; |
652 | int i; | ||
652 | 653 | ||
653 | for (i = 0; i < num_var_ranges; i++) { | 654 | for (i = 0; i < num_var_ranges; i++) { |
654 | 655 | ||
@@ -676,10 +677,10 @@ static int __init mtrr_need_cleanup(void) | |||
676 | int i; | 677 | int i; |
677 | mtrr_type type; | 678 | mtrr_type type; |
678 | unsigned long size; | 679 | unsigned long size; |
679 | /* extra one for all 0 */ | 680 | /* Extra one for all 0: */ |
680 | int num[MTRR_NUM_TYPES + 1]; | 681 | int num[MTRR_NUM_TYPES + 1]; |
681 | 682 | ||
682 | /* check entries number */ | 683 | /* Check entries number: */ |
683 | memset(num, 0, sizeof(num)); | 684 | memset(num, 0, sizeof(num)); |
684 | for (i = 0; i < num_var_ranges; i++) { | 685 | for (i = 0; i < num_var_ranges; i++) { |
685 | type = range_state[i].type; | 686 | type = range_state[i].type; |
@@ -693,88 +694,86 @@ static int __init mtrr_need_cleanup(void) | |||
693 | num[type]++; | 694 | num[type]++; |
694 | } | 695 | } |
695 | 696 | ||
696 | /* check if we got UC entries */ | 697 | /* Check if we got UC entries: */ |
697 | if (!num[MTRR_TYPE_UNCACHABLE]) | 698 | if (!num[MTRR_TYPE_UNCACHABLE]) |
698 | return 0; | 699 | return 0; |
699 | 700 | ||
700 | /* check if we only had WB and UC */ | 701 | /* Check if we only had WB and UC */ |
701 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != | 702 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != |
702 | num_var_ranges - num[MTRR_NUM_TYPES]) | 703 | num_var_ranges - num[MTRR_NUM_TYPES]) |
703 | return 0; | 704 | return 0; |
704 | 705 | ||
705 | return 1; | 706 | return 1; |
706 | } | 707 | } |
707 | 708 | ||
708 | static unsigned long __initdata range_sums; | 709 | static unsigned long __initdata range_sums; |
709 | static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size, | 710 | |
710 | unsigned long extra_remove_base, | 711 | static void __init |
711 | unsigned long extra_remove_size, | 712 | mtrr_calc_range_state(u64 chunk_size, u64 gran_size, |
712 | int i) | 713 | unsigned long x_remove_base, |
714 | unsigned long x_remove_size, int i) | ||
713 | { | 715 | { |
714 | int num_reg; | ||
715 | static struct res_range range_new[RANGE_NUM]; | 716 | static struct res_range range_new[RANGE_NUM]; |
716 | static int nr_range_new; | ||
717 | unsigned long range_sums_new; | 717 | unsigned long range_sums_new; |
718 | static int nr_range_new; | ||
719 | int num_reg; | ||
718 | 720 | ||
719 | /* convert ranges to var ranges state */ | 721 | /* Convert ranges to var ranges state: */ |
720 | num_reg = x86_setup_var_mtrrs(range, nr_range, | 722 | num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); |
721 | chunk_size, gran_size); | ||
722 | 723 | ||
723 | /* we got new setting in range_state, check it */ | 724 | /* We got new setting in range_state, check it: */ |
724 | memset(range_new, 0, sizeof(range_new)); | 725 | memset(range_new, 0, sizeof(range_new)); |
725 | nr_range_new = x86_get_mtrr_mem_range(range_new, 0, | 726 | nr_range_new = x86_get_mtrr_mem_range(range_new, 0, |
726 | extra_remove_base, extra_remove_size); | 727 | x_remove_base, x_remove_size); |
727 | range_sums_new = sum_ranges(range_new, nr_range_new); | 728 | range_sums_new = sum_ranges(range_new, nr_range_new); |
728 | 729 | ||
729 | result[i].chunk_sizek = chunk_size >> 10; | 730 | result[i].chunk_sizek = chunk_size >> 10; |
730 | result[i].gran_sizek = gran_size >> 10; | 731 | result[i].gran_sizek = gran_size >> 10; |
731 | result[i].num_reg = num_reg; | 732 | result[i].num_reg = num_reg; |
733 | |||
732 | if (range_sums < range_sums_new) { | 734 | if (range_sums < range_sums_new) { |
733 | result[i].lose_cover_sizek = | 735 | result[i].lose_cover_sizek = (range_sums_new - range_sums) << PSHIFT; |
734 | (range_sums_new - range_sums) << PSHIFT; | ||
735 | result[i].bad = 1; | 736 | result[i].bad = 1; |
736 | } else | 737 | } else { |
737 | result[i].lose_cover_sizek = | 738 | result[i].lose_cover_sizek = (range_sums - range_sums_new) << PSHIFT; |
738 | (range_sums - range_sums_new) << PSHIFT; | 739 | } |
739 | 740 | ||
740 | /* double check it */ | 741 | /* Double check it: */ |
741 | if (!result[i].bad && !result[i].lose_cover_sizek) { | 742 | if (!result[i].bad && !result[i].lose_cover_sizek) { |
742 | if (nr_range_new != nr_range || | 743 | if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range))) |
743 | memcmp(range, range_new, sizeof(range))) | 744 | result[i].bad = 1; |
744 | result[i].bad = 1; | ||
745 | } | 745 | } |
746 | 746 | ||
747 | if (!result[i].bad && (range_sums - range_sums_new < | 747 | if (!result[i].bad && (range_sums - range_sums_new < min_loss_pfn[num_reg])) |
748 | min_loss_pfn[num_reg])) { | 748 | min_loss_pfn[num_reg] = range_sums - range_sums_new; |
749 | min_loss_pfn[num_reg] = | ||
750 | range_sums - range_sums_new; | ||
751 | } | ||
752 | } | 749 | } |
753 | 750 | ||
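
The scoring done by mtrr_calc_range_state() is conceptually simple: re-expand the candidate register layout back into ranges, sum the pages covered, and charge any shortfall against the original range_sums as lost RAM. A toy version of the accounting, with end inclusive and units in pages, as in sum_ranges() earlier in this file:

	#include <stdio.h>

	struct res_range {
		unsigned long start;
		unsigned long end;	/* inclusive */
	};

	static unsigned long sum_ranges(const struct res_range *r, int n)
	{
		unsigned long sum = 0;
		int i;

		for (i = 0; i < n; i++)
			sum += r[i].end + 1 - r[i].start;
		return sum;
	}

	int main(void)
	{
		struct res_range want[] = { { 0, 0xfffff } };	/* 1M pages = 4GB */
		struct res_range got[]  = { { 0, 0xeffff } };	/* candidate layout */

		/* the lose_cover accounting from mtrr_calc_range_state(): */
		printf("lost %lu pages\n",
		       sum_ranges(want, 1) - sum_ranges(got, 1));	/* 65536 */
		return 0;
	}
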
754 | static void __init mtrr_print_out_one_result(int i) | 751 | static void __init mtrr_print_out_one_result(int i) |
755 | { | 752 | { |
756 | char gran_factor, chunk_factor, lose_factor; | ||
757 | unsigned long gran_base, chunk_base, lose_base; | 753 | unsigned long gran_base, chunk_base, lose_base; |
754 | char gran_factor, chunk_factor, lose_factor; | ||
758 | 755 | ||
759 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), | 756 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), |
760 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), | 757 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), |
761 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), | 758 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), |
762 | printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t", | 759 | |
763 | result[i].bad ? "*BAD*" : " ", | 760 | pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t", |
764 | gran_base, gran_factor, chunk_base, chunk_factor); | 761 | result[i].bad ? "*BAD*" : " ", |
765 | printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n", | 762 | gran_base, gran_factor, chunk_base, chunk_factor); |
766 | result[i].num_reg, result[i].bad ? "-" : "", | 763 | pr_cont("num_reg: %d \tlose cover RAM: %s%ld%c\n", |
767 | lose_base, lose_factor); | 764 | result[i].num_reg, result[i].bad ? "-" : "", |
765 | lose_base, lose_factor); | ||
768 | } | 766 | } |
769 | 767 | ||
770 | static int __init mtrr_search_optimal_index(void) | 768 | static int __init mtrr_search_optimal_index(void) |
771 | { | 769 | { |
772 | int i; | ||
773 | int num_reg_good; | 770 | int num_reg_good; |
774 | int index_good; | 771 | int index_good; |
772 | int i; | ||
775 | 773 | ||
776 | if (nr_mtrr_spare_reg >= num_var_ranges) | 774 | if (nr_mtrr_spare_reg >= num_var_ranges) |
777 | nr_mtrr_spare_reg = num_var_ranges - 1; | 775 | nr_mtrr_spare_reg = num_var_ranges - 1; |
776 | |||
778 | num_reg_good = -1; | 777 | num_reg_good = -1; |
779 | for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { | 778 | for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { |
780 | if (!min_loss_pfn[i]) | 779 | if (!min_loss_pfn[i]) |
@@ -796,24 +795,24 @@ static int __init mtrr_search_optimal_index(void) | |||
796 | return index_good; | 795 | return index_good; |
797 | } | 796 | } |
798 | 797 | ||
799 | |||
800 | int __init mtrr_cleanup(unsigned address_bits) | 798 | int __init mtrr_cleanup(unsigned address_bits) |
801 | { | 799 | { |
802 | unsigned long extra_remove_base, extra_remove_size; | 800 | unsigned long x_remove_base, x_remove_size; |
803 | unsigned long base, size, def, dummy; | 801 | unsigned long base, size, def, dummy; |
804 | mtrr_type type; | ||
805 | u64 chunk_size, gran_size; | 802 | u64 chunk_size, gran_size; |
803 | mtrr_type type; | ||
806 | int index_good; | 804 | int index_good; |
807 | int i; | 805 | int i; |
808 | 806 | ||
809 | if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) | 807 | if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) |
810 | return 0; | 808 | return 0; |
809 | |||
811 | rdmsr(MSR_MTRRdefType, def, dummy); | 810 | rdmsr(MSR_MTRRdefType, def, dummy); |
812 | def &= 0xff; | 811 | def &= 0xff; |
813 | if (def != MTRR_TYPE_UNCACHABLE) | 812 | if (def != MTRR_TYPE_UNCACHABLE) |
814 | return 0; | 813 | return 0; |
815 | 814 | ||
816 | /* get it and store it aside */ | 815 | /* Get it and store it aside: */ |
817 | memset(range_state, 0, sizeof(range_state)); | 816 | memset(range_state, 0, sizeof(range_state)); |
818 | for (i = 0; i < num_var_ranges; i++) { | 817 | for (i = 0; i < num_var_ranges; i++) { |
819 | mtrr_if->get(i, &base, &size, &type); | 818 | mtrr_if->get(i, &base, &size, &type); |
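
The rdmsr(MSR_MTRRdefType, ...) check a few lines up only lets cleanup proceed when the BIOS left the default memory type UC. A sketch of the relevant bit layout, per the IA-32 SDM (bits 0-7 default type, bit 10 fixed-range enable, bit 11 global MTRR enable):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t def = 0xc00;	/* E=1, FE=1, default type UC(0) */

		/* mtrr_cleanup() does `def &= 0xff` to isolate the type: */
		printf("type=%u fixed=%u enabled=%u\n",
		       def & 0xff, (def >> 10) & 1, (def >> 11) & 1);
		return 0;
	}
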
@@ -822,29 +821,28 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
822 | range_state[i].type = type; | 821 | range_state[i].type = type; |
823 | } | 822 | } |
824 | 823 | ||
825 | /* check if we need handle it and can handle it */ | 824 | /* Check if we need to handle it and can handle it: */ |
826 | if (!mtrr_need_cleanup()) | 825 | if (!mtrr_need_cleanup()) |
827 | return 0; | 826 | return 0; |
828 | 827 | ||
829 | /* print original var MTRRs at first, for debugging: */ | 828 | /* Print original var MTRRs at first, for debugging: */ |
830 | printk(KERN_DEBUG "original variable MTRRs\n"); | 829 | printk(KERN_DEBUG "original variable MTRRs\n"); |
831 | print_out_mtrr_range_state(); | 830 | print_out_mtrr_range_state(); |
832 | 831 | ||
833 | memset(range, 0, sizeof(range)); | 832 | memset(range, 0, sizeof(range)); |
834 | extra_remove_size = 0; | 833 | x_remove_size = 0; |
835 | extra_remove_base = 1 << (32 - PAGE_SHIFT); | 834 | x_remove_base = 1 << (32 - PAGE_SHIFT); |
836 | if (mtrr_tom2) | 835 | if (mtrr_tom2) |
837 | extra_remove_size = | 836 | x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; |
838 | (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base; | 837 | |
839 | nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, | 838 | nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size); |
840 | extra_remove_size); | ||
841 | /* | 839 | /* |
842 | * [0, 1M) should always be coverred by var mtrr with WB | 840 | * [0, 1M) should always be covered by var mtrr with WB |
843 | * and fixed mtrrs should take effective before var mtrr for it | 841 | * and fixed mtrrs should take effect before var mtrr for it: |
844 | */ | 842 | */ |
845 | nr_range = add_range_with_merge(range, nr_range, 0, | 843 | nr_range = add_range_with_merge(range, nr_range, 0, |
846 | (1ULL<<(20 - PAGE_SHIFT)) - 1); | 844 | (1ULL<<(20 - PAGE_SHIFT)) - 1); |
847 | /* sort the ranges */ | 845 | /* Sort the ranges: */ |
848 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); | 846 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); |
849 | 847 | ||
850 | range_sums = sum_ranges(range, nr_range); | 848 | range_sums = sum_ranges(range, nr_range); |
@@ -854,7 +852,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
854 | if (mtrr_chunk_size && mtrr_gran_size) { | 852 | if (mtrr_chunk_size && mtrr_gran_size) { |
855 | i = 0; | 853 | i = 0; |
856 | mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size, | 854 | mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size, |
857 | extra_remove_base, extra_remove_size, i); | 855 | x_remove_base, x_remove_size, i); |
858 | 856 | ||
859 | mtrr_print_out_one_result(i); | 857 | mtrr_print_out_one_result(i); |
860 | 858 | ||
@@ -880,7 +878,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
880 | continue; | 878 | continue; |
881 | 879 | ||
882 | mtrr_calc_range_state(chunk_size, gran_size, | 880 | mtrr_calc_range_state(chunk_size, gran_size, |
883 | extra_remove_base, extra_remove_size, i); | 881 | x_remove_base, x_remove_size, i); |
884 | if (debug_print) { | 882 | if (debug_print) { |
885 | mtrr_print_out_one_result(i); | 883 | mtrr_print_out_one_result(i); |
886 | printk(KERN_INFO "\n"); | 884 | printk(KERN_INFO "\n"); |
@@ -890,7 +888,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
890 | } | 888 | } |
891 | } | 889 | } |
892 | 890 | ||
893 | /* try to find the optimal index */ | 891 | /* Try to find the optimal index: */ |
894 | index_good = mtrr_search_optimal_index(); | 892 | index_good = mtrr_search_optimal_index(); |
895 | 893 | ||
896 | if (index_good != -1) { | 894 | if (index_good != -1) { |
@@ -898,7 +896,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
898 | i = index_good; | 896 | i = index_good; |
899 | mtrr_print_out_one_result(i); | 897 | mtrr_print_out_one_result(i); |
900 | 898 | ||
901 | /* convert ranges to var ranges state */ | 899 | /* Convert ranges to var ranges state: */ |
902 | chunk_size = result[i].chunk_sizek; | 900 | chunk_size = result[i].chunk_sizek; |
903 | chunk_size <<= 10; | 901 | chunk_size <<= 10; |
904 | gran_size = result[i].gran_sizek; | 902 | gran_size = result[i].gran_sizek; |
@@ -941,8 +939,8 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup); | |||
941 | * Note this won't check if the MTRRs < 4GB where the magic bit doesn't | 939 | * Note this won't check if the MTRRs < 4GB where the magic bit doesn't |
942 | * apply to are wrong, but so far we don't know of any such case in the wild. | 940 | * apply to are wrong, but so far we don't know of any such case in the wild. |
943 | */ | 941 | */ |
944 | #define Tom2Enabled (1U << 21) | 942 | #define Tom2Enabled (1U << 21) |
945 | #define Tom2ForceMemTypeWB (1U << 22) | 943 | #define Tom2ForceMemTypeWB (1U << 22) |
946 | 944 | ||
947 | int __init amd_special_default_mtrr(void) | 945 | int __init amd_special_default_mtrr(void) |
948 | { | 946 | { |
@@ -952,7 +950,7 @@ int __init amd_special_default_mtrr(void) | |||
952 | return 0; | 950 | return 0; |
953 | if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) | 951 | if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) |
954 | return 0; | 952 | return 0; |
955 | /* In case some hypervisor doesn't pass SYSCFG through */ | 953 | /* In case some hypervisor doesn't pass SYSCFG through: */ |
956 | if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) | 954 | if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) |
957 | return 0; | 955 | return 0; |
958 | /* | 956 | /* |
@@ -965,19 +963,21 @@ int __init amd_special_default_mtrr(void) | |||
965 | return 0; | 963 | return 0; |
966 | } | 964 | } |
967 | 965 | ||
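
The SYSCFG test in amd_special_default_mtrr() needs both magic bits at once: TOM2 must be enabled and memory below it forced write-back. A sketch of the predicate with the bit definitions copied from above; tom2_is_wb() is our illustration name:

	#include <stdint.h>
	#include <stdio.h>

	#define Tom2Enabled		(1U << 21)
	#define Tom2ForceMemTypeWB	(1U << 22)

	/* Both bits must be set for [4GB, TOM2) to default to WB. */
	static int tom2_is_wb(uint32_t syscfg_lo)
	{
		return (syscfg_lo & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
		       (Tom2Enabled | Tom2ForceMemTypeWB);
	}

	int main(void)
	{
		printf("%d %d\n", tom2_is_wb(Tom2Enabled),		/* 0 */
		       tom2_is_wb(Tom2Enabled | Tom2ForceMemTypeWB));	/* 1 */
		return 0;
	}
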
968 | static u64 __init real_trim_memory(unsigned long start_pfn, | 966 | static u64 __init |
969 | unsigned long limit_pfn) | 967 | real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn) |
970 | { | 968 | { |
971 | u64 trim_start, trim_size; | 969 | u64 trim_start, trim_size; |
970 | |||
972 | trim_start = start_pfn; | 971 | trim_start = start_pfn; |
973 | trim_start <<= PAGE_SHIFT; | 972 | trim_start <<= PAGE_SHIFT; |
973 | |||
974 | trim_size = limit_pfn; | 974 | trim_size = limit_pfn; |
975 | trim_size <<= PAGE_SHIFT; | 975 | trim_size <<= PAGE_SHIFT; |
976 | trim_size -= trim_start; | 976 | trim_size -= trim_start; |
977 | 977 | ||
978 | return e820_update_range(trim_start, trim_size, E820_RAM, | 978 | return e820_update_range(trim_start, trim_size, E820_RAM, E820_RESERVED); |
979 | E820_RESERVED); | ||
980 | } | 979 | } |
980 | |||
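
real_trim_memory() does nothing more than convert page-frame numbers into the byte start/size pair that e820_update_range() expects. The arithmetic in isolation, assuming PAGE_SHIFT = 12 as on x86:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12	/* 4K pages */

	int main(void)
	{
		unsigned long start_pfn = 0x7f000, limit_pfn = 0x80000;

		/* same pfn -> byte-range conversion as real_trim_memory(): */
		uint64_t trim_start = (uint64_t)start_pfn << PAGE_SHIFT;
		uint64_t trim_size = ((uint64_t)limit_pfn << PAGE_SHIFT) - trim_start;

		printf("trim %#llx bytes at %#llx\n",
		       (unsigned long long)trim_size,
		       (unsigned long long)trim_start);	/* 16MB at 0x7f000000 */
		return 0;
	}
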
981 | /** | 981 | /** |
982 | * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs | 982 | * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs |
983 | * @end_pfn: ending page frame number | 983 | * @end_pfn: ending page frame number |
@@ -985,7 +985,7 @@ static u64 __init real_trim_memory(unsigned long start_pfn, | |||
985 | * Some buggy BIOSes don't setup the MTRRs properly for systems with certain | 985 | * Some buggy BIOSes don't setup the MTRRs properly for systems with certain |
986 | * memory configurations. This routine checks that the highest MTRR matches | 986 | * memory configurations. This routine checks that the highest MTRR matches |
987 | * the end of memory, to make sure the MTRRs having a write back type cover | 987 | * the end of memory, to make sure the MTRRs having a write back type cover |
988 | * all of the memory the kernel is intending to use. If not, it'll trim any | 988 | * all of the memory the kernel is intending to use. If not, it'll trim any |
989 | * memory off the end by adjusting end_pfn, removing it from the kernel's | 989 | * memory off the end by adjusting end_pfn, removing it from the kernel's |
990 | * allocation pools, warning the user with an obnoxious message. | 990 | * allocation pools, warning the user with an obnoxious message. |
991 | */ | 991 | */ |
@@ -994,21 +994,22 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
994 | unsigned long i, base, size, highest_pfn = 0, def, dummy; | 994 | unsigned long i, base, size, highest_pfn = 0, def, dummy; |
995 | mtrr_type type; | 995 | mtrr_type type; |
996 | u64 total_trim_size; | 996 | u64 total_trim_size; |
997 | |||
998 | /* extra one for all 0 */ | 997 | /* extra one for all 0 */ |
999 | int num[MTRR_NUM_TYPES + 1]; | 998 | int num[MTRR_NUM_TYPES + 1]; |
999 | |||
1000 | /* | 1000 | /* |
1001 | * Make sure we only trim uncachable memory on machines that | 1001 | * Make sure we only trim uncachable memory on machines that |
1002 | * support the Intel MTRR architecture: | 1002 | * support the Intel MTRR architecture: |
1003 | */ | 1003 | */ |
1004 | if (!is_cpu(INTEL) || disable_mtrr_trim) | 1004 | if (!is_cpu(INTEL) || disable_mtrr_trim) |
1005 | return 0; | 1005 | return 0; |
1006 | |||
1006 | rdmsr(MSR_MTRRdefType, def, dummy); | 1007 | rdmsr(MSR_MTRRdefType, def, dummy); |
1007 | def &= 0xff; | 1008 | def &= 0xff; |
1008 | if (def != MTRR_TYPE_UNCACHABLE) | 1009 | if (def != MTRR_TYPE_UNCACHABLE) |
1009 | return 0; | 1010 | return 0; |
1010 | 1011 | ||
1011 | /* get it and store it aside */ | 1012 | /* Get it and store it aside: */ |
1012 | memset(range_state, 0, sizeof(range_state)); | 1013 | memset(range_state, 0, sizeof(range_state)); |
1013 | for (i = 0; i < num_var_ranges; i++) { | 1014 | for (i = 0; i < num_var_ranges; i++) { |
1014 | mtrr_if->get(i, &base, &size, &type); | 1015 | mtrr_if->get(i, &base, &size, &type); |
@@ -1017,7 +1018,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1017 | range_state[i].type = type; | 1018 | range_state[i].type = type; |
1018 | } | 1019 | } |
1019 | 1020 | ||
1020 | /* Find highest cached pfn */ | 1021 | /* Find highest cached pfn: */ |
1021 | for (i = 0; i < num_var_ranges; i++) { | 1022 | for (i = 0; i < num_var_ranges; i++) { |
1022 | type = range_state[i].type; | 1023 | type = range_state[i].type; |
1023 | if (type != MTRR_TYPE_WRBACK) | 1024 | if (type != MTRR_TYPE_WRBACK) |
@@ -1028,13 +1029,13 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1028 | highest_pfn = base + size; | 1029 | highest_pfn = base + size; |
1029 | } | 1030 | } |
1030 | 1031 | ||
1031 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ | 1032 | /* kvm/qemu doesn't have mtrr set right, don't trim them all: */ |
1032 | if (!highest_pfn) { | 1033 | if (!highest_pfn) { |
1033 | printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); | 1034 | printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); |
1034 | return 0; | 1035 | return 0; |
1035 | } | 1036 | } |
1036 | 1037 | ||
1037 | /* check entries number */ | 1038 | /* Check the number of entries: */ |
1038 | memset(num, 0, sizeof(num)); | 1039 | memset(num, 0, sizeof(num)); |
1039 | for (i = 0; i < num_var_ranges; i++) { | 1040 | for (i = 0; i < num_var_ranges; i++) { |
1040 | type = range_state[i].type; | 1041 | type = range_state[i].type; |
@@ -1046,11 +1047,11 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1046 | num[type]++; | 1047 | num[type]++; |
1047 | } | 1048 | } |
1048 | 1049 | ||
1049 | /* no entry for WB? */ | 1050 | /* No entry for WB? */ |
1050 | if (!num[MTRR_TYPE_WRBACK]) | 1051 | if (!num[MTRR_TYPE_WRBACK]) |
1051 | return 0; | 1052 | return 0; |
1052 | 1053 | ||
1053 | /* check if we only had WB and UC */ | 1054 | /* Check if we only had WB and UC: */ |
1054 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != | 1055 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != |
1055 | num_var_ranges - num[MTRR_NUM_TYPES]) | 1056 | num_var_ranges - num[MTRR_NUM_TYPES]) |
1056 | return 0; | 1057 | return 0; |
@@ -1066,31 +1067,31 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1066 | } | 1067 | } |
1067 | nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); | 1068 | nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); |
1068 | 1069 | ||
1070 | /* Check the head: */ | ||
1069 | total_trim_size = 0; | 1071 | total_trim_size = 0; |
1070 | /* check the head */ | ||
1071 | if (range[0].start) | 1072 | if (range[0].start) |
1072 | total_trim_size += real_trim_memory(0, range[0].start); | 1073 | total_trim_size += real_trim_memory(0, range[0].start); |
1073 | /* check the holes */ | 1074 | |
1075 | /* Check the holes: */ | ||
1074 | for (i = 0; i < nr_range - 1; i++) { | 1076 | for (i = 0; i < nr_range - 1; i++) { |
1075 | if (range[i].end + 1 < range[i+1].start) | 1077 | if (range[i].end + 1 < range[i+1].start) |
1076 | total_trim_size += real_trim_memory(range[i].end + 1, | 1078 | total_trim_size += real_trim_memory(range[i].end + 1, |
1077 | range[i+1].start); | 1079 | range[i+1].start); |
1078 | } | 1080 | } |
1079 | /* check the top */ | 1081 | |
1082 | /* Check the top: */ | ||
1080 | i = nr_range - 1; | 1083 | i = nr_range - 1; |
1081 | if (range[i].end + 1 < end_pfn) | 1084 | if (range[i].end + 1 < end_pfn) |
1082 | total_trim_size += real_trim_memory(range[i].end + 1, | 1085 | total_trim_size += real_trim_memory(range[i].end + 1, |
1083 | end_pfn); | 1086 | end_pfn); |
1084 | 1087 | ||
1085 | if (total_trim_size) { | 1088 | if (total_trim_size) { |
1086 | printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover" | 1089 | pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20); |
1087 | " all of memory, losing %lluMB of RAM.\n", | ||
1088 | total_trim_size >> 20); | ||
1089 | 1090 | ||
1090 | if (!changed_by_mtrr_cleanup) | 1091 | if (!changed_by_mtrr_cleanup) |
1091 | WARN_ON(1); | 1092 | WARN_ON(1); |
1092 | 1093 | ||
1093 | printk(KERN_INFO "update e820 for mtrr\n"); | 1094 | pr_info("update e820 for mtrr\n"); |
1094 | update_e820(); | 1095 | update_e820(); |
1095 | 1096 | ||
1096 | return 1; | 1097 | return 1; |
@@ -1098,4 +1099,3 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
1098 | 1099 | ||
1099 | return 0; | 1100 | return 0; |
1100 | } | 1101 | } |
1101 | |||
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c index ff14c320040c..228d982ce09c 100644 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c | |||
@@ -1,38 +1,40 @@ | |||
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/io.h> | ||
2 | #include <linux/mm.h> | 3 | #include <linux/mm.h> |
3 | #include <asm/mtrr.h> | 4 | |
4 | #include <asm/msr.h> | ||
5 | #include <asm/io.h> | ||
6 | #include <asm/processor-cyrix.h> | 5 | #include <asm/processor-cyrix.h> |
7 | #include <asm/processor-flags.h> | 6 | #include <asm/processor-flags.h> |
7 | #include <asm/mtrr.h> | ||
8 | #include <asm/msr.h> | ||
9 | |||
8 | #include "mtrr.h" | 10 | #include "mtrr.h" |
9 | 11 | ||
10 | static void | 12 | static void |
11 | cyrix_get_arr(unsigned int reg, unsigned long *base, | 13 | cyrix_get_arr(unsigned int reg, unsigned long *base, |
12 | unsigned long *size, mtrr_type * type) | 14 | unsigned long *size, mtrr_type * type) |
13 | { | 15 | { |
14 | unsigned long flags; | ||
15 | unsigned char arr, ccr3, rcr, shift; | 16 | unsigned char arr, ccr3, rcr, shift; |
17 | unsigned long flags; | ||
16 | 18 | ||
17 | arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ | 19 | arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ |
18 | 20 | ||
19 | /* Save flags and disable interrupts */ | ||
20 | local_irq_save(flags); | 21 | local_irq_save(flags); |
21 | 22 | ||
22 | ccr3 = getCx86(CX86_CCR3); | 23 | ccr3 = getCx86(CX86_CCR3); |
23 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 24 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
24 | ((unsigned char *) base)[3] = getCx86(arr); | 25 | ((unsigned char *)base)[3] = getCx86(arr); |
25 | ((unsigned char *) base)[2] = getCx86(arr + 1); | 26 | ((unsigned char *)base)[2] = getCx86(arr + 1); |
26 | ((unsigned char *) base)[1] = getCx86(arr + 2); | 27 | ((unsigned char *)base)[1] = getCx86(arr + 2); |
27 | rcr = getCx86(CX86_RCR_BASE + reg); | 28 | rcr = getCx86(CX86_RCR_BASE + reg); |
28 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 29 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
29 | 30 | ||
30 | /* Enable interrupts if it was enabled previously */ | ||
31 | local_irq_restore(flags); | 31 | local_irq_restore(flags); |
32 | |||
32 | shift = ((unsigned char *) base)[1] & 0x0f; | 33 | shift = ((unsigned char *) base)[1] & 0x0f; |
33 | *base >>= PAGE_SHIFT; | 34 | *base >>= PAGE_SHIFT; |
34 | 35 | ||
35 | /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7 | 36 | /* |
37 | * Power of two, at least 4K on ARR0-ARR6, 256K on ARR7 | ||
36 | * Note: shift==0xf means 4G, this is unsupported. | 38 | * Note: shift==0xf means 4G, this is unsupported. |
37 | */ | 39 | */ |
38 | if (shift) | 40 | if (shift) |
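
The tail of cyrix_get_arr() falls outside the hunk; going by the comment above, the shift nibble selects a power-of-two size with a 4K granule on ARR0-ARR6 and a 256K granule on ARR7. A sketch under that assumption, returning sizes in pages; arr_size_pages() is our illustration name:

	#include <stdio.h>

	static unsigned long arr_size_pages(int reg, unsigned char shift)
	{
		if (!shift)
			return 0;	/* ARR disabled */
		return (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
	}

	int main(void)
	{
		printf("%lu %lu\n", arr_size_pages(0, 1),	/* 1 page   = 4K   */
		       arr_size_pages(7, 1));			/* 64 pages = 256K */
		return 0;
	}
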
@@ -76,17 +78,20 @@ cyrix_get_arr(unsigned int reg, unsigned long *base, | |||
76 | } | 78 | } |
77 | } | 79 | } |
78 | 80 | ||
81 | /* | ||
82 | * cyrix_get_free_region - get a free ARR. | ||
83 | * | ||
84 | * @base: the starting (base) address of the region. | ||
85 | * @size: the size (in bytes) of the region. | ||
86 | * | ||
87 | * Returns: the index of the region on success, else -1 on error. | ||
88 | */ | ||
79 | static int | 89 | static int |
80 | cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) | 90 | cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) |
81 | /* [SUMMARY] Get a free ARR. | ||
82 | <base> The starting (base) address of the region. | ||
83 | <size> The size (in bytes) of the region. | ||
84 | [RETURNS] The index of the region on success, else -1 on error. | ||
85 | */ | ||
86 | { | 91 | { |
87 | int i; | ||
88 | mtrr_type ltype; | ||
89 | unsigned long lbase, lsize; | 92 | unsigned long lbase, lsize; |
93 | mtrr_type ltype; | ||
94 | int i; | ||
90 | 95 | ||
91 | switch (replace_reg) { | 96 | switch (replace_reg) { |
92 | case 7: | 97 | case 7: |
@@ -107,14 +112,17 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) | |||
107 | cyrix_get_arr(7, &lbase, &lsize, <ype); | 112 | cyrix_get_arr(7, &lbase, &lsize, <ype); |
108 | if (lsize == 0) | 113 | if (lsize == 0) |
109 | return 7; | 114 | return 7; |
110 | /* Else try ARR0-ARR6 first */ | 115 | /* Else try ARR0-ARR6 first */ |
111 | } else { | 116 | } else { |
112 | for (i = 0; i < 7; i++) { | 117 | for (i = 0; i < 7; i++) { |
113 | cyrix_get_arr(i, &lbase, &lsize, <ype); | 118 | cyrix_get_arr(i, &lbase, &lsize, <ype); |
114 | if (lsize == 0) | 119 | if (lsize == 0) |
115 | return i; | 120 | return i; |
116 | } | 121 | } |
117 | /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */ | 122 | /* |
123 | * ARR0-ARR6 aren't free; |
124 | * try ARR7, but its size must be at least 256K |
125 | */ | ||
118 | cyrix_get_arr(i, &lbase, &lsize, <ype); | 126 | cyrix_get_arr(i, &lbase, &lsize, <ype); |
119 | if ((lsize == 0) && (size >= 0x40)) | 127 | if ((lsize == 0) && (size >= 0x40)) |
120 | return i; | 128 | return i; |
@@ -122,21 +130,22 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) | |||
122 | return -ENOSPC; | 130 | return -ENOSPC; |
123 | } | 131 | } |
124 | 132 | ||
125 | static u32 cr4 = 0; | 133 | static u32 cr4, ccr3; |
126 | static u32 ccr3; | ||
127 | 134 | ||
128 | static void prepare_set(void) | 135 | static void prepare_set(void) |
129 | { | 136 | { |
130 | u32 cr0; | 137 | u32 cr0; |
131 | 138 | ||
132 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ | 139 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ |
133 | if ( cpu_has_pge ) { | 140 | if (cpu_has_pge) { |
134 | cr4 = read_cr4(); | 141 | cr4 = read_cr4(); |
135 | write_cr4(cr4 & ~X86_CR4_PGE); | 142 | write_cr4(cr4 & ~X86_CR4_PGE); |
136 | } | 143 | } |
137 | 144 | ||
138 | /* Disable and flush caches. Note that wbinvd flushes the TLBs as | 145 | /* |
139 | a side-effect */ | 146 | * Disable and flush caches. |
147 | * Note that wbinvd flushes the TLBs as a side-effect | ||
148 | */ | ||
140 | cr0 = read_cr0() | X86_CR0_CD; | 149 | cr0 = read_cr0() | X86_CR0_CD; |
141 | wbinvd(); | 150 | wbinvd(); |
142 | write_cr0(cr0); | 151 | write_cr0(cr0); |
@@ -147,22 +156,21 @@ static void prepare_set(void) | |||
147 | 156 | ||
148 | /* Cyrix ARRs - everything else was excluded at the top */ | 157 | /* Cyrix ARRs - everything else was excluded at the top */ |
149 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); | 158 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); |
150 | |||
151 | } | 159 | } |
152 | 160 | ||
153 | static void post_set(void) | 161 | static void post_set(void) |
154 | { | 162 | { |
155 | /* Flush caches and TLBs */ | 163 | /* Flush caches and TLBs */ |
156 | wbinvd(); | 164 | wbinvd(); |
157 | 165 | ||
158 | /* Cyrix ARRs - everything else was excluded at the top */ | 166 | /* Cyrix ARRs - everything else was excluded at the top */ |
159 | setCx86(CX86_CCR3, ccr3); | 167 | setCx86(CX86_CCR3, ccr3); |
160 | 168 | ||
161 | /* Enable caches */ | 169 | /* Enable caches */ |
162 | write_cr0(read_cr0() & 0xbfffffff); | 170 | write_cr0(read_cr0() & 0xbfffffff); |
163 | 171 | ||
164 | /* Restore value of CR4 */ | 172 | /* Restore value of CR4 */ |
165 | if ( cpu_has_pge ) | 173 | if (cpu_has_pge) |
166 | write_cr4(cr4); | 174 | write_cr4(cr4); |
167 | } | 175 | } |
168 | 176 | ||
@@ -178,7 +186,8 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base, | |||
178 | size >>= 6; | 186 | size >>= 6; |
179 | 187 | ||
180 | size &= 0x7fff; /* make sure arr_size <= 14 */ | 188 | size &= 0x7fff; /* make sure arr_size <= 14 */ |
181 | for (arr_size = 0; size; arr_size++, size >>= 1) ; | 189 | for (arr_size = 0; size; arr_size++, size >>= 1) |
190 | ; | ||
182 | 191 | ||
183 | if (reg < 7) { | 192 | if (reg < 7) { |
184 | switch (type) { | 193 | switch (type) { |
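
The empty-bodied for loop above is a hand-rolled bit count: it yields floor(log2(size)) + 1, the encoded block-size field for the region control register. Stand-alone, with the same masking; arr_size_field() is our illustration name:

	#include <stdio.h>

	/* size is the (already scaled) block size; returns the arr_size
	 * field exactly as the loop in cyrix_set_arr() computes it. */
	static int arr_size_field(unsigned long size)
	{
		int arr_size;

		size &= 0x7fff;		/* make sure arr_size <= 14 */
		for (arr_size = 0; size; arr_size++, size >>= 1)
			;
		return arr_size;
	}

	int main(void)
	{
		printf("%d %d\n", arr_size_field(1), arr_size_field(16));  /* 1 5 */
		return 0;
	}
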
@@ -215,18 +224,18 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base, | |||
215 | prepare_set(); | 224 | prepare_set(); |
216 | 225 | ||
217 | base <<= PAGE_SHIFT; | 226 | base <<= PAGE_SHIFT; |
218 | setCx86(arr, ((unsigned char *) &base)[3]); | 227 | setCx86(arr + 0, ((unsigned char *)&base)[3]); |
219 | setCx86(arr + 1, ((unsigned char *) &base)[2]); | 228 | setCx86(arr + 1, ((unsigned char *)&base)[2]); |
220 | setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size); | 229 | setCx86(arr + 2, (((unsigned char *)&base)[1]) | arr_size); |
221 | setCx86(CX86_RCR_BASE + reg, arr_type); | 230 | setCx86(CX86_RCR_BASE + reg, arr_type); |
222 | 231 | ||
223 | post_set(); | 232 | post_set(); |
224 | } | 233 | } |
225 | 234 | ||
226 | typedef struct { | 235 | typedef struct { |
227 | unsigned long base; | 236 | unsigned long base; |
228 | unsigned long size; | 237 | unsigned long size; |
229 | mtrr_type type; | 238 | mtrr_type type; |
230 | } arr_state_t; | 239 | } arr_state_t; |
231 | 240 | ||
232 | static arr_state_t arr_state[8] = { | 241 | static arr_state_t arr_state[8] = { |
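The three setCx86() writes above spell the page-shifted base out most-significant byte first into consecutive ARR registers, with the size code OR-ed into the low bits of the third byte. A worked decomposition (assumes a little-endian host, as the casts in the code do, and a made-up size code):

    #include <stdio.h>

    int main(void)
    {
            unsigned long base = 0x12345000;        /* example base */
            unsigned char size_code = 0x9;          /* hypothetical */
            unsigned char *b = (unsigned char *)&base;

            /* Bytes as written by setCx86(arr + 0..2, ...): */
            printf("%02x %02x %02x\n", b[3], b[2], b[1] | size_code);
            /* prints: 12 34 59 */
            return 0;
    }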
@@ -247,16 +256,17 @@ static void cyrix_set_all(void) | |||
247 | setCx86(CX86_CCR0 + i, ccr_state[i]); | 256 | setCx86(CX86_CCR0 + i, ccr_state[i]); |
248 | for (; i < 7; i++) | 257 | for (; i < 7; i++) |
249 | setCx86(CX86_CCR4 + i, ccr_state[i]); | 258 | setCx86(CX86_CCR4 + i, ccr_state[i]); |
250 | for (i = 0; i < 8; i++) | 259 | |
251 | cyrix_set_arr(i, arr_state[i].base, | 260 | for (i = 0; i < 8; i++) { |
261 | cyrix_set_arr(i, arr_state[i].base, | ||
252 | arr_state[i].size, arr_state[i].type); | 262 | arr_state[i].size, arr_state[i].type); |
263 | } | ||
253 | 264 | ||
254 | post_set(); | 265 | post_set(); |
255 | } | 266 | } |
256 | 267 | ||
257 | static struct mtrr_ops cyrix_mtrr_ops = { | 268 | static struct mtrr_ops cyrix_mtrr_ops = { |
258 | .vendor = X86_VENDOR_CYRIX, | 269 | .vendor = X86_VENDOR_CYRIX, |
259 | // .init = cyrix_arr_init, | ||
260 | .set_all = cyrix_set_all, | 270 | .set_all = cyrix_set_all, |
261 | .set = cyrix_set_arr, | 271 | .set = cyrix_set_arr, |
262 | .get = cyrix_get_arr, | 272 | .get = cyrix_get_arr, |
@@ -270,5 +280,3 @@ int __init cyrix_init_mtrr(void) | |||
270 | set_mtrr_ops(&cyrix_mtrr_ops); | 280 | set_mtrr_ops(&cyrix_mtrr_ops); |
271 | return 0; | 281 | return 0; |
272 | } | 282 | } |
273 | |||
274 | //arch_initcall(cyrix_init_mtrr); | ||
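With the commented-out .init and arch_initcall() lines removed, cyrix.c now registers purely through cyrix_init_mtrr(). The ops structure is the subsystem's vendor-dispatch pattern: each vendor fills a struct mtrr_ops and the core calls through mtrr_if. A reduced, self-contained model of that pattern (toy types, not the kernel's real definitions):

    #include <stdio.h>

    /* Toy model of the mtrr_ops vendor dispatch. */
    struct ops {
            int vendor;
            void (*set_all)(void);
    };

    #define VENDOR_NUM 8
    static struct ops *ops_table[VENDOR_NUM];
    static struct ops *cur_if;              /* plays the mtrr_if role */

    static void register_ops(struct ops *o)
    {
            if (o->vendor > 0 && o->vendor < VENDOR_NUM)
                    ops_table[o->vendor] = o;
    }

    static void toy_set_all(void) { puts("vendor set_all"); }

    static struct ops toy_ops = { .vendor = 1, .set_all = toy_set_all };

    int main(void)
    {
            register_ops(&toy_ops);         /* like set_mtrr_ops() */
            cur_if = ops_table[1];
            cur_if->set_all();              /* core-side dispatch  */
            return 0;
    }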
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 0543f69f0b27..55da0c5f68dd 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
@@ -1,28 +1,34 @@ | |||
1 | /* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong | 1 | /* |
2 | because MTRRs can span upto 40 bits (36bits on most modern x86) */ | 2 | * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong |
3 | * because MTRRs can span upto 40 bits (36bits on most modern x86) | ||
4 | */ | ||
5 | #define DEBUG | ||
6 | |||
7 | #include <linux/module.h> | ||
3 | #include <linux/init.h> | 8 | #include <linux/init.h> |
4 | #include <linux/slab.h> | 9 | #include <linux/slab.h> |
10 | #include <linux/io.h> | ||
5 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
6 | #include <linux/module.h> | 12 | |
7 | #include <asm/io.h> | ||
8 | #include <asm/mtrr.h> | ||
9 | #include <asm/msr.h> | ||
10 | #include <asm/system.h> | ||
11 | #include <asm/cpufeature.h> | ||
12 | #include <asm/processor-flags.h> | 13 | #include <asm/processor-flags.h> |
14 | #include <asm/cpufeature.h> | ||
13 | #include <asm/tlbflush.h> | 15 | #include <asm/tlbflush.h> |
16 | #include <asm/system.h> | ||
17 | #include <asm/mtrr.h> | ||
18 | #include <asm/msr.h> | ||
14 | #include <asm/pat.h> | 19 | #include <asm/pat.h> |
20 | |||
15 | #include "mtrr.h" | 21 | #include "mtrr.h" |
16 | 22 | ||
17 | struct fixed_range_block { | 23 | struct fixed_range_block { |
18 | int base_msr; /* start address of an MTRR block */ | 24 | int base_msr; /* start address of an MTRR block */ |
19 | int ranges; /* number of MTRRs in this block */ | 25 | int ranges; /* number of MTRRs in this block */ |
20 | }; | 26 | }; |
21 | 27 | ||
22 | static struct fixed_range_block fixed_range_blocks[] = { | 28 | static struct fixed_range_block fixed_range_blocks[] = { |
23 | { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */ | 29 | { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */ |
24 | { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */ | 30 | { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */ |
25 | { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */ | 31 | { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */ |
26 | {} | 32 | {} |
27 | }; | 33 | }; |
28 | 34 | ||
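The table above encodes the architected fixed-range layout: one 64K-granularity MSR covering 0x00000-0x7FFFF, two 16K MSRs covering 0x80000-0xBFFFF, and eight 4K MSRs covering 0xC0000-0xFFFFF, with eight type bytes per MSR. A quick consistency check of that arithmetic:

    #include <assert.h>

    int main(void)
    {
            /* 8 ranges per MSR; counts from fixed_range_blocks[] */
            unsigned long covered = 1 * 8 * 0x10000UL   /* 64K MTRR  */
                                  + 2 * 8 * 0x04000UL   /* 16K MTRRs */
                                  + 8 * 8 * 0x01000UL;  /* 4K MTRRs  */

            assert(covered == 0x100000);    /* the first megabyte */
            return 0;
    }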
@@ -30,10 +36,10 @@ static unsigned long smp_changes_mask; | |||
30 | static int mtrr_state_set; | 36 | static int mtrr_state_set; |
31 | u64 mtrr_tom2; | 37 | u64 mtrr_tom2; |
32 | 38 | ||
33 | struct mtrr_state_type mtrr_state = {}; | 39 | struct mtrr_state_type mtrr_state; |
34 | EXPORT_SYMBOL_GPL(mtrr_state); | 40 | EXPORT_SYMBOL_GPL(mtrr_state); |
35 | 41 | ||
36 | /** | 42 | /* |
37 | * BIOS is expected to clear MtrrFixDramModEn bit, see for example | 43 | * BIOS is expected to clear MtrrFixDramModEn bit, see for example |
38 | * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD | 44 | * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD |
39 | * Opteron Processors" (26094 Rev. 3.30 February 2006), section | 45 | * Opteron Processors" (26094 Rev. 3.30 February 2006), section |
@@ -104,9 +110,8 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
104 | * Look of multiple ranges matching this address and pick type | 110 | * Look of multiple ranges matching this address and pick type |
105 | * as per MTRR precedence | 111 | * as per MTRR precedence |
106 | */ | 112 | */ |
107 | if (!(mtrr_state.enabled & 2)) { | 113 | if (!(mtrr_state.enabled & 2)) |
108 | return mtrr_state.def_type; | 114 | return mtrr_state.def_type; |
109 | } | ||
110 | 115 | ||
111 | prev_match = 0xFF; | 116 | prev_match = 0xFF; |
112 | for (i = 0; i < num_var_ranges; ++i) { | 117 | for (i = 0; i < num_var_ranges; ++i) { |
@@ -125,9 +130,8 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
125 | if (start_state != end_state) | 130 | if (start_state != end_state) |
126 | return 0xFE; | 131 | return 0xFE; |
127 | 132 | ||
128 | if ((start & mask) != (base & mask)) { | 133 | if ((start & mask) != (base & mask)) |
129 | continue; | 134 | continue; |
130 | } | ||
131 | 135 | ||
132 | curr_match = mtrr_state.var_ranges[i].base_lo & 0xff; | 136 | curr_match = mtrr_state.var_ranges[i].base_lo & 0xff; |
133 | if (prev_match == 0xFF) { | 137 | if (prev_match == 0xFF) { |
@@ -148,9 +152,8 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
148 | curr_match = MTRR_TYPE_WRTHROUGH; | 152 | curr_match = MTRR_TYPE_WRTHROUGH; |
149 | } | 153 | } |
150 | 154 | ||
151 | if (prev_match != curr_match) { | 155 | if (prev_match != curr_match) |
152 | return MTRR_TYPE_UNCACHABLE; | 156 | return MTRR_TYPE_UNCACHABLE; |
153 | } | ||
154 | } | 157 | } |
155 | 158 | ||
156 | if (mtrr_tom2) { | 159 | if (mtrr_tom2) { |
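The now-unbracketed early returns implement the architected precedence for overlapping variable ranges: UC anywhere wins, a WT/WB overlap degrades to WT, and any other disagreement falls back to UC. Restated as a pairwise combine step (constants assumed to match the kernel's MTRR_TYPE_* values):

    #define MTRR_TYPE_UNCACHABLE    0
    #define MTRR_TYPE_WRTHROUGH     4
    #define MTRR_TYPE_WRBACK        6

    /* Combine the types of two ranges that both match an address. */
    static unsigned char combine_types(unsigned char prev, unsigned char curr)
    {
            if (prev == curr)
                    return curr;
            if ((prev == MTRR_TYPE_WRBACK && curr == MTRR_TYPE_WRTHROUGH) ||
                (prev == MTRR_TYPE_WRTHROUGH && curr == MTRR_TYPE_WRBACK))
                    return MTRR_TYPE_WRTHROUGH;     /* WT beats WB  */
            return MTRR_TYPE_UNCACHABLE;            /* else UC wins */
    }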
@@ -164,7 +167,7 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
164 | return mtrr_state.def_type; | 167 | return mtrr_state.def_type; |
165 | } | 168 | } |
166 | 169 | ||
167 | /* Get the MSR pair relating to a var range */ | 170 | /* Get the MSR pair relating to a var range */ |
168 | static void | 171 | static void |
169 | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) | 172 | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) |
170 | { | 173 | { |
@@ -172,7 +175,7 @@ get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) | |||
172 | rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); | 175 | rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); |
173 | } | 176 | } |
174 | 177 | ||
175 | /* fill the MSR pair relating to a var range */ | 178 | /* Fill the MSR pair relating to a var range */ |
176 | void fill_mtrr_var_range(unsigned int index, | 179 | void fill_mtrr_var_range(unsigned int index, |
177 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi) | 180 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi) |
178 | { | 181 | { |
@@ -186,10 +189,9 @@ void fill_mtrr_var_range(unsigned int index, | |||
186 | vr[index].mask_hi = mask_hi; | 189 | vr[index].mask_hi = mask_hi; |
187 | } | 190 | } |
188 | 191 | ||
189 | static void | 192 | static void get_fixed_ranges(mtrr_type *frs) |
190 | get_fixed_ranges(mtrr_type * frs) | ||
191 | { | 193 | { |
192 | unsigned int *p = (unsigned int *) frs; | 194 | unsigned int *p = (unsigned int *)frs; |
193 | int i; | 195 | int i; |
194 | 196 | ||
195 | k8_check_syscfg_dram_mod_en(); | 197 | k8_check_syscfg_dram_mod_en(); |
@@ -217,22 +219,22 @@ static void __init print_fixed_last(void) | |||
217 | if (!last_fixed_end) | 219 | if (!last_fixed_end) |
218 | return; | 220 | return; |
219 | 221 | ||
220 | printk(KERN_DEBUG " %05X-%05X %s\n", last_fixed_start, | 222 | pr_debug(" %05X-%05X %s\n", last_fixed_start, |
221 | last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); | 223 | last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); |
222 | 224 | ||
223 | last_fixed_end = 0; | 225 | last_fixed_end = 0; |
224 | } | 226 | } |
225 | 227 | ||
226 | static void __init update_fixed_last(unsigned base, unsigned end, | 228 | static void __init update_fixed_last(unsigned base, unsigned end, |
227 | mtrr_type type) | 229 | mtrr_type type) |
228 | { | 230 | { |
229 | last_fixed_start = base; | 231 | last_fixed_start = base; |
230 | last_fixed_end = end; | 232 | last_fixed_end = end; |
231 | last_fixed_type = type; | 233 | last_fixed_type = type; |
232 | } | 234 | } |
233 | 235 | ||
234 | static void __init print_fixed(unsigned base, unsigned step, | 236 | static void __init |
235 | const mtrr_type *types) | 237 | print_fixed(unsigned base, unsigned step, const mtrr_type *types) |
236 | { | 238 | { |
237 | unsigned i; | 239 | unsigned i; |
238 | 240 | ||
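The printk(KERN_DEBUG ...) to pr_debug() conversions in this hunk only stay visible because of the #define DEBUG added at the top of the file; without it (and without dynamic debug), pr_debug() compiles to nothing. A userspace model of that behaviour:

    #include <stdio.h>

    #define DEBUG                   /* as generic.c now does */

    /* Simplified stand-in for the kernel's pr_debug() plumbing. */
    #ifdef DEBUG
    #define pr_debug(fmt, ...)      printf(fmt, ##__VA_ARGS__)
    #else
    #define pr_debug(fmt, ...)      do { } while (0)
    #endif

    int main(void)
    {
            pr_debug(" %05X-%05X %s\n", 0, 0x9FFFF, "write-back");
            return 0;
    }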
@@ -259,54 +261,55 @@ static void __init print_mtrr_state(void) | |||
259 | unsigned int i; | 261 | unsigned int i; |
260 | int high_width; | 262 | int high_width; |
261 | 263 | ||
262 | printk(KERN_DEBUG "MTRR default type: %s\n", | 264 | pr_debug("MTRR default type: %s\n", |
263 | mtrr_attrib_to_str(mtrr_state.def_type)); | 265 | mtrr_attrib_to_str(mtrr_state.def_type)); |
264 | if (mtrr_state.have_fixed) { | 266 | if (mtrr_state.have_fixed) { |
265 | printk(KERN_DEBUG "MTRR fixed ranges %sabled:\n", | 267 | pr_debug("MTRR fixed ranges %sabled:\n", |
266 | mtrr_state.enabled & 1 ? "en" : "dis"); | 268 | mtrr_state.enabled & 1 ? "en" : "dis"); |
267 | print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); | 269 | print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); |
268 | for (i = 0; i < 2; ++i) | 270 | for (i = 0; i < 2; ++i) |
269 | print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8); | 271 | print_fixed(0x80000 + i * 0x20000, 0x04000, |
272 | mtrr_state.fixed_ranges + (i + 1) * 8); | ||
270 | for (i = 0; i < 8; ++i) | 273 | for (i = 0; i < 8; ++i) |
271 | print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8); | 274 | print_fixed(0xC0000 + i * 0x08000, 0x01000, |
275 | mtrr_state.fixed_ranges + (i + 3) * 8); | ||
272 | 276 | ||
273 | /* tail */ | 277 | /* tail */ |
274 | print_fixed_last(); | 278 | print_fixed_last(); |
275 | } | 279 | } |
276 | printk(KERN_DEBUG "MTRR variable ranges %sabled:\n", | 280 | pr_debug("MTRR variable ranges %sabled:\n", |
277 | mtrr_state.enabled & 2 ? "en" : "dis"); | 281 | mtrr_state.enabled & 2 ? "en" : "dis"); |
278 | if (size_or_mask & 0xffffffffUL) | 282 | if (size_or_mask & 0xffffffffUL) |
279 | high_width = ffs(size_or_mask & 0xffffffffUL) - 1; | 283 | high_width = ffs(size_or_mask & 0xffffffffUL) - 1; |
280 | else | 284 | else |
281 | high_width = ffs(size_or_mask>>32) + 32 - 1; | 285 | high_width = ffs(size_or_mask>>32) + 32 - 1; |
282 | high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4; | 286 | high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4; |
287 | |||
283 | for (i = 0; i < num_var_ranges; ++i) { | 288 | for (i = 0; i < num_var_ranges; ++i) { |
284 | if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) | 289 | if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) |
285 | printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n", | 290 | pr_debug(" %u base %0*X%05X000 mask %0*X%05X000 %s\n", |
286 | i, | 291 | i, |
287 | high_width, | 292 | high_width, |
288 | mtrr_state.var_ranges[i].base_hi, | 293 | mtrr_state.var_ranges[i].base_hi, |
289 | mtrr_state.var_ranges[i].base_lo >> 12, | 294 | mtrr_state.var_ranges[i].base_lo >> 12, |
290 | high_width, | 295 | high_width, |
291 | mtrr_state.var_ranges[i].mask_hi, | 296 | mtrr_state.var_ranges[i].mask_hi, |
292 | mtrr_state.var_ranges[i].mask_lo >> 12, | 297 | mtrr_state.var_ranges[i].mask_lo >> 12, |
293 | mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff)); | 298 | mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff)); |
294 | else | 299 | else |
295 | printk(KERN_DEBUG " %u disabled\n", i); | 300 | pr_debug(" %u disabled\n", i); |
296 | } | ||
297 | if (mtrr_tom2) { | ||
298 | printk(KERN_DEBUG "TOM2: %016llx aka %lldM\n", | ||
299 | mtrr_tom2, mtrr_tom2>>20); | ||
300 | } | 301 | } |
302 | if (mtrr_tom2) | ||
303 | pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20); | ||
301 | } | 304 | } |
302 | 305 | ||
303 | /* Grab all of the MTRR state for this CPU into *state */ | 306 | /* Grab all of the MTRR state for this CPU into *state */ |
304 | void __init get_mtrr_state(void) | 307 | void __init get_mtrr_state(void) |
305 | { | 308 | { |
306 | unsigned int i; | ||
307 | struct mtrr_var_range *vrs; | 309 | struct mtrr_var_range *vrs; |
308 | unsigned lo, dummy; | ||
309 | unsigned long flags; | 310 | unsigned long flags; |
311 | unsigned lo, dummy; | ||
312 | unsigned int i; | ||
310 | 313 | ||
311 | vrs = mtrr_state.var_ranges; | 314 | vrs = mtrr_state.var_ranges; |
312 | 315 | ||
@@ -324,6 +327,7 @@ void __init get_mtrr_state(void) | |||
324 | 327 | ||
325 | if (amd_special_default_mtrr()) { | 328 | if (amd_special_default_mtrr()) { |
326 | unsigned low, high; | 329 | unsigned low, high; |
330 | |||
327 | /* TOP_MEM2 */ | 331 | /* TOP_MEM2 */ |
328 | rdmsr(MSR_K8_TOP_MEM2, low, high); | 332 | rdmsr(MSR_K8_TOP_MEM2, low, high); |
329 | mtrr_tom2 = high; | 333 | mtrr_tom2 = high; |
@@ -344,10 +348,9 @@ void __init get_mtrr_state(void) | |||
344 | 348 | ||
345 | post_set(); | 349 | post_set(); |
346 | local_irq_restore(flags); | 350 | local_irq_restore(flags); |
347 | |||
348 | } | 351 | } |
349 | 352 | ||
350 | /* Some BIOS's are fucked and don't set all MTRRs the same! */ | 353 | /* Some BIOS's are messed up and don't set all MTRRs the same! */ |
351 | void __init mtrr_state_warn(void) | 354 | void __init mtrr_state_warn(void) |
352 | { | 355 | { |
353 | unsigned long mask = smp_changes_mask; | 356 | unsigned long mask = smp_changes_mask; |
@@ -355,28 +358,33 @@ void __init mtrr_state_warn(void) | |||
355 | if (!mask) | 358 | if (!mask) |
356 | return; | 359 | return; |
357 | if (mask & MTRR_CHANGE_MASK_FIXED) | 360 | if (mask & MTRR_CHANGE_MASK_FIXED) |
358 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n"); | 361 | pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n"); |
359 | if (mask & MTRR_CHANGE_MASK_VARIABLE) | 362 | if (mask & MTRR_CHANGE_MASK_VARIABLE) |
360 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n"); | 363 | pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n"); |
361 | if (mask & MTRR_CHANGE_MASK_DEFTYPE) | 364 | if (mask & MTRR_CHANGE_MASK_DEFTYPE) |
362 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n"); | 365 | pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n"); |
366 | |||
363 | printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); | 367 | printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); |
364 | printk(KERN_INFO "mtrr: corrected configuration.\n"); | 368 | printk(KERN_INFO "mtrr: corrected configuration.\n"); |
365 | } | 369 | } |
366 | 370 | ||
367 | /* Doesn't attempt to pass an error out to MTRR users | 371 | /* |
368 | because it's quite complicated in some cases and probably not | 372 | * Doesn't attempt to pass an error out to MTRR users |
369 | worth it because the best error handling is to ignore it. */ | 373 | * because it's quite complicated in some cases and probably not |
374 | * worth it because the best error handling is to ignore it. | ||
375 | */ | ||
370 | void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) | 376 | void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) |
371 | { | 377 | { |
372 | if (wrmsr_safe(msr, a, b) < 0) | 378 | if (wrmsr_safe(msr, a, b) < 0) { |
373 | printk(KERN_ERR | 379 | printk(KERN_ERR |
374 | "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", | 380 | "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", |
375 | smp_processor_id(), msr, a, b); | 381 | smp_processor_id(), msr, a, b); |
382 | } | ||
376 | } | 383 | } |
377 | 384 | ||
378 | /** | 385 | /** |
379 | * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have | 386 | * set_fixed_range - checks & updates a fixed-range MTRR if it |
387 | * differs from the value it should have | ||
380 | * @msr: MSR address of the MTTR which should be checked and updated | 388 | * @msr: MSR address of the MTTR which should be checked and updated |
381 | * @changed: pointer which indicates whether the MTRR needed to be changed | 389 | * @changed: pointer which indicates whether the MTRR needed to be changed |
382 | * @msrwords: pointer to the MSR values which the MSR should have | 390 | * @msrwords: pointer to the MSR values which the MSR should have |
@@ -401,20 +409,23 @@ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords) | |||
401 | * | 409 | * |
402 | * Returns: The index of the region on success, else negative on error. | 410 | * Returns: The index of the region on success, else negative on error. |
403 | */ | 411 | */ |
404 | int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) | 412 | int |
413 | generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) | ||
405 | { | 414 | { |
406 | int i, max; | ||
407 | mtrr_type ltype; | ||
408 | unsigned long lbase, lsize; | 415 | unsigned long lbase, lsize; |
416 | mtrr_type ltype; | ||
417 | int i, max; | ||
409 | 418 | ||
410 | max = num_var_ranges; | 419 | max = num_var_ranges; |
411 | if (replace_reg >= 0 && replace_reg < max) | 420 | if (replace_reg >= 0 && replace_reg < max) |
412 | return replace_reg; | 421 | return replace_reg; |
422 | |||
413 | for (i = 0; i < max; ++i) { | 423 | for (i = 0; i < max; ++i) { |
414 | mtrr_if->get(i, &lbase, &lsize, &ltype); | 424 | mtrr_if->get(i, &lbase, &lsize, &ltype); |
415 | if (lsize == 0) | 425 | if (lsize == 0) |
416 | return i; | 426 | return i; |
417 | } | 427 | } |
428 | |||
418 | return -ENOSPC; | 429 | return -ENOSPC; |
419 | } | 430 | } |
420 | 431 | ||
@@ -434,7 +445,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
434 | rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); | 445 | rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); |
435 | 446 | ||
436 | if ((mask_lo & 0x800) == 0) { | 447 | if ((mask_lo & 0x800) == 0) { |
437 | /* Invalid (i.e. free) range */ | 448 | /* Invalid (i.e. free) range */ |
438 | *base = 0; | 449 | *base = 0; |
439 | *size = 0; | 450 | *size = 0; |
440 | *type = 0; | 451 | *type = 0; |
@@ -471,27 +482,31 @@ out_put_cpu: | |||
471 | } | 482 | } |
472 | 483 | ||
473 | /** | 484 | /** |
474 | * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set | 485 | * set_fixed_ranges - checks & updates the fixed-range MTRRs if they |
486 | * differ from the saved set | ||
475 | * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges() | 487 | * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges() |
476 | */ | 488 | */ |
477 | static int set_fixed_ranges(mtrr_type * frs) | 489 | static int set_fixed_ranges(mtrr_type *frs) |
478 | { | 490 | { |
479 | unsigned long long *saved = (unsigned long long *) frs; | 491 | unsigned long long *saved = (unsigned long long *)frs; |
480 | bool changed = false; | 492 | bool changed = false; |
481 | int block=-1, range; | 493 | int block = -1, range; |
482 | 494 | ||
483 | k8_check_syscfg_dram_mod_en(); | 495 | k8_check_syscfg_dram_mod_en(); |
484 | 496 | ||
485 | while (fixed_range_blocks[++block].ranges) | 497 | while (fixed_range_blocks[++block].ranges) { |
486 | for (range=0; range < fixed_range_blocks[block].ranges; range++) | 498 | for (range = 0; range < fixed_range_blocks[block].ranges; range++) |
487 | set_fixed_range(fixed_range_blocks[block].base_msr + range, | 499 | set_fixed_range(fixed_range_blocks[block].base_msr + range, |
488 | &changed, (unsigned int *) saved++); | 500 | &changed, (unsigned int *)saved++); |
501 | } | ||
489 | 502 | ||
490 | return changed; | 503 | return changed; |
491 | } | 504 | } |
492 | 505 | ||
493 | /* Set the MSR pair relating to a var range. Returns TRUE if | 506 | /* |
494 | changes are made */ | 507 | * Set the MSR pair relating to a var range. |
508 | * Returns true if changes are made. | ||
509 | */ | ||
495 | static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) | 510 | static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) |
496 | { | 511 | { |
497 | unsigned int lo, hi; | 512 | unsigned int lo, hi; |
@@ -501,6 +516,7 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) | |||
501 | if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) | 516 | if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) |
502 | || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != | 517 | || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != |
503 | (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { | 518 | (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { |
519 | |||
504 | mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); | 520 | mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); |
505 | changed = true; | 521 | changed = true; |
506 | } | 522 | } |
@@ -526,21 +542,26 @@ static u32 deftype_lo, deftype_hi; | |||
526 | */ | 542 | */ |
527 | static unsigned long set_mtrr_state(void) | 543 | static unsigned long set_mtrr_state(void) |
528 | { | 544 | { |
529 | unsigned int i; | ||
530 | unsigned long change_mask = 0; | 545 | unsigned long change_mask = 0; |
546 | unsigned int i; | ||
531 | 547 | ||
532 | for (i = 0; i < num_var_ranges; i++) | 548 | for (i = 0; i < num_var_ranges; i++) { |
533 | if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) | 549 | if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) |
534 | change_mask |= MTRR_CHANGE_MASK_VARIABLE; | 550 | change_mask |= MTRR_CHANGE_MASK_VARIABLE; |
551 | } | ||
535 | 552 | ||
536 | if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) | 553 | if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) |
537 | change_mask |= MTRR_CHANGE_MASK_FIXED; | 554 | change_mask |= MTRR_CHANGE_MASK_FIXED; |
538 | 555 | ||
539 | /* Set_mtrr_restore restores the old value of MTRRdefType, | 556 | /* |
540 | so to set it we fiddle with the saved value */ | 557 | * Set_mtrr_restore restores the old value of MTRRdefType, |
558 | * so to set it we fiddle with the saved value: | ||
559 | */ | ||
541 | if ((deftype_lo & 0xff) != mtrr_state.def_type | 560 | if ((deftype_lo & 0xff) != mtrr_state.def_type |
542 | || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) { | 561 | || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) { |
543 | deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10); | 562 | |
563 | deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | | ||
564 | (mtrr_state.enabled << 10); | ||
544 | change_mask |= MTRR_CHANGE_MASK_DEFTYPE; | 565 | change_mask |= MTRR_CHANGE_MASK_DEFTYPE; |
545 | } | 566 | } |
546 | 567 | ||
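The re-wrapped assignment packs the default memory type into bits 0-7 of MTRRdefType's low word and the two enable bits into bits 10-11; the & ~0xcff clears exactly those fields before OR-ing the new values in. A worked check of the bit arithmetic:

    #include <assert.h>

    int main(void)
    {
            unsigned int deftype_lo = 0x00000c06;   /* stale value */
            unsigned int def_type = 0x06;           /* write-back  */
            unsigned int enabled = 0x3;             /* fixed + var */

            deftype_lo = (deftype_lo & ~0xcff) | def_type |
                         (enabled << 10);

            assert((deftype_lo & 0xff) == 0x06);         /* type    */
            assert(((deftype_lo & 0xc00) >> 10) == 0x3); /* enables */
            return 0;
    }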
@@ -548,33 +569,36 @@ static unsigned long set_mtrr_state(void) | |||
548 | } | 569 | } |
549 | 570 | ||
550 | 571 | ||
551 | static unsigned long cr4 = 0; | 572 | static unsigned long cr4; |
552 | static DEFINE_SPINLOCK(set_atomicity_lock); | 573 | static DEFINE_SPINLOCK(set_atomicity_lock); |
553 | 574 | ||
554 | /* | 575 | /* |
555 | * Since we are disabling the cache don't allow any interrupts - they | 576 | * Since we are disabling the cache don't allow any interrupts, |
556 | * would run extremely slow and would only increase the pain. The caller must | 577 | * they would run extremely slow and would only increase the pain. |
557 | * ensure that local interrupts are disabled and are reenabled after post_set() | 578 | * |
558 | * has been called. | 579 | * The caller must ensure that local interrupts are disabled and |
580 | * are reenabled after post_set() has been called. | ||
559 | */ | 581 | */ |
560 | |||
561 | static void prepare_set(void) __acquires(set_atomicity_lock) | 582 | static void prepare_set(void) __acquires(set_atomicity_lock) |
562 | { | 583 | { |
563 | unsigned long cr0; | 584 | unsigned long cr0; |
564 | 585 | ||
565 | /* Note that this is not ideal, since the cache is only flushed/disabled | 586 | /* |
566 | for this CPU while the MTRRs are changed, but changing this requires | 587 | * Note that this is not ideal |
567 | more invasive changes to the way the kernel boots */ | 588 | * since the cache is only flushed/disabled for this CPU while the |
589 | * MTRRs are changed, but changing this requires more invasive | ||
590 | * changes to the way the kernel boots | ||
591 | */ | ||
568 | 592 | ||
569 | spin_lock(&set_atomicity_lock); | 593 | spin_lock(&set_atomicity_lock); |
570 | 594 | ||
571 | /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ | 595 | /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ |
572 | cr0 = read_cr0() | X86_CR0_CD; | 596 | cr0 = read_cr0() | X86_CR0_CD; |
573 | write_cr0(cr0); | 597 | write_cr0(cr0); |
574 | wbinvd(); | 598 | wbinvd(); |
575 | 599 | ||
576 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ | 600 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ |
577 | if ( cpu_has_pge ) { | 601 | if (cpu_has_pge) { |
578 | cr4 = read_cr4(); | 602 | cr4 = read_cr4(); |
579 | write_cr4(cr4 & ~X86_CR4_PGE); | 603 | write_cr4(cr4 & ~X86_CR4_PGE); |
580 | } | 604 | } |
@@ -582,26 +606,26 @@ static void prepare_set(void) __acquires(set_atomicity_lock) | |||
582 | /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ | 606 | /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ |
583 | __flush_tlb(); | 607 | __flush_tlb(); |
584 | 608 | ||
585 | /* Save MTRR state */ | 609 | /* Save MTRR state */ |
586 | rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); | 610 | rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); |
587 | 611 | ||
588 | /* Disable MTRRs, and set the default type to uncached */ | 612 | /* Disable MTRRs, and set the default type to uncached */ |
589 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); | 613 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); |
590 | } | 614 | } |
591 | 615 | ||
592 | static void post_set(void) __releases(set_atomicity_lock) | 616 | static void post_set(void) __releases(set_atomicity_lock) |
593 | { | 617 | { |
594 | /* Flush TLBs (no need to flush caches - they are disabled) */ | 618 | /* Flush TLBs (no need to flush caches - they are disabled) */ |
595 | __flush_tlb(); | 619 | __flush_tlb(); |
596 | 620 | ||
597 | /* Intel (P6) standard MTRRs */ | 621 | /* Intel (P6) standard MTRRs */ |
598 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); | 622 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); |
599 | 623 | ||
600 | /* Enable caches */ | 624 | /* Enable caches */ |
601 | write_cr0(read_cr0() & 0xbfffffff); | 625 | write_cr0(read_cr0() & 0xbfffffff); |
602 | 626 | ||
603 | /* Restore value of CR4 */ | 627 | /* Restore value of CR4 */ |
604 | if ( cpu_has_pge ) | 628 | if (cpu_has_pge) |
605 | write_cr4(cr4); | 629 | write_cr4(cr4); |
606 | spin_unlock(&set_atomicity_lock); | 630 | spin_unlock(&set_atomicity_lock); |
607 | } | 631 | } |
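prepare_set() and post_set() carry __acquires(set_atomicity_lock) and __releases(set_atomicity_lock) because the spinlock is taken in one function and released in the other; the annotations let sparse track the lock context across the split. In a normal build they expand to nothing; roughly (quoting the kernel's compiler headers from memory, so treat as approximate):

    #ifdef __CHECKER__
    # define __acquires(x)  __attribute__((context(x, 0, 1)))
    # define __releases(x)  __attribute__((context(x, 1, 0)))
    #else
    # define __acquires(x)
    # define __releases(x)
    #endif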
@@ -623,24 +647,27 @@ static void generic_set_all(void) | |||
623 | post_set(); | 647 | post_set(); |
624 | local_irq_restore(flags); | 648 | local_irq_restore(flags); |
625 | 649 | ||
626 | /* Use the atomic bitops to update the global mask */ | 650 | /* Use the atomic bitops to update the global mask */ |
627 | for (count = 0; count < sizeof mask * 8; ++count) { | 651 | for (count = 0; count < sizeof mask * 8; ++count) { |
628 | if (mask & 0x01) | 652 | if (mask & 0x01) |
629 | set_bit(count, &smp_changes_mask); | 653 | set_bit(count, &smp_changes_mask); |
630 | mask >>= 1; | 654 | mask >>= 1; |
631 | } | 655 | } |
632 | 656 | ||
633 | } | 657 | } |
634 | 658 | ||
659 | /** | ||
660 | * generic_set_mtrr - set variable MTRR register on the local CPU. | ||
661 | * | ||
662 | * @reg: The register to set. | ||
663 | * @base: The base address of the region. | ||
664 | * @size: The size of the region. If this is 0 the region is disabled. | ||
665 | * @type: The type of the region. | ||
666 | * | ||
667 | * Returns nothing. | ||
668 | */ | ||
635 | static void generic_set_mtrr(unsigned int reg, unsigned long base, | 669 | static void generic_set_mtrr(unsigned int reg, unsigned long base, |
636 | unsigned long size, mtrr_type type) | 670 | unsigned long size, mtrr_type type) |
637 | /* [SUMMARY] Set variable MTRR register on the local CPU. | ||
638 | <reg> The register to set. | ||
639 | <base> The base address of the region. | ||
640 | <size> The size of the region. If this is 0 the region is disabled. | ||
641 | <type> The type of the region. | ||
642 | [RETURNS] Nothing. | ||
643 | */ | ||
644 | { | 671 | { |
645 | unsigned long flags; | 672 | unsigned long flags; |
646 | struct mtrr_var_range *vr; | 673 | struct mtrr_var_range *vr; |
@@ -651,8 +678,10 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base, | |||
651 | prepare_set(); | 678 | prepare_set(); |
652 | 679 | ||
653 | if (size == 0) { | 680 | if (size == 0) { |
654 | /* The invalid bit is kept in the mask, so we simply clear the | 681 | /* |
655 | relevant mask register to disable a range. */ | 682 | * The invalid bit is kept in the mask, so we simply |
683 | * clear the relevant mask register to disable a range. | ||
684 | */ | ||
656 | mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); | 685 | mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); |
657 | memset(vr, 0, sizeof(struct mtrr_var_range)); | 686 | memset(vr, 0, sizeof(struct mtrr_var_range)); |
658 | } else { | 687 | } else { |
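As the reflowed comment says, clearing the mask register is all it takes to disable a range: bit 11 of MTRRphysMask is the valid bit, the same bit generic_get_mtrr() tests (mask_lo & 0x800) to report a slot as free. A worked mask for an enabled 128MB range, assuming 36 physical address bits:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t phys_bits = 36;
            uint64_t size = 128ULL << 20;           /* 128MB region */
            uint64_t mask = (~(size - 1) & ((1ULL << phys_bits) - 1))
                            | (1 << 11);            /* valid bit    */

            assert(mask == 0xFF8000800ULL);
            assert(mask & 0x800);   /* valid: slot is in use */
            return 0;
    }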
@@ -669,46 +698,50 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base, | |||
669 | local_irq_restore(flags); | 698 | local_irq_restore(flags); |
670 | } | 699 | } |
671 | 700 | ||
672 | int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type) | 701 | int generic_validate_add_page(unsigned long base, unsigned long size, |
702 | unsigned int type) | ||
673 | { | 703 | { |
674 | unsigned long lbase, last; | 704 | unsigned long lbase, last; |
675 | 705 | ||
676 | /* For Intel PPro stepping <= 7, must be 4 MiB aligned | 706 | /* |
677 | and not touch 0x70000000->0x7003FFFF */ | 707 | * For Intel PPro stepping <= 7 |
708 | * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF | ||
709 | */ | ||
678 | if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && | 710 | if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && |
679 | boot_cpu_data.x86_model == 1 && | 711 | boot_cpu_data.x86_model == 1 && |
680 | boot_cpu_data.x86_mask <= 7) { | 712 | boot_cpu_data.x86_mask <= 7) { |
681 | if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { | 713 | if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { |
682 | printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); | 714 | pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); |
683 | return -EINVAL; | 715 | return -EINVAL; |
684 | } | 716 | } |
685 | if (!(base + size < 0x70000 || base > 0x7003F) && | 717 | if (!(base + size < 0x70000 || base > 0x7003F) && |
686 | (type == MTRR_TYPE_WRCOMB | 718 | (type == MTRR_TYPE_WRCOMB |
687 | || type == MTRR_TYPE_WRBACK)) { | 719 | || type == MTRR_TYPE_WRBACK)) { |
688 | printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); | 720 | pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); |
689 | return -EINVAL; | 721 | return -EINVAL; |
690 | } | 722 | } |
691 | } | 723 | } |
692 | 724 | ||
693 | /* Check upper bits of base and last are equal and lower bits are 0 | 725 | /* |
694 | for base and 1 for last */ | 726 | * Check upper bits of base and last are equal and lower bits are 0 |
727 | * for base and 1 for last | ||
728 | */ | ||
695 | last = base + size - 1; | 729 | last = base + size - 1; |
696 | for (lbase = base; !(lbase & 1) && (last & 1); | 730 | for (lbase = base; !(lbase & 1) && (last & 1); |
697 | lbase = lbase >> 1, last = last >> 1) ; | 731 | lbase = lbase >> 1, last = last >> 1) |
732 | ; | ||
698 | if (lbase != last) { | 733 | if (lbase != last) { |
699 | printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", | 734 | pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size); |
700 | base, size); | ||
701 | return -EINVAL; | 735 | return -EINVAL; |
702 | } | 736 | } |
703 | return 0; | 737 | return 0; |
704 | } | 738 | } |
705 | 739 | ||
706 | |||
707 | static int generic_have_wrcomb(void) | 740 | static int generic_have_wrcomb(void) |
708 | { | 741 | { |
709 | unsigned long config, dummy; | 742 | unsigned long config, dummy; |
710 | rdmsr(MSR_MTRRcap, config, dummy); | 743 | rdmsr(MSR_MTRRcap, config, dummy); |
711 | return (config & (1 << 10)); | 744 | return config & (1 << 10); |
712 | } | 745 | } |
713 | 746 | ||
714 | int positive_have_wrcomb(void) | 747 | int positive_have_wrcomb(void) |
@@ -716,14 +749,15 @@ int positive_have_wrcomb(void) | |||
716 | return 1; | 749 | return 1; |
717 | } | 750 | } |
718 | 751 | ||
719 | /* generic structure... | 752 | /* |
753 | * Generic structure... | ||
720 | */ | 754 | */ |
721 | struct mtrr_ops generic_mtrr_ops = { | 755 | struct mtrr_ops generic_mtrr_ops = { |
722 | .use_intel_if = 1, | 756 | .use_intel_if = 1, |
723 | .set_all = generic_set_all, | 757 | .set_all = generic_set_all, |
724 | .get = generic_get_mtrr, | 758 | .get = generic_get_mtrr, |
725 | .get_free_region = generic_get_free_region, | 759 | .get_free_region = generic_get_free_region, |
726 | .set = generic_set_mtrr, | 760 | .set = generic_set_mtrr, |
727 | .validate_add_page = generic_validate_add_page, | 761 | .validate_add_page = generic_validate_add_page, |
728 | .have_wrcomb = generic_have_wrcomb, | 762 | .have_wrcomb = generic_have_wrcomb, |
729 | }; | 763 | }; |
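The alignment loop reflowed near the end of generic_validate_add_page() strips common trailing bits from base and last; it ends with the two equal exactly when size is a power of two and base is aligned to it. A more explicit equivalent with a couple of checks (semantics inferred from that loop):

    #include <assert.h>

    /* Same test as the lbase/last shifting loop in
     * generic_validate_add_page(). */
    static int range_ok(unsigned long base, unsigned long size)
    {
            unsigned long last = base + size - 1;

            while (!(base & 1) && (last & 1)) {
                    base >>= 1;
                    last >>= 1;
            }
            return base == last;
    }

    int main(void)
    {
            assert(range_ok(0x100000, 0x100000));   /* aligned pow2 */
            assert(!range_ok(0x100000, 0x180000));  /* not a pow2   */
            assert(!range_ok(0x080000, 0x100000));  /* misaligned   */
            return 0;
    }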
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index fb73a52913a4..08b6ea4c62b4 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c | |||
@@ -1,27 +1,28 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/proc_fs.h> | ||
3 | #include <linux/capability.h> | 1 | #include <linux/capability.h> |
4 | #include <linux/ctype.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/seq_file.h> | 2 | #include <linux/seq_file.h> |
7 | #include <asm/uaccess.h> | 3 | #include <linux/uaccess.h> |
4 | #include <linux/proc_fs.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/ctype.h> | ||
7 | #include <linux/init.h> | ||
8 | 8 | ||
9 | #define LINE_SIZE 80 | 9 | #define LINE_SIZE 80 |
10 | 10 | ||
11 | #include <asm/mtrr.h> | 11 | #include <asm/mtrr.h> |
12 | |||
12 | #include "mtrr.h" | 13 | #include "mtrr.h" |
13 | 14 | ||
14 | #define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private) | 15 | #define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private) |
15 | 16 | ||
16 | static const char *const mtrr_strings[MTRR_NUM_TYPES] = | 17 | static const char *const mtrr_strings[MTRR_NUM_TYPES] = |
17 | { | 18 | { |
18 | "uncachable", /* 0 */ | 19 | "uncachable", /* 0 */ |
19 | "write-combining", /* 1 */ | 20 | "write-combining", /* 1 */ |
20 | "?", /* 2 */ | 21 | "?", /* 2 */ |
21 | "?", /* 3 */ | 22 | "?", /* 3 */ |
22 | "write-through", /* 4 */ | 23 | "write-through", /* 4 */ |
23 | "write-protect", /* 5 */ | 24 | "write-protect", /* 5 */ |
24 | "write-back", /* 6 */ | 25 | "write-back", /* 6 */ |
25 | }; | 26 | }; |
26 | 27 | ||
27 | const char *mtrr_attrib_to_str(int x) | 28 | const char *mtrr_attrib_to_str(int x) |
@@ -35,8 +36,8 @@ static int | |||
35 | mtrr_file_add(unsigned long base, unsigned long size, | 36 | mtrr_file_add(unsigned long base, unsigned long size, |
36 | unsigned int type, bool increment, struct file *file, int page) | 37 | unsigned int type, bool increment, struct file *file, int page) |
37 | { | 38 | { |
39 | unsigned int *fcount = FILE_FCOUNT(file); | ||
38 | int reg, max; | 40 | int reg, max; |
39 | unsigned int *fcount = FILE_FCOUNT(file); | ||
40 | 41 | ||
41 | max = num_var_ranges; | 42 | max = num_var_ranges; |
42 | if (fcount == NULL) { | 43 | if (fcount == NULL) { |
@@ -61,8 +62,8 @@ static int | |||
61 | mtrr_file_del(unsigned long base, unsigned long size, | 62 | mtrr_file_del(unsigned long base, unsigned long size, |
62 | struct file *file, int page) | 63 | struct file *file, int page) |
63 | { | 64 | { |
64 | int reg; | ||
65 | unsigned int *fcount = FILE_FCOUNT(file); | 65 | unsigned int *fcount = FILE_FCOUNT(file); |
66 | int reg; | ||
66 | 67 | ||
67 | if (!page) { | 68 | if (!page) { |
68 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) | 69 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) |
@@ -81,13 +82,14 @@ mtrr_file_del(unsigned long base, unsigned long size, | |||
81 | return reg; | 82 | return reg; |
82 | } | 83 | } |
83 | 84 | ||
84 | /* RED-PEN: seq_file can seek now. this is ignored. */ | 85 | /* |
86 | * seq_file can seek but we ignore it. | ||
87 | * | ||
88 | * Format of control line: | ||
89 | * "base=%Lx size=%Lx type=%s" or "disable=%d" | ||
90 | */ | ||
85 | static ssize_t | 91 | static ssize_t |
86 | mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) | 92 | mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) |
87 | /* Format of control line: | ||
88 | "base=%Lx size=%Lx type=%s" OR: | ||
89 | "disable=%d" | ||
90 | */ | ||
91 | { | 93 | { |
92 | int i, err; | 94 | int i, err; |
93 | unsigned long reg; | 95 | unsigned long reg; |
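The relocated comment documents the /proc/mtrr control-line grammar that this write handler parses. For reference, a minimal userspace program adding a region through it (the base and size are examples only; needs root):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/mtrr", "w");

            if (!f)
                    return 1;
            /* Matches the "base=%Lx size=%Lx type=%s" line above;
             * the type string comes from mtrr_strings[]. */
            fprintf(f, "base=0xd8000000 size=0x4000000 type=write-combining\n");
            return fclose(f) ? 1 : 0;
    }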
@@ -100,15 +102,18 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) | |||
100 | return -EPERM; | 102 | return -EPERM; |
101 | if (!len) | 103 | if (!len) |
102 | return -EINVAL; | 104 | return -EINVAL; |
105 | |||
103 | memset(line, 0, LINE_SIZE); | 106 | memset(line, 0, LINE_SIZE); |
104 | if (len > LINE_SIZE) | 107 | if (len > LINE_SIZE) |
105 | len = LINE_SIZE; | 108 | len = LINE_SIZE; |
106 | if (copy_from_user(line, buf, len - 1)) | 109 | if (copy_from_user(line, buf, len - 1)) |
107 | return -EFAULT; | 110 | return -EFAULT; |
111 | |||
108 | linelen = strlen(line); | 112 | linelen = strlen(line); |
109 | ptr = line + linelen - 1; | 113 | ptr = line + linelen - 1; |
110 | if (linelen && *ptr == '\n') | 114 | if (linelen && *ptr == '\n') |
111 | *ptr = '\0'; | 115 | *ptr = '\0'; |
116 | |||
112 | if (!strncmp(line, "disable=", 8)) { | 117 | if (!strncmp(line, "disable=", 8)) { |
113 | reg = simple_strtoul(line + 8, &ptr, 0); | 118 | reg = simple_strtoul(line + 8, &ptr, 0); |
114 | err = mtrr_del_page(reg, 0, 0); | 119 | err = mtrr_del_page(reg, 0, 0); |
@@ -116,28 +121,35 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) | |||
116 | return err; | 121 | return err; |
117 | return len; | 122 | return len; |
118 | } | 123 | } |
124 | |||
119 | if (strncmp(line, "base=", 5)) | 125 | if (strncmp(line, "base=", 5)) |
120 | return -EINVAL; | 126 | return -EINVAL; |
127 | |||
121 | base = simple_strtoull(line + 5, &ptr, 0); | 128 | base = simple_strtoull(line + 5, &ptr, 0); |
122 | for (; isspace(*ptr); ++ptr) ; | 129 | for (; isspace(*ptr); ++ptr) |
130 | ; | ||
131 | |||
123 | if (strncmp(ptr, "size=", 5)) | 132 | if (strncmp(ptr, "size=", 5)) |
124 | return -EINVAL; | 133 | return -EINVAL; |
134 | |||
125 | size = simple_strtoull(ptr + 5, &ptr, 0); | 135 | size = simple_strtoull(ptr + 5, &ptr, 0); |
126 | if ((base & 0xfff) || (size & 0xfff)) | 136 | if ((base & 0xfff) || (size & 0xfff)) |
127 | return -EINVAL; | 137 | return -EINVAL; |
128 | for (; isspace(*ptr); ++ptr) ; | 138 | for (; isspace(*ptr); ++ptr) |
139 | ; | ||
140 | |||
129 | if (strncmp(ptr, "type=", 5)) | 141 | if (strncmp(ptr, "type=", 5)) |
130 | return -EINVAL; | 142 | return -EINVAL; |
131 | ptr += 5; | 143 | ptr += 5; |
132 | for (; isspace(*ptr); ++ptr) ; | 144 | for (; isspace(*ptr); ++ptr) |
145 | ; | ||
146 | |||
133 | for (i = 0; i < MTRR_NUM_TYPES; ++i) { | 147 | for (i = 0; i < MTRR_NUM_TYPES; ++i) { |
134 | if (strcmp(ptr, mtrr_strings[i])) | 148 | if (strcmp(ptr, mtrr_strings[i])) |
135 | continue; | 149 | continue; |
136 | base >>= PAGE_SHIFT; | 150 | base >>= PAGE_SHIFT; |
137 | size >>= PAGE_SHIFT; | 151 | size >>= PAGE_SHIFT; |
138 | err = | 152 | err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true); |
139 | mtrr_add_page((unsigned long) base, (unsigned long) size, i, | ||
140 | true); | ||
141 | if (err < 0) | 153 | if (err < 0) |
142 | return err; | 154 | return err; |
143 | return len; | 155 | return len; |
@@ -181,7 +193,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
181 | case MTRRIOC32_SET_PAGE_ENTRY: | 193 | case MTRRIOC32_SET_PAGE_ENTRY: |
182 | case MTRRIOC32_DEL_PAGE_ENTRY: | 194 | case MTRRIOC32_DEL_PAGE_ENTRY: |
183 | case MTRRIOC32_KILL_PAGE_ENTRY: { | 195 | case MTRRIOC32_KILL_PAGE_ENTRY: { |
184 | struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)__arg; | 196 | struct mtrr_sentry32 __user *s32; |
197 | |||
198 | s32 = (struct mtrr_sentry32 __user *)__arg; | ||
185 | err = get_user(sentry.base, &s32->base); | 199 | err = get_user(sentry.base, &s32->base); |
186 | err |= get_user(sentry.size, &s32->size); | 200 | err |= get_user(sentry.size, &s32->size); |
187 | err |= get_user(sentry.type, &s32->type); | 201 | err |= get_user(sentry.type, &s32->type); |
@@ -191,7 +205,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
191 | } | 205 | } |
192 | case MTRRIOC32_GET_ENTRY: | 206 | case MTRRIOC32_GET_ENTRY: |
193 | case MTRRIOC32_GET_PAGE_ENTRY: { | 207 | case MTRRIOC32_GET_PAGE_ENTRY: { |
194 | struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg; | 208 | struct mtrr_gentry32 __user *g32; |
209 | |||
210 | g32 = (struct mtrr_gentry32 __user *)__arg; | ||
195 | err = get_user(gentry.regnum, &g32->regnum); | 211 | err = get_user(gentry.regnum, &g32->regnum); |
196 | err |= get_user(gentry.base, &g32->base); | 212 | err |= get_user(gentry.base, &g32->base); |
197 | err |= get_user(gentry.size, &g32->size); | 213 | err |= get_user(gentry.size, &g32->size); |
@@ -314,7 +330,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
314 | if (err) | 330 | if (err) |
315 | return err; | 331 | return err; |
316 | 332 | ||
317 | switch(cmd) { | 333 | switch (cmd) { |
318 | case MTRRIOC_GET_ENTRY: | 334 | case MTRRIOC_GET_ENTRY: |
319 | case MTRRIOC_GET_PAGE_ENTRY: | 335 | case MTRRIOC_GET_PAGE_ENTRY: |
320 | if (copy_to_user(arg, &gentry, sizeof gentry)) | 336 | if (copy_to_user(arg, &gentry, sizeof gentry)) |
@@ -323,7 +339,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
323 | #ifdef CONFIG_COMPAT | 339 | #ifdef CONFIG_COMPAT |
324 | case MTRRIOC32_GET_ENTRY: | 340 | case MTRRIOC32_GET_ENTRY: |
325 | case MTRRIOC32_GET_PAGE_ENTRY: { | 341 | case MTRRIOC32_GET_PAGE_ENTRY: { |
326 | struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg; | 342 | struct mtrr_gentry32 __user *g32; |
343 | |||
344 | g32 = (struct mtrr_gentry32 __user *)__arg; | ||
327 | err = put_user(gentry.base, &g32->base); | 345 | err = put_user(gentry.base, &g32->base); |
328 | err |= put_user(gentry.size, &g32->size); | 346 | err |= put_user(gentry.size, &g32->size); |
329 | err |= put_user(gentry.regnum, &g32->regnum); | 347 | err |= put_user(gentry.regnum, &g32->regnum); |
@@ -335,11 +353,10 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
335 | return err; | 353 | return err; |
336 | } | 354 | } |
337 | 355 | ||
338 | static int | 356 | static int mtrr_close(struct inode *ino, struct file *file) |
339 | mtrr_close(struct inode *ino, struct file *file) | ||
340 | { | 357 | { |
341 | int i, max; | ||
342 | unsigned int *fcount = FILE_FCOUNT(file); | 358 | unsigned int *fcount = FILE_FCOUNT(file); |
359 | int i, max; | ||
343 | 360 | ||
344 | if (fcount != NULL) { | 361 | if (fcount != NULL) { |
345 | max = num_var_ranges; | 362 | max = num_var_ranges; |
@@ -359,22 +376,22 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset); | |||
359 | 376 | ||
360 | static int mtrr_open(struct inode *inode, struct file *file) | 377 | static int mtrr_open(struct inode *inode, struct file *file) |
361 | { | 378 | { |
362 | if (!mtrr_if) | 379 | if (!mtrr_if) |
363 | return -EIO; | 380 | return -EIO; |
364 | if (!mtrr_if->get) | 381 | if (!mtrr_if->get) |
365 | return -ENXIO; | 382 | return -ENXIO; |
366 | return single_open(file, mtrr_seq_show, NULL); | 383 | return single_open(file, mtrr_seq_show, NULL); |
367 | } | 384 | } |
368 | 385 | ||
369 | static const struct file_operations mtrr_fops = { | 386 | static const struct file_operations mtrr_fops = { |
370 | .owner = THIS_MODULE, | 387 | .owner = THIS_MODULE, |
371 | .open = mtrr_open, | 388 | .open = mtrr_open, |
372 | .read = seq_read, | 389 | .read = seq_read, |
373 | .llseek = seq_lseek, | 390 | .llseek = seq_lseek, |
374 | .write = mtrr_write, | 391 | .write = mtrr_write, |
375 | .unlocked_ioctl = mtrr_ioctl, | 392 | .unlocked_ioctl = mtrr_ioctl, |
376 | .compat_ioctl = mtrr_ioctl, | 393 | .compat_ioctl = mtrr_ioctl, |
377 | .release = mtrr_close, | 394 | .release = mtrr_close, |
378 | }; | 395 | }; |
379 | 396 | ||
380 | static int mtrr_seq_show(struct seq_file *seq, void *offset) | 397 | static int mtrr_seq_show(struct seq_file *seq, void *offset) |
@@ -388,23 +405,24 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset) | |||
388 | max = num_var_ranges; | 405 | max = num_var_ranges; |
389 | for (i = 0; i < max; i++) { | 406 | for (i = 0; i < max; i++) { |
390 | mtrr_if->get(i, &base, &size, &type); | 407 | mtrr_if->get(i, &base, &size, &type); |
391 | if (size == 0) | 408 | if (size == 0) { |
392 | mtrr_usage_table[i] = 0; | 409 | mtrr_usage_table[i] = 0; |
393 | else { | 410 | continue; |
394 | if (size < (0x100000 >> PAGE_SHIFT)) { | ||
395 | /* less than 1MB */ | ||
396 | factor = 'K'; | ||
397 | size <<= PAGE_SHIFT - 10; | ||
398 | } else { | ||
399 | factor = 'M'; | ||
400 | size >>= 20 - PAGE_SHIFT; | ||
401 | } | ||
402 | /* RED-PEN: base can be > 32bit */ | ||
403 | len += seq_printf(seq, | ||
404 | "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n", | ||
405 | i, base, base >> (20 - PAGE_SHIFT), size, factor, | ||
406 | mtrr_usage_table[i], mtrr_attrib_to_str(type)); | ||
407 | } | 411 | } |
412 | if (size < (0x100000 >> PAGE_SHIFT)) { | ||
413 | /* less than 1MB */ | ||
414 | factor = 'K'; | ||
415 | size <<= PAGE_SHIFT - 10; | ||
416 | } else { | ||
417 | factor = 'M'; | ||
418 | size >>= 20 - PAGE_SHIFT; | ||
419 | } | ||
420 | /* Base can be > 32bit */ | ||
421 | len += seq_printf(seq, "reg%02i: base=0x%06lx000 " | ||
422 | "(%5luMB), size=%5lu%cB, count=%d: %s\n", | ||
423 | i, base, base >> (20 - PAGE_SHIFT), size, | ||
424 | factor, mtrr_usage_table[i], | ||
425 | mtrr_attrib_to_str(type)); | ||
408 | } | 426 | } |
409 | return 0; | 427 | return 0; |
410 | } | 428 | } |
@@ -422,6 +440,5 @@ static int __init mtrr_if_init(void) | |||
422 | proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); | 440 | proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); |
423 | return 0; | 441 | return 0; |
424 | } | 442 | } |
425 | |||
426 | arch_initcall(mtrr_if_init); | 443 | arch_initcall(mtrr_if_init); |
427 | #endif /* CONFIG_PROC_FS */ | 444 | #endif /* CONFIG_PROC_FS */ |
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 8fc248b5aeaf..7af0f88a4163 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -25,43 +25,48 @@ | |||
25 | Operating System Writer's Guide" (Intel document number 242692), | 25 | Operating System Writer's Guide" (Intel document number 242692), |
26 | section 11.11.7 | 26 | section 11.11.7 |
27 | 27 | ||
28 | This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> | 28 | This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> |
29 | on 6-7 March 2002. | 29 | on 6-7 March 2002. |
30 | Source: Intel Architecture Software Developers Manual, Volume 3: | 30 | Source: Intel Architecture Software Developers Manual, Volume 3: |
31 | System Programming Guide; Section 9.11. (1997 edition - PPro). | 31 | System Programming Guide; Section 9.11. (1997 edition - PPro). |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #define DEBUG | ||
35 | |||
36 | #include <linux/types.h> /* FIXME: kvm_para.h needs this */ | ||
37 | |||
38 | #include <linux/kvm_para.h> | ||
39 | #include <linux/uaccess.h> | ||
34 | #include <linux/module.h> | 40 | #include <linux/module.h> |
41 | #include <linux/mutex.h> | ||
35 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/sort.h> | ||
44 | #include <linux/cpu.h> | ||
36 | #include <linux/pci.h> | 45 | #include <linux/pci.h> |
37 | #include <linux/smp.h> | 46 | #include <linux/smp.h> |
38 | #include <linux/cpu.h> | ||
39 | #include <linux/mutex.h> | ||
40 | #include <linux/sort.h> | ||
41 | 47 | ||
48 | #include <asm/processor.h> | ||
42 | #include <asm/e820.h> | 49 | #include <asm/e820.h> |
43 | #include <asm/mtrr.h> | 50 | #include <asm/mtrr.h> |
44 | #include <asm/uaccess.h> | ||
45 | #include <asm/processor.h> | ||
46 | #include <asm/msr.h> | 51 | #include <asm/msr.h> |
47 | #include <asm/kvm_para.h> | 52 | |
48 | #include "mtrr.h" | 53 | #include "mtrr.h" |
49 | 54 | ||
50 | u32 num_var_ranges = 0; | 55 | u32 num_var_ranges; |
51 | 56 | ||
52 | unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; | 57 | unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; |
53 | static DEFINE_MUTEX(mtrr_mutex); | 58 | static DEFINE_MUTEX(mtrr_mutex); |
54 | 59 | ||
55 | u64 size_or_mask, size_and_mask; | 60 | u64 size_or_mask, size_and_mask; |
56 | 61 | ||
57 | static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {}; | 62 | static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM]; |
58 | 63 | ||
59 | struct mtrr_ops * mtrr_if = NULL; | 64 | struct mtrr_ops *mtrr_if; |
60 | 65 | ||
61 | static void set_mtrr(unsigned int reg, unsigned long base, | 66 | static void set_mtrr(unsigned int reg, unsigned long base, |
62 | unsigned long size, mtrr_type type); | 67 | unsigned long size, mtrr_type type); |
63 | 68 | ||
64 | void set_mtrr_ops(struct mtrr_ops * ops) | 69 | void set_mtrr_ops(struct mtrr_ops *ops) |
65 | { | 70 | { |
66 | if (ops->vendor && ops->vendor < X86_VENDOR_NUM) | 71 | if (ops->vendor && ops->vendor < X86_VENDOR_NUM) |
67 | mtrr_ops[ops->vendor] = ops; | 72 | mtrr_ops[ops->vendor] = ops; |
@@ -72,30 +77,36 @@ static int have_wrcomb(void) | |||
72 | { | 77 | { |
73 | struct pci_dev *dev; | 78 | struct pci_dev *dev; |
74 | u8 rev; | 79 | u8 rev; |
75 | 80 | ||
76 | if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) { | 81 | dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL); |
77 | /* ServerWorks LE chipsets < rev 6 have problems with write-combining | 82 | if (dev != NULL) { |
78 | Don't allow it and leave room for other chipsets to be tagged */ | 83 | /* |
84 | * ServerWorks LE chipsets < rev 6 have problems with | ||
85 | * write-combining. Don't allow it and leave room for other | ||
86 | * chipsets to be tagged | ||
87 | */ | ||
79 | if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && | 88 | if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && |
80 | dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { | 89 | dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { |
81 | pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); | 90 | pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); |
82 | if (rev <= 5) { | 91 | if (rev <= 5) { |
83 | printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n"); | 92 | pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n"); |
84 | pci_dev_put(dev); | 93 | pci_dev_put(dev); |
85 | return 0; | 94 | return 0; |
86 | } | 95 | } |
87 | } | 96 | } |
88 | /* Intel 450NX errata # 23. Non ascending cacheline evictions to | 97 | /* |
89 | write combining memory may resulting in data corruption */ | 98 | * Intel 450NX errata # 23. Non ascending cacheline evictions to |
99 | * write combining memory may resulting in data corruption | ||
100 | */ | ||
90 | if (dev->vendor == PCI_VENDOR_ID_INTEL && | 101 | if (dev->vendor == PCI_VENDOR_ID_INTEL && |
91 | dev->device == PCI_DEVICE_ID_INTEL_82451NX) { | 102 | dev->device == PCI_DEVICE_ID_INTEL_82451NX) { |
92 | printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); | 103 | pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); |
93 | pci_dev_put(dev); | 104 | pci_dev_put(dev); |
94 | return 0; | 105 | return 0; |
95 | } | 106 | } |
96 | pci_dev_put(dev); | 107 | pci_dev_put(dev); |
97 | } | 108 | } |
98 | return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0); | 109 | return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0; |
99 | } | 110 | } |
100 | 111 | ||
101 | /* This function returns the number of variable MTRRs */ | 112 | /* This function returns the number of variable MTRRs */ |
@@ -103,12 +114,13 @@ static void __init set_num_var_ranges(void) | |||
103 | { | 114 | { |
104 | unsigned long config = 0, dummy; | 115 | unsigned long config = 0, dummy; |
105 | 116 | ||
106 | if (use_intel()) { | 117 | if (use_intel()) |
107 | rdmsr(MSR_MTRRcap, config, dummy); | 118 | rdmsr(MSR_MTRRcap, config, dummy); |
108 | } else if (is_cpu(AMD)) | 119 | else if (is_cpu(AMD)) |
109 | config = 2; | 120 | config = 2; |
110 | else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) | 121 | else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) |
111 | config = 8; | 122 | config = 8; |
123 | |||
112 | num_var_ranges = config & 0xff; | 124 | num_var_ranges = config & 0xff; |
113 | } | 125 | } |
114 | 126 | ||
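For the generic Intel path the count is read from the MTRRcap MSR: bits 0-7 give the number of variable ranges, and bit 10 is the write-combining capability that generic_have_wrcomb() tests in generic.c. Decoding a sample value:

    #include <assert.h>

    int main(void)
    {
            unsigned long config = 0x508;   /* sample MTRRcap value */

            assert((config & 0xff) == 8);   /* 8 variable ranges */
            assert(config & (1 << 10));     /* write-combining ok */
            return 0;
    }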
@@ -130,10 +142,12 @@ struct set_mtrr_data { | |||
130 | mtrr_type smp_type; | 142 | mtrr_type smp_type; |
131 | }; | 143 | }; |
132 | 144 | ||
145 | /** | ||
146 | * ipi_handler - Synchronisation handler. Executed by "other" CPUs. | ||
147 | * | ||
148 | * Returns nothing. | ||
149 | */ | ||
133 | static void ipi_handler(void *info) | 150 | static void ipi_handler(void *info) |
134 | /* [SUMMARY] Synchronisation handler. Executed by "other" CPUs. | ||
135 | [RETURNS] Nothing. | ||
136 | */ | ||
137 | { | 151 | { |
138 | #ifdef CONFIG_SMP | 152 | #ifdef CONFIG_SMP |
139 | struct set_mtrr_data *data = info; | 153 | struct set_mtrr_data *data = info; |
@@ -142,18 +156,19 @@ static void ipi_handler(void *info) | |||
142 | local_irq_save(flags); | 156 | local_irq_save(flags); |
143 | 157 | ||
144 | atomic_dec(&data->count); | 158 | atomic_dec(&data->count); |
145 | while(!atomic_read(&data->gate)) | 159 | while (!atomic_read(&data->gate)) |
146 | cpu_relax(); | 160 | cpu_relax(); |
147 | 161 | ||
148 | /* The master has cleared me to execute */ | 162 | /* The master has cleared me to execute */ |
149 | if (data->smp_reg != ~0U) | 163 | if (data->smp_reg != ~0U) { |
150 | mtrr_if->set(data->smp_reg, data->smp_base, | 164 | mtrr_if->set(data->smp_reg, data->smp_base, |
151 | data->smp_size, data->smp_type); | 165 | data->smp_size, data->smp_type); |
152 | else | 166 | } else { |
153 | mtrr_if->set_all(); | 167 | mtrr_if->set_all(); |
168 | } | ||
154 | 169 | ||
155 | atomic_dec(&data->count); | 170 | atomic_dec(&data->count); |
156 | while(atomic_read(&data->gate)) | 171 | while (atomic_read(&data->gate)) |
157 | cpu_relax(); | 172 | cpu_relax(); |
158 | 173 | ||
159 | atomic_dec(&data->count); | 174 | atomic_dec(&data->count); |
@@ -161,7 +176,8 @@ static void ipi_handler(void *info) | |||
161 | #endif | 176 | #endif |
162 | } | 177 | } |
163 | 178 | ||
164 | static inline int types_compatible(mtrr_type type1, mtrr_type type2) { | 179 | static inline int types_compatible(mtrr_type type1, mtrr_type type2) |
180 | { | ||
165 | return type1 == MTRR_TYPE_UNCACHABLE || | 181 | return type1 == MTRR_TYPE_UNCACHABLE || |
166 | type2 == MTRR_TYPE_UNCACHABLE || | 182 | type2 == MTRR_TYPE_UNCACHABLE || |
167 | (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) || | 183 | (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) || |
@@ -176,10 +192,10 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) { | |||
176 | * @type: mtrr type | 192 | * @type: mtrr type |
177 | * | 193 | * |
178 | * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: | 194 | * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: |
179 | * | 195 | * |
180 | * 1. Send IPI to do the following: | 196 | * 1. Send IPI to do the following: |
181 | * 2. Disable Interrupts | 197 | * 2. Disable Interrupts |
182 | * 3. Wait for all procs to do so | 198 | * 3. Wait for all procs to do so |
183 | * 4. Enter no-fill cache mode | 199 | * 4. Enter no-fill cache mode |
184 | * 5. Flush caches | 200 | * 5. Flush caches |
185 | * 6. Clear PGE bit | 201 | * 6. Clear PGE bit |
@@ -189,26 +205,27 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) { | |||
189 | * 10. Enable all range registers | 205 | * 10. Enable all range registers |
190 | * 11. Flush all TLBs and caches again | 206 | * 11. Flush all TLBs and caches again |
191 | * 12. Enter normal cache mode and reenable caching | 207 | * 12. Enter normal cache mode and reenable caching |
192 | * 13. Set PGE | 208 | * 13. Set PGE |
193 | * 14. Wait for buddies to catch up | 209 | * 14. Wait for buddies to catch up |
194 | * 15. Enable interrupts. | 210 | * 15. Enable interrupts. |
195 | * | 211 | * |
196 | * What does that mean for us? Well, first we set data.count to the number | 212 | * What does that mean for us? Well, first we set data.count to the number |
197 | * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait | 213 | * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait |
198 | * until it hits 0 and proceed. We set the data.gate flag and reset data.count. | 214 | * until it hits 0 and proceed. We set the data.gate flag and reset data.count. |
199 | * Meanwhile, they are waiting for that flag to be set. Once it's set, each | 215 | * Meanwhile, they are waiting for that flag to be set. Once it's set, each |
200 | * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it | 216 | * CPU goes through the transition of updating MTRRs. |
201 | * differently, so we call mtrr_if->set() callback and let them take care of it. | 217 | * The CPU vendors may each do it differently, |
202 | * When they're done, they again decrement data->count and wait for data.gate to | 218 | * so we call mtrr_if->set() callback and let them take care of it. |
203 | * be reset. | 219 | * When they're done, they again decrement data->count and wait for data.gate |
204 | * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag. | 220 | * to be reset. |
221 | * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag. | ||
205 | * Everyone then enables interrupts and we all continue on. | 222 | * Everyone then enables interrupts and we all continue on. |
206 | * | 223 | * |
207 | * Note that the mechanism is the same for UP systems, too; all the SMP stuff | 224 | * Note that the mechanism is the same for UP systems, too; all the SMP stuff |
208 | * becomes nops. | 225 | * becomes nops. |
209 | */ | 226 | */ |
210 | static void set_mtrr(unsigned int reg, unsigned long base, | 227 | static void |
211 | unsigned long size, mtrr_type type) | 228 | set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) |
212 | { | 229 | { |
213 | struct set_mtrr_data data; | 230 | struct set_mtrr_data data; |
214 | unsigned long flags; | 231 | unsigned long flags; |
@@ -218,121 +235,122 @@ static void set_mtrr(unsigned int reg, unsigned long base, | |||
218 | data.smp_size = size; | 235 | data.smp_size = size; |
219 | data.smp_type = type; | 236 | data.smp_type = type; |
220 | atomic_set(&data.count, num_booting_cpus() - 1); | 237 | atomic_set(&data.count, num_booting_cpus() - 1); |
221 | /* make sure data.count is visible before unleashing other CPUs */ | 238 | |
239 | /* Make sure data.count is visible before unleashing other CPUs */ | ||
222 | smp_wmb(); | 240 | smp_wmb(); |
223 | atomic_set(&data.gate,0); | 241 | atomic_set(&data.gate, 0); |
224 | 242 | ||
225 | /* Start the ball rolling on other CPUs */ | 243 | /* Start the ball rolling on other CPUs */ |
226 | if (smp_call_function(ipi_handler, &data, 0) != 0) | 244 | if (smp_call_function(ipi_handler, &data, 0) != 0) |
227 | panic("mtrr: timed out waiting for other CPUs\n"); | 245 | panic("mtrr: timed out waiting for other CPUs\n"); |
228 | 246 | ||
229 | local_irq_save(flags); | 247 | local_irq_save(flags); |
230 | 248 | ||
231 | while(atomic_read(&data.count)) | 249 | while (atomic_read(&data.count)) |
232 | cpu_relax(); | 250 | cpu_relax(); |
233 | 251 | ||
234 | /* ok, reset count and toggle gate */ | 252 | /* Ok, reset count and toggle gate */ |
235 | atomic_set(&data.count, num_booting_cpus() - 1); | 253 | atomic_set(&data.count, num_booting_cpus() - 1); |
236 | smp_wmb(); | 254 | smp_wmb(); |
237 | atomic_set(&data.gate,1); | 255 | atomic_set(&data.gate, 1); |
238 | 256 | ||
239 | /* do our MTRR business */ | 257 | /* Do our MTRR business */ |
240 | 258 | ||
241 | /* HACK! | 259 | /* |
260 | * HACK! | ||
242 | * We use this same function to initialize the mtrrs on boot. | 261 | * We use this same function to initialize the mtrrs on boot. |
243 | * The state of the boot cpu's mtrrs has been saved, and we want | 262 | * The state of the boot cpu's mtrrs has been saved, and we want |
244 | * to replicate across all the APs. | 263 | * to replicate across all the APs. |
245 | * If we're doing that @reg is set to something special... | 264 | * If we're doing that @reg is set to something special... |
246 | */ | 265 | */ |
247 | if (reg != ~0U) | 266 | if (reg != ~0U) |
248 | mtrr_if->set(reg,base,size,type); | 267 | mtrr_if->set(reg, base, size, type); |
249 | 268 | ||
250 | /* wait for the others */ | 269 | /* Wait for the others */ |
251 | while(atomic_read(&data.count)) | 270 | while (atomic_read(&data.count)) |
252 | cpu_relax(); | 271 | cpu_relax(); |
253 | 272 | ||
254 | atomic_set(&data.count, num_booting_cpus() - 1); | 273 | atomic_set(&data.count, num_booting_cpus() - 1); |
255 | smp_wmb(); | 274 | smp_wmb(); |
256 | atomic_set(&data.gate,0); | 275 | atomic_set(&data.gate, 0); |
257 | 276 | ||
258 | /* | 277 | /* |
259 | * Wait here for everyone to have seen the gate change | 278 | * Wait here for everyone to have seen the gate change |
260 | * So we're the last ones to touch 'data' | 279 | * So we're the last ones to touch 'data' |
261 | */ | 280 | */ |
262 | while(atomic_read(&data.count)) | 281 | while (atomic_read(&data.count)) |
263 | cpu_relax(); | 282 | cpu_relax(); |
264 | 283 | ||
265 | local_irq_restore(flags); | 284 | local_irq_restore(flags); |
266 | } | 285 | } |
267 | 286 | ||
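The count/gate handshake described in the comment above can be exercised outside the kernel. A hedged pthread analogue of the three phases (NCPUS and the printouts are invented; C11 seq_cst atomics stand in for atomic_t plus smp_wmb(), and a busy spin stands in for cpu_relax()):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NCPUS 4

    static atomic_int count;
    static atomic_int gate;

    static void *slave(void *arg)
    {
        /* phase 1: check in, then spin until the master opens the gate */
        atomic_fetch_sub(&count, 1);
        while (!atomic_load(&gate))
            ;                                   /* cpu_relax() stand-in */

        printf("cpu %ld: set MTRRs here\n", (long)arg);

        /* phase 2: report done, spin until the gate closes again */
        atomic_fetch_sub(&count, 1);
        while (atomic_load(&gate))
            ;

        /* phase 3: final check-out, so the master may release the data */
        atomic_fetch_sub(&count, 1);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NCPUS - 1];
        long i;

        atomic_store(&count, NCPUS - 1);
        atomic_store(&gate, 0);
        for (i = 0; i < NCPUS - 1; i++)
            pthread_create(&t[i], NULL, slave, (void *)i);

        while (atomic_load(&count))             /* wait for all check-ins */
            ;
        atomic_store(&count, NCPUS - 1);
        atomic_store(&gate, 1);                 /* unleash the others */

        printf("master: set MTRRs here\n");

        while (atomic_load(&count))             /* wait for their updates */
            ;
        atomic_store(&count, NCPUS - 1);
        atomic_store(&gate, 0);                 /* close the gate */

        while (atomic_load(&count))             /* wait for final check-out */
            ;
        for (i = 0; i < NCPUS - 1; i++)
            pthread_join(t[i], NULL);
        return 0;
    }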
268 | /** | 287 | /** |
269 | * mtrr_add_page - Add a memory type region | 288 | * mtrr_add_page - Add a memory type region |
270 | * @base: Physical base address of region in pages (in units of 4 kB!) | 289 | * @base: Physical base address of region in pages (in units of 4 kB!) |
271 | * @size: Physical size of region in pages (4 kB) | 290 | * @size: Physical size of region in pages (4 kB) |
272 | * @type: Type of MTRR desired | 291 | * @type: Type of MTRR desired |
273 | * @increment: If this is true do usage counting on the region | 292 | * @increment: If this is true do usage counting on the region |
274 | * | 293 | * |
275 | * Memory type region registers control the caching on newer Intel and | 294 | * Memory type region registers control the caching on newer Intel and |
276 | * non-Intel processors. This function allows drivers to request that an | 295 | * non-Intel processors. This function allows drivers to request that an |
277 | * MTRR be added. The details and hardware specifics of each processor's | 296 | * MTRR be added. The details and hardware specifics of each processor's |
278 | * implementation are hidden from the caller, but nevertheless the | 297 | * implementation are hidden from the caller, but nevertheless the |
279 | * caller should expect to need to provide a power of two size on an | 298 | * caller should expect to need to provide a power of two size on an |
280 | * equivalent power of two boundary. | 299 | * equivalent power of two boundary. |
281 | * | 300 | * |
282 | * If the region cannot be added either because all regions are in use | 301 | * If the region cannot be added either because all regions are in use |
283 | * or the CPU cannot support it a negative value is returned. On success | 302 | * or the CPU cannot support it a negative value is returned. On success |
284 | * the register number for this entry is returned, but should be treated | 303 | * the register number for this entry is returned, but should be treated |
285 | * as a cookie only. | 304 | * as a cookie only. |
286 | * | 305 | * |
287 | * On a multiprocessor machine the changes are made to all processors. | 306 | * On a multiprocessor machine the changes are made to all processors. |
288 | * This is required on x86 by the Intel processors. | 307 | * This is required on x86 by the Intel processors. |
289 | * | 308 | * |
290 | * The available types are | 309 | * The available types are |
291 | * | 310 | * |
292 | * %MTRR_TYPE_UNCACHABLE - No caching | 311 | * %MTRR_TYPE_UNCACHABLE - No caching |
293 | * | 312 | * |
294 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever | 313 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever |
295 | * | 314 | * |
296 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts | 315 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts |
297 | * | 316 | * |
298 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes | 317 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes |
299 | * | 318 | * |
300 | * BUGS: Needs a quiet flag for the cases where drivers do not mind | 319 | * BUGS: Needs a quiet flag for the cases where drivers do not mind |
301 | * failures and do not wish system log messages to be sent. | 320 | * failures and do not wish system log messages to be sent. |
302 | */ | 321 | */ |
303 | 322 | int mtrr_add_page(unsigned long base, unsigned long size, | |
304 | int mtrr_add_page(unsigned long base, unsigned long size, | ||
305 | unsigned int type, bool increment) | 323 | unsigned int type, bool increment) |
306 | { | 324 | { |
325 | unsigned long lbase, lsize; | ||
307 | int i, replace, error; | 326 | int i, replace, error; |
308 | mtrr_type ltype; | 327 | mtrr_type ltype; |
309 | unsigned long lbase, lsize; | ||
310 | 328 | ||
311 | if (!mtrr_if) | 329 | if (!mtrr_if) |
312 | return -ENXIO; | 330 | return -ENXIO; |
313 | 331 | ||
314 | if ((error = mtrr_if->validate_add_page(base,size,type))) | 332 | error = mtrr_if->validate_add_page(base, size, type); |
333 | if (error) | ||
315 | return error; | 334 | return error; |
316 | 335 | ||
317 | if (type >= MTRR_NUM_TYPES) { | 336 | if (type >= MTRR_NUM_TYPES) { |
318 | printk(KERN_WARNING "mtrr: type: %u invalid\n", type); | 337 | pr_warning("mtrr: type: %u invalid\n", type); |
319 | return -EINVAL; | 338 | return -EINVAL; |
320 | } | 339 | } |
321 | 340 | ||
322 | /* If the type is WC, check that this processor supports it */ | 341 | /* If the type is WC, check that this processor supports it */ |
323 | if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { | 342 | if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { |
324 | printk(KERN_WARNING | 343 | pr_warning("mtrr: your processor doesn't support write-combining\n"); |
325 | "mtrr: your processor doesn't support write-combining\n"); | ||
326 | return -ENOSYS; | 344 | return -ENOSYS; |
327 | } | 345 | } |
328 | 346 | ||
329 | if (!size) { | 347 | if (!size) { |
330 | printk(KERN_WARNING "mtrr: zero sized request\n"); | 348 | pr_warning("mtrr: zero sized request\n"); |
331 | return -EINVAL; | 349 | return -EINVAL; |
332 | } | 350 | } |
333 | 351 | ||
334 | if (base & size_or_mask || size & size_or_mask) { | 352 | if (base & size_or_mask || size & size_or_mask) { |
335 | printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n"); | 353 | pr_warning("mtrr: base or size exceeds the MTRR width\n"); |
336 | return -EINVAL; | 354 | return -EINVAL; |
337 | } | 355 | } |
338 | 356 | ||
@@ -341,36 +359,40 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
341 | 359 | ||
342 | /* No CPU hotplug when we change MTRR entries */ | 360 | /* No CPU hotplug when we change MTRR entries */ |
343 | get_online_cpus(); | 361 | get_online_cpus(); |
344 | /* Search for existing MTRR */ | 362 | |
363 | /* Search for existing MTRR */ | ||
345 | mutex_lock(&mtrr_mutex); | 364 | mutex_lock(&mtrr_mutex); |
346 | for (i = 0; i < num_var_ranges; ++i) { | 365 | for (i = 0; i < num_var_ranges; ++i) { |
347 | mtrr_if->get(i, &lbase, &lsize, <ype); | 366 | mtrr_if->get(i, &lbase, &lsize, <ype); |
348 | if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase) | 367 | if (!lsize || base > lbase + lsize - 1 || |
368 | base + size - 1 < lbase) | ||
349 | continue; | 369 | continue; |
350 | /* At this point we know there is some kind of overlap/enclosure */ | 370 | /* |
371 | * At this point we know there is some kind of | ||
372 | * overlap/enclosure | ||
373 | */ | ||
351 | if (base < lbase || base + size - 1 > lbase + lsize - 1) { | 374 | if (base < lbase || base + size - 1 > lbase + lsize - 1) { |
352 | if (base <= lbase && base + size - 1 >= lbase + lsize - 1) { | 375 | if (base <= lbase && |
376 | base + size - 1 >= lbase + lsize - 1) { | ||
353 | /* New region encloses an existing region */ | 377 | /* New region encloses an existing region */ |
354 | if (type == ltype) { | 378 | if (type == ltype) { |
355 | replace = replace == -1 ? i : -2; | 379 | replace = replace == -1 ? i : -2; |
356 | continue; | 380 | continue; |
357 | } | 381 | } else if (types_compatible(type, ltype)) |
358 | else if (types_compatible(type, ltype)) | ||
359 | continue; | 382 | continue; |
360 | } | 383 | } |
361 | printk(KERN_WARNING | 384 | pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing" |
362 | "mtrr: 0x%lx000,0x%lx000 overlaps existing" | 385 | " 0x%lx000,0x%lx000\n", base, size, lbase, |
363 | " 0x%lx000,0x%lx000\n", base, size, lbase, | 386 | lsize); |
364 | lsize); | ||
365 | goto out; | 387 | goto out; |
366 | } | 388 | } |
367 | /* New region is enclosed by an existing region */ | 389 | /* New region is enclosed by an existing region */ |
368 | if (ltype != type) { | 390 | if (ltype != type) { |
369 | if (types_compatible(type, ltype)) | 391 | if (types_compatible(type, ltype)) |
370 | continue; | 392 | continue; |
371 | printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", | 393 | pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", |
372 | base, size, mtrr_attrib_to_str(ltype), | 394 | base, size, mtrr_attrib_to_str(ltype), |
373 | mtrr_attrib_to_str(type)); | 395 | mtrr_attrib_to_str(type)); |
374 | goto out; | 396 | goto out; |
375 | } | 397 | } |
376 | if (increment) | 398 | if (increment) |
@@ -378,7 +400,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
378 | error = i; | 400 | error = i; |
379 | goto out; | 401 | goto out; |
380 | } | 402 | } |
381 | /* Search for an empty MTRR */ | 403 | /* Search for an empty MTRR */ |
382 | i = mtrr_if->get_free_region(base, size, replace); | 404 | i = mtrr_if->get_free_region(base, size, replace); |
383 | if (i >= 0) { | 405 | if (i >= 0) { |
384 | set_mtrr(i, base, size, type); | 406 | set_mtrr(i, base, size, type); |
@@ -393,8 +415,9 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
393 | mtrr_usage_table[replace] = 0; | 415 | mtrr_usage_table[replace] = 0; |
394 | } | 416 | } |
395 | } | 417 | } |
396 | } else | 418 | } else { |
397 | printk(KERN_INFO "mtrr: no more MTRRs available\n"); | 419 | pr_info("mtrr: no more MTRRs available\n"); |
420 | } | ||
398 | error = i; | 421 | error = i; |
399 | out: | 422 | out: |
400 | mutex_unlock(&mtrr_mutex); | 423 | mutex_unlock(&mtrr_mutex); |
@@ -405,10 +428,8 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
405 | static int mtrr_check(unsigned long base, unsigned long size) | 428 | static int mtrr_check(unsigned long base, unsigned long size) |
406 | { | 429 | { |
407 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { | 430 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { |
408 | printk(KERN_WARNING | 431 | pr_warning("mtrr: size and base must be multiples of 4 kiB\n"); |
409 | "mtrr: size and base must be multiples of 4 kiB\n"); | 432 | pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base); |
410 | printk(KERN_DEBUG | ||
411 | "mtrr: size: 0x%lx base: 0x%lx\n", size, base); | ||
412 | dump_stack(); | 433 | dump_stack(); |
413 | return -1; | 434 | return -1; |
414 | } | 435 | } |
@@ -416,66 +437,64 @@ static int mtrr_check(unsigned long base, unsigned long size) | |||
416 | } | 437 | } |
417 | 438 | ||
418 | /** | 439 | /** |
419 | * mtrr_add - Add a memory type region | 440 | * mtrr_add - Add a memory type region |
420 | * @base: Physical base address of region | 441 | * @base: Physical base address of region |
421 | * @size: Physical size of region | 442 | * @size: Physical size of region |
422 | * @type: Type of MTRR desired | 443 | * @type: Type of MTRR desired |
423 | * @increment: If this is true do usage counting on the region | 444 | * @increment: If this is true do usage counting on the region |
424 | * | 445 | * |
425 | * Memory type region registers control the caching on newer Intel and | 446 | * Memory type region registers control the caching on newer Intel and |
426 | * non-Intel processors. This function allows drivers to request that an | 447 | * non-Intel processors. This function allows drivers to request that an |
427 | * MTRR be added. The details and hardware specifics of each processor's | 448 | * MTRR be added. The details and hardware specifics of each processor's |
428 | * implementation are hidden from the caller, but nevertheless the | 449 | * implementation are hidden from the caller, but nevertheless the |
429 | * caller should expect to need to provide a power of two size on an | 450 | * caller should expect to need to provide a power of two size on an |
430 | * equivalent power of two boundary. | 451 | * equivalent power of two boundary. |
431 | * | 452 | * |
432 | * If the region cannot be added either because all regions are in use | 453 | * If the region cannot be added either because all regions are in use |
433 | * or the CPU cannot support it a negative value is returned. On success | 454 | * or the CPU cannot support it a negative value is returned. On success |
434 | * the register number for this entry is returned, but should be treated | 455 | * the register number for this entry is returned, but should be treated |
435 | * as a cookie only. | 456 | * as a cookie only. |
436 | * | 457 | * |
437 | * On a multiprocessor machine the changes are made to all processors. | 458 | * On a multiprocessor machine the changes are made to all processors. |
438 | * This is required on x86 by the Intel processors. | 459 | * This is required on x86 by the Intel processors. |
439 | * | 460 | * |
440 | * The available types are | 461 | * The available types are |
441 | * | 462 | * |
442 | * %MTRR_TYPE_UNCACHABLE - No caching | 463 | * %MTRR_TYPE_UNCACHABLE - No caching |
443 | * | 464 | * |
444 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever | 465 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever |
445 | * | 466 | * |
446 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts | 467 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts |
447 | * | 468 | * |
448 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes | 469 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes |
449 | * | 470 | * |
450 | * BUGS: Needs a quiet flag for the cases where drivers do not mind | 471 | * BUGS: Needs a quiet flag for the cases where drivers do not mind |
451 | * failures and do not wish system log messages to be sent. | 472 | * failures and do not wish system log messages to be sent. |
452 | */ | 473 | */ |
453 | 474 | int mtrr_add(unsigned long base, unsigned long size, unsigned int type, | |
454 | int | 475 | bool increment) |
455 | mtrr_add(unsigned long base, unsigned long size, unsigned int type, | ||
456 | bool increment) | ||
457 | { | 476 | { |
458 | if (mtrr_check(base, size)) | 477 | if (mtrr_check(base, size)) |
459 | return -EINVAL; | 478 | return -EINVAL; |
460 | return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, | 479 | return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, |
461 | increment); | 480 | increment); |
462 | } | 481 | } |
482 | EXPORT_SYMBOL(mtrr_add); | ||
463 | 483 | ||
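A sketch of the calling convention the kerneldoc above describes, as a hypothetical driver might use it (the base address and size are invented; the negative-on-failure / register-cookie-on-success contract is the documented one):

    /* hypothetical driver code, not from this patch */
    static int example_enable_wc_framebuffer(void)
    {
        /* 64 MB of framebuffer at an invented bus address */
        int reg = mtrr_add(0xd0000000UL, 0x4000000UL,
                           MTRR_TYPE_WRCOMB, true);

        if (reg < 0)    /* all registers in use, or no WC support */
            return reg;

        /* ... map and use the region; on teardown, pass the cookie back: */
        mtrr_del(reg, 0xd0000000UL, 0x4000000UL);
        return 0;
    }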
464 | /** | 484 | /** |
465 | * mtrr_del_page - delete a memory type region | 485 | * mtrr_del_page - delete a memory type region |
466 | * @reg: Register returned by mtrr_add | 486 | * @reg: Register returned by mtrr_add |
467 | * @base: Physical base address | 487 | * @base: Physical base address |
468 | * @size: Size of region | 488 | * @size: Size of region |
469 | * | 489 | * |
470 | * If register is supplied then base and size are ignored. This is | 490 | * If register is supplied then base and size are ignored. This is |
471 | * how drivers should call it. | 491 | * how drivers should call it. |
472 | * | 492 | * |
473 | * Releases an MTRR region. If the usage count drops to zero the | 493 | * Releases an MTRR region. If the usage count drops to zero the |
474 | * register is freed and the region returns to default state. | 494 | * register is freed and the region returns to default state. |
475 | * On success the register is returned, on failure a negative error | 495 | * On success the register is returned, on failure a negative error |
476 | * code. | 496 | * code. |
477 | */ | 497 | */ |
478 | |||
479 | int mtrr_del_page(int reg, unsigned long base, unsigned long size) | 498 | int mtrr_del_page(int reg, unsigned long base, unsigned long size) |
480 | { | 499 | { |
481 | int i, max; | 500 | int i, max; |
@@ -500,22 +519,22 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) | |||
500 | } | 519 | } |
501 | } | 520 | } |
502 | if (reg < 0) { | 521 | if (reg < 0) { |
503 | printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base, | 522 | pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n", |
504 | size); | 523 | base, size); |
505 | goto out; | 524 | goto out; |
506 | } | 525 | } |
507 | } | 526 | } |
508 | if (reg >= max) { | 527 | if (reg >= max) { |
509 | printk(KERN_WARNING "mtrr: register: %d too big\n", reg); | 528 | pr_warning("mtrr: register: %d too big\n", reg); |
510 | goto out; | 529 | goto out; |
511 | } | 530 | } |
512 | mtrr_if->get(reg, &lbase, &lsize, <ype); | 531 | mtrr_if->get(reg, &lbase, &lsize, <ype); |
513 | if (lsize < 1) { | 532 | if (lsize < 1) { |
514 | printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg); | 533 | pr_warning("mtrr: MTRR %d not used\n", reg); |
515 | goto out; | 534 | goto out; |
516 | } | 535 | } |
517 | if (mtrr_usage_table[reg] < 1) { | 536 | if (mtrr_usage_table[reg] < 1) { |
518 | printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg); | 537 | pr_warning("mtrr: reg: %d has count=0\n", reg); |
519 | goto out; | 538 | goto out; |
520 | } | 539 | } |
521 | if (--mtrr_usage_table[reg] < 1) | 540 | if (--mtrr_usage_table[reg] < 1) |
@@ -526,33 +545,31 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) | |||
526 | put_online_cpus(); | 545 | put_online_cpus(); |
527 | return error; | 546 | return error; |
528 | } | 547 | } |
548 | |||
529 | /** | 549 | /** |
530 | * mtrr_del - delete a memory type region | 550 | * mtrr_del - delete a memory type region |
531 | * @reg: Register returned by mtrr_add | 551 | * @reg: Register returned by mtrr_add |
532 | * @base: Physical base address | 552 | * @base: Physical base address |
533 | * @size: Size of region | 553 | * @size: Size of region |
534 | * | 554 | * |
535 | * If register is supplied then base and size are ignored. This is | 555 | * If register is supplied then base and size are ignored. This is |
536 | * how drivers should call it. | 556 | * how drivers should call it. |
537 | * | 557 | * |
538 | * Releases an MTRR region. If the usage count drops to zero the | 558 | * Releases an MTRR region. If the usage count drops to zero the |
539 | * register is freed and the region returns to default state. | 559 | * register is freed and the region returns to default state. |
540 | * On success the register is returned, on failure a negative error | 560 | * On success the register is returned, on failure a negative error |
541 | * code. | 561 | * code. |
542 | */ | 562 | */ |
543 | 563 | int mtrr_del(int reg, unsigned long base, unsigned long size) | |
544 | int | ||
545 | mtrr_del(int reg, unsigned long base, unsigned long size) | ||
546 | { | 564 | { |
547 | if (mtrr_check(base, size)) | 565 | if (mtrr_check(base, size)) |
548 | return -EINVAL; | 566 | return -EINVAL; |
549 | return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); | 567 | return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); |
550 | } | 568 | } |
551 | |||
552 | EXPORT_SYMBOL(mtrr_add); | ||
553 | EXPORT_SYMBOL(mtrr_del); | 569 | EXPORT_SYMBOL(mtrr_del); |
554 | 570 | ||
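The userspace face of the same add/del pair is /proc/mtrr. A small runnable sketch (root required; base and size are invented, the type name matches what mtrr_attrib_to_str() prints):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/mtrr", "w");

        if (!f) {
            perror("/proc/mtrr");
            return 1;
        }
        /* same contract as mtrr_add(): 4 kB aligned, power-of-two size */
        fprintf(f, "base=0xd0000000 size=0x4000000 type=write-combining\n");
        return fclose(f) ? 1 : 0;   /* the write error surfaces at fclose */
    }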
555 | /* HACK ALERT! | 571 | /* |
572 | * HACK ALERT! | ||
556 | * These should be called implicitly, but we can't yet until all the initcall | 573 | * These should be called implicitly, but we can't yet until all the initcall |
557 | * stuff is done... | 574 | * stuff is done... |
558 | */ | 575 | */ |
@@ -576,29 +593,28 @@ struct mtrr_value { | |||
576 | 593 | ||
577 | static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES]; | 594 | static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES]; |
578 | 595 | ||
579 | static int mtrr_save(struct sys_device * sysdev, pm_message_t state) | 596 | static int mtrr_save(struct sys_device *sysdev, pm_message_t state) |
580 | { | 597 | { |
581 | int i; | 598 | int i; |
582 | 599 | ||
583 | for (i = 0; i < num_var_ranges; i++) { | 600 | for (i = 0; i < num_var_ranges; i++) { |
584 | mtrr_if->get(i, | 601 | mtrr_if->get(i, &mtrr_value[i].lbase, |
585 | &mtrr_value[i].lbase, | 602 | &mtrr_value[i].lsize, |
586 | &mtrr_value[i].lsize, | 603 | &mtrr_value[i].ltype); |
587 | &mtrr_value[i].ltype); | ||
588 | } | 604 | } |
589 | return 0; | 605 | return 0; |
590 | } | 606 | } |
591 | 607 | ||
592 | static int mtrr_restore(struct sys_device * sysdev) | 608 | static int mtrr_restore(struct sys_device *sysdev) |
593 | { | 609 | { |
594 | int i; | 610 | int i; |
595 | 611 | ||
596 | for (i = 0; i < num_var_ranges; i++) { | 612 | for (i = 0; i < num_var_ranges; i++) { |
597 | if (mtrr_value[i].lsize) | 613 | if (mtrr_value[i].lsize) { |
598 | set_mtrr(i, | 614 | set_mtrr(i, mtrr_value[i].lbase, |
599 | mtrr_value[i].lbase, | 615 | mtrr_value[i].lsize, |
600 | mtrr_value[i].lsize, | 616 | mtrr_value[i].ltype); |
601 | mtrr_value[i].ltype); | 617 | } |
602 | } | 618 | } |
603 | return 0; | 619 | return 0; |
604 | } | 620 | } |
@@ -615,26 +631,29 @@ int __initdata changed_by_mtrr_cleanup; | |||
615 | /** | 631 | /** |
616 | * mtrr_bp_init - initialize mtrrs on the boot CPU | 632 | * mtrr_bp_init - initialize mtrrs on the boot CPU |
617 | * | 633 | * |
618 | * This needs to be called early; before any of the other CPUs are | 634 | * This needs to be called early; before any of the other CPUs are |
619 | * initialized (i.e. before smp_init()). | 635 | * initialized (i.e. before smp_init()). |
620 | * | 636 | * |
621 | */ | 637 | */ |
622 | void __init mtrr_bp_init(void) | 638 | void __init mtrr_bp_init(void) |
623 | { | 639 | { |
624 | u32 phys_addr; | 640 | u32 phys_addr; |
641 | |||
625 | init_ifs(); | 642 | init_ifs(); |
626 | 643 | ||
627 | phys_addr = 32; | 644 | phys_addr = 32; |
628 | 645 | ||
629 | if (cpu_has_mtrr) { | 646 | if (cpu_has_mtrr) { |
630 | mtrr_if = &generic_mtrr_ops; | 647 | mtrr_if = &generic_mtrr_ops; |
631 | size_or_mask = 0xff000000; /* 36 bits */ | 648 | size_or_mask = 0xff000000; /* 36 bits */ |
632 | size_and_mask = 0x00f00000; | 649 | size_and_mask = 0x00f00000; |
633 | phys_addr = 36; | 650 | phys_addr = 36; |
634 | 651 | ||
635 | /* This is an AMD specific MSR, but we assume(hope?) that | 652 | /* |
636 | Intel will implement it to when they extend the address | 653 | * This is an AMD-specific MSR, but we assume (hope?) that |
637 | bus of the Xeon. */ | 654 | * Intel will implement it too when they extend the address |
655 | * bus of the Xeon. | ||
656 | */ | ||
638 | if (cpuid_eax(0x80000000) >= 0x80000008) { | 657 | if (cpuid_eax(0x80000000) >= 0x80000008) { |
639 | phys_addr = cpuid_eax(0x80000008) & 0xff; | 658 | phys_addr = cpuid_eax(0x80000008) & 0xff; |
640 | /* CPUID workaround for Intel 0F33/0F34 CPU */ | 659 | /* CPUID workaround for Intel 0F33/0F34 CPU */ |
@@ -649,9 +668,11 @@ void __init mtrr_bp_init(void) | |||
649 | size_and_mask = ~size_or_mask & 0xfffff00000ULL; | 668 | size_and_mask = ~size_or_mask & 0xfffff00000ULL; |
650 | } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && | 669 | } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && |
651 | boot_cpu_data.x86 == 6) { | 670 | boot_cpu_data.x86 == 6) { |
652 | /* VIA C* family have Intel style MTRRs, but | 671 | /* |
653 | don't support PAE */ | 672 | * VIA C* family have Intel style MTRRs, |
654 | size_or_mask = 0xfff00000; /* 32 bits */ | 673 | * but don't support PAE |
674 | */ | ||
675 | size_or_mask = 0xfff00000; /* 32 bits */ | ||
655 | size_and_mask = 0; | 676 | size_and_mask = 0; |
656 | phys_addr = 32; | 677 | phys_addr = 32; |
657 | } | 678 | } |
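A worked check of the mask arithmetic above: the masks apply to page frame numbers (physical address >> PAGE_SHIFT), so a 36-bit address space leaves 24 valid PFN bits, matching the hard-coded 0xff000000. This mirrors the size_or_mask computation the surrounding function uses for CPUID-reported widths:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned int phys_addr = 36;    /* physical address bits */
        uint64_t size_or_mask =
            ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);

        /* low 32 bits: 0xff000000, the constant hard-coded above */
        printf("size_or_mask = %#llx\n",
               (unsigned long long)(size_or_mask & 0xffffffffULL));
        return 0;
    }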
@@ -694,7 +715,6 @@ void __init mtrr_bp_init(void) | |||
694 | changed_by_mtrr_cleanup = 1; | 715 | changed_by_mtrr_cleanup = 1; |
695 | mtrr_if->set_all(); | 716 | mtrr_if->set_all(); |
696 | } | 717 | } |
697 | |||
698 | } | 718 | } |
699 | } | 719 | } |
700 | } | 720 | } |
@@ -706,12 +726,17 @@ void mtrr_ap_init(void) | |||
706 | if (!mtrr_if || !use_intel()) | 726 | if (!mtrr_if || !use_intel()) |
707 | return; | 727 | return; |
708 | /* | 728 | /* |
709 | * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed, | 729 | * Ideally we should hold mtrr_mutex here to avoid MTRR entries |
710 | * but this routine will be called in cpu boot time, holding the lock | 730 | * being changed, but this routine will be called at CPU boot time, |
711 | * breaks it. This routine is called in two cases: 1.very earily time | 731 | * when holding the lock would break it. |
712 | * of software resume, when there absolutely isn't mtrr entry changes; | 732 | * |
713 | * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to | 733 | * This routine is called in two cases: |
714 | * prevent mtrr entry changes | 734 | * |
735 | * 1. very early in software resume, when there absolutely | ||
736 | * are no MTRR entry changes; | ||
737 | * | ||
738 | * 2. CPU hot-add time. We let mtrr_add/del_page hold the cpuhotplug | ||
739 | * lock to prevent MTRR entry changes. | ||
715 | */ | 740 | */ |
716 | local_irq_save(flags); | 741 | local_irq_save(flags); |
717 | 742 | ||
@@ -732,19 +757,23 @@ static int __init mtrr_init_finialize(void) | |||
732 | { | 757 | { |
733 | if (!mtrr_if) | 758 | if (!mtrr_if) |
734 | return 0; | 759 | return 0; |
760 | |||
735 | if (use_intel()) { | 761 | if (use_intel()) { |
736 | if (!changed_by_mtrr_cleanup) | 762 | if (!changed_by_mtrr_cleanup) |
737 | mtrr_state_warn(); | 763 | mtrr_state_warn(); |
738 | } else { | 764 | return 0; |
739 | /* The CPUs haven't MTRR and seem to not support SMP. They have | ||
740 | * specific drivers, we use a tricky method to support | ||
741 | * suspend/resume for them. | ||
742 | * TBD: is there any system with such CPU which supports | ||
743 | * suspend/resume? if no, we should remove the code. | ||
744 | */ | ||
745 | sysdev_driver_register(&cpu_sysdev_class, | ||
746 | &mtrr_sysdev_driver); | ||
747 | } | 765 | } |
766 | |||
767 | /* | ||
768 | * Such CPUs have no MTRRs and seem not to support SMP. They have | ||
769 | * specific drivers; we use a tricky method to support | ||
770 | * suspend/resume for them. | ||
771 | * | ||
772 | * TBD: is there any system with such a CPU which supports | ||
773 | * suspend/resume? If not, we should remove the code. | ||
774 | */ | ||
775 | sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver); | ||
776 | |||
748 | return 0; | 777 | return 0; |
749 | } | 778 | } |
750 | subsys_initcall(mtrr_init_finialize); | 779 | subsys_initcall(mtrr_init_finialize); |
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h index 7538b767f206..a501dee9a87a 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * local mtrr defines. | 2 | * local MTRR defines. |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
@@ -14,13 +14,12 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; | |||
14 | struct mtrr_ops { | 14 | struct mtrr_ops { |
15 | u32 vendor; | 15 | u32 vendor; |
16 | u32 use_intel_if; | 16 | u32 use_intel_if; |
17 | // void (*init)(void); | ||
18 | void (*set)(unsigned int reg, unsigned long base, | 17 | void (*set)(unsigned int reg, unsigned long base, |
19 | unsigned long size, mtrr_type type); | 18 | unsigned long size, mtrr_type type); |
20 | void (*set_all)(void); | 19 | void (*set_all)(void); |
21 | 20 | ||
22 | void (*get)(unsigned int reg, unsigned long *base, | 21 | void (*get)(unsigned int reg, unsigned long *base, |
23 | unsigned long *size, mtrr_type * type); | 22 | unsigned long *size, mtrr_type *type); |
24 | int (*get_free_region)(unsigned long base, unsigned long size, | 23 | int (*get_free_region)(unsigned long base, unsigned long size, |
25 | int replace_reg); | 24 | int replace_reg); |
26 | int (*validate_add_page)(unsigned long base, unsigned long size, | 25 | int (*validate_add_page)(unsigned long base, unsigned long size, |
@@ -39,11 +38,11 @@ extern int positive_have_wrcomb(void); | |||
39 | 38 | ||
40 | /* library functions for processor-specific routines */ | 39 | /* library functions for processor-specific routines */ |
41 | struct set_mtrr_context { | 40 | struct set_mtrr_context { |
42 | unsigned long flags; | 41 | unsigned long flags; |
43 | unsigned long cr4val; | 42 | unsigned long cr4val; |
44 | u32 deftype_lo; | 43 | u32 deftype_lo; |
45 | u32 deftype_hi; | 44 | u32 deftype_hi; |
46 | u32 ccr3; | 45 | u32 ccr3; |
47 | }; | 46 | }; |
48 | 47 | ||
49 | void set_mtrr_done(struct set_mtrr_context *ctxt); | 48 | void set_mtrr_done(struct set_mtrr_context *ctxt); |
@@ -54,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index, | |||
54 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi); | 53 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi); |
55 | void get_mtrr_state(void); | 54 | void get_mtrr_state(void); |
56 | 55 | ||
57 | extern void set_mtrr_ops(struct mtrr_ops * ops); | 56 | extern void set_mtrr_ops(struct mtrr_ops *ops); |
58 | 57 | ||
59 | extern u64 size_or_mask, size_and_mask; | 58 | extern u64 size_or_mask, size_and_mask; |
60 | extern struct mtrr_ops * mtrr_if; | 59 | extern struct mtrr_ops *mtrr_if; |
61 | 60 | ||
62 | #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd) | 61 | #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd) |
63 | #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1) | 62 | #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1) |
diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c index 1f5fb1588d1f..dfc80b4e6b0d 100644 --- a/arch/x86/kernel/cpu/mtrr/state.c +++ b/arch/x86/kernel/cpu/mtrr/state.c | |||
@@ -1,24 +1,25 @@ | |||
1 | #include <linux/mm.h> | ||
2 | #include <linux/init.h> | 1 | #include <linux/init.h> |
3 | #include <asm/io.h> | 2 | #include <linux/io.h> |
4 | #include <asm/mtrr.h> | 3 | #include <linux/mm.h> |
5 | #include <asm/msr.h> | 4 | |
6 | #include <asm/processor-cyrix.h> | 5 | #include <asm/processor-cyrix.h> |
7 | #include <asm/processor-flags.h> | 6 | #include <asm/processor-flags.h> |
8 | #include "mtrr.h" | 7 | #include <asm/mtrr.h> |
8 | #include <asm/msr.h> | ||
9 | 9 | ||
10 | #include "mtrr.h" | ||
10 | 11 | ||
11 | /* Put the processor into a state where MTRRs can be safely set */ | 12 | /* Put the processor into a state where MTRRs can be safely set */ |
12 | void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) | 13 | void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) |
13 | { | 14 | { |
14 | unsigned int cr0; | 15 | unsigned int cr0; |
15 | 16 | ||
16 | /* Disable interrupts locally */ | 17 | /* Disable interrupts locally */ |
17 | local_irq_save(ctxt->flags); | 18 | local_irq_save(ctxt->flags); |
18 | 19 | ||
19 | if (use_intel() || is_cpu(CYRIX)) { | 20 | if (use_intel() || is_cpu(CYRIX)) { |
20 | 21 | ||
21 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ | 22 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ |
22 | if (cpu_has_pge) { | 23 | if (cpu_has_pge) { |
23 | ctxt->cr4val = read_cr4(); | 24 | ctxt->cr4val = read_cr4(); |
24 | write_cr4(ctxt->cr4val & ~X86_CR4_PGE); | 25 | write_cr4(ctxt->cr4val & ~X86_CR4_PGE); |
@@ -33,50 +34,61 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) | |||
33 | write_cr0(cr0); | 34 | write_cr0(cr0); |
34 | wbinvd(); | 35 | wbinvd(); |
35 | 36 | ||
36 | if (use_intel()) | 37 | if (use_intel()) { |
37 | /* Save MTRR state */ | 38 | /* Save MTRR state */ |
38 | rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); | 39 | rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); |
39 | else | 40 | } else { |
40 | /* Cyrix ARRs - everything else were excluded at the top */ | 41 | /* |
42 | * Cyrix ARRs - | ||
43 | * everything else was excluded at the top | ||
44 | */ | ||
41 | ctxt->ccr3 = getCx86(CX86_CCR3); | 45 | ctxt->ccr3 = getCx86(CX86_CCR3); |
46 | } | ||
42 | } | 47 | } |
43 | } | 48 | } |
44 | 49 | ||
45 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) | 50 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) |
46 | { | 51 | { |
47 | if (use_intel()) | 52 | if (use_intel()) { |
48 | /* Disable MTRRs, and set the default type to uncached */ | 53 | /* Disable MTRRs, and set the default type to uncached */ |
49 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL, | 54 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL, |
50 | ctxt->deftype_hi); | 55 | ctxt->deftype_hi); |
51 | else if (is_cpu(CYRIX)) | 56 | } else { |
52 | /* Cyrix ARRs - everything else were excluded at the top */ | 57 | if (is_cpu(CYRIX)) { |
53 | setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10); | 58 | /* Cyrix ARRs - everything else was excluded at the top */ |
59 | setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10); | ||
60 | } | ||
61 | } | ||
54 | } | 62 | } |
55 | 63 | ||
56 | /* Restore the processor after a set_mtrr_prepare */ | 64 | /* Restore the processor after a set_mtrr_prepare */ |
57 | void set_mtrr_done(struct set_mtrr_context *ctxt) | 65 | void set_mtrr_done(struct set_mtrr_context *ctxt) |
58 | { | 66 | { |
59 | if (use_intel() || is_cpu(CYRIX)) { | 67 | if (use_intel() || is_cpu(CYRIX)) { |
60 | 68 | ||
61 | /* Flush caches and TLBs */ | 69 | /* Flush caches and TLBs */ |
62 | wbinvd(); | 70 | wbinvd(); |
63 | 71 | ||
64 | /* Restore MTRRdefType */ | 72 | /* Restore MTRRdefType */ |
65 | if (use_intel()) | 73 | if (use_intel()) { |
66 | /* Intel (P6) standard MTRRs */ | 74 | /* Intel (P6) standard MTRRs */ |
67 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); | 75 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo, |
68 | else | 76 | ctxt->deftype_hi); |
69 | /* Cyrix ARRs - everything else was excluded at the top */ | 77 | } else { |
78 | /* | ||
79 | * Cyrix ARRs - | ||
80 | * everything else was excluded at the top | ||
81 | */ | ||
70 | setCx86(CX86_CCR3, ctxt->ccr3); | 82 | setCx86(CX86_CCR3, ctxt->ccr3); |
83 | } | ||
71 | 84 | ||
72 | /* Enable caches */ | 85 | /* Enable caches */ |
73 | write_cr0(read_cr0() & 0xbfffffff); | 86 | write_cr0(read_cr0() & 0xbfffffff); |
74 | 87 | ||
75 | /* Restore value of CR4 */ | 88 | /* Restore value of CR4 */ |
76 | if (cpu_has_pge) | 89 | if (cpu_has_pge) |
77 | write_cr4(ctxt->cr4val); | 90 | write_cr4(ctxt->cr4val); |
78 | } | 91 | } |
79 | /* Re-enable interrupts locally (if enabled previously) */ | 92 | /* Re-enable interrupts locally (if enabled previously) */ |
80 | local_irq_restore(ctxt->flags); | 93 | local_irq_restore(ctxt->flags); |
81 | } | 94 | } |
82 | |||
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 900332b800f8..f9cd0849bd42 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * Copyright (C) 2009 Jaswinder Singh Rajput | 6 | * Copyright (C) 2009 Jaswinder Singh Rajput |
7 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter | 7 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter |
8 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | 8 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> |
9 | * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> | ||
9 | * | 10 | * |
10 | * For licencing details see kernel-base/COPYING | 11 | * For licencing details see kernel-base/COPYING |
11 | */ | 12 | */ |
@@ -20,6 +21,7 @@ | |||
20 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
21 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
22 | #include <linux/highmem.h> | 23 | #include <linux/highmem.h> |
24 | #include <linux/cpu.h> | ||
23 | 25 | ||
24 | #include <asm/apic.h> | 26 | #include <asm/apic.h> |
25 | #include <asm/stacktrace.h> | 27 | #include <asm/stacktrace.h> |
@@ -27,12 +29,52 @@ | |||
27 | 29 | ||
28 | static u64 perf_counter_mask __read_mostly; | 30 | static u64 perf_counter_mask __read_mostly; |
29 | 31 | ||
32 | /* The maximal number of PEBS counters: */ | ||
33 | #define MAX_PEBS_COUNTERS 4 | ||
34 | |||
35 | /* The size of a BTS record in bytes: */ | ||
36 | #define BTS_RECORD_SIZE 24 | ||
37 | |||
38 | /* The size of a per-cpu BTS buffer in bytes: */ | ||
39 | #define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 1024) | ||
40 | |||
41 | /* The BTS overflow threshold in bytes from the end of the buffer: */ | ||
42 | #define BTS_OVFL_TH (BTS_RECORD_SIZE * 64) | ||
43 | |||
44 | |||
45 | /* | ||
46 | * Bits in the debugctlmsr controlling branch tracing. | ||
47 | */ | ||
48 | #define X86_DEBUGCTL_TR (1 << 6) | ||
49 | #define X86_DEBUGCTL_BTS (1 << 7) | ||
50 | #define X86_DEBUGCTL_BTINT (1 << 8) | ||
51 | #define X86_DEBUGCTL_BTS_OFF_OS (1 << 9) | ||
52 | #define X86_DEBUGCTL_BTS_OFF_USR (1 << 10) | ||
53 | |||
54 | /* | ||
55 | * A debug store configuration. | ||
56 | * | ||
57 | * We only support architectures that use 64bit fields. | ||
58 | */ | ||
59 | struct debug_store { | ||
60 | u64 bts_buffer_base; | ||
61 | u64 bts_index; | ||
62 | u64 bts_absolute_maximum; | ||
63 | u64 bts_interrupt_threshold; | ||
64 | u64 pebs_buffer_base; | ||
65 | u64 pebs_index; | ||
66 | u64 pebs_absolute_maximum; | ||
67 | u64 pebs_interrupt_threshold; | ||
68 | u64 pebs_counter_reset[MAX_PEBS_COUNTERS]; | ||
69 | }; | ||
70 | |||
30 | struct cpu_hw_counters { | 71 | struct cpu_hw_counters { |
31 | struct perf_counter *counters[X86_PMC_IDX_MAX]; | 72 | struct perf_counter *counters[X86_PMC_IDX_MAX]; |
32 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 73 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
33 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 74 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
34 | unsigned long interrupts; | 75 | unsigned long interrupts; |
35 | int enabled; | 76 | int enabled; |
77 | struct debug_store *ds; | ||
36 | }; | 78 | }; |
37 | 79 | ||
38 | /* | 80 | /* |
@@ -58,6 +100,8 @@ struct x86_pmu { | |||
58 | int apic; | 100 | int apic; |
59 | u64 max_period; | 101 | u64 max_period; |
60 | u64 intel_ctrl; | 102 | u64 intel_ctrl; |
103 | void (*enable_bts)(u64 config); | ||
104 | void (*disable_bts)(void); | ||
61 | }; | 105 | }; |
62 | 106 | ||
63 | static struct x86_pmu x86_pmu __read_mostly; | 107 | static struct x86_pmu x86_pmu __read_mostly; |
@@ -577,6 +621,9 @@ x86_perf_counter_update(struct perf_counter *counter, | |||
577 | u64 prev_raw_count, new_raw_count; | 621 | u64 prev_raw_count, new_raw_count; |
578 | s64 delta; | 622 | s64 delta; |
579 | 623 | ||
624 | if (idx == X86_PMC_IDX_FIXED_BTS) | ||
625 | return 0; | ||
626 | |||
580 | /* | 627 | /* |
581 | * Careful: an NMI might modify the previous counter value. | 628 | * Careful: an NMI might modify the previous counter value. |
582 | * | 629 | * |
@@ -666,10 +713,110 @@ static void release_pmc_hardware(void) | |||
666 | #endif | 713 | #endif |
667 | } | 714 | } |
668 | 715 | ||
716 | static inline bool bts_available(void) | ||
717 | { | ||
718 | return x86_pmu.enable_bts != NULL; | ||
719 | } | ||
720 | |||
721 | static inline void init_debug_store_on_cpu(int cpu) | ||
722 | { | ||
723 | struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; | ||
724 | |||
725 | if (!ds) | ||
726 | return; | ||
727 | |||
728 | wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, | ||
729 | (u32)((u64)(unsigned long)ds), | ||
730 | (u32)((u64)(unsigned long)ds >> 32)); | ||
731 | } | ||
732 | |||
733 | static inline void fini_debug_store_on_cpu(int cpu) | ||
734 | { | ||
735 | if (!per_cpu(cpu_hw_counters, cpu).ds) | ||
736 | return; | ||
737 | |||
738 | wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); | ||
739 | } | ||
740 | |||
741 | static void release_bts_hardware(void) | ||
742 | { | ||
743 | int cpu; | ||
744 | |||
745 | if (!bts_available()) | ||
746 | return; | ||
747 | |||
748 | get_online_cpus(); | ||
749 | |||
750 | for_each_online_cpu(cpu) | ||
751 | fini_debug_store_on_cpu(cpu); | ||
752 | |||
753 | for_each_possible_cpu(cpu) { | ||
754 | struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; | ||
755 | |||
756 | if (!ds) | ||
757 | continue; | ||
758 | |||
759 | per_cpu(cpu_hw_counters, cpu).ds = NULL; | ||
760 | |||
761 | kfree((void *)(unsigned long)ds->bts_buffer_base); | ||
762 | kfree(ds); | ||
763 | } | ||
764 | |||
765 | put_online_cpus(); | ||
766 | } | ||
767 | |||
768 | static int reserve_bts_hardware(void) | ||
769 | { | ||
770 | int cpu, err = 0; | ||
771 | |||
772 | if (!bts_available()) | ||
773 | return 0; | ||
774 | |||
775 | get_online_cpus(); | ||
776 | |||
777 | for_each_possible_cpu(cpu) { | ||
778 | struct debug_store *ds; | ||
779 | void *buffer; | ||
780 | |||
781 | err = -ENOMEM; | ||
782 | buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL); | ||
783 | if (unlikely(!buffer)) | ||
784 | break; | ||
785 | |||
786 | ds = kzalloc(sizeof(*ds), GFP_KERNEL); | ||
787 | if (unlikely(!ds)) { | ||
788 | kfree(buffer); | ||
789 | break; | ||
790 | } | ||
791 | |||
792 | ds->bts_buffer_base = (u64)(unsigned long)buffer; | ||
793 | ds->bts_index = ds->bts_buffer_base; | ||
794 | ds->bts_absolute_maximum = | ||
795 | ds->bts_buffer_base + BTS_BUFFER_SIZE; | ||
796 | ds->bts_interrupt_threshold = | ||
797 | ds->bts_absolute_maximum - BTS_OVFL_TH; | ||
798 | |||
799 | per_cpu(cpu_hw_counters, cpu).ds = ds; | ||
800 | err = 0; | ||
801 | } | ||
802 | |||
803 | if (err) | ||
804 | release_bts_hardware(); | ||
805 | else { | ||
806 | for_each_online_cpu(cpu) | ||
807 | init_debug_store_on_cpu(cpu); | ||
808 | } | ||
809 | |||
810 | put_online_cpus(); | ||
811 | |||
812 | return err; | ||
813 | } | ||
814 | |||
669 | static void hw_perf_counter_destroy(struct perf_counter *counter) | 815 | static void hw_perf_counter_destroy(struct perf_counter *counter) |
670 | { | 816 | { |
671 | if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { | 817 | if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { |
672 | release_pmc_hardware(); | 818 | release_pmc_hardware(); |
819 | release_bts_hardware(); | ||
673 | mutex_unlock(&pmc_reserve_mutex); | 820 | mutex_unlock(&pmc_reserve_mutex); |
674 | } | 821 | } |
675 | } | 822 | } |
@@ -712,6 +859,42 @@ set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr) | |||
712 | return 0; | 859 | return 0; |
713 | } | 860 | } |
714 | 861 | ||
862 | static void intel_pmu_enable_bts(u64 config) | ||
863 | { | ||
864 | unsigned long debugctlmsr; | ||
865 | |||
866 | debugctlmsr = get_debugctlmsr(); | ||
867 | |||
868 | debugctlmsr |= X86_DEBUGCTL_TR; | ||
869 | debugctlmsr |= X86_DEBUGCTL_BTS; | ||
870 | debugctlmsr |= X86_DEBUGCTL_BTINT; | ||
871 | |||
872 | if (!(config & ARCH_PERFMON_EVENTSEL_OS)) | ||
873 | debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS; | ||
874 | |||
875 | if (!(config & ARCH_PERFMON_EVENTSEL_USR)) | ||
876 | debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR; | ||
877 | |||
878 | update_debugctlmsr(debugctlmsr); | ||
879 | } | ||
880 | |||
881 | static void intel_pmu_disable_bts(void) | ||
882 | { | ||
883 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
884 | unsigned long debugctlmsr; | ||
885 | |||
886 | if (!cpuc->ds) | ||
887 | return; | ||
888 | |||
889 | debugctlmsr = get_debugctlmsr(); | ||
890 | |||
891 | debugctlmsr &= | ||
892 | ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT | | ||
893 | X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR); | ||
894 | |||
895 | update_debugctlmsr(debugctlmsr); | ||
896 | } | ||
897 | |||
715 | /* | 898 | /* |
716 | * Setup the hardware configuration for a given attr_type | 899 | * Setup the hardware configuration for a given attr_type |
717 | */ | 900 | */ |
@@ -728,9 +911,13 @@ static int __hw_perf_counter_init(struct perf_counter *counter) | |||
728 | err = 0; | 911 | err = 0; |
729 | if (!atomic_inc_not_zero(&active_counters)) { | 912 | if (!atomic_inc_not_zero(&active_counters)) { |
730 | mutex_lock(&pmc_reserve_mutex); | 913 | mutex_lock(&pmc_reserve_mutex); |
731 | if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware()) | 914 | if (atomic_read(&active_counters) == 0) { |
732 | err = -EBUSY; | 915 | if (!reserve_pmc_hardware()) |
733 | else | 916 | err = -EBUSY; |
917 | else | ||
918 | err = reserve_bts_hardware(); | ||
919 | } | ||
920 | if (!err) | ||
734 | atomic_inc(&active_counters); | 921 | atomic_inc(&active_counters); |
735 | mutex_unlock(&pmc_reserve_mutex); | 922 | mutex_unlock(&pmc_reserve_mutex); |
736 | } | 923 | } |
@@ -793,6 +980,20 @@ static int __hw_perf_counter_init(struct perf_counter *counter) | |||
793 | if (config == -1LL) | 980 | if (config == -1LL) |
794 | return -EINVAL; | 981 | return -EINVAL; |
795 | 982 | ||
983 | /* | ||
984 | * Branch tracing: | ||
985 | */ | ||
986 | if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && | ||
987 | (hwc->sample_period == 1)) { | ||
988 | /* BTS is not supported by this architecture. */ | ||
989 | if (!bts_available()) | ||
990 | return -EOPNOTSUPP; | ||
991 | |||
992 | /* BTS is currently only allowed for user-mode. */ | ||
993 | if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) | ||
994 | return -EOPNOTSUPP; | ||
995 | } | ||
996 | |||
796 | hwc->config |= config; | 997 | hwc->config |= config; |
797 | 998 | ||
798 | return 0; | 999 | return 0; |
@@ -817,7 +1018,18 @@ static void p6_pmu_disable_all(void) | |||
817 | 1018 | ||
818 | static void intel_pmu_disable_all(void) | 1019 | static void intel_pmu_disable_all(void) |
819 | { | 1020 | { |
1021 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
1022 | |||
1023 | if (!cpuc->enabled) | ||
1024 | return; | ||
1025 | |||
1026 | cpuc->enabled = 0; | ||
1027 | barrier(); | ||
1028 | |||
820 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); | 1029 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); |
1030 | |||
1031 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) | ||
1032 | intel_pmu_disable_bts(); | ||
821 | } | 1033 | } |
822 | 1034 | ||
823 | static void amd_pmu_disable_all(void) | 1035 | static void amd_pmu_disable_all(void) |
@@ -875,7 +1087,25 @@ static void p6_pmu_enable_all(void) | |||
875 | 1087 | ||
876 | static void intel_pmu_enable_all(void) | 1088 | static void intel_pmu_enable_all(void) |
877 | { | 1089 | { |
1090 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
1091 | |||
1092 | if (cpuc->enabled) | ||
1093 | return; | ||
1094 | |||
1095 | cpuc->enabled = 1; | ||
1096 | barrier(); | ||
1097 | |||
878 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); | 1098 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); |
1099 | |||
1100 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { | ||
1101 | struct perf_counter *counter = | ||
1102 | cpuc->counters[X86_PMC_IDX_FIXED_BTS]; | ||
1103 | |||
1104 | if (WARN_ON_ONCE(!counter)) | ||
1105 | return; | ||
1106 | |||
1107 | intel_pmu_enable_bts(counter->hw.config); | ||
1108 | } | ||
879 | } | 1109 | } |
880 | 1110 | ||
881 | static void amd_pmu_enable_all(void) | 1111 | static void amd_pmu_enable_all(void) |
@@ -962,6 +1192,11 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | |||
962 | static inline void | 1192 | static inline void |
963 | intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | 1193 | intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) |
964 | { | 1194 | { |
1195 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
1196 | intel_pmu_disable_bts(); | ||
1197 | return; | ||
1198 | } | ||
1199 | |||
965 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 1200 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
966 | intel_pmu_disable_fixed(hwc, idx); | 1201 | intel_pmu_disable_fixed(hwc, idx); |
967 | return; | 1202 | return; |
@@ -990,6 +1225,9 @@ x86_perf_counter_set_period(struct perf_counter *counter, | |||
990 | s64 period = hwc->sample_period; | 1225 | s64 period = hwc->sample_period; |
991 | int err, ret = 0; | 1226 | int err, ret = 0; |
992 | 1227 | ||
1228 | if (idx == X86_PMC_IDX_FIXED_BTS) | ||
1229 | return 0; | ||
1230 | |||
993 | /* | 1231 | /* |
994 | * If we are way outside a reasonable range then just skip forward: | 1232 | * If we are way outside a reasonable range then just skip forward: |
995 | */ | 1233 | */ |
@@ -1072,6 +1310,14 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | |||
1072 | 1310 | ||
1073 | static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | 1311 | static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) |
1074 | { | 1312 | { |
1313 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
1314 | if (!__get_cpu_var(cpu_hw_counters).enabled) | ||
1315 | return; | ||
1316 | |||
1317 | intel_pmu_enable_bts(hwc->config); | ||
1318 | return; | ||
1319 | } | ||
1320 | |||
1075 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 1321 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
1076 | intel_pmu_enable_fixed(hwc, idx); | 1322 | intel_pmu_enable_fixed(hwc, idx); |
1077 | return; | 1323 | return; |
@@ -1093,11 +1339,16 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) | |||
1093 | { | 1339 | { |
1094 | unsigned int event; | 1340 | unsigned int event; |
1095 | 1341 | ||
1342 | event = hwc->config & ARCH_PERFMON_EVENT_MASK; | ||
1343 | |||
1344 | if (unlikely((event == | ||
1345 | x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && | ||
1346 | (hwc->sample_period == 1))) | ||
1347 | return X86_PMC_IDX_FIXED_BTS; | ||
1348 | |||
1096 | if (!x86_pmu.num_counters_fixed) | 1349 | if (!x86_pmu.num_counters_fixed) |
1097 | return -1; | 1350 | return -1; |
1098 | 1351 | ||
1099 | event = hwc->config & ARCH_PERFMON_EVENT_MASK; | ||
1100 | |||
1101 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) | 1352 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) |
1102 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; | 1353 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; |
1103 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) | 1354 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) |
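For reference, a sketch of the counter configuration that makes fixed_mode_idx() return X86_PMC_IDX_FIXED_BTS: the branch-instructions event with a sample period of exactly 1 (field names from this tree's perf_counter ABI; exclude_kernel reflects the user-mode-only restriction enforced at init):

    /* hypothetical attr, for illustration */
    struct perf_counter_attr attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
        .sample_period  = 1,    /* period 1 -> trace every branch via BTS */
        .exclude_kernel = 1,    /* OS-mode BTS is rejected in init */
    };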
@@ -1118,7 +1369,15 @@ static int x86_pmu_enable(struct perf_counter *counter) | |||
1118 | int idx; | 1369 | int idx; |
1119 | 1370 | ||
1120 | idx = fixed_mode_idx(counter, hwc); | 1371 | idx = fixed_mode_idx(counter, hwc); |
1121 | if (idx >= 0) { | 1372 | if (idx == X86_PMC_IDX_FIXED_BTS) { |
1373 | /* BTS is already occupied. */ | ||
1374 | if (test_and_set_bit(idx, cpuc->used_mask)) | ||
1375 | return -EAGAIN; | ||
1376 | |||
1377 | hwc->config_base = 0; | ||
1378 | hwc->counter_base = 0; | ||
1379 | hwc->idx = idx; | ||
1380 | } else if (idx >= 0) { | ||
1122 | /* | 1381 | /* |
1123 | * Try to get the fixed counter, if that is already taken | 1382 | * Try to get the fixed counter, if that is already taken |
1124 | * then try to get a generic counter: | 1383 | * then try to get a generic counter: |
@@ -1229,6 +1488,44 @@ void perf_counter_print_debug(void) | |||
1229 | local_irq_restore(flags); | 1488 | local_irq_restore(flags); |
1230 | } | 1489 | } |
1231 | 1490 | ||
1491 | static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc, | ||
1492 | struct perf_sample_data *data) | ||
1493 | { | ||
1494 | struct debug_store *ds = cpuc->ds; | ||
1495 | struct bts_record { | ||
1496 | u64 from; | ||
1497 | u64 to; | ||
1498 | u64 flags; | ||
1499 | }; | ||
1500 | struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS]; | ||
1501 | unsigned long orig_ip = data->regs->ip; | ||
1502 | struct bts_record *at, *top; | ||
1503 | |||
1504 | if (!counter) | ||
1505 | return; | ||
1506 | |||
1507 | if (!ds) | ||
1508 | return; | ||
1509 | |||
1510 | at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; | ||
1511 | top = (struct bts_record *)(unsigned long)ds->bts_index; | ||
1512 | |||
1513 | ds->bts_index = ds->bts_buffer_base; | ||
1514 | |||
1515 | for (; at < top; at++) { | ||
1516 | data->regs->ip = at->from; | ||
1517 | data->addr = at->to; | ||
1518 | |||
1519 | perf_counter_output(counter, 1, data); | ||
1520 | } | ||
1521 | |||
1522 | data->regs->ip = orig_ip; | ||
1523 | data->addr = 0; | ||
1524 | |||
1525 | /* There's new data available. */ | ||
1526 | counter->pending_kill = POLL_IN; | ||
1527 | } | ||
1528 | |||
1232 | static void x86_pmu_disable(struct perf_counter *counter) | 1529 | static void x86_pmu_disable(struct perf_counter *counter) |
1233 | { | 1530 | { |
1234 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | 1531 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); |
@@ -1253,6 +1550,15 @@ static void x86_pmu_disable(struct perf_counter *counter) | |||
1253 | * that we are disabling: | 1550 | * that we are disabling: |
1254 | */ | 1551 | */ |
1255 | x86_perf_counter_update(counter, hwc, idx); | 1552 | x86_perf_counter_update(counter, hwc, idx); |
1553 | |||
1554 | /* Drain the remaining BTS records. */ | ||
1555 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | ||
1556 | struct perf_sample_data data; | ||
1557 | struct pt_regs regs; | ||
1558 | |||
1559 | data.regs = ®s; | ||
1560 | intel_pmu_drain_bts_buffer(cpuc, &data); | ||
1561 | } | ||
1256 | cpuc->counters[idx] = NULL; | 1562 | cpuc->counters[idx] = NULL; |
1257 | clear_bit(idx, cpuc->used_mask); | 1563 | clear_bit(idx, cpuc->used_mask); |
1258 | 1564 | ||
@@ -1280,6 +1586,7 @@ static int intel_pmu_save_and_restart(struct perf_counter *counter) | |||
1280 | 1586 | ||
1281 | static void intel_pmu_reset(void) | 1587 | static void intel_pmu_reset(void) |
1282 | { | 1588 | { |
1589 | struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds; | ||
1283 | unsigned long flags; | 1590 | unsigned long flags; |
1284 | int idx; | 1591 | int idx; |
1285 | 1592 | ||
@@ -1297,6 +1604,8 @@ static void intel_pmu_reset(void) | |||
1297 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { | 1604 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { |
1298 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); | 1605 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); |
1299 | } | 1606 | } |
1607 | if (ds) | ||
1608 | ds->bts_index = ds->bts_buffer_base; | ||
1300 | 1609 | ||
1301 | local_irq_restore(flags); | 1610 | local_irq_restore(flags); |
1302 | } | 1611 | } |
@@ -1362,6 +1671,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
1362 | cpuc = &__get_cpu_var(cpu_hw_counters); | 1671 | cpuc = &__get_cpu_var(cpu_hw_counters); |
1363 | 1672 | ||
1364 | perf_disable(); | 1673 | perf_disable(); |
1674 | intel_pmu_drain_bts_buffer(cpuc, &data); | ||
1365 | status = intel_pmu_get_status(); | 1675 | status = intel_pmu_get_status(); |
1366 | if (!status) { | 1676 | if (!status) { |
1367 | perf_enable(); | 1677 | perf_enable(); |
@@ -1571,6 +1881,8 @@ static struct x86_pmu intel_pmu = { | |||
1571 | * the generic counter period: | 1881 | * the generic counter period: |
1572 | */ | 1882 | */ |
1573 | .max_period = (1ULL << 31) - 1, | 1883 | .max_period = (1ULL << 31) - 1, |
1884 | .enable_bts = intel_pmu_enable_bts, | ||
1885 | .disable_bts = intel_pmu_disable_bts, | ||
1574 | }; | 1886 | }; |
1575 | 1887 | ||
1576 | static struct x86_pmu amd_pmu = { | 1888 | static struct x86_pmu amd_pmu = { |
@@ -1962,3 +2274,8 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | |||
1962 | 2274 | ||
1963 | return entry; | 2275 | return entry; |
1964 | } | 2276 | } |
2277 | |||
2278 | void hw_perf_counter_setup_online(int cpu) | ||
2279 | { | ||
2280 | init_debug_store_on_cpu(cpu); | ||
2281 | } | ||
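
The perf_counter.c hunks above wire Intel's Branch Trace Store (BTS) into the counter code: a branch-instructions counter with sample_period == 1 is steered to the X86_PMC_IDX_FIXED_BTS slot, and intel_pmu_drain_bts_buffer() walks the debug-store area as an array of three-u64 records, emitting one sample per record with ip = from and addr = to, then resetting bts_index and flagging POLL_IN. A minimal user-space sketch of that walk, reusing the record layout from the hunk; the buffer contents below are fabricated for illustration:

#include <stdint.h>
#include <stdio.h>

struct bts_record {             /* layout as in intel_pmu_drain_bts_buffer() */
        uint64_t from;          /* branch source address */
        uint64_t to;            /* branch destination address */
        uint64_t flags;
};

int main(void)
{
        /* Fabricated stand-in for the DS area between bts_buffer_base
         * and bts_index. */
        struct bts_record buf[] = {
                { 0xffffffff8100100aULL, 0xffffffff81002000ULL, 0 },
                { 0xffffffff81002044ULL, 0xffffffff8100100cULL, 0 },
        };
        struct bts_record *at = buf;
        struct bts_record *top = buf + sizeof(buf) / sizeof(buf[0]);

        /* Same loop shape as the drain: one sample per record. */
        for (; at < top; at++)
                printf("branch %#llx -> %#llx\n",
                       (unsigned long long)at->from,
                       (unsigned long long)at->to);
        return 0;
}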
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index e60ed740d2b3..392bea43b890 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -68,16 +68,16 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) | |||
68 | /* returns the bit offset of the performance counter register */ | 68 | /* returns the bit offset of the performance counter register */ |
69 | switch (boot_cpu_data.x86_vendor) { | 69 | switch (boot_cpu_data.x86_vendor) { |
70 | case X86_VENDOR_AMD: | 70 | case X86_VENDOR_AMD: |
71 | return (msr - MSR_K7_PERFCTR0); | 71 | return msr - MSR_K7_PERFCTR0; |
72 | case X86_VENDOR_INTEL: | 72 | case X86_VENDOR_INTEL: |
73 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | 73 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
74 | return (msr - MSR_ARCH_PERFMON_PERFCTR0); | 74 | return msr - MSR_ARCH_PERFMON_PERFCTR0; |
75 | 75 | ||
76 | switch (boot_cpu_data.x86) { | 76 | switch (boot_cpu_data.x86) { |
77 | case 6: | 77 | case 6: |
78 | return (msr - MSR_P6_PERFCTR0); | 78 | return msr - MSR_P6_PERFCTR0; |
79 | case 15: | 79 | case 15: |
80 | return (msr - MSR_P4_BPU_PERFCTR0); | 80 | return msr - MSR_P4_BPU_PERFCTR0; |
81 | } | 81 | } |
82 | } | 82 | } |
83 | return 0; | 83 | return 0; |
@@ -92,16 +92,16 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) | |||
92 | /* returns the bit offset of the event selection register */ | 92 | /* returns the bit offset of the event selection register */ |
93 | switch (boot_cpu_data.x86_vendor) { | 93 | switch (boot_cpu_data.x86_vendor) { |
94 | case X86_VENDOR_AMD: | 94 | case X86_VENDOR_AMD: |
95 | return (msr - MSR_K7_EVNTSEL0); | 95 | return msr - MSR_K7_EVNTSEL0; |
96 | case X86_VENDOR_INTEL: | 96 | case X86_VENDOR_INTEL: |
97 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | 97 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
98 | return (msr - MSR_ARCH_PERFMON_EVENTSEL0); | 98 | return msr - MSR_ARCH_PERFMON_EVENTSEL0; |
99 | 99 | ||
100 | switch (boot_cpu_data.x86) { | 100 | switch (boot_cpu_data.x86) { |
101 | case 6: | 101 | case 6: |
102 | return (msr - MSR_P6_EVNTSEL0); | 102 | return msr - MSR_P6_EVNTSEL0; |
103 | case 15: | 103 | case 15: |
104 | return (msr - MSR_P4_BSU_ESCR0); | 104 | return msr - MSR_P4_BSU_ESCR0; |
105 | } | 105 | } |
106 | } | 106 | } |
107 | return 0; | 107 | return 0; |
@@ -113,7 +113,7 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter) | |||
113 | { | 113 | { |
114 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | 114 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); |
115 | 115 | ||
116 | return (!test_bit(counter, perfctr_nmi_owner)); | 116 | return !test_bit(counter, perfctr_nmi_owner); |
117 | } | 117 | } |
118 | 118 | ||
119 | /* checks an msr for availability */ | 119 | /* checks an msr for availability */
@@ -124,7 +124,7 @@ int avail_to_resrv_perfctr_nmi(unsigned int msr) | |||
124 | counter = nmi_perfctr_msr_to_bit(msr); | 124 | counter = nmi_perfctr_msr_to_bit(msr); |
125 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | 125 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); |
126 | 126 | ||
127 | return (!test_bit(counter, perfctr_nmi_owner)); | 127 | return !test_bit(counter, perfctr_nmi_owner); |
128 | } | 128 | } |
129 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); | 129 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); |
130 | 130 | ||
@@ -237,7 +237,7 @@ static unsigned int adjust_for_32bit_ctr(unsigned int hz) | |||
237 | */ | 237 | */ |
238 | counter_val = (u64)cpu_khz * 1000; | 238 | counter_val = (u64)cpu_khz * 1000; |
239 | do_div(counter_val, retval); | 239 | do_div(counter_val, retval); |
240 | if (counter_val > 0x7fffffffULL) { | 240 | if (counter_val > 0x7fffffffULL) { |
241 | u64 count = (u64)cpu_khz * 1000; | 241 | u64 count = (u64)cpu_khz * 1000; |
242 | do_div(count, 0x7fffffffUL); | 242 | do_div(count, 0x7fffffffUL); |
243 | retval = count + 1; | 243 | retval = count + 1; |
@@ -251,7 +251,7 @@ static void write_watchdog_counter(unsigned int perfctr_msr, | |||
251 | u64 count = (u64)cpu_khz * 1000; | 251 | u64 count = (u64)cpu_khz * 1000; |
252 | 252 | ||
253 | do_div(count, nmi_hz); | 253 | do_div(count, nmi_hz); |
254 | if(descr) | 254 | if (descr) |
255 | pr_debug("setting %s to -0x%08Lx\n", descr, count); | 255 | pr_debug("setting %s to -0x%08Lx\n", descr, count); |
256 | wrmsrl(perfctr_msr, 0 - count); | 256 | wrmsrl(perfctr_msr, 0 - count); |
257 | } | 257 | } |
@@ -262,7 +262,7 @@ static void write_watchdog_counter32(unsigned int perfctr_msr, | |||
262 | u64 count = (u64)cpu_khz * 1000; | 262 | u64 count = (u64)cpu_khz * 1000; |
263 | 263 | ||
264 | do_div(count, nmi_hz); | 264 | do_div(count, nmi_hz); |
265 | if(descr) | 265 | if (descr) |
266 | pr_debug("setting %s to -0x%08Lx\n", descr, count); | 266 | pr_debug("setting %s to -0x%08Lx\n", descr, count); |
267 | wrmsr(perfctr_msr, (u32)(-count), 0); | 267 | wrmsr(perfctr_msr, (u32)(-count), 0); |
268 | } | 268 | } |
@@ -296,7 +296,7 @@ static int setup_k7_watchdog(unsigned nmi_hz) | |||
296 | 296 | ||
297 | /* setup the timer */ | 297 | /* setup the timer */ |
298 | wrmsr(evntsel_msr, evntsel, 0); | 298 | wrmsr(evntsel_msr, evntsel, 0); |
299 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); | 299 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz); |
300 | 300 | ||
301 | /* initialize the wd struct before enabling */ | 301 | /* initialize the wd struct before enabling */ |
302 | wd->perfctr_msr = perfctr_msr; | 302 | wd->perfctr_msr = perfctr_msr; |
@@ -387,7 +387,7 @@ static int setup_p6_watchdog(unsigned nmi_hz) | |||
387 | /* setup the timer */ | 387 | /* setup the timer */ |
388 | wrmsr(evntsel_msr, evntsel, 0); | 388 | wrmsr(evntsel_msr, evntsel, 0); |
389 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | 389 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); |
390 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); | 390 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz); |
391 | 391 | ||
392 | /* initialize the wd struct before enabling */ | 392 | /* initialize the wd struct before enabling */ |
393 | wd->perfctr_msr = perfctr_msr; | 393 | wd->perfctr_msr = perfctr_msr; |
@@ -415,7 +415,7 @@ static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | |||
415 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 415 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
416 | 416 | ||
417 | /* P6/ARCH_PERFMON has 32 bit counter write */ | 417 | /* P6/ARCH_PERFMON has 32 bit counter write */ |
418 | write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz); | 418 | write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz); |
419 | } | 419 | } |
420 | 420 | ||
421 | static const struct wd_ops p6_wd_ops = { | 421 | static const struct wd_ops p6_wd_ops = { |
@@ -490,9 +490,9 @@ static int setup_p4_watchdog(unsigned nmi_hz) | |||
490 | if (smp_num_siblings == 2) { | 490 | if (smp_num_siblings == 2) { |
491 | unsigned int ebx, apicid; | 491 | unsigned int ebx, apicid; |
492 | 492 | ||
493 | ebx = cpuid_ebx(1); | 493 | ebx = cpuid_ebx(1); |
494 | apicid = (ebx >> 24) & 0xff; | 494 | apicid = (ebx >> 24) & 0xff; |
495 | ht_num = apicid & 1; | 495 | ht_num = apicid & 1; |
496 | } else | 496 | } else |
497 | #endif | 497 | #endif |
498 | ht_num = 0; | 498 | ht_num = 0; |
@@ -544,7 +544,7 @@ static int setup_p4_watchdog(unsigned nmi_hz) | |||
544 | } | 544 | } |
545 | 545 | ||
546 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) | 546 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) |
547 | | P4_ESCR_OS | 547 | | P4_ESCR_OS |
548 | | P4_ESCR_USR; | 548 | | P4_ESCR_USR; |
549 | 549 | ||
550 | cccr_val |= P4_CCCR_THRESHOLD(15) | 550 | cccr_val |= P4_CCCR_THRESHOLD(15) |
@@ -612,7 +612,7 @@ static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | |||
612 | { | 612 | { |
613 | unsigned dummy; | 613 | unsigned dummy; |
614 | /* | 614 | /* |
615 | * P4 quirks: | 615 | * P4 quirks: |
616 | * - An overflown perfctr will assert its interrupt | 616 | * - An overflown perfctr will assert its interrupt |
617 | * until the OVF flag in its CCCR is cleared. | 617 | * until the OVF flag in its CCCR is cleared. |
618 | * - LVTPC is masked on interrupt and must be | 618 | * - LVTPC is masked on interrupt and must be |
@@ -662,7 +662,8 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz) | |||
662 | * NOTE: Corresponding bit = 0 in ebx indicates event present. | 662 | * NOTE: Corresponding bit = 0 in ebx indicates event present. |
663 | */ | 663 | */ |
664 | cpuid(10, &(eax.full), &ebx, &unused, &unused); | 664 | cpuid(10, &(eax.full), &ebx, &unused, &unused); |
665 | if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || | 665 | if ((eax.split.mask_length < |
666 | (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || | ||
666 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) | 667 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) |
667 | return 0; | 668 | return 0; |
668 | 669 | ||
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index d5e30397246b..62ac8cb6ba27 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
@@ -116,11 +116,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
116 | seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); | 116 | seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); |
117 | #endif | 117 | #endif |
118 | seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); | 118 | seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); |
119 | #ifdef CONFIG_X86_64 | ||
120 | seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); | 119 | seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); |
121 | seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", | 120 | seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", |
122 | c->x86_phys_bits, c->x86_virt_bits); | 121 | c->x86_phys_bits, c->x86_virt_bits); |
123 | #endif | ||
124 | 122 | ||
125 | seq_printf(m, "power management:"); | 123 | seq_printf(m, "power management:"); |
126 | for (i = 0; i < 32; i++) { | 124 | for (i = 0; i < 32; i++) { |
@@ -128,7 +126,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
128 | if (i < ARRAY_SIZE(x86_power_flags) && | 126 | if (i < ARRAY_SIZE(x86_power_flags) && |
129 | x86_power_flags[i]) | 127 | x86_power_flags[i]) |
130 | seq_printf(m, "%s%s", | 128 | seq_printf(m, "%s%s", |
131 | x86_power_flags[i][0]?" ":"", | 129 | x86_power_flags[i][0] ? " " : "", |
132 | x86_power_flags[i]); | 130 | x86_power_flags[i]); |
133 | else | 131 | else |
134 | seq_printf(m, " [%d]", i); | 132 | seq_printf(m, " [%d]", i); |
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 284c399e3234..bc24f514ec93 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c | |||
@@ -49,17 +49,17 @@ static inline int __vmware_platform(void) | |||
49 | 49 | ||
50 | static unsigned long __vmware_get_tsc_khz(void) | 50 | static unsigned long __vmware_get_tsc_khz(void) |
51 | { | 51 | { |
52 | uint64_t tsc_hz; | 52 | uint64_t tsc_hz; |
53 | uint32_t eax, ebx, ecx, edx; | 53 | uint32_t eax, ebx, ecx, edx; |
54 | 54 | ||
55 | VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); | 55 | VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); |
56 | 56 | ||
57 | if (ebx == UINT_MAX) | 57 | if (ebx == UINT_MAX) |
58 | return 0; | 58 | return 0; |
59 | tsc_hz = eax | (((uint64_t)ebx) << 32); | 59 | tsc_hz = eax | (((uint64_t)ebx) << 32); |
60 | do_div(tsc_hz, 1000); | 60 | do_div(tsc_hz, 1000); |
61 | BUG_ON(tsc_hz >> 32); | 61 | BUG_ON(tsc_hz >> 32); |
62 | return tsc_hz; | 62 | return tsc_hz; |
63 | } | 63 | } |
64 | 64 | ||
65 | /* | 65 | /* |
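
VMWARE_PORT(GETHZ) reports the TSC frequency in Hz split across ebx:eax, with ebx == UINT_MAX treated as "no answer"; the helper stitches the halves together and divides down to kHz. A tiny sketch with fabricated register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Fabricated GETHZ output: 3 GHz TSC, high half in ebx. */
        uint32_t eax = 3000000000u, ebx = 0;
        uint64_t tsc_hz = eax | ((uint64_t)ebx << 32);

        tsc_hz /= 1000;                 /* Hz -> kHz, as do_div() does */
        printf("tsc = %llu kHz\n", (unsigned long long)tsc_hz);
        return 0;
}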
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c index b4f14c6c09d9..37250fe490b1 100644 --- a/arch/x86/kernel/doublefault_32.c +++ b/arch/x86/kernel/doublefault_32.c | |||
@@ -27,9 +27,7 @@ static void doublefault_fn(void) | |||
27 | 27 | ||
28 | if (ptr_ok(gdt)) { | 28 | if (ptr_ok(gdt)) { |
29 | gdt += GDT_ENTRY_TSS << 3; | 29 | gdt += GDT_ENTRY_TSS << 3; |
30 | tss = *(u16 *)(gdt+2); | 30 | tss = get_desc_base((struct desc_struct *)gdt); |
31 | tss += *(u8 *)(gdt+4) << 16; | ||
32 | tss += *(u8 *)(gdt+7) << 24; | ||
33 | printk(KERN_EMERG "double fault, tss at %08lx\n", tss); | 31 | printk(KERN_EMERG "double fault, tss at %08lx\n", tss); |
34 | 32 | ||
35 | if (ptr_ok(tss)) { | 33 | if (ptr_ok(tss)) { |
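
get_desc_base() replaces the open-coded extraction of a segment base whose 32 bits are scattered across bytes 2-4 and byte 7 of the 8-byte descriptor (the same pattern goes away in step.c further down). A self-contained sketch of the decode; the descriptor bytes are fabricated:

#include <stdint.h>
#include <stdio.h>

/* Base bits 0-15 live at bytes 2-3, bits 16-23 at byte 4,
 * bits 24-31 at byte 7, as the removed code spelled out. */
static uint32_t desc_base(const uint8_t *d)
{
        return (uint32_t)d[2] | ((uint32_t)d[3] << 8) |
               ((uint32_t)d[4] << 16) | ((uint32_t)d[7] << 24);
}

int main(void)
{
        /* Fabricated descriptor carrying base 0x12345678. */
        uint8_t d[8] = { 0, 0, 0x78, 0x56, 0x34, 0, 0, 0x12 };

        printf("base = %#x\n", desc_base(d));   /* 0x12345678 */
        return 0;
}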
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index 48bfe1386038..ef42a038f1a6 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c | |||
@@ -509,15 +509,15 @@ enum bts_field { | |||
509 | bts_escape = ((unsigned long)-1 & ~bts_qual_mask) | 509 | bts_escape = ((unsigned long)-1 & ~bts_qual_mask) |
510 | }; | 510 | }; |
511 | 511 | ||
512 | static inline unsigned long bts_get(const char *base, enum bts_field field) | 512 | static inline unsigned long bts_get(const char *base, unsigned long field) |
513 | { | 513 | { |
514 | base += (ds_cfg.sizeof_ptr_field * field); | 514 | base += (ds_cfg.sizeof_ptr_field * field); |
515 | return *(unsigned long *)base; | 515 | return *(unsigned long *)base; |
516 | } | 516 | } |
517 | 517 | ||
518 | static inline void bts_set(char *base, enum bts_field field, unsigned long val) | 518 | static inline void bts_set(char *base, unsigned long field, unsigned long val) |
519 | { | 519 | { |
520 | base += (ds_cfg.sizeof_ptr_field * field);; | 520 | base += (ds_cfg.sizeof_ptr_field * field); |
521 | (*(unsigned long *)base) = val; | 521 | (*(unsigned long *)base) = val; |
522 | } | 522 | } |
523 | 523 | ||
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index c8405718a4c3..2d8a371d4339 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/bug.h> | 15 | #include <linux/bug.h> |
16 | #include <linux/nmi.h> | 16 | #include <linux/nmi.h> |
17 | #include <linux/sysfs.h> | 17 | #include <linux/sysfs.h> |
18 | #include <linux/ftrace.h> | ||
19 | 18 | ||
20 | #include <asm/stacktrace.h> | 19 | #include <asm/stacktrace.h> |
21 | 20 | ||
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 5cb5725b2bae..147005a1cc3c 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
@@ -115,7 +115,7 @@ static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size, | |||
115 | { | 115 | { |
116 | int x = e820x->nr_map; | 116 | int x = e820x->nr_map; |
117 | 117 | ||
118 | if (x == ARRAY_SIZE(e820x->map)) { | 118 | if (x >= ARRAY_SIZE(e820x->map)) { |
119 | printk(KERN_ERR "Ooops! Too many entries in the memory map!\n"); | 119 | printk(KERN_ERR "Ooops! Too many entries in the memory map!\n"); |
120 | return; | 120 | return; |
121 | } | 121 | } |
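
The e820 guard switches from == to >=, which also rejects an index that has somehow already run past the array instead of writing out of bounds. A toy sketch of why the stricter comparison matters; the size and starting index are fabricated:

#include <stdio.h>

#define MAP_SIZE 4

/* With '==', an nr_map of 5 would sail past the check and index
 * out of bounds; '>=' rejects it. */
static int add_entry(int *nr_map)
{
        if (*nr_map >= MAP_SIZE) {
                fprintf(stderr, "too many entries in the memory map\n");
                return -1;
        }
        return (*nr_map)++;
}

int main(void)
{
        int nr_map = 5;                 /* fabricated: already out of range */

        return add_entry(&nr_map) < 0 ? 0 : 1;
}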
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index cc827ac9e8d3..7ffec6b3b331 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -439,7 +439,6 @@ is386: movl $2,%ecx # set MP | |||
439 | jne 1f | 439 | jne 1f |
440 | movl $per_cpu__gdt_page,%eax | 440 | movl $per_cpu__gdt_page,%eax |
441 | movl $per_cpu__stack_canary,%ecx | 441 | movl $per_cpu__stack_canary,%ecx |
442 | subl $20, %ecx | ||
443 | movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) | 442 | movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) |
444 | shrl $16, %ecx | 443 | shrl $16, %ecx |
445 | movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) | 444 | movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 3b09634a5153..7d35d0fe2329 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -218,7 +218,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) | |||
218 | void fixup_irqs(void) | 218 | void fixup_irqs(void) |
219 | { | 219 | { |
220 | unsigned int irq; | 220 | unsigned int irq; |
221 | static int warned; | ||
222 | struct irq_desc *desc; | 221 | struct irq_desc *desc; |
223 | 222 | ||
224 | for_each_irq_desc(irq, desc) { | 223 | for_each_irq_desc(irq, desc) { |
@@ -236,8 +235,8 @@ void fixup_irqs(void) | |||
236 | } | 235 | } |
237 | if (desc->chip->set_affinity) | 236 | if (desc->chip->set_affinity) |
238 | desc->chip->set_affinity(irq, affinity); | 237 | desc->chip->set_affinity(irq, affinity); |
239 | else if (desc->action && !(warned++)) | 238 | else if (desc->action) |
240 | printk("Cannot set affinity for irq %i\n", irq); | 239 | printk_once("Cannot set affinity for irq %i\n", irq); |
241 | } | 240 | } |
242 | 241 | ||
243 | #if 0 | 242 | #if 0 |
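
printk_once() subsumes the hand-rolled `static int warned` flag removed above. A sketch of the pattern it packages up; print_once here is a hypothetical stand-in mirroring the intent of the macro in linux/kernel.h, not its exact definition:

#include <stdio.h>

/* One static flag per call site, flipped on first use. */
#define print_once(...) do {                    \
        static int __done;                      \
        if (!__done) {                          \
                __done = 1;                     \
                printf(__VA_ARGS__);            \
        }                                       \
} while (0)

int main(void)
{
        int i;

        for (i = 0; i < 3; i++)
                print_once("Cannot set affinity for irq %i\n", 16);
        return 0;                       /* message appears exactly once */
}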
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index c664d515f613..63b0ec8d3d4a 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -34,7 +34,6 @@ | |||
34 | struct kvm_para_state { | 34 | struct kvm_para_state { |
35 | u8 mmu_queue[MMU_QUEUE_SIZE]; | 35 | u8 mmu_queue[MMU_QUEUE_SIZE]; |
36 | int mmu_queue_len; | 36 | int mmu_queue_len; |
37 | enum paravirt_lazy_mode mode; | ||
38 | }; | 37 | }; |
39 | 38 | ||
40 | static DEFINE_PER_CPU(struct kvm_para_state, para_state); | 39 | static DEFINE_PER_CPU(struct kvm_para_state, para_state); |
@@ -77,7 +76,7 @@ static void kvm_deferred_mmu_op(void *buffer, int len) | |||
77 | { | 76 | { |
78 | struct kvm_para_state *state = kvm_para_state(); | 77 | struct kvm_para_state *state = kvm_para_state(); |
79 | 78 | ||
80 | if (state->mode != PARAVIRT_LAZY_MMU) { | 79 | if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) { |
81 | kvm_mmu_op(buffer, len); | 80 | kvm_mmu_op(buffer, len); |
82 | return; | 81 | return; |
83 | } | 82 | } |
@@ -185,10 +184,7 @@ static void kvm_release_pt(unsigned long pfn) | |||
185 | 184 | ||
186 | static void kvm_enter_lazy_mmu(void) | 185 | static void kvm_enter_lazy_mmu(void) |
187 | { | 186 | { |
188 | struct kvm_para_state *state = kvm_para_state(); | ||
189 | |||
190 | paravirt_enter_lazy_mmu(); | 187 | paravirt_enter_lazy_mmu(); |
191 | state->mode = paravirt_get_lazy_mode(); | ||
192 | } | 188 | } |
193 | 189 | ||
194 | static void kvm_leave_lazy_mmu(void) | 190 | static void kvm_leave_lazy_mmu(void) |
@@ -197,7 +193,6 @@ static void kvm_leave_lazy_mmu(void) | |||
197 | 193 | ||
198 | mmu_queue_flush(state); | 194 | mmu_queue_flush(state); |
199 | paravirt_leave_lazy_mmu(); | 195 | paravirt_leave_lazy_mmu(); |
200 | state->mode = paravirt_get_lazy_mode(); | ||
201 | } | 196 | } |
202 | 197 | ||
203 | static void __init paravirt_ops_setup(void) | 198 | static void __init paravirt_ops_setup(void) |
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 223af43f1526..e5efcdcca31b 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -50,8 +50,8 @@ static unsigned long kvm_get_wallclock(void) | |||
50 | struct timespec ts; | 50 | struct timespec ts; |
51 | int low, high; | 51 | int low, high; |
52 | 52 | ||
53 | low = (int)__pa(&wall_clock); | 53 | low = (int)__pa_symbol(&wall_clock); |
54 | high = ((u64)__pa(&wall_clock) >> 32); | 54 | high = ((u64)__pa_symbol(&wall_clock) >> 32); |
55 | native_write_msr(MSR_KVM_WALL_CLOCK, low, high); | 55 | native_write_msr(MSR_KVM_WALL_CLOCK, low, high); |
56 | 56 | ||
57 | vcpu_time = &get_cpu_var(hv_clock); | 57 | vcpu_time = &get_cpu_var(hv_clock); |
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 651c93b28862..fcd513bf2846 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -482,11 +482,11 @@ static void __init construct_ioapic_table(int mpc_default_type) | |||
482 | MP_bus_info(&bus); | 482 | MP_bus_info(&bus); |
483 | } | 483 | } |
484 | 484 | ||
485 | ioapic.type = MP_IOAPIC; | 485 | ioapic.type = MP_IOAPIC; |
486 | ioapic.apicid = 2; | 486 | ioapic.apicid = 2; |
487 | ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01; | 487 | ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01; |
488 | ioapic.flags = MPC_APIC_USABLE; | 488 | ioapic.flags = MPC_APIC_USABLE; |
489 | ioapic.apicaddr = 0xFEC00000; | 489 | ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE; |
490 | MP_ioapic_info(&ioapic); | 490 | MP_ioapic_info(&ioapic); |
491 | 491 | ||
492 | /* | 492 | /* |
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 98fd6cd4e3a4..7dd950094178 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* ----------------------------------------------------------------------- * | 1 | /* ----------------------------------------------------------------------- * |
2 | * | 2 | * |
3 | * Copyright 2000-2008 H. Peter Anvin - All Rights Reserved | 3 | * Copyright 2000-2008 H. Peter Anvin - All Rights Reserved |
4 | * Copyright 2009 Intel Corporation; author: H. Peter Anvin | ||
4 | * | 5 | * |
5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
@@ -80,11 +81,8 @@ static ssize_t msr_read(struct file *file, char __user *buf, | |||
80 | 81 | ||
81 | for (; count; count -= 8) { | 82 | for (; count; count -= 8) { |
82 | err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]); | 83 | err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]); |
83 | if (err) { | 84 | if (err) |
84 | if (err == -EFAULT) /* Fix idiotic error code */ | ||
85 | err = -EIO; | ||
86 | break; | 85 | break; |
87 | } | ||
88 | if (copy_to_user(tmp, &data, 8)) { | 86 | if (copy_to_user(tmp, &data, 8)) { |
89 | err = -EFAULT; | 87 | err = -EFAULT; |
90 | break; | 88 | break; |
@@ -115,11 +113,8 @@ static ssize_t msr_write(struct file *file, const char __user *buf, | |||
115 | break; | 113 | break; |
116 | } | 114 | } |
117 | err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]); | 115 | err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]); |
118 | if (err) { | 116 | if (err) |
119 | if (err == -EFAULT) /* Fix idiotic error code */ | ||
120 | err = -EIO; | ||
121 | break; | 117 | break; |
122 | } | ||
123 | tmp += 2; | 118 | tmp += 2; |
124 | bytes += 8; | 119 | bytes += 8; |
125 | } | 120 | } |
@@ -127,6 +122,54 @@ static ssize_t msr_write(struct file *file, const char __user *buf, | |||
127 | return bytes ? bytes : err; | 122 | return bytes ? bytes : err; |
128 | } | 123 | } |
129 | 124 | ||
125 | static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg) | ||
126 | { | ||
127 | u32 __user *uregs = (u32 __user *)arg; | ||
128 | u32 regs[8]; | ||
129 | int cpu = iminor(file->f_path.dentry->d_inode); | ||
130 | int err; | ||
131 | |||
132 | switch (ioc) { | ||
133 | case X86_IOC_RDMSR_REGS: | ||
134 | if (!(file->f_mode & FMODE_READ)) { | ||
135 | err = -EBADF; | ||
136 | break; | ||
137 | } | ||
138 | if (copy_from_user(®s, uregs, sizeof regs)) { | ||
139 | err = -EFAULT; | ||
140 | break; | ||
141 | } | ||
142 | err = rdmsr_safe_regs_on_cpu(cpu, regs); | ||
143 | if (err) | ||
144 | break; | ||
145 | if (copy_to_user(uregs, ®s, sizeof regs)) | ||
146 | err = -EFAULT; | ||
147 | break; | ||
148 | |||
149 | case X86_IOC_WRMSR_REGS: | ||
150 | if (!(file->f_mode & FMODE_WRITE)) { | ||
151 | err = -EBADF; | ||
152 | break; | ||
153 | } | ||
154 | if (copy_from_user(®s, uregs, sizeof regs)) { | ||
155 | err = -EFAULT; | ||
156 | break; | ||
157 | } | ||
158 | err = wrmsr_safe_regs_on_cpu(cpu, regs); | ||
159 | if (err) | ||
160 | break; | ||
161 | if (copy_to_user(uregs, ®s, sizeof regs)) | ||
162 | err = -EFAULT; | ||
163 | break; | ||
164 | |||
165 | default: | ||
166 | err = -ENOTTY; | ||
167 | break; | ||
168 | } | ||
169 | |||
170 | return err; | ||
171 | } | ||
172 | |||
130 | static int msr_open(struct inode *inode, struct file *file) | 173 | static int msr_open(struct inode *inode, struct file *file) |
131 | { | 174 | { |
132 | unsigned int cpu = iminor(file->f_path.dentry->d_inode); | 175 | unsigned int cpu = iminor(file->f_path.dentry->d_inode); |
@@ -157,6 +200,8 @@ static const struct file_operations msr_fops = { | |||
157 | .read = msr_read, | 200 | .read = msr_read, |
158 | .write = msr_write, | 201 | .write = msr_write, |
159 | .open = msr_open, | 202 | .open = msr_open, |
203 | .unlocked_ioctl = msr_ioctl, | ||
204 | .compat_ioctl = msr_ioctl, | ||
160 | }; | 205 | }; |
161 | 206 | ||
162 | static int __cpuinit msr_device_create(int cpu) | 207 | static int __cpuinit msr_device_create(int cpu) |
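
The new ioctls let user space run rdmsr/wrmsr with a full register image, for MSRs that consume or produce registers beyond ecx/edx:eax. A hedged user-space sketch: the ioctl number mirrors the X86_IOC_RDMSR_REGS definition this series adds to asm/msr.h, and the regs[] slots are assumed to follow the eax, ecx, edx, ... machine encoding order; verify against msr-reg.S before relying on either:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Assumed to match the definition added to asm/msr.h. */
#define X86_IOC_RDMSR_REGS      _IOWR('c', 0xA0, uint32_t[8])

int main(void)
{
        uint32_t regs[8] = { 0 };
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0)
                return 1;
        regs[1] = 0x10;         /* assumed: slot 1 is ecx, the MSR number (TSC) */
        if (ioctl(fd, X86_IOC_RDMSR_REGS, regs) == 0)
                printf("edx:eax = %08x:%08x\n", regs[2], regs[0]);
        close(fd);
        return 0;
}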
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 70ec9b951d76..f5b0b4a01fb2 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -362,8 +362,9 @@ struct pv_cpu_ops pv_cpu_ops = { | |||
362 | #endif | 362 | #endif |
363 | .wbinvd = native_wbinvd, | 363 | .wbinvd = native_wbinvd, |
364 | .read_msr = native_read_msr_safe, | 364 | .read_msr = native_read_msr_safe, |
365 | .read_msr_amd = native_read_msr_amd_safe, | 365 | .rdmsr_regs = native_rdmsr_safe_regs, |
366 | .write_msr = native_write_msr_safe, | 366 | .write_msr = native_write_msr_safe, |
367 | .wrmsr_regs = native_wrmsr_safe_regs, | ||
367 | .read_tsc = native_read_tsc, | 368 | .read_tsc = native_read_tsc, |
368 | .read_pmc = native_read_pmc, | 369 | .read_pmc = native_read_pmc, |
369 | .read_tscp = native_read_tscp, | 370 | .read_tscp = native_read_tscp, |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 1a041bcf506b..d71c8655905b 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/dmar.h> | 3 | #include <linux/dmar.h> |
4 | #include <linux/bootmem.h> | 4 | #include <linux/bootmem.h> |
5 | #include <linux/pci.h> | 5 | #include <linux/pci.h> |
6 | #include <linux/kmemleak.h> | ||
6 | 7 | ||
7 | #include <asm/proto.h> | 8 | #include <asm/proto.h> |
8 | #include <asm/dma.h> | 9 | #include <asm/dma.h> |
@@ -32,7 +33,14 @@ int no_iommu __read_mostly; | |||
32 | /* Set this to 1 if there is a HW IOMMU in the system */ | 33 | /* Set this to 1 if there is a HW IOMMU in the system */ |
33 | int iommu_detected __read_mostly = 0; | 34 | int iommu_detected __read_mostly = 0; |
34 | 35 | ||
35 | int iommu_pass_through; | 36 | /* |
37 | * This variable becomes 1 if iommu=pt is passed on the kernel command line. | ||
38 | * If this variable is 1, IOMMU implementations do no DMA translation for | ||
39 | * devices and allow every device to access the whole physical memory. This is | ||
40 | * useful if a user wants to use an IOMMU only for KVM device assignment to | ||
41 | * guests and not for driver DMA translation. | ||
42 | */ | ||
43 | int iommu_pass_through __read_mostly; | ||
36 | 44 | ||
37 | dma_addr_t bad_dma_address __read_mostly = 0; | 45 | dma_addr_t bad_dma_address __read_mostly = 0; |
38 | EXPORT_SYMBOL(bad_dma_address); | 46 | EXPORT_SYMBOL(bad_dma_address); |
@@ -88,6 +96,11 @@ void __init dma32_reserve_bootmem(void) | |||
88 | size = roundup(dma32_bootmem_size, align); | 96 | size = roundup(dma32_bootmem_size, align); |
89 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, | 97 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, |
90 | 512ULL<<20); | 98 | 512ULL<<20); |
99 | /* | ||
100 | * Kmemleak should not scan this block as it may not be mapped via the | ||
101 | * kernel direct mapping. | ||
102 | */ | ||
103 | kmemleak_ignore(dma32_bootmem_ptr); | ||
91 | if (dma32_bootmem_ptr) | 104 | if (dma32_bootmem_ptr) |
92 | dma32_bootmem_size = size; | 105 | dma32_bootmem_size = size; |
93 | else | 106 | else |
@@ -147,7 +160,7 @@ again: | |||
147 | return NULL; | 160 | return NULL; |
148 | 161 | ||
149 | addr = page_to_phys(page); | 162 | addr = page_to_phys(page); |
150 | if (!is_buffer_dma_capable(dma_mask, addr, size)) { | 163 | if (addr + size > dma_mask) { |
151 | __free_pages(page, get_order(size)); | 164 | __free_pages(page, get_order(size)); |
152 | 165 | ||
153 | if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) { | 166 | if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) { |
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index d2e56b8f48e7..98a827ee9ed7 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -190,14 +190,13 @@ static void iommu_full(struct device *dev, size_t size, int dir) | |||
190 | static inline int | 190 | static inline int |
191 | need_iommu(struct device *dev, unsigned long addr, size_t size) | 191 | need_iommu(struct device *dev, unsigned long addr, size_t size) |
192 | { | 192 | { |
193 | return force_iommu || | 193 | return force_iommu || !dma_capable(dev, addr, size); |
194 | !is_buffer_dma_capable(*dev->dma_mask, addr, size); | ||
195 | } | 194 | } |
196 | 195 | ||
197 | static inline int | 196 | static inline int |
198 | nonforced_iommu(struct device *dev, unsigned long addr, size_t size) | 197 | nonforced_iommu(struct device *dev, unsigned long addr, size_t size) |
199 | { | 198 | { |
200 | return !is_buffer_dma_capable(*dev->dma_mask, addr, size); | 199 | return !dma_capable(dev, addr, size); |
201 | } | 200 | } |
202 | 201 | ||
203 | /* Map a single continuous physical area into the IOMMU. | 202 | /* Map a single continuous physical area into the IOMMU. |
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 71d412a09f30..a3933d4330cd 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c | |||
@@ -14,7 +14,7 @@ | |||
14 | static int | 14 | static int |
15 | check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) | 15 | check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) |
16 | { | 16 | { |
17 | if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) { | 17 | if (hwdev && !dma_capable(hwdev, bus, size)) { |
18 | if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) | 18 | if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) |
19 | printk(KERN_ERR | 19 | printk(KERN_ERR |
20 | "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", | 20 | "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", |
@@ -79,12 +79,29 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr, | |||
79 | free_pages((unsigned long)vaddr, get_order(size)); | 79 | free_pages((unsigned long)vaddr, get_order(size)); |
80 | } | 80 | } |
81 | 81 | ||
82 | static void nommu_sync_single_for_device(struct device *dev, | ||
83 | dma_addr_t addr, size_t size, | ||
84 | enum dma_data_direction dir) | ||
85 | { | ||
86 | flush_write_buffers(); | ||
87 | } | ||
88 | |||
89 | |||
90 | static void nommu_sync_sg_for_device(struct device *dev, | ||
91 | struct scatterlist *sg, int nelems, | ||
92 | enum dma_data_direction dir) | ||
93 | { | ||
94 | flush_write_buffers(); | ||
95 | } | ||
96 | |||
82 | struct dma_map_ops nommu_dma_ops = { | 97 | struct dma_map_ops nommu_dma_ops = { |
83 | .alloc_coherent = dma_generic_alloc_coherent, | 98 | .alloc_coherent = dma_generic_alloc_coherent, |
84 | .free_coherent = nommu_free_coherent, | 99 | .free_coherent = nommu_free_coherent, |
85 | .map_sg = nommu_map_sg, | 100 | .map_sg = nommu_map_sg, |
86 | .map_page = nommu_map_page, | 101 | .map_page = nommu_map_page, |
87 | .is_phys = 1, | 102 | .sync_single_for_device = nommu_sync_single_for_device, |
103 | .sync_sg_for_device = nommu_sync_sg_for_device, | ||
104 | .is_phys = 1, | ||
88 | }; | 105 | }; |
89 | 106 | ||
90 | void __init no_iommu_init(void) | 107 | void __init no_iommu_init(void) |
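
Across pci-dma.c, pci-gart_64.c and pci-nommu.c, is_buffer_dma_capable(*dev->dma_mask, addr, size) gives way to dma_capable(dev, addr, size), which folds the mask dereference into the helper; the predicate itself stays "does [addr, addr + size) fit under the device's DMA mask", per the open-coded addr + size > dma_mask test in the pci-dma.c hunk. A minimal sketch with a trimmed-down struct device and fabricated addresses:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct device { uint64_t dma_mask; };   /* trimmed-down stand-in */

/* The whole buffer must sit below the device's addressing limit. */
static bool dma_capable(struct device *dev, uint64_t addr, size_t size)
{
        return addr + size <= dev->dma_mask;
}

int main(void)
{
        struct device dev = { .dma_mask = 0xffffffffULL };      /* 32-bit device */

        printf("%d\n", dma_capable(&dev, 0xffffe000ULL, 0x1000));      /* 1 */
        printf("%d\n", dma_capable(&dev, 0x100000000ULL, 0x1000));     /* 0 */
        return 0;
}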
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 6af96ee44200..e8a35016115f 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c | |||
@@ -13,31 +13,6 @@ | |||
13 | 13 | ||
14 | int swiotlb __read_mostly; | 14 | int swiotlb __read_mostly; |
15 | 15 | ||
16 | void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs) | ||
17 | { | ||
18 | return alloc_bootmem_low_pages(size); | ||
19 | } | ||
20 | |||
21 | void *swiotlb_alloc(unsigned order, unsigned long nslabs) | ||
22 | { | ||
23 | return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order); | ||
24 | } | ||
25 | |||
26 | dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) | ||
27 | { | ||
28 | return paddr; | ||
29 | } | ||
30 | |||
31 | phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr) | ||
32 | { | ||
33 | return baddr; | ||
34 | } | ||
35 | |||
36 | int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) | ||
37 | { | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 16 | static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
42 | dma_addr_t *dma_handle, gfp_t flags) | 17 | dma_addr_t *dma_handle, gfp_t flags) |
43 | { | 18 | { |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 00a8fe4c58bb..209e74801763 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -63,9 +63,6 @@ | |||
63 | 63 | ||
64 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); | 64 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); |
65 | 65 | ||
66 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; | ||
67 | EXPORT_PER_CPU_SYMBOL(current_task); | ||
68 | |||
69 | /* | 66 | /* |
70 | * Return saved PC of a blocked thread. | 67 | * Return saved PC of a blocked thread. |
71 | */ | 68 | */ |
@@ -361,14 +358,21 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
361 | *next = &next_p->thread; | 358 | *next = &next_p->thread; |
362 | int cpu = smp_processor_id(); | 359 | int cpu = smp_processor_id(); |
363 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | 360 | struct tss_struct *tss = &per_cpu(init_tss, cpu); |
361 | bool preload_fpu; | ||
364 | 362 | ||
365 | /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ | 363 | /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ |
366 | 364 | ||
367 | __unlazy_fpu(prev_p); | 365 | /* |
366 | * If the task has used fpu the last 5 timeslices, just do a full | ||
367 | * restore of the math state immediately to avoid the trap; the | ||
368 | * chances of needing FPU soon are obviously high now | ||
369 | */ | ||
370 | preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5; | ||
368 | 371 | ||
372 | __unlazy_fpu(prev_p); | ||
369 | 373 | ||
370 | /* we're going to use this soon, after a few expensive things */ | 374 | /* we're going to use this soon, after a few expensive things */ |
371 | if (next_p->fpu_counter > 5) | 375 | if (preload_fpu) |
372 | prefetch(next->xstate); | 376 | prefetch(next->xstate); |
373 | 377 | ||
374 | /* | 378 | /* |
@@ -409,6 +413,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
409 | task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) | 413 | task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) |
410 | __switch_to_xtra(prev_p, next_p, tss); | 414 | __switch_to_xtra(prev_p, next_p, tss); |
411 | 415 | ||
416 | /* If we're going to preload the fpu context, make sure clts | ||
417 | is run while we're batching the cpu state updates. */ | ||
418 | if (preload_fpu) | ||
419 | clts(); | ||
420 | |||
412 | /* | 421 | /* |
413 | * Leave lazy mode, flushing any hypercalls made here. | 422 | * Leave lazy mode, flushing any hypercalls made here. |
414 | * This must be done before restoring TLS segments so | 423 | * This must be done before restoring TLS segments so |
@@ -418,15 +427,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
418 | */ | 427 | */ |
419 | arch_end_context_switch(next_p); | 428 | arch_end_context_switch(next_p); |
420 | 429 | ||
421 | /* If the task has used fpu the last 5 timeslices, just do a full | 430 | if (preload_fpu) |
422 | * restore of the math state immediately to avoid the trap; the | 431 | __math_state_restore(); |
423 | * chances of needing FPU soon are obviously high now | ||
424 | * | ||
425 | * tsk_used_math() checks prevent calling math_state_restore(), | ||
426 | * which can sleep in the case of !tsk_used_math() | ||
427 | */ | ||
428 | if (tsk_used_math(next_p) && next_p->fpu_counter > 5) | ||
429 | math_state_restore(); | ||
430 | 432 | ||
431 | /* | 433 | /* |
432 | * Restore %gs if needed (which is common) | 434 | * Restore %gs if needed (which is common) |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 89c46f1259d3..72edac026a78 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -57,9 +57,6 @@ | |||
57 | 57 | ||
58 | asmlinkage extern void ret_from_fork(void); | 58 | asmlinkage extern void ret_from_fork(void); |
59 | 59 | ||
60 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; | ||
61 | EXPORT_PER_CPU_SYMBOL(current_task); | ||
62 | |||
63 | DEFINE_PER_CPU(unsigned long, old_rsp); | 60 | DEFINE_PER_CPU(unsigned long, old_rsp); |
64 | static DEFINE_PER_CPU(unsigned char, is_idle); | 61 | static DEFINE_PER_CPU(unsigned char, is_idle); |
65 | 62 | ||
@@ -399,9 +396,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
399 | int cpu = smp_processor_id(); | 396 | int cpu = smp_processor_id(); |
400 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | 397 | struct tss_struct *tss = &per_cpu(init_tss, cpu); |
401 | unsigned fsindex, gsindex; | 398 | unsigned fsindex, gsindex; |
399 | bool preload_fpu; | ||
400 | |||
401 | /* | ||
402 | * If the task has used fpu the last 5 timeslices, just do a full | ||
403 | * restore of the math state immediately to avoid the trap; the | ||
404 | * chances of needing FPU soon are obviously high now | ||
405 | */ | ||
406 | preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5; | ||
402 | 407 | ||
403 | /* we're going to use this soon, after a few expensive things */ | 408 | /* we're going to use this soon, after a few expensive things */ |
404 | if (next_p->fpu_counter > 5) | 409 | if (preload_fpu) |
405 | prefetch(next->xstate); | 410 | prefetch(next->xstate); |
406 | 411 | ||
407 | /* | 412 | /* |
@@ -432,6 +437,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
432 | 437 | ||
433 | load_TLS(next, cpu); | 438 | load_TLS(next, cpu); |
434 | 439 | ||
440 | /* Must be after DS reload */ | ||
441 | unlazy_fpu(prev_p); | ||
442 | |||
443 | /* Make sure cpu is ready for new context */ | ||
444 | if (preload_fpu) | ||
445 | clts(); | ||
446 | |||
435 | /* | 447 | /* |
436 | * Leave lazy mode, flushing any hypercalls made here. | 448 | * Leave lazy mode, flushing any hypercalls made here. |
437 | * This must be done before restoring TLS segments so | 449 | * This must be done before restoring TLS segments so |
@@ -472,9 +484,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
472 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); | 484 | wrmsrl(MSR_KERNEL_GS_BASE, next->gs); |
473 | prev->gsindex = gsindex; | 485 | prev->gsindex = gsindex; |
474 | 486 | ||
475 | /* Must be after DS reload */ | ||
476 | unlazy_fpu(prev_p); | ||
477 | |||
478 | /* | 487 | /* |
479 | * Switch the PDA and FPU contexts. | 488 | * Switch the PDA and FPU contexts. |
480 | */ | 489 | */ |
@@ -493,15 +502,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
493 | task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) | 502 | task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) |
494 | __switch_to_xtra(prev_p, next_p, tss); | 503 | __switch_to_xtra(prev_p, next_p, tss); |
495 | 504 | ||
496 | /* If the task has used fpu the last 5 timeslices, just do a full | 505 | /* |
497 | * restore of the math state immediately to avoid the trap; the | 506 | * Preload the FPU context, now that we've determined that the |
498 | * chances of needing FPU soon are obviously high now | 507 | * task is likely to be using it. |
499 | * | ||
500 | * tsk_used_math() checks prevent calling math_state_restore(), | ||
501 | * which can sleep in the case of !tsk_used_math() | ||
502 | */ | 508 | */ |
503 | if (tsk_used_math(next_p) && next_p->fpu_counter > 5) | 509 | if (preload_fpu) |
504 | math_state_restore(); | 510 | __math_state_restore(); |
505 | /* | 511 | /* |
506 | * There's a problem with moving the arch_install_thread_hw_breakpoint() | 512 | * There's a problem with moving the arch_install_thread_hw_breakpoint() |
507 | * call before current is updated. Suppose a kernel breakpoint is | 513 | * call before current is updated. Suppose a kernel breakpoint is |
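
Both __switch_to() variants now decide up front whether the incoming task is a heavy FPU user (tsk_used_math() && fpu_counter > 5), run clts() while other CPU state updates are still being batched, and finish with __math_state_restore(), which the traps.c hunk below defines to assume TS is already clear. A condensed, stubbed sketch of the new ordering; the helper names come from the hunks, the bodies are stand-ins:

#include <stdbool.h>
#include <stdio.h>

static int fpu_counter = 6;                                     /* stub */
static bool tsk_used_math(void)         { return true; }        /* stub */
static void unlazy_fpu(void)            { puts("save prev FPU state"); }
static void clts(void)                  { puts("clear CR0.TS"); }
static void arch_end_context_switch(void) { puts("flush lazy hypercalls"); }
static void __math_state_restore(void)  { puts("restore next FPU, no trap"); }

int main(void)
{
        /* 1. Decide early, before any state is touched. */
        bool preload_fpu = tsk_used_math() && fpu_counter > 5;

        unlazy_fpu();                   /* 2. save the outgoing task */
        if (preload_fpu)
                clts();                 /* 3. batched with other updates */
        arch_end_context_switch();      /* 4. leave lazy mode */
        if (preload_fpu)
                __math_state_restore(); /* 5. TS already clear: no DNA trap */
        return 0;
}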
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 0f89a4f20db2..baaf8052f355 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -860,6 +860,8 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) | |||
860 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 860 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
861 | clear_thread_flag(TIF_NOTIFY_RESUME); | 861 | clear_thread_flag(TIF_NOTIFY_RESUME); |
862 | tracehook_notify_resume(regs); | 862 | tracehook_notify_resume(regs); |
863 | if (current->replacement_session_keyring) | ||
864 | key_replace_session_keyring(); | ||
863 | } | 865 | } |
864 | 866 | ||
865 | #ifdef CONFIG_X86_32 | 867 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index dee0f3d814af..ec7b64c2df82 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -436,7 +436,8 @@ const struct cpumask *cpu_coregroup_mask(int cpu) | |||
436 | * For perf, we return last level cache shared map. | 436 | * For perf, we return last level cache shared map. |
437 | * And for power savings, we return cpu_core_map | 437 | * And for power savings, we return cpu_core_map |
438 | */ | 438 | */ |
439 | if (sched_mc_power_savings || sched_smt_power_savings) | 439 | if ((sched_mc_power_savings || sched_smt_power_savings) && |
440 | !(cpu_has(c, X86_FEATURE_AMD_DCM))) | ||
440 | return cpu_core_mask(cpu); | 441 | return cpu_core_mask(cpu); |
441 | else | 442 | else |
442 | return c->llc_shared_map; | 443 | return c->llc_shared_map; |
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index e8b9863ef8c4..3149032ff107 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
5 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
6 | #include <linux/ptrace.h> | 6 | #include <linux/ptrace.h> |
7 | #include <asm/desc.h> | ||
7 | 8 | ||
8 | unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) | 9 | unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) |
9 | { | 10 | { |
@@ -23,7 +24,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re | |||
23 | * and APM bios ones we just ignore here. | 24 | * and APM bios ones we just ignore here. |
24 | */ | 25 | */ |
25 | if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) { | 26 | if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) { |
26 | u32 *desc; | 27 | struct desc_struct *desc; |
27 | unsigned long base; | 28 | unsigned long base; |
28 | 29 | ||
29 | seg &= ~7UL; | 30 | seg &= ~7UL; |
@@ -33,12 +34,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re | |||
33 | addr = -1L; /* bogus selector, access would fault */ | 34 | addr = -1L; /* bogus selector, access would fault */ |
34 | else { | 35 | else { |
35 | desc = child->mm->context.ldt + seg; | 36 | desc = child->mm->context.ldt + seg; |
36 | base = ((desc[0] >> 16) | | 37 | base = get_desc_base(desc); |
37 | ((desc[1] & 0xff) << 16) | | ||
38 | (desc[1] & 0xff000000)); | ||
39 | 38 | ||
40 | /* 16-bit code segment? */ | 39 | /* 16-bit code segment? */ |
41 | if (!((desc[1] >> 22) & 1)) | 40 | if (!desc->d) |
42 | addr &= 0xffff; | 41 | addr &= 0xffff; |
43 | addr += base; | 42 | addr += base; |
44 | } | 43 | } |
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 77b9689f8edb..503c1f2e8835 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c | |||
@@ -640,13 +640,13 @@ static int __init uv_ptc_init(void) | |||
640 | if (!is_uv_system()) | 640 | if (!is_uv_system()) |
641 | return 0; | 641 | return 0; |
642 | 642 | ||
643 | proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL); | 643 | proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL, |
644 | &proc_uv_ptc_operations); | ||
644 | if (!proc_uv_ptc) { | 645 | if (!proc_uv_ptc) { |
645 | printk(KERN_ERR "unable to create %s proc entry\n", | 646 | printk(KERN_ERR "unable to create %s proc entry\n", |
646 | UV_PTC_BASENAME); | 647 | UV_PTC_BASENAME); |
647 | return -EINVAL; | 648 | return -EINVAL; |
648 | } | 649 | } |
649 | proc_uv_ptc->proc_fops = &proc_uv_ptc_operations; | ||
650 | return 0; | 650 | return 0; |
651 | } | 651 | } |
652 | 652 | ||
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ae04589a579b..49a401b1d4d7 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -76,7 +76,7 @@ char ignore_fpu_irq; | |||
76 | * F0 0F bug workaround.. We have a special link segment | 76 | * F0 0F bug workaround.. We have a special link segment |
77 | * for this. | 77 | * for this. |
78 | */ | 78 | */ |
79 | gate_desc idt_table[256] | 79 | gate_desc idt_table[NR_VECTORS] |
80 | __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; | 80 | __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; |
81 | #endif | 81 | #endif |
82 | 82 | ||
@@ -765,33 +765,34 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) | |||
765 | #endif | 765 | #endif |
766 | } | 766 | } |
767 | 767 | ||
768 | #ifdef CONFIG_X86_32 | 768 | asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) |
769 | unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp) | ||
770 | { | 769 | { |
771 | struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id()); | ||
772 | unsigned long base = (kesp - uesp) & -THREAD_SIZE; | ||
773 | unsigned long new_kesp = kesp - base; | ||
774 | unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT; | ||
775 | __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS]; | ||
776 | |||
777 | /* Set up base for espfix segment */ | ||
778 | desc &= 0x00f0ff0000000000ULL; | ||
779 | desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) | | ||
780 | ((((__u64)base) << 32) & 0xff00000000000000ULL) | | ||
781 | ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) | | ||
782 | (lim_pages & 0xffff); | ||
783 | *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc; | ||
784 | |||
785 | return new_kesp; | ||
786 | } | 770 | } |
787 | #endif | ||
788 | 771 | ||
789 | asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) | 772 | asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) |
790 | { | 773 | { |
791 | } | 774 | } |
792 | 775 | ||
793 | asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) | 776 | /* |
777 | * __math_state_restore assumes that cr0.TS is already clear and the | ||
778 | * fpu state is all ready for use. Used during context switch. | ||
779 | */ | ||
780 | void __math_state_restore(void) | ||
794 | { | 781 | { |
782 | struct thread_info *thread = current_thread_info(); | ||
783 | struct task_struct *tsk = thread->task; | ||
784 | |||
785 | /* | ||
786 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. | ||
787 | */ | ||
788 | if (unlikely(restore_fpu_checking(tsk))) { | ||
789 | stts(); | ||
790 | force_sig(SIGSEGV, tsk); | ||
791 | return; | ||
792 | } | ||
793 | |||
794 | thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */ | ||
795 | tsk->fpu_counter++; | ||
795 | } | 796 | } |
796 | 797 | ||
797 | /* | 798 | /* |
@@ -825,17 +826,8 @@ asmlinkage void math_state_restore(void) | |||
825 | } | 826 | } |
826 | 827 | ||
827 | clts(); /* Allow maths ops (or we recurse) */ | 828 | clts(); /* Allow maths ops (or we recurse) */ |
828 | /* | ||
829 | * Paranoid restore. send a SIGSEGV if we fail to restore the state. | ||
830 | */ | ||
831 | if (unlikely(restore_fpu_checking(tsk))) { | ||
832 | stts(); | ||
833 | force_sig(SIGSEGV, tsk); | ||
834 | return; | ||
835 | } | ||
836 | 829 | ||
837 | thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */ | 830 | __math_state_restore(); |
838 | tsk->fpu_counter++; | ||
839 | } | 831 | } |
840 | EXPORT_SYMBOL_GPL(math_state_restore); | 832 | EXPORT_SYMBOL_GPL(math_state_restore); |
841 | 833 | ||