author		H. Peter Anvin <hpa@zytor.com>		2009-09-17 17:40:19 -0400
committer	H. Peter Anvin <hpa@zytor.com>		2009-09-17 17:40:49 -0400
commit		3bb045f1e2e51124200ef043256df4c7ad86bebd (patch)
tree		78e4150fc7108bef4d8fa183d7f7dc00beb73a54
parent		80938332d8cf652f6b16e0788cf0ca136befe0b5 (diff)
parent		dcb73bf402e0d5b28ce925dbbe4dab3b00b21eee (diff)
Merge branch 'x86/pat' into x86/urgent

Merge reason:

    Suresh Siddha (1):
          x86, pat: don't use rb-tree based lookup in reserve_memtype()

    ... requires previous x86/pat commits already pushed to Linus.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>

44 files changed, 1523 insertions(+), 1150 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 170042b420d4..e6246119932a 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -112,6 +112,10 @@ config IA64_UNCACHED_ALLOCATOR
 	bool
 	select GENERIC_ALLOCATOR
 
+config ARCH_USES_PG_UNCACHED
+	def_bool y
+	depends on IA64_UNCACHED_ALLOCATOR
+
 config AUDIT_ARCH
 	bool
 	default y
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 13ffa5df37d7..c1e588131f4a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1414,6 +1414,10 @@ config X86_PAT
 
 	  If unsure, say Y.
 
+config ARCH_USES_PG_UNCACHED
+	def_bool y
+	depends on X86_PAT
+
 config EFI
 	bool "EFI runtime service support"
 	depends on ACPI
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index e55dfc1ad453..b54f6afe7ec4 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -43,8 +43,58 @@ static inline void copy_from_user_page(struct vm_area_struct *vma,
 	memcpy(dst, src, len);
 }
 
-#define PG_non_WB				PG_arch_1
-PAGEFLAG(NonWB, non_WB)
+#define PG_WC				PG_arch_1
+PAGEFLAG(WC, WC)
+
+#ifdef CONFIG_X86_PAT
+/*
+ * X86 PAT uses page flags WC and Uncached together to keep track of
+ * memory type of pages that have backing page struct. X86 PAT supports 3
+ * different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
+ * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
+ * been changed from its default (value of -1 used to denote this).
+ * Note we do not support _PAGE_CACHE_UC here.
+ *
+ * Caller must hold memtype_lock for atomicity.
+ */
+static inline unsigned long get_page_memtype(struct page *pg)
+{
+	if (!PageUncached(pg) && !PageWC(pg))
+		return -1;
+	else if (!PageUncached(pg) && PageWC(pg))
+		return _PAGE_CACHE_WC;
+	else if (PageUncached(pg) && !PageWC(pg))
+		return _PAGE_CACHE_UC_MINUS;
+	else
+		return _PAGE_CACHE_WB;
+}
+
+static inline void set_page_memtype(struct page *pg, unsigned long memtype)
+{
+	switch (memtype) {
+	case _PAGE_CACHE_WC:
+		ClearPageUncached(pg);
+		SetPageWC(pg);
+		break;
+	case _PAGE_CACHE_UC_MINUS:
+		SetPageUncached(pg);
+		ClearPageWC(pg);
+		break;
+	case _PAGE_CACHE_WB:
+		SetPageUncached(pg);
+		SetPageWC(pg);
+		break;
+	default:
+	case -1:
+		ClearPageUncached(pg);
+		ClearPageWC(pg);
+		break;
+	}
+}
+#else
+static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
+static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
+#endif
 
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
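Illustration, not part of the patch: the two page flags above form a 2-bit field with four memtype states. The following self-contained userspace sketch (the flag bits, constant names, and helpers are stand-ins invented here, not the kernel's definitions) mirrors the get_page_memtype()/set_page_memtype() logic:

/*
 * Userspace model of the PG_uncached/PG_WC encoding above. The flag
 * bits and memtype names are illustrative stand-ins, not the kernel's
 * real definitions.
 */
#include <assert.h>
#include <stdio.h>

#define FLAG_UNCACHED	(1u << 0)	/* models PG_uncached */
#define FLAG_WC		(1u << 1)	/* models PG_arch_1 used as PG_WC */

enum memtype { MT_DEFAULT = -1, MT_WB, MT_WC, MT_UC_MINUS };

static enum memtype get_memtype(unsigned int flags)
{
	if (!(flags & FLAG_UNCACHED) && !(flags & FLAG_WC))
		return MT_DEFAULT;	/* never set: default state */
	else if (!(flags & FLAG_UNCACHED) && (flags & FLAG_WC))
		return MT_WC;
	else if ((flags & FLAG_UNCACHED) && !(flags & FLAG_WC))
		return MT_UC_MINUS;
	else
		return MT_WB;		/* both flags set */
}

static void set_memtype(unsigned int *flags, enum memtype mt)
{
	switch (mt) {
	case MT_WC:		*flags = FLAG_WC; break;
	case MT_UC_MINUS:	*flags = FLAG_UNCACHED; break;
	case MT_WB:		*flags = FLAG_UNCACHED | FLAG_WC; break;
	default:		*flags = 0; break;	/* MT_DEFAULT */
	}
}

int main(void)
{
	enum memtype all[] = { MT_DEFAULT, MT_WB, MT_WC, MT_UC_MINUS };
	unsigned int flags = 0;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		set_memtype(&flags, all[i]);
		assert(get_memtype(flags) == all[i]);	/* round-trips */
	}
	puts("all four memtype states round-trip through two flags");
	return 0;
}

Note that the default state is "both flags clear", so freshly initialized pages report -1 (no memtype reserved) without any extra storage.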
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index 0e9fe1d9d971..f35eb45d6576 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -26,13 +26,16 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-int
-is_io_mapping_possible(resource_size_t base, unsigned long size);
-
 void *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
 void
 iounmap_atomic(void *kvaddr, enum km_type type);
 
+int
+iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
+
+void
+iomap_free(resource_size_t base, unsigned long size);
+
 #endif /* _ASM_X86_IOMAP_H */
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index a51ada8467de..4365ffdb461f 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -121,6 +121,9 @@ extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
 extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
 extern void mtrr_ap_init(void);
 extern void mtrr_bp_init(void);
+extern void set_mtrr_aps_delayed_init(void);
+extern void mtrr_aps_init(void);
+extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
 # else
@@ -161,6 +164,9 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 
 #define mtrr_ap_init() do {} while (0)
 #define mtrr_bp_init() do {} while (0)
+#define set_mtrr_aps_delayed_init() do {} while (0)
+#define mtrr_aps_init() do {} while (0)
+#define mtrr_bp_restore() do {} while (0)
 # endif
 
 #ifdef CONFIG_COMPAT
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 7af14e512f97..e2c1668dde7a 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -19,4 +19,9 @@ extern int free_memtype(u64 start, u64 end);
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
 		unsigned long flag);
 
+int io_reserve_memtype(resource_size_t start, resource_size_t end,
+			unsigned long *type);
+
+void io_free_memtype(resource_size_t start, resource_size_t end);
+
 #endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 6ef00ba4c886..08385e090a6f 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -153,7 +153,7 @@ int safe_smp_processor_id(void)
 {
 	int apicid, cpuid;
 
-	if (!boot_cpu_has(X86_FEATURE_APIC))
+	if (!cpu_has_apic)
 		return 0;
 
 	apicid = hard_smp_processor_id();
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 63fddcd082cd..83b217c7225f 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -2,7 +2,7 @@
 #include <linux/bitops.h>
 #include <linux/mm.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
@@ -45,8 +45,8 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
 #define CBAR_ENB	(0x80000000)
 #define CBAR_KEY	(0X000000CB)
 	if (c->x86_model == 9 || c->x86_model == 10) {
-		if (inl (CBAR) & CBAR_ENB)
-			outl (0 | CBAR_KEY, CBAR);
+		if (inl(CBAR) & CBAR_ENB)
+			outl(0 | CBAR_KEY, CBAR);
 	}
 }
 
@@ -87,9 +87,10 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 	d = d2-d;
 
 	if (d > 20*K6_BUG_LOOP)
-		printk("system stability may be impaired when more than 32 MB are used.\n");
+		printk(KERN_CONT
+			"system stability may be impaired when more than 32 MB are used.\n");
 	else
-		printk("probably OK (after B9730xxxx).\n");
+		printk(KERN_CONT "probably OK (after B9730xxxx).\n");
 	printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
 }
 
@@ -219,8 +220,9 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
 		rdmsr(MSR_K7_CLK_CTL, l, h);
 		if ((l & 0xfff00000) != 0x20000000) {
-			printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
-				((l & 0x000fffff)|0x20000000));
+			printk(KERN_INFO
+			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
+				l, ((l & 0x000fffff)|0x20000000));
 			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
 		}
 	}
@@ -398,7 +400,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		u32 level;
 
 		level = cpuid_eax(1);
-		if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
+		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
 			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 
 		/*
@@ -494,27 +496,30 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	 * benefit in doing so.
 	 */
 	if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
 		printk(KERN_DEBUG "tseg: %010llx\n", tseg);
 		if ((tseg>>PMD_SHIFT) <
 				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
 			((tseg>>PMD_SHIFT) <
 				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
 				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
 			set_memory_4k((unsigned long)__va(tseg), 1);
 	}
 	}
 #endif
 }
 
 #ifdef CONFIG_X86_32
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
+							unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
 	if ((c->x86 == 6)) {
-		if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */
+		/* Duron Rev A0 */
+		if (c->x86_model == 3 && c->x86_mask == 0)
 			size = 64;
+		/* Tbird rev A1/A2 */
 		if (c->x86_model == 4 &&
-			(c->x86_mask == 0 || c->x86_mask == 1)) /* Tbird rev A1/A2 */
+			(c->x86_mask == 0 || c->x86_mask == 1))
 			size = 256;
 	}
 	return size;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c8e315f1aa83..01a265212395 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -81,7 +81,7 @@ static void __init check_fpu(void)
 
 	boot_cpu_data.fdiv_bug = fdiv_bug;
 	if (boot_cpu_data.fdiv_bug)
-		printk("Hmm, FPU with FDIV bug.\n");
+		printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
 }
 
 static void __init check_hlt(void)
@@ -98,7 +98,7 @@ static void __init check_hlt(void)
 	halt();
 	halt();
 	halt();
-	printk("OK.\n");
+	printk(KERN_CONT "OK.\n");
 }
 
 /*
@@ -122,9 +122,9 @@ static void __init check_popad(void)
 	 * CPU hard. Too bad.
 	 */
 	if (res != 12345678)
-		printk("Buggy.\n");
+		printk(KERN_CONT "Buggy.\n");
 	else
-		printk("OK.\n");
+		printk(KERN_CONT "OK.\n");
 #endif
 }
 
@@ -156,7 +156,7 @@ void __init check_bugs(void)
 {
 	identify_boot_cpu();
 #ifndef CONFIG_SMP
-	printk("CPU: ");
+	printk(KERN_INFO "CPU: ");
 	print_cpu_info(&boot_cpu_data);
 #endif
 	check_config();
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
index 9a3ed0649d4e..04f0fe5af83e 100644
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ b/arch/x86/kernel/cpu/bugs_64.c
@@ -15,7 +15,7 @@ void __init check_bugs(void)
 {
 	identify_boot_cpu();
 #if !defined(CONFIG_SMP)
-	printk("CPU: ");
+	printk(KERN_INFO "CPU: ");
 	print_cpu_info(&boot_cpu_data);
 #endif
 	alternative_instructions();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 5ce60a88027b..734eaad93656 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -18,8 +18,8 @@
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
-#include <asm/topology.h>
-#include <asm/cpumask.h>
+#include <linux/topology.h>
+#include <linux/cpumask.h>
 #include <asm/pgtable.h>
 #include <asm/atomic.h>
 #include <asm/proto.h>
@@ -28,13 +28,13 @@
 #include <asm/desc.h>
 #include <asm/i387.h>
 #include <asm/mtrr.h>
-#include <asm/numa.h>
+#include <linux/numa.h>
 #include <asm/asm.h>
 #include <asm/cpu.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 #include <asm/pat.h>
-#include <asm/smp.h>
+#include <linux/smp.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
@@ -982,7 +982,7 @@ static __init int setup_disablecpuid(char *arg)
 __setup("clearcpuid=", setup_disablecpuid);
 
 #ifdef CONFIG_X86_64
-struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
+struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
 		     irq_stack_union) __aligned(PAGE_SIZE);
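Illustration, not part of the patch: the idt_descr change above is purely symbolic. On x86-64 each IDT gate descriptor is 16 bytes and NR_VECTORS is 256, so the limit field is still 256 * 16 - 1 = 4095; the constant is now just spelled in terms of NR_VECTORS.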
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 593171e967ef..19807b89f058 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -3,10 +3,10 @@
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <asm/dma.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/processor-cyrix.h>
 #include <asm/processor-flags.h>
-#include <asm/timer.h>
+#include <linux/timer.h>
 #include <asm/pci-direct.h>
 #include <asm/tsc.h>
 
@@ -282,7 +282,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 		 * The 5510/5520 companion chips have a funky PIT.
 		 */
 		if (vendor == PCI_VENDOR_ID_CYRIX &&
-			(device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520))
+		    (device == PCI_DEVICE_ID_CYRIX_5510 ||
+		     device == PCI_DEVICE_ID_CYRIX_5520))
 			mark_tsc_unstable("cyrix 5510/5520 detected");
 	}
 #endif
@@ -299,7 +300,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 		 *  ?  : 0x7x
 		 * GX1 : 0x8x          GX1  datasheet 56
 		 */
-		if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f))
+		if ((0x30 <= dir1 && dir1 <= 0x6f) ||
+		    (0x80 <= dir1 && dir1 <= 0x8f))
 			geode_configure();
 		return;
 	} else { /* MediaGX */
@@ -427,9 +429,12 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
 			printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
 			local_irq_save(flags);
 			ccr3 = getCx86(CX86_CCR3);
-			setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN  */
-			setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid  */
-			setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */
+			/* enable MAPEN  */
+			setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
+			/* enable cpuid  */
+			setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80);
+			/* disable MAPEN */
+			setCx86(CX86_CCR3, ccr3);
 			local_irq_restore(flags);
 		}
 	}
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index fb5b86af0b01..93ba8eeb100a 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -28,11 +28,10 @@
 static inline void __cpuinit
 detect_hypervisor_vendor(struct cpuinfo_x86 *c)
 {
-	if (vmware_platform()) {
+	if (vmware_platform())
 		c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE;
-	} else {
+	else
 		c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE;
-	}
 }
 
 unsigned long get_hypervisor_tsc_freq(void)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3260ab044996..80a722a071b5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -7,17 +7,17 @@
 #include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
 #include <asm/msr.h>
-#include <asm/uaccess.h>
 #include <asm/ds.h>
 #include <asm/bugs.h>
 #include <asm/cpu.h>
 
 #ifdef CONFIG_X86_64
-#include <asm/topology.h>
+#include <linux/topology.h>
 #include <asm/numa_64.h>
 #endif
 
@@ -174,7 +174,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_F00F_BUG
 	/*
 	 * All current models of Pentium and Pentium with MMX technology CPUs
-	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
+	 * have the F0 0F bug, which lets nonprivileged users lock up the
+	 * system.
 	 * Note that the workaround only should be initialized once...
 	 */
 	c->f00f_bug = 0;
@@ -207,7 +208,7 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 		printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
 		printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
 		lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
-		wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
+		wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
 	}
 }
 
@@ -283,7 +284,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
 	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
 	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 	if (eax & 0x1f)
-		return ((eax >> 26) + 1);
+		return (eax >> 26) + 1;
 	else
 		return 1;
 }
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 789efe217e1a..306bf0dca061 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -3,7 +3,7 @@
  *
  *	Changes:
  *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
- *		Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
+ *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
  *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
  */
@@ -16,7 +16,7 @@
 #include <linux/pci.h>
 
 #include <asm/processor.h>
-#include <asm/smp.h>
+#include <linux/smp.h>
 #include <asm/k8.h>
 
 #define LVL_1_INST	1
@@ -25,14 +25,15 @@
 #define LVL_3		4
 #define LVL_TRACE	5
 
-struct _cache_table
-{
+struct _cache_table {
 	unsigned char descriptor;
 	char cache_type;
 	short size;
 };
 
-/* all the cache descriptor types we care about (no TLB or trace cache entries) */
+/* All the cache descriptor types we care about (no TLB or
+   trace cache entries) */
+
 static const struct _cache_table __cpuinitconst cache_table[] =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
@@ -105,8 +106,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
 };
 
 
-enum _cache_type
-{
+enum _cache_type {
 	CACHE_TYPE_NULL	= 0,
 	CACHE_TYPE_DATA = 1,
 	CACHE_TYPE_INST = 2,
@@ -170,31 +170,31 @@ unsigned short			num_cache_leaves;
    Maybe later */
 union l1_cache {
 	struct {
-		unsigned line_size : 8;
-		unsigned lines_per_tag : 8;
-		unsigned assoc : 8;
-		unsigned size_in_kb : 8;
+		unsigned line_size:8;
+		unsigned lines_per_tag:8;
+		unsigned assoc:8;
+		unsigned size_in_kb:8;
 	};
 	unsigned val;
 };
 
 union l2_cache {
 	struct {
-		unsigned line_size : 8;
-		unsigned lines_per_tag : 4;
-		unsigned assoc : 4;
-		unsigned size_in_kb : 16;
+		unsigned line_size:8;
+		unsigned lines_per_tag:4;
+		unsigned assoc:4;
+		unsigned size_in_kb:16;
 	};
 	unsigned val;
 };
 
 union l3_cache {
 	struct {
-		unsigned line_size : 8;
-		unsigned lines_per_tag : 4;
-		unsigned assoc : 4;
-		unsigned res : 2;
-		unsigned size_encoded : 14;
+		unsigned line_size:8;
+		unsigned lines_per_tag:4;
+		unsigned assoc:4;
+		unsigned res:2;
+		unsigned size_encoded:14;
 	};
 	unsigned val;
 };
@@ -350,7 +350,8 @@ static int __cpuinit find_num_cache_leaves(void)
 
 unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
+	/* Cache sizes */
+	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
 	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
@@ -377,8 +378,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 
 			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
 			if (retval >= 0) {
-				switch(this_leaf.eax.split.level) {
+				switch (this_leaf.eax.split.level) {
 				case 1:
 					if (this_leaf.eax.split.type ==
 							CACHE_TYPE_DATA)
 						new_l1d = this_leaf.size/1024;
@@ -386,19 +387,20 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 							CACHE_TYPE_INST)
 						new_l1i = this_leaf.size/1024;
 					break;
 				case 2:
 					new_l2 = this_leaf.size/1024;
 					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
 					index_msb = get_count_order(num_threads_sharing);
 					l2_id = c->apicid >> index_msb;
 					break;
 				case 3:
 					new_l3 = this_leaf.size/1024;
 					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
-					index_msb = get_count_order(num_threads_sharing);
+					index_msb = get_count_order(
+							num_threads_sharing);
 					l3_id = c->apicid >> index_msb;
 					break;
 				default:
 					break;
 				}
 			}
@@ -421,22 +423,21 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 		/* Number of times to iterate */
 		n = cpuid_eax(2) & 0xFF;
 
-		for ( i = 0 ; i < n ; i++ ) {
+		for (i = 0 ; i < n ; i++) {
 			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
 
 			/* If bit 31 is set, this is an unknown format */
-			for ( j = 0 ; j < 3 ; j++ ) {
-				if (regs[j] & (1 << 31)) regs[j] = 0;
-			}
+			for (j = 0 ; j < 3 ; j++)
+				if (regs[j] & (1 << 31))
+					regs[j] = 0;
 
 			/* Byte 0 is level count, not a descriptor */
-			for ( j = 1 ; j < 16 ; j++ ) {
+			for (j = 1 ; j < 16 ; j++) {
 				unsigned char des = dp[j];
 				unsigned char k = 0;
 
 				/* look up this descriptor in the table */
-				while (cache_table[k].descriptor != 0)
-				{
+				while (cache_table[k].descriptor != 0) {
 					if (cache_table[k].descriptor == des) {
 						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
 							break;
@@ -488,14 +489,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	}
 
 	if (trace)
-		printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
-	else if ( l1i )
-		printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
+		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
+	else if (l1i)
+		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
 
 	if (l1d)
-		printk(", L1 D cache: %dK\n", l1d);
+		printk(KERN_CONT ", L1 D cache: %dK\n", l1d);
 	else
-		printk("\n");
+		printk(KERN_CONT "\n");
 
 	if (l2)
 		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
@@ -558,8 +559,13 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	}
 }
 #else
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+{
+}
+
+static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+{
+}
 #endif
 
 static void __cpuinit free_cache_attributes(unsigned int cpu)
@@ -645,7 +651,7 @@ static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
 static ssize_t show_##file_name						\
 	(struct _cpuid4_info *this_leaf, char *buf)			\
 {									\
-	return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
+	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
 }
 
 show_one_plus(level, eax.split.level, 0);
@@ -656,7 +662,7 @@ show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
 
 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
 {
-	return sprintf (buf, "%luK\n", this_leaf->size / 1024);
+	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
 }
 
 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
@@ -669,7 +675,7 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 		const struct cpumask *mask;
 
 		mask = to_cpumask(this_leaf->shared_cpu_map);
-		n = type?
+		n = type ?
 			cpulist_scnprintf(buf, len-2, mask) :
 			cpumask_scnprintf(buf, len-2, mask);
 		buf[n++] = '\n';
@@ -800,7 +806,7 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
 static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
 		show_cache_disable_1, store_cache_disable_1);
 
-static struct attribute * default_attrs[] = {
+static struct attribute *default_attrs[] = {
 	&type.attr,
 	&level.attr,
 	&coherency_line_size.attr,
@@ -815,7 +821,7 @@ static struct attribute * default_attrs[] = {
 	NULL
 };
 
-static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
 	struct _cache_attr *fattr = to_attr(attr);
 	struct _index_kobject *this_leaf = to_object(kobj);
@@ -828,8 +834,8 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
 	return ret;
 }
 
-static ssize_t store(struct kobject * kobj, struct attribute * attr,
-		     const char * buf, size_t count)
+static ssize_t store(struct kobject *kobj, struct attribute *attr,
+		     const char *buf, size_t count)
 {
 	struct _cache_attr *fattr = to_attr(attr);
 	struct _index_kobject *this_leaf = to_object(kobj);
@@ -883,7 +889,7 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 		goto err_out;
 
 	per_cpu(index_kobject, cpu) = kzalloc(
-	    sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
+	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
 	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
 		goto err_out;
 
@@ -917,7 +923,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	}
 
 	for (i = 0; i < num_cache_leaves; i++) {
-		this_object = INDEX_KOBJECT_PTR(cpu,i);
+		this_object = INDEX_KOBJECT_PTR(cpu, i);
 		this_object->cpu = cpu;
 		this_object->index = i;
 		retval = kobject_init_and_add(&(this_object->kobj),
@@ -925,9 +931,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 					      per_cpu(cache_kobject, cpu),
 					      "index%1lu", i);
 		if (unlikely(retval)) {
-			for (j = 0; j < i; j++) {
-				kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
-			}
+			for (j = 0; j < i; j++)
+				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
 			kobject_put(per_cpu(cache_kobject, cpu));
 			cpuid4_cache_sysfs_exit(cpu);
 			return retval;
@@ -952,7 +957,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
 
 	for (i = 0; i < num_cache_leaves; i++)
-		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
+		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
 	kobject_put(per_cpu(cache_kobject, cpu));
 	cpuid4_cache_sysfs_exit(cpu);
 }
@@ -977,8 +982,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
-{
+static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
 	.notifier_call = cacheinfo_cpu_callback,
 };
 
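Illustration, not part of the patch: the l1/l2/l3 unions reformatted in the intel_cacheinfo.c hunks above decode packed CPUID words through bitfields. A small userspace sketch of the l2_cache case follows; the sample register value is constructed by hand rather than read from a CPU, and the decoding assumes the little-endian bitfield layout the kernel code relies on:

/*
 * Userspace sketch of the l2_cache union above decoding a packed
 * CPUID 0x80000006 ECX word. The sample value is hand-built.
 */
#include <stdio.h>

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

int main(void)
{
	union l2_cache l2;

	/* 512 KB, assoc code 8, 1 line per tag, 64-byte lines */
	l2.val = (512u << 16) | (8u << 12) | (1u << 8) | 64u;

	printf("L2: %u KB, assoc code %u, %u line(s)/tag, %u-byte lines\n",
	       l2.size_in_kb, l2.assoc, l2.lines_per_tag, l2.line_size);
	return 0;
}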
diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
index ee2331b0e58f..33af14110dfd 100644
--- a/arch/x86/kernel/cpu/mtrr/amd.c
+++ b/arch/x86/kernel/cpu/mtrr/amd.c
@@ -7,15 +7,15 @@
 
 static void
 amd_get_mtrr(unsigned int reg, unsigned long *base,
-	     unsigned long *size, mtrr_type * type)
+	     unsigned long *size, mtrr_type *type)
 {
 	unsigned long low, high;
 
 	rdmsr(MSR_K6_UWCCR, low, high);
 	/* Upper dword is region 1, lower is region 0 */
 	if (reg == 1)
 		low = high;
 	/* The base masks off on the right alignment */
 	*base = (low & 0xFFFE0000) >> PAGE_SHIFT;
 	*type = 0;
 	if (low & 1)
@@ -27,74 +27,81 @@ amd_get_mtrr(unsigned int reg, unsigned long *base,
 		return;
 	}
 	/*
 	 * This needs a little explaining. The size is stored as an
 	 * inverted mask of bits of 128K granularity 15 bits long offset
-	 * 2 bits
+	 * 2 bits.
 	 *
 	 * So to get a size we do invert the mask and add 1 to the lowest
 	 * mask bit (4 as its 2 bits in). This gives us a size we then shift
-	 * to turn into 128K blocks
+	 * to turn into 128K blocks.
 	 *
 	 * eg		111 1111 1111 1100      is 512K
 	 *
 	 * invert	000 0000 0000 0011
 	 * +1		000 0000 0000 0100
 	 * *128K	...
 	 */
 	low = (~low) & 0x1FFFC;
 	*size = (low + 4) << (15 - PAGE_SHIFT);
-	return;
 }
 
-static void amd_set_mtrr(unsigned int reg, unsigned long base,
-			 unsigned long size, mtrr_type type)
-/*  [SUMMARY] Set variable MTRR register on the local CPU.
-    <reg> The register to set.
-    <base> The base address of the region.
-    <size> The size of the region. If this is 0 the region is disabled.
-    <type> The type of the region.
-    [RETURNS] Nothing.
-*/
+/**
+ * amd_set_mtrr - Set variable MTRR register on the local CPU.
+ *
+ * @reg The register to set.
+ * @base The base address of the region.
+ * @size The size of the region. If this is 0 the region is disabled.
+ * @type The type of the region.
+ *
+ * Returns nothing.
+ */
+static void
+amd_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
 {
 	u32 regs[2];
 
 	/*
-	 * Low is MTRR0 , High MTRR 1
+	 * Low is MTRR0, High MTRR 1
 	 */
 	rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
 	/*
 	 * Blank to disable
 	 */
-	if (size == 0)
+	if (size == 0) {
 		regs[reg] = 0;
-	else
-		/* Set the register to the base, the type (off by one) and an
-		   inverted bitmask of the size The size is the only odd
-		   bit. We are fed say 512K We invert this and we get 111 1111
-		   1111 1011 but if you subtract one and invert you get the
-		   desired 111 1111 1111 1100 mask
-
-		   But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! */
+	} else {
+		/*
+		 * Set the register to the base, the type (off by one) and an
+		 * inverted bitmask of the size The size is the only odd
+		 * bit. We are fed say 512K We invert this and we get 111 1111
+		 * 1111 1011 but if you subtract one and invert you get the
+		 * desired 111 1111 1111 1100 mask
+		 *
+		 * But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!
+		 */
 		regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
 			| (base << PAGE_SHIFT) | (type + 1);
+	}
 
 	/*
 	 * The writeback rule is quite specific. See the manual. Its
 	 * disable local interrupts, write back the cache, set the mtrr
 	 */
 	wbinvd();
 	wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
 }
 
-static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
+static int
+amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
 {
-	/* Apply the K6 block alignment and size rules
-	   In order
-	   o Uncached or gathering only
-	   o 128K or bigger block
-	   o Power of 2 block
-	   o base suitably aligned to the power
-	*/
+	/*
+	 * Apply the K6 block alignment and size rules
+	 * In order
+	 * o Uncached or gathering only
+	 * o 128K or bigger block
+	 * o Power of 2 block
+	 * o base suitably aligned to the power
+	 */
 	if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
 	    || (size & ~(size - 1)) - size || (base & (size - 1)))
 		return -EINVAL;
@@ -115,5 +122,3 @@ int __init amd_init_mtrr(void)
 	set_mtrr_ops(&amd_mtrr_ops);
 	return 0;
 }
-
-//arch_initcall(amd_mtrr_init);
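Illustration, not part of the patch: the size/mask arithmetic described in the comments above round-trips exactly, including the ~(x - 1) == -x shortcut. A self-contained userspace check (PAGE_SHIFT is defined here only for the sketch) mirrors the amd_set_mtrr()/amd_get_mtrr() expressions:

/*
 * Userspace check of the K6 mask arithmetic explained above: a 512K
 * region encodes as the inverted count of 128K blocks, and unsigned
 * negation computes that inverted mask in one step.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long size = (512 * 1024) >> PAGE_SHIFT;	/* 512K, in pages */

	/* Encode, as in amd_set_mtrr(): mask from size. */
	unsigned long mask = (-size >> (15 - PAGE_SHIFT)) & 0x0001FFFC;

	/* Decode, as in amd_get_mtrr(): size back from mask. */
	unsigned long low = (~mask) & 0x1FFFC;
	unsigned long decoded = (low + 4) << (15 - PAGE_SHIFT);

	assert(decoded == size);	/* round-trips */
	printf("mask %#lx decodes back to %lu pages (512K)\n", mask, decoded);
	return 0;
}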
diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c index cb9aa3a7a7ab..de89f14eff3a 100644 --- a/arch/x86/kernel/cpu/mtrr/centaur.c +++ b/arch/x86/kernel/cpu/mtrr/centaur.c | |||
| @@ -1,7 +1,9 @@ | |||
| 1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
| 2 | #include <linux/mm.h> | 2 | #include <linux/mm.h> |
| 3 | |||
| 3 | #include <asm/mtrr.h> | 4 | #include <asm/mtrr.h> |
| 4 | #include <asm/msr.h> | 5 | #include <asm/msr.h> |
| 6 | |||
| 5 | #include "mtrr.h" | 7 | #include "mtrr.h" |
| 6 | 8 | ||
| 7 | static struct { | 9 | static struct { |
| @@ -12,25 +14,25 @@ static struct { | |||
| 12 | static u8 centaur_mcr_reserved; | 14 | static u8 centaur_mcr_reserved; |
| 13 | static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */ | 15 | static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */ |
| 14 | 16 | ||
| 15 | /* | 17 | /** |
| 16 | * Report boot time MCR setups | 18 | * centaur_get_free_region - Get a free MTRR. |
| 19 | * | ||
| 20 | * @base: The starting (base) address of the region. | ||
| 21 | * @size: The size (in bytes) of the region. | ||
| 22 | * | ||
| 23 | * Returns: the index of the region on success, else -1 on error. | ||
| 17 | */ | 24 | */ |
| 18 | |||
| 19 | static int | 25 | static int |
| 20 | centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) | 26 | centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) |
| 21 | /* [SUMMARY] Get a free MTRR. | ||
| 22 | <base> The starting (base) address of the region. | ||
| 23 | <size> The size (in bytes) of the region. | ||
| 24 | [RETURNS] The index of the region on success, else -1 on error. | ||
| 25 | */ | ||
| 26 | { | 27 | { |
| 27 | int i, max; | ||
| 28 | mtrr_type ltype; | ||
| 29 | unsigned long lbase, lsize; | 28 | unsigned long lbase, lsize; |
| 29 | mtrr_type ltype; | ||
| 30 | int i, max; | ||
| 30 | 31 | ||
| 31 | max = num_var_ranges; | 32 | max = num_var_ranges; |
| 32 | if (replace_reg >= 0 && replace_reg < max) | 33 | if (replace_reg >= 0 && replace_reg < max) |
| 33 | return replace_reg; | 34 | return replace_reg; |
| 35 | |||
| 34 | for (i = 0; i < max; ++i) { | 36 | for (i = 0; i < max; ++i) { |
| 35 | if (centaur_mcr_reserved & (1 << i)) | 37 | if (centaur_mcr_reserved & (1 << i)) |
| 36 | continue; | 38 | continue; |
| @@ -38,11 +40,14 @@ centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) | |||
| 38 | if (lsize == 0) | 40 | if (lsize == 0) |
| 39 | return i; | 41 | return i; |
| 40 | } | 42 | } |
| 43 | |||
| 41 | return -ENOSPC; | 44 | return -ENOSPC; |
| 42 | } | 45 | } |
| 43 | 46 | ||
| 44 | void | 47 | /* |
| 45 | mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) | 48 | * Report boot time MCR setups |
| 49 | */ | ||
| 50 | void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) | ||
| 46 | { | 51 | { |
| 47 | centaur_mcr[mcr].low = lo; | 52 | centaur_mcr[mcr].low = lo; |
| 48 | centaur_mcr[mcr].high = hi; | 53 | centaur_mcr[mcr].high = hi; |
| @@ -54,33 +59,35 @@ centaur_get_mcr(unsigned int reg, unsigned long *base, | |||
| 54 | { | 59 | { |
| 55 | *base = centaur_mcr[reg].high >> PAGE_SHIFT; | 60 | *base = centaur_mcr[reg].high >> PAGE_SHIFT; |
| 56 | *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; | 61 | *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; |
| 57 | *type = MTRR_TYPE_WRCOMB; /* If it is there, it is write-combining */ | 62 | *type = MTRR_TYPE_WRCOMB; /* write-combining */ |
| 63 | |||
| 58 | if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2)) | 64 | if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2)) |
| 59 | *type = MTRR_TYPE_UNCACHABLE; | 65 | *type = MTRR_TYPE_UNCACHABLE; |
| 60 | if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25) | 66 | if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25) |
| 61 | *type = MTRR_TYPE_WRBACK; | 67 | *type = MTRR_TYPE_WRBACK; |
| 62 | if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31) | 68 | if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31) |
| 63 | *type = MTRR_TYPE_WRBACK; | 69 | *type = MTRR_TYPE_WRBACK; |
| 64 | |||
| 65 | } | 70 | } |
| 66 | 71 | ||
| 67 | static void centaur_set_mcr(unsigned int reg, unsigned long base, | 72 | static void |
| 68 | unsigned long size, mtrr_type type) | 73 | centaur_set_mcr(unsigned int reg, unsigned long base, |
| 74 | unsigned long size, mtrr_type type) | ||
| 69 | { | 75 | { |
| 70 | unsigned long low, high; | 76 | unsigned long low, high; |
| 71 | 77 | ||
| 72 | if (size == 0) { | 78 | if (size == 0) { |
| 73 | /* Disable */ | 79 | /* Disable */ |
| 74 | high = low = 0; | 80 | high = low = 0; |
| 75 | } else { | 81 | } else { |
| 76 | high = base << PAGE_SHIFT; | 82 | high = base << PAGE_SHIFT; |
| 77 | if (centaur_mcr_type == 0) | 83 | if (centaur_mcr_type == 0) { |
| 78 | low = -size << PAGE_SHIFT | 0x1f; /* only support write-combining... */ | 84 | /* Only support write-combining... */ |
| 79 | else { | 85 | low = -size << PAGE_SHIFT | 0x1f; |
| 86 | } else { | ||
| 80 | if (type == MTRR_TYPE_UNCACHABLE) | 87 | if (type == MTRR_TYPE_UNCACHABLE) |
| 81 | low = -size << PAGE_SHIFT | 0x02; /* NC */ | 88 | low = -size << PAGE_SHIFT | 0x02; /* NC */ |
| 82 | else | 89 | else |
| 83 | low = -size << PAGE_SHIFT | 0x09; /* WWO,WC */ | 90 | low = -size << PAGE_SHIFT | 0x09; /* WWO, WC */ |
| 84 | } | 91 | } |
| 85 | } | 92 | } |
| 86 | centaur_mcr[reg].high = high; | 93 | centaur_mcr[reg].high = high; |
| @@ -88,118 +95,16 @@ static void centaur_set_mcr(unsigned int reg, unsigned long base, | |||
| 88 | wrmsr(MSR_IDT_MCR0 + reg, low, high); | 95 | wrmsr(MSR_IDT_MCR0 + reg, low, high); |
| 89 | } | 96 | } |
| 90 | 97 | ||
| 91 | #if 0 | 98 | static int |
| 92 | /* | 99 | centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int type) |
| 93 | * Initialise the later (saner) Winchip MCR variant. In this version | ||
| 94 | * the BIOS can pass us the registers it has used (but not their values) | ||
| 95 | * and the control register is read/write | ||
| 96 | */ | ||
| 97 | |||
| 98 | static void __init | ||
| 99 | centaur_mcr1_init(void) | ||
| 100 | { | ||
| 101 | unsigned i; | ||
| 102 | u32 lo, hi; | ||
| 103 | |||
| 104 | /* Unfortunately, MCR's are read-only, so there is no way to | ||
| 105 | * find out what the bios might have done. | ||
| 106 | */ | ||
| 107 | |||
| 108 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
| 109 | if (((lo >> 17) & 7) == 1) { /* Type 1 Winchip2 MCR */ | ||
| 110 | lo &= ~0x1C0; /* clear key */ | ||
| 111 | lo |= 0x040; /* set key to 1 */ | ||
| 112 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); /* unlock MCR */ | ||
| 113 | } | ||
| 114 | |||
| 115 | centaur_mcr_type = 1; | ||
| 116 | |||
| 117 | /* | ||
| 118 | * Clear any unconfigured MCR's. | ||
| 119 | */ | ||
| 120 | |||
| 121 | for (i = 0; i < 8; ++i) { | ||
| 122 | if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) { | ||
| 123 | if (!(lo & (1 << (9 + i)))) | ||
| 124 | wrmsr(MSR_IDT_MCR0 + i, 0, 0); | ||
| 125 | else | ||
| 126 | /* | ||
| 127 | * If the BIOS set up an MCR we cannot see it | ||
| 128 | * but we don't wish to obliterate it | ||
| 129 | */ | ||
| 130 | centaur_mcr_reserved |= (1 << i); | ||
| 131 | } | ||
| 132 | } | ||
| 133 | /* | ||
| 134 | * Throw the main write-combining switch... | ||
| 135 | * However if OOSTORE is enabled then people have already done far | ||
| 136 | * cleverer things and we should behave. | ||
| 137 | */ | ||
| 138 | |||
| 139 | lo |= 15; /* Write combine enables */ | ||
| 140 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
| 141 | } | ||
| 142 | |||
| 143 | /* | ||
| 144 | * Initialise the original winchip with read only MCR registers | ||
| 145 | * no used bitmask for the BIOS to pass on and write only control | ||
| 146 | */ | ||
| 147 | |||
| 148 | static void __init | ||
| 149 | centaur_mcr0_init(void) | ||
| 150 | { | ||
| 151 | unsigned i; | ||
| 152 | |||
| 153 | /* Unfortunately, MCR's are read-only, so there is no way to | ||
| 154 | * find out what the bios might have done. | ||
| 155 | */ | ||
| 156 | |||
| 157 | /* Clear any unconfigured MCR's. | ||
| 158 | * This way we are sure that the centaur_mcr array contains the actual | ||
| 159 | * values. The disadvantage is that any BIOS tweaks are thus undone. | ||
| 160 | * | ||
| 161 | */ | ||
| 162 | for (i = 0; i < 8; ++i) { | ||
| 163 | if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) | ||
| 164 | wrmsr(MSR_IDT_MCR0 + i, 0, 0); | ||
| 165 | } | ||
| 166 | |||
| 167 | wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); /* Write only */ | ||
| 168 | } | ||
| 169 | |||
| 170 | /* | ||
| 171 | * Initialise Winchip series MCR registers | ||
| 172 | */ | ||
| 173 | |||
| 174 | static void __init | ||
| 175 | centaur_mcr_init(void) | ||
| 176 | { | ||
| 177 | struct set_mtrr_context ctxt; | ||
| 178 | |||
| 179 | set_mtrr_prepare_save(&ctxt); | ||
| 180 | set_mtrr_cache_disable(&ctxt); | ||
| 181 | |||
| 182 | if (boot_cpu_data.x86_model == 4) | ||
| 183 | centaur_mcr0_init(); | ||
| 184 | else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9) | ||
| 185 | centaur_mcr1_init(); | ||
| 186 | |||
| 187 | set_mtrr_done(&ctxt); | ||
| 188 | } | ||
| 189 | #endif | ||
| 190 | |||
| 191 | static int centaur_validate_add_page(unsigned long base, | ||
| 192 | unsigned long size, unsigned int type) | ||
| 193 | { | 100 | { |
| 194 | /* | 101 | /* |
| 195 | * FIXME: Winchip2 supports uncached | 102 | * FIXME: Winchip2 supports uncached |
| 196 | */ | 103 | */ |
| 197 | if (type != MTRR_TYPE_WRCOMB && | 104 | if (type != MTRR_TYPE_WRCOMB && |
| 198 | (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { | 105 | (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { |
| 199 | printk(KERN_WARNING | 106 | pr_warning("mtrr: only write-combining%s supported\n", |
| 200 | "mtrr: only write-combining%s supported\n", | 107 | centaur_mcr_type ? " and uncacheable are" : " is"); |
| 201 | centaur_mcr_type ? " and uncacheable are" | ||
| 202 | : " is"); | ||
| 203 | return -EINVAL; | 108 | return -EINVAL; |
| 204 | } | 109 | } |
| 205 | return 0; | 110 | return 0; |
| @@ -207,7 +112,6 @@ static int centaur_validate_add_page(unsigned long base, | |||
| 207 | 112 | ||
| 208 | static struct mtrr_ops centaur_mtrr_ops = { | 113 | static struct mtrr_ops centaur_mtrr_ops = { |
| 209 | .vendor = X86_VENDOR_CENTAUR, | 114 | .vendor = X86_VENDOR_CENTAUR, |
| 210 | // .init = centaur_mcr_init, | ||
| 211 | .set = centaur_set_mcr, | 115 | .set = centaur_set_mcr, |
| 212 | .get = centaur_get_mcr, | 116 | .get = centaur_get_mcr, |
| 213 | .get_free_region = centaur_get_free_region, | 117 | .get_free_region = centaur_get_free_region, |
| @@ -220,5 +124,3 @@ int __init centaur_init_mtrr(void) | |||
| 220 | set_mtrr_ops(¢aur_mtrr_ops); | 124 | set_mtrr_ops(¢aur_mtrr_ops); |
| 221 | return 0; | 125 | return 0; |
| 222 | } | 126 | } |
| 223 | |||
| 224 | //arch_initcall(centaur_init_mtrr); | ||
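centaur.c ends by plugging vendor callbacks into the generic MTRR layer through a table of function pointers; the commented-out init hook simply disappears along with the dead code. The dispatch pattern itself, reduced to a userspace toy (struct layout and vendor constant are illustrative, not the kernel's exact definitions):

```c
#include <stdio.h>

/* Simplified shape of the dispatch table; the field set is illustrative. */
struct mtrr_ops_sketch {
        int vendor;
        void (*set)(unsigned int reg, unsigned long base,
                    unsigned long size, unsigned char type);
        int (*validate_add_page)(unsigned long base, unsigned long size,
                                 unsigned int type);
};

static void demo_set(unsigned int reg, unsigned long base,
                     unsigned long size, unsigned char type)
{
        printf("set reg=%u base=%#lx size=%#lx type=%u\n",
               reg, base, size, type);
}

static int demo_validate(unsigned long base, unsigned long size,
                         unsigned int type)
{
        return 0;       /* accept everything in this toy */
}

static const struct mtrr_ops_sketch demo_ops = {
        .vendor            = 5,         /* stand-in for X86_VENDOR_CENTAUR */
        .set               = demo_set,
        .validate_add_page = demo_validate,
};

int main(void)
{
        /* Generic layer: validate first, then program through the vtable. */
        if (!demo_ops.validate_add_page(0xa0, 16, 1))
                demo_ops.set(0, 0xa0, 16, 1);
        return 0;
}
```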
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index 1d584a18a50d..315738c74aad 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c | |||
| @@ -1,51 +1,75 @@ | |||
| 1 | /* MTRR (Memory Type Range Register) cleanup | 1 | /* |
| 2 | 2 | * MTRR (Memory Type Range Register) cleanup | |
| 3 | Copyright (C) 2009 Yinghai Lu | 3 | * |
| 4 | 4 | * Copyright (C) 2009 Yinghai Lu | |
| 5 | This library is free software; you can redistribute it and/or | 5 | * |
| 6 | modify it under the terms of the GNU Library General Public | 6 | * This library is free software; you can redistribute it and/or |
| 7 | License as published by the Free Software Foundation; either | 7 | * modify it under the terms of the GNU Library General Public |
| 8 | version 2 of the License, or (at your option) any later version. | 8 | * License as published by the Free Software Foundation; either |
| 9 | 9 | * version 2 of the License, or (at your option) any later version. | |
| 10 | This library is distributed in the hope that it will be useful, | 10 | * |
| 11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * This library is distributed in the hope that it will be useful, |
| 12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | Library General Public License for more details. | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 14 | 14 | * Library General Public License for more details. | |
| 15 | You should have received a copy of the GNU Library General Public | 15 | * |
| 16 | License along with this library; if not, write to the Free | 16 | * You should have received a copy of the GNU Library General Public |
| 17 | Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 17 | * License along with this library; if not, write to the Free |
| 18 | */ | 18 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
| 19 | 19 | */ | |
| 20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
| 21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
| 22 | #include <linux/pci.h> | 22 | #include <linux/pci.h> |
| 23 | #include <linux/smp.h> | 23 | #include <linux/smp.h> |
| 24 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
| 25 | #include <linux/mutex.h> | ||
| 26 | #include <linux/sort.h> | 25 | #include <linux/sort.h> |
| 26 | #include <linux/mutex.h> | ||
| 27 | #include <linux/uaccess.h> | ||
| 28 | #include <linux/kvm_para.h> | ||
| 27 | 29 | ||
| 30 | #include <asm/processor.h> | ||
| 28 | #include <asm/e820.h> | 31 | #include <asm/e820.h> |
| 29 | #include <asm/mtrr.h> | 32 | #include <asm/mtrr.h> |
| 30 | #include <asm/uaccess.h> | ||
| 31 | #include <asm/processor.h> | ||
| 32 | #include <asm/msr.h> | 33 | #include <asm/msr.h> |
| 33 | #include <asm/kvm_para.h> | ||
| 34 | #include "mtrr.h" | ||
| 35 | 34 | ||
| 36 | /* should be related to MTRR_VAR_RANGES nums */ | 35 | #include "mtrr.h" |
| 37 | #define RANGE_NUM 256 | ||
| 38 | 36 | ||
| 39 | struct res_range { | 37 | struct res_range { |
| 40 | unsigned long start; | 38 | unsigned long start; |
| 41 | unsigned long end; | 39 | unsigned long end; |
| 40 | }; | ||
| 41 | |||
| 42 | struct var_mtrr_range_state { | ||
| 43 | unsigned long base_pfn; | ||
| 44 | unsigned long size_pfn; | ||
| 45 | mtrr_type type; | ||
| 46 | }; | ||
| 47 | |||
| 48 | struct var_mtrr_state { | ||
| 49 | unsigned long range_startk; | ||
| 50 | unsigned long range_sizek; | ||
| 51 | unsigned long chunk_sizek; | ||
| 52 | unsigned long gran_sizek; | ||
| 53 | unsigned int reg; | ||
| 42 | }; | 54 | }; |
| 43 | 55 | ||
| 56 | /* Should be related to MTRR_VAR_RANGES nums */ | ||
| 57 | #define RANGE_NUM 256 | ||
| 58 | |||
| 59 | static struct res_range __initdata range[RANGE_NUM]; | ||
| 60 | static int __initdata nr_range; | ||
| 61 | |||
| 62 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | ||
| 63 | |||
| 64 | static int __initdata debug_print; | ||
| 65 | #define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0) | ||
| 66 | |||
| 67 | |||
| 44 | static int __init | 68 | static int __init |
| 45 | add_range(struct res_range *range, int nr_range, unsigned long start, | 69 | add_range(struct res_range *range, int nr_range, |
| 46 | unsigned long end) | 70 | unsigned long start, unsigned long end) |
| 47 | { | 71 | { |
| 48 | /* out of slots */ | 72 | /* Out of slots: */ |
| 49 | if (nr_range >= RANGE_NUM) | 73 | if (nr_range >= RANGE_NUM) |
| 50 | return nr_range; | 74 | return nr_range; |
| 51 | 75 | ||
| @@ -58,12 +82,12 @@ add_range(struct res_range *range, int nr_range, unsigned long start, | |||
| 58 | } | 82 | } |
| 59 | 83 | ||
| 60 | static int __init | 84 | static int __init |
| 61 | add_range_with_merge(struct res_range *range, int nr_range, unsigned long start, | 85 | add_range_with_merge(struct res_range *range, int nr_range, |
| 62 | unsigned long end) | 86 | unsigned long start, unsigned long end) |
| 63 | { | 87 | { |
| 64 | int i; | 88 | int i; |
| 65 | 89 | ||
| 66 | /* try to merge it with old one */ | 90 | /* Try to merge it with old one: */ |
| 67 | for (i = 0; i < nr_range; i++) { | 91 | for (i = 0; i < nr_range; i++) { |
| 68 | unsigned long final_start, final_end; | 92 | unsigned long final_start, final_end; |
| 69 | unsigned long common_start, common_end; | 93 | unsigned long common_start, common_end; |
| @@ -84,7 +108,7 @@ add_range_with_merge(struct res_range *range, int nr_range, unsigned long start, | |||
| 84 | return nr_range; | 108 | return nr_range; |
| 85 | } | 109 | } |
| 86 | 110 | ||
| 87 | /* need to add that */ | 111 | /* Need to add it: */ |
| 88 | return add_range(range, nr_range, start, end); | 112 | return add_range(range, nr_range, start, end); |
| 89 | } | 113 | } |
| 90 | 114 | ||
| @@ -117,7 +141,7 @@ subtract_range(struct res_range *range, unsigned long start, unsigned long end) | |||
| 117 | } | 141 | } |
| 118 | 142 | ||
| 119 | if (start > range[j].start && end < range[j].end) { | 143 | if (start > range[j].start && end < range[j].end) { |
| 120 | /* find the new spare */ | 144 | /* Find the new spare: */ |
| 121 | for (i = 0; i < RANGE_NUM; i++) { | 145 | for (i = 0; i < RANGE_NUM; i++) { |
| 122 | if (range[i].end == 0) | 146 | if (range[i].end == 0) |
| 123 | break; | 147 | break; |
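The helpers above (add, merge, subtract) maintain a flat array of inclusive [start, end] page ranges, with end == 0 marking a free slot. A compressed userspace rendering of the add/merge pair, following the same overlap-or-adjacent test (`common_start > common_end + 1`):

```c
#include <stdio.h>
#include <string.h>

#define RANGE_NUM 8

struct res_range {
        unsigned long start;
        unsigned long end;      /* inclusive, as in the code above */
};

/* Append a range if a slot is free; mirrors add_range(). */
static int add_range(struct res_range *range, int nr_range,
                     unsigned long start, unsigned long end)
{
        if (nr_range >= RANGE_NUM)
                return nr_range;        /* out of slots */
        range[nr_range].start = start;
        range[nr_range].end = end;
        return nr_range + 1;
}

/* Grow an existing entry when the new range overlaps or touches it. */
static int add_range_with_merge(struct res_range *range, int nr_range,
                                unsigned long start, unsigned long end)
{
        int i;

        for (i = 0; i < nr_range; i++) {
                unsigned long common_start, common_end;

                if (!range[i].end)
                        continue;
                common_start = range[i].start > start ? range[i].start : start;
                common_end = range[i].end < end ? range[i].end : end;
                if (common_start > common_end + 1)
                        continue;       /* neither overlapping nor adjacent */
                if (start < range[i].start)
                        range[i].start = start;
                if (end > range[i].end)
                        range[i].end = end;
                return nr_range;
        }
        return add_range(range, nr_range, start, end);
}

int main(void)
{
        struct res_range range[RANGE_NUM];
        int nr_range = 0, i;

        memset(range, 0, sizeof(range));
        nr_range = add_range_with_merge(range, nr_range, 0x000, 0x0ff);
        nr_range = add_range_with_merge(range, nr_range, 0x100, 0x1ff); /* adjacent: merges */
        nr_range = add_range_with_merge(range, nr_range, 0x400, 0x4ff); /* disjoint: new slot */

        for (i = 0; i < nr_range; i++)
                printf("range %d: %#lx - %#lx\n", i, range[i].start, range[i].end);
        return 0;
}
```

The adjacency allowance (the `+ 1`) is what lets [0, 0xff] and [0x100, 0x1ff] collapse into one slot in the demo.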
| @@ -146,14 +170,8 @@ static int __init cmp_range(const void *x1, const void *x2) | |||
| 146 | return start1 - start2; | 170 | return start1 - start2; |
| 147 | } | 171 | } |
| 148 | 172 | ||
| 149 | struct var_mtrr_range_state { | 173 | #define BIOS_BUG_MSG KERN_WARNING \ |
| 150 | unsigned long base_pfn; | 174 | "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n" |
| 151 | unsigned long size_pfn; | ||
| 152 | mtrr_type type; | ||
| 153 | }; | ||
| 154 | |||
| 155 | static struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; | ||
| 156 | static int __initdata debug_print; | ||
| 157 | 175 | ||
| 158 | static int __init | 176 | static int __init |
| 159 | x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | 177 | x86_get_mtrr_mem_range(struct res_range *range, int nr_range, |
| @@ -180,7 +198,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
| 180 | range[i].start, range[i].end + 1); | 198 | range[i].start, range[i].end + 1); |
| 181 | } | 199 | } |
| 182 | 200 | ||
| 183 | /* take out UC ranges */ | 201 | /* Take out UC ranges: */ |
| 184 | for (i = 0; i < num_var_ranges; i++) { | 202 | for (i = 0; i < num_var_ranges; i++) { |
| 185 | type = range_state[i].type; | 203 | type = range_state[i].type; |
| 186 | if (type != MTRR_TYPE_UNCACHABLE && | 204 | if (type != MTRR_TYPE_UNCACHABLE && |
| @@ -193,9 +211,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
| 193 | if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed && | 211 | if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed && |
| 194 | (mtrr_state.enabled & 1)) { | 212 | (mtrr_state.enabled & 1)) { |
| 195 | /* Var MTRR contains UC entry below 1M? Skip it: */ | 213 | /* Var MTRR contains UC entry below 1M? Skip it: */ |
| 196 | printk(KERN_WARNING "WARNING: BIOS bug: VAR MTRR %d " | 214 | printk(BIOS_BUG_MSG, i); |
| 197 | "contains strange UC entry under 1M, check " | ||
| 198 | "with your system vendor!\n", i); | ||
| 199 | if (base + size <= (1<<(20-PAGE_SHIFT))) | 215 | if (base + size <= (1<<(20-PAGE_SHIFT))) |
| 200 | continue; | 216 | continue; |
| 201 | size -= (1<<(20-PAGE_SHIFT)) - base; | 217 | size -= (1<<(20-PAGE_SHIFT)) - base; |
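`1 << (20 - PAGE_SHIFT)` recurs throughout this file: it is the 1 MiB mark expressed in page frames (256 with 4 KiB pages), below which the fixed-range MTRRs override the variable ones. A tiny sketch of the clipping performed above (pfn values invented):

```c
#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_1M (1UL << (20 - PAGE_SHIFT))   /* 1 MiB in page frames: 256 */

int main(void)
{
        unsigned long base = 0xa0, size = 0x200;  /* pfn range straddling 1 MiB */

        if (base < PFN_1M) {
                if (base + size <= PFN_1M) {
                        size = 0;               /* entirely below 1 MiB: drop it */
                } else {
                        size -= PFN_1M - base;  /* clip off the sub-1MiB part */
                        base = PFN_1M;
                }
        }
        printf("kept: base pfn %#lx, %#lx pages\n", base, size);
        return 0;
}
```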
| @@ -237,17 +253,13 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range, | |||
| 237 | return nr_range; | 253 | return nr_range; |
| 238 | } | 254 | } |
| 239 | 255 | ||
| 240 | static struct res_range __initdata range[RANGE_NUM]; | ||
| 241 | static int __initdata nr_range; | ||
| 242 | |||
| 243 | #ifdef CONFIG_MTRR_SANITIZER | 256 | #ifdef CONFIG_MTRR_SANITIZER |
| 244 | 257 | ||
| 245 | static unsigned long __init sum_ranges(struct res_range *range, int nr_range) | 258 | static unsigned long __init sum_ranges(struct res_range *range, int nr_range) |
| 246 | { | 259 | { |
| 247 | unsigned long sum; | 260 | unsigned long sum = 0; |
| 248 | int i; | 261 | int i; |
| 249 | 262 | ||
| 250 | sum = 0; | ||
| 251 | for (i = 0; i < nr_range; i++) | 263 | for (i = 0; i < nr_range; i++) |
| 252 | sum += range[i].end + 1 - range[i].start; | 264 | sum += range[i].end + 1 - range[i].start; |
| 253 | 265 | ||
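Note that `sum_ranges()` depends on `end` being inclusive, hence the `end + 1 - start` term; an off-by-one here would skew every coverage comparison the sanitizer makes later. A quick sanity check with invented values:

```c
#include <stdio.h>

struct res_range { unsigned long start, end; };   /* end is inclusive */

int main(void)
{
        struct res_range r[] = { { 0, 0xff }, { 0x200, 0x2ff } };
        unsigned long sum = 0;
        int i;

        for (i = 0; i < 2; i++)
                sum += r[i].end + 1 - r[i].start;
        printf("%lu pages covered\n", sum);       /* 256 + 256 = 512 */
        return 0;
}
```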
| @@ -278,17 +290,9 @@ static int __init mtrr_cleanup_debug_setup(char *str) | |||
| 278 | } | 290 | } |
| 279 | early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup); | 291 | early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup); |
| 280 | 292 | ||
| 281 | struct var_mtrr_state { | ||
| 282 | unsigned long range_startk; | ||
| 283 | unsigned long range_sizek; | ||
| 284 | unsigned long chunk_sizek; | ||
| 285 | unsigned long gran_sizek; | ||
| 286 | unsigned int reg; | ||
| 287 | }; | ||
| 288 | |||
| 289 | static void __init | 293 | static void __init |
| 290 | set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | 294 | set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, |
| 291 | unsigned char type, unsigned int address_bits) | 295 | unsigned char type, unsigned int address_bits) |
| 292 | { | 296 | { |
| 293 | u32 base_lo, base_hi, mask_lo, mask_hi; | 297 | u32 base_lo, base_hi, mask_lo, mask_hi; |
| 294 | u64 base, mask; | 298 | u64 base, mask; |
| @@ -301,7 +305,7 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | |||
| 301 | mask = (1ULL << address_bits) - 1; | 305 | mask = (1ULL << address_bits) - 1; |
| 302 | mask &= ~((((u64)sizek) << 10) - 1); | 306 | mask &= ~((((u64)sizek) << 10) - 1); |
| 303 | 307 | ||
| 304 | base = ((u64)basek) << 10; | 308 | base = ((u64)basek) << 10; |
| 305 | 309 | ||
| 306 | base |= type; | 310 | base |= type; |
| 307 | mask |= 0x800; | 311 | mask |= 0x800; |
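Worked through numerically, the base/mask arithmetic in set_var_mtrr() produces the PhysBase/PhysMask MSR pair like this (userspace sketch; the input values are invented):

```c
#include <stdio.h>

int main(void)
{
        unsigned int address_bits = 36;      /* assumed physical address width */
        unsigned long long basek = 0x100000; /* base: 1 GiB, in KiB units */
        unsigned long long sizek = 0x40000;  /* size: 256 MiB, in KiB units */
        unsigned char type = 6;              /* MTRR_TYPE_WRBACK */
        unsigned long long base, mask;

        /* Same arithmetic as set_var_mtrr() above: */
        mask = (1ULL << address_bits) - 1;
        mask &= ~((sizek << 10) - 1);        /* align the mask to the region size */

        base = basek << 10;                  /* KiB -> bytes */
        base |= type;                        /* memory type in the low byte */
        mask |= 0x800;                       /* 'valid' bit */

        printf("PhysBase=%#llx PhysMask=%#llx\n", base, mask);
        /* prints PhysBase=0x40000006 PhysMask=0xff0000800 */
        return 0;
}
```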
| @@ -317,15 +321,14 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | |||
| 317 | 321 | ||
| 318 | static void __init | 322 | static void __init |
| 319 | save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, | 323 | save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, |
| 320 | unsigned char type) | 324 | unsigned char type) |
| 321 | { | 325 | { |
| 322 | range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); | 326 | range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); |
| 323 | range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); | 327 | range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); |
| 324 | range_state[reg].type = type; | 328 | range_state[reg].type = type; |
| 325 | } | 329 | } |
| 326 | 330 | ||
| 327 | static void __init | 331 | static void __init set_var_mtrr_all(unsigned int address_bits) |
| 328 | set_var_mtrr_all(unsigned int address_bits) | ||
| 329 | { | 332 | { |
| 330 | unsigned long basek, sizek; | 333 | unsigned long basek, sizek; |
| 331 | unsigned char type; | 334 | unsigned char type; |
| @@ -342,11 +345,11 @@ set_var_mtrr_all(unsigned int address_bits) | |||
| 342 | 345 | ||
| 343 | static unsigned long to_size_factor(unsigned long sizek, char *factorp) | 346 | static unsigned long to_size_factor(unsigned long sizek, char *factorp) |
| 344 | { | 347 | { |
| 345 | char factor; | ||
| 346 | unsigned long base = sizek; | 348 | unsigned long base = sizek; |
| 349 | char factor; | ||
| 347 | 350 | ||
| 348 | if (base & ((1<<10) - 1)) { | 351 | if (base & ((1<<10) - 1)) { |
| 349 | /* not MB alignment */ | 352 | /* Not MB-aligned: */ |
| 350 | factor = 'K'; | 353 | factor = 'K'; |
| 351 | } else if (base & ((1<<20) - 1)) { | 354 | } else if (base & ((1<<20) - 1)) { |
| 352 | factor = 'M'; | 355 | factor = 'M'; |
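to_size_factor() reduces a KiB count to the largest whole unit for printing; the hunk cuts off before the function's tail, so the sketch below assumes the obvious G fallback for the remaining branch:

```c
#include <stdio.h>

/* Reduce a size in KiB to the largest whole K/M/G unit, as above. */
static unsigned long to_size_factor(unsigned long sizek, char *factorp)
{
        unsigned long base = sizek;
        char factor;

        if (base & ((1 << 10) - 1)) {
                factor = 'K';           /* not MiB-aligned */
        } else if (base & ((1 << 20) - 1)) {
                factor = 'M';
                base >>= 10;
        } else {
                factor = 'G';           /* assumed final branch */
                base >>= 20;
        }
        *factorp = factor;
        return base;
}

int main(void)
{
        char f;
        unsigned long v = to_size_factor(262144, &f);  /* 262144 KiB = 256 MiB */

        printf("%lu%c\n", v, f);                       /* prints "256M" */
        return 0;
}
```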
| @@ -372,11 +375,12 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk, | |||
| 372 | unsigned long max_align, align; | 375 | unsigned long max_align, align; |
| 373 | unsigned long sizek; | 376 | unsigned long sizek; |
| 374 | 377 | ||
| 375 | /* Compute the maximum size I can make a range */ | 378 | /* Compute the maximum size with which we can make a range: */ |
| 376 | if (range_startk) | 379 | if (range_startk) |
| 377 | max_align = ffs(range_startk) - 1; | 380 | max_align = ffs(range_startk) - 1; |
| 378 | else | 381 | else |
| 379 | max_align = 32; | 382 | max_align = 32; |
| 383 | |||
| 380 | align = fls(range_sizek) - 1; | 384 | align = fls(range_sizek) - 1; |
| 381 | if (align > max_align) | 385 | if (align > max_align) |
| 382 | align = max_align; | 386 | align = max_align; |
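The ffs()/fls() pair above picks the largest power-of-two block that is both aligned at the current start and no bigger than what remains, which is the core constraint of MTRR layout. A userspace check (fls() is open-coded here since it is a kernel helper):

```c
#include <stdio.h>
#include <strings.h>    /* ffs() */

/* Open-coded fls(): 1-based index of the highest set bit, 0 for 0. */
static int fls_sketch(unsigned long x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        unsigned long range_startk = 0x3000;    /* start: 12 MiB, in KiB */
        unsigned long range_sizek  = 0x2800;    /* size left: 10 MiB */
        int max_align, align;

        /* Largest power-of-two block the start alignment allows: */
        max_align = range_startk ? ffs((int)range_startk) - 1 : 32;
        /* Largest power-of-two block that still fits the remaining size: */
        align = fls_sketch(range_sizek) - 1;
        if (align > max_align)
                align = max_align;

        printf("next MTRR block: %lu KiB\n", 1UL << align); /* 4096 KiB */
        return 0;
}
```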
| @@ -386,11 +390,10 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk, | |||
| 386 | char start_factor = 'K', size_factor = 'K'; | 390 | char start_factor = 'K', size_factor = 'K'; |
| 387 | unsigned long start_base, size_base; | 391 | unsigned long start_base, size_base; |
| 388 | 392 | ||
| 389 | start_base = to_size_factor(range_startk, | 393 | start_base = to_size_factor(range_startk, &start_factor); |
| 390 | &start_factor), | 394 | size_base = to_size_factor(sizek, &size_factor); |
| 391 | size_base = to_size_factor(sizek, &size_factor), | ||
| 392 | 395 | ||
| 393 | printk(KERN_DEBUG "Setting variable MTRR %d, " | 396 | Dprintk("Setting variable MTRR %d, " |
| 394 | "base: %ld%cB, range: %ld%cB, type %s\n", | 397 | "base: %ld%cB, range: %ld%cB, type %s\n", |
| 395 | reg, start_base, start_factor, | 398 | reg, start_base, start_factor, |
| 396 | size_base, size_factor, | 399 | size_base, size_factor, |
| @@ -425,10 +428,11 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
| 425 | chunk_sizek = state->chunk_sizek; | 428 | chunk_sizek = state->chunk_sizek; |
| 426 | gran_sizek = state->gran_sizek; | 429 | gran_sizek = state->gran_sizek; |
| 427 | 430 | ||
| 428 | /* align with gran size, prevent small block used up MTRRs */ | 431 | /* Align with gran size, prevent small block used up MTRRs: */ |
| 429 | range_basek = ALIGN(state->range_startk, gran_sizek); | 432 | range_basek = ALIGN(state->range_startk, gran_sizek); |
| 430 | if ((range_basek > basek) && basek) | 433 | if ((range_basek > basek) && basek) |
| 431 | return second_sizek; | 434 | return second_sizek; |
| 435 | |||
| 432 | state->range_sizek -= (range_basek - state->range_startk); | 436 | state->range_sizek -= (range_basek - state->range_startk); |
| 433 | range_sizek = ALIGN(state->range_sizek, gran_sizek); | 437 | range_sizek = ALIGN(state->range_sizek, gran_sizek); |
| 434 | 438 | ||
| @@ -439,22 +443,21 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
| 439 | } | 443 | } |
| 440 | state->range_sizek = range_sizek; | 444 | state->range_sizek = range_sizek; |
| 441 | 445 | ||
| 442 | /* try to append some small hole */ | 446 | /* Try to append some small hole: */ |
| 443 | range0_basek = state->range_startk; | 447 | range0_basek = state->range_startk; |
| 444 | range0_sizek = ALIGN(state->range_sizek, chunk_sizek); | 448 | range0_sizek = ALIGN(state->range_sizek, chunk_sizek); |
| 445 | 449 | ||
| 446 | /* no increase */ | 450 | /* No increase: */ |
| 447 | if (range0_sizek == state->range_sizek) { | 451 | if (range0_sizek == state->range_sizek) { |
| 448 | if (debug_print) | 452 | Dprintk("rangeX: %016lx - %016lx\n", |
| 449 | printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", | 453 | range0_basek<<10, |
| 450 | range0_basek<<10, | 454 | (range0_basek + state->range_sizek)<<10); |
| 451 | (range0_basek + state->range_sizek)<<10); | ||
| 452 | state->reg = range_to_mtrr(state->reg, range0_basek, | 455 | state->reg = range_to_mtrr(state->reg, range0_basek, |
| 453 | state->range_sizek, MTRR_TYPE_WRBACK); | 456 | state->range_sizek, MTRR_TYPE_WRBACK); |
| 454 | return 0; | 457 | return 0; |
| 455 | } | 458 | } |
| 456 | 459 | ||
| 457 | /* only cut back, when it is not the last */ | 460 | /* Only cut back when it is not the last: */ |
| 458 | if (sizek) { | 461 | if (sizek) { |
| 459 | while (range0_basek + range0_sizek > (basek + sizek)) { | 462 | while (range0_basek + range0_sizek > (basek + sizek)) { |
| 460 | if (range0_sizek >= chunk_sizek) | 463 | if (range0_sizek >= chunk_sizek) |
| @@ -470,16 +473,16 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, | |||
| 470 | second_try: | 473 | second_try: |
| 471 | range_basek = range0_basek + range0_sizek; | 474 | range_basek = range0_basek + range0_sizek; |
| 472 | 475 | ||
| 473 | /* one hole in the middle */ | 476 | /* One hole in the middle: */ |
| 474 | if (range_basek > basek && range_basek <= (basek + sizek)) | 477 | if (range_basek > basek && range_basek <= (basek + sizek)) |
| 475 | second_sizek = range_basek - basek; | 478 | second_sizek = range_basek - basek; |
| 476 | 479 | ||
| 477 | if (range0_sizek > state->range_sizek) { | 480 | if (range0_sizek > state->range_sizek) { |
| 478 | 481 | ||
| 479 | /* one hole in middle or at end */ | 482 | /* One hole in middle or at the end: */ |
| 480 | hole_sizek = range0_sizek - state->range_sizek - second_sizek; | 483 | hole_sizek = range0_sizek - state->range_sizek - second_sizek; |
| 481 | 484 | ||
| 482 | /* hole size should be less than half of range0 size */ | 485 | /* Hole size should be less than half of range0 size: */ |
| 483 | if (hole_sizek >= (range0_sizek >> 1) && | 486 | if (hole_sizek >= (range0_sizek >> 1) && |
| 484 | range0_sizek >= chunk_sizek) { | 487 | range0_sizek >= chunk_sizek) { |
| 485 | range0_sizek -= chunk_sizek; | 488 | range0_sizek -= chunk_sizek; |
| @@ -491,32 +494,30 @@ second_try: | |||
| 491 | } | 494 | } |
| 492 | 495 | ||
| 493 | if (range0_sizek) { | 496 | if (range0_sizek) { |
| 494 | if (debug_print) | 497 | Dprintk("range0: %016lx - %016lx\n", |
| 495 | printk(KERN_DEBUG "range0: %016lx - %016lx\n", | 498 | range0_basek<<10, |
| 496 | range0_basek<<10, | 499 | (range0_basek + range0_sizek)<<10); |
| 497 | (range0_basek + range0_sizek)<<10); | ||
| 498 | state->reg = range_to_mtrr(state->reg, range0_basek, | 500 | state->reg = range_to_mtrr(state->reg, range0_basek, |
| 499 | range0_sizek, MTRR_TYPE_WRBACK); | 501 | range0_sizek, MTRR_TYPE_WRBACK); |
| 500 | } | 502 | } |
| 501 | 503 | ||
| 502 | if (range0_sizek < state->range_sizek) { | 504 | if (range0_sizek < state->range_sizek) { |
| 503 | /* need to handle left over */ | 505 | /* Need to handle left over range: */ |
| 504 | range_sizek = state->range_sizek - range0_sizek; | 506 | range_sizek = state->range_sizek - range0_sizek; |
| 505 | 507 | ||
| 506 | if (debug_print) | 508 | Dprintk("range: %016lx - %016lx\n", |
| 507 | printk(KERN_DEBUG "range: %016lx - %016lx\n", | 509 | range_basek<<10, |
| 508 | range_basek<<10, | 510 | (range_basek + range_sizek)<<10); |
| 509 | (range_basek + range_sizek)<<10); | 511 | |
| 510 | state->reg = range_to_mtrr(state->reg, range_basek, | 512 | state->reg = range_to_mtrr(state->reg, range_basek, |
| 511 | range_sizek, MTRR_TYPE_WRBACK); | 513 | range_sizek, MTRR_TYPE_WRBACK); |
| 512 | } | 514 | } |
| 513 | 515 | ||
| 514 | if (hole_sizek) { | 516 | if (hole_sizek) { |
| 515 | hole_basek = range_basek - hole_sizek - second_sizek; | 517 | hole_basek = range_basek - hole_sizek - second_sizek; |
| 516 | if (debug_print) | 518 | Dprintk("hole: %016lx - %016lx\n", |
| 517 | printk(KERN_DEBUG "hole: %016lx - %016lx\n", | 519 | hole_basek<<10, |
| 518 | hole_basek<<10, | 520 | (hole_basek + hole_sizek)<<10); |
| 519 | (hole_basek + hole_sizek)<<10); | ||
| 520 | state->reg = range_to_mtrr(state->reg, hole_basek, | 521 | state->reg = range_to_mtrr(state->reg, hole_basek, |
| 521 | hole_sizek, MTRR_TYPE_UNCACHABLE); | 522 | hole_sizek, MTRR_TYPE_UNCACHABLE); |
| 522 | } | 523 | } |
| @@ -537,23 +538,23 @@ set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn, | |||
| 537 | basek = base_pfn << (PAGE_SHIFT - 10); | 538 | basek = base_pfn << (PAGE_SHIFT - 10); |
| 538 | sizek = size_pfn << (PAGE_SHIFT - 10); | 539 | sizek = size_pfn << (PAGE_SHIFT - 10); |
| 539 | 540 | ||
| 540 | /* See if I can merge with the last range */ | 541 | /* See if I can merge with the last range: */ |
| 541 | if ((basek <= 1024) || | 542 | if ((basek <= 1024) || |
| 542 | (state->range_startk + state->range_sizek == basek)) { | 543 | (state->range_startk + state->range_sizek == basek)) { |
| 543 | unsigned long endk = basek + sizek; | 544 | unsigned long endk = basek + sizek; |
| 544 | state->range_sizek = endk - state->range_startk; | 545 | state->range_sizek = endk - state->range_startk; |
| 545 | return; | 546 | return; |
| 546 | } | 547 | } |
| 547 | /* Write the range mtrrs */ | 548 | /* Write the range mtrrs: */ |
| 548 | if (state->range_sizek != 0) | 549 | if (state->range_sizek != 0) |
| 549 | second_sizek = range_to_mtrr_with_hole(state, basek, sizek); | 550 | second_sizek = range_to_mtrr_with_hole(state, basek, sizek); |
| 550 | 551 | ||
| 551 | /* Allocate an msr */ | 552 | /* Allocate an msr: */ |
| 552 | state->range_startk = basek + second_sizek; | 553 | state->range_startk = basek + second_sizek; |
| 553 | state->range_sizek = sizek - second_sizek; | 554 | state->range_sizek = sizek - second_sizek; |
| 554 | } | 555 | } |
| 555 | 556 | ||
| 556 | /* mininum size of mtrr block that can take hole */ | 557 | /* Minimum size of mtrr block that can take a hole: */ |
| 557 | static u64 mtrr_chunk_size __initdata = (256ULL<<20); | 558 | static u64 mtrr_chunk_size __initdata = (256ULL<<20); |
| 558 | 559 | ||
| 559 | static int __init parse_mtrr_chunk_size_opt(char *p) | 560 | static int __init parse_mtrr_chunk_size_opt(char *p) |
| @@ -565,7 +566,7 @@ static int __init parse_mtrr_chunk_size_opt(char *p) | |||
| 565 | } | 566 | } |
| 566 | early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt); | 567 | early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt); |
| 567 | 568 | ||
| 568 | /* granity of mtrr of block */ | 569 | /* Granularity of an mtrr block: */ |
| 569 | static u64 mtrr_gran_size __initdata; | 570 | static u64 mtrr_gran_size __initdata; |
| 570 | 571 | ||
| 571 | static int __init parse_mtrr_gran_size_opt(char *p) | 572 | static int __init parse_mtrr_gran_size_opt(char *p) |
| @@ -577,7 +578,7 @@ static int __init parse_mtrr_gran_size_opt(char *p) | |||
| 577 | } | 578 | } |
| 578 | early_param("mtrr_gran_size", parse_mtrr_gran_size_opt); | 579 | early_param("mtrr_gran_size", parse_mtrr_gran_size_opt); |
| 579 | 580 | ||
| 580 | static int nr_mtrr_spare_reg __initdata = | 581 | static unsigned long nr_mtrr_spare_reg __initdata = |
| 581 | CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT; | 582 | CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT; |
| 582 | 583 | ||
| 583 | static int __init parse_mtrr_spare_reg(char *arg) | 584 | static int __init parse_mtrr_spare_reg(char *arg) |
| @@ -586,7 +587,6 @@ static int __init parse_mtrr_spare_reg(char *arg) | |||
| 586 | nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0); | 587 | nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0); |
| 587 | return 0; | 588 | return 0; |
| 588 | } | 589 | } |
| 589 | |||
| 590 | early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg); | 590 | early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg); |
| 591 | 591 | ||
| 592 | static int __init | 592 | static int __init |
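All three of these early_param handlers lean on memparse()-style parsing, which accepts a K/M/G suffix on the kernel command line (e.g. `mtrr_chunk_size=256M`). A minimal userspace stand-in for that behaviour (the real memparse() supports further suffixes):

```c
#include <stdio.h>
#include <stdlib.h>

/*
 * Tiny stand-in for the kernel's memparse(): a number with an optional
 * K/M/G suffix, as the mtrr_chunk_size= and mtrr_gran_size= options use.
 */
static unsigned long long memparse_sketch(const char *s, char **retptr)
{
        char *end;
        unsigned long long v = strtoull(s, &end, 0);

        switch (*end) {
        case 'G': case 'g': v <<= 10;   /* fall through */
        case 'M': case 'm': v <<= 10;   /* fall through */
        case 'K': case 'k': v <<= 10; end++;
        default: break;
        }
        if (retptr)
                *retptr = end;
        return v;
}

int main(void)
{
        printf("%llu\n", memparse_sketch("256M", NULL));  /* 268435456 */
        return 0;
}
```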
| @@ -594,8 +594,8 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range, | |||
| 594 | u64 chunk_size, u64 gran_size) | 594 | u64 chunk_size, u64 gran_size) |
| 595 | { | 595 | { |
| 596 | struct var_mtrr_state var_state; | 596 | struct var_mtrr_state var_state; |
| 597 | int i; | ||
| 598 | int num_reg; | 597 | int num_reg; |
| 598 | int i; | ||
| 599 | 599 | ||
| 600 | var_state.range_startk = 0; | 600 | var_state.range_startk = 0; |
| 601 | var_state.range_sizek = 0; | 601 | var_state.range_sizek = 0; |
| @@ -605,17 +605,18 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range, | |||
| 605 | 605 | ||
| 606 | memset(range_state, 0, sizeof(range_state)); | 606 | memset(range_state, 0, sizeof(range_state)); |
| 607 | 607 | ||
| 608 | /* Write the range etc */ | 608 | /* Write the range: */ |
| 609 | for (i = 0; i < nr_range; i++) | 609 | for (i = 0; i < nr_range; i++) { |
| 610 | set_var_mtrr_range(&var_state, range[i].start, | 610 | set_var_mtrr_range(&var_state, range[i].start, |
| 611 | range[i].end - range[i].start + 1); | 611 | range[i].end - range[i].start + 1); |
| 612 | } | ||
| 612 | 613 | ||
| 613 | /* Write the last range */ | 614 | /* Write the last range: */ |
| 614 | if (var_state.range_sizek != 0) | 615 | if (var_state.range_sizek != 0) |
| 615 | range_to_mtrr_with_hole(&var_state, 0, 0); | 616 | range_to_mtrr_with_hole(&var_state, 0, 0); |
| 616 | 617 | ||
| 617 | num_reg = var_state.reg; | 618 | num_reg = var_state.reg; |
| 618 | /* Clear out the extra MTRR's */ | 619 | /* Clear out the extra MTRR's: */ |
| 619 | while (var_state.reg < num_var_ranges) { | 620 | while (var_state.reg < num_var_ranges) { |
| 620 | save_var_mtrr(var_state.reg, 0, 0, 0); | 621 | save_var_mtrr(var_state.reg, 0, 0, 0); |
| 621 | var_state.reg++; | 622 | var_state.reg++; |
| @@ -625,11 +626,11 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range, | |||
| 625 | } | 626 | } |
| 626 | 627 | ||
| 627 | struct mtrr_cleanup_result { | 628 | struct mtrr_cleanup_result { |
| 628 | unsigned long gran_sizek; | 629 | unsigned long gran_sizek; |
| 629 | unsigned long chunk_sizek; | 630 | unsigned long chunk_sizek; |
| 630 | unsigned long lose_cover_sizek; | 631 | unsigned long lose_cover_sizek; |
| 631 | unsigned int num_reg; | 632 | unsigned int num_reg; |
| 632 | int bad; | 633 | int bad; |
| 633 | }; | 634 | }; |
| 634 | 635 | ||
| 635 | /* | 636 | /* |
| @@ -645,10 +646,10 @@ static unsigned long __initdata min_loss_pfn[RANGE_NUM]; | |||
| 645 | 646 | ||
| 646 | static void __init print_out_mtrr_range_state(void) | 647 | static void __init print_out_mtrr_range_state(void) |
| 647 | { | 648 | { |
| 648 | int i; | ||
| 649 | char start_factor = 'K', size_factor = 'K'; | 649 | char start_factor = 'K', size_factor = 'K'; |
| 650 | unsigned long start_base, size_base; | 650 | unsigned long start_base, size_base; |
| 651 | mtrr_type type; | 651 | mtrr_type type; |
| 652 | int i; | ||
| 652 | 653 | ||
| 653 | for (i = 0; i < num_var_ranges; i++) { | 654 | for (i = 0; i < num_var_ranges; i++) { |
| 654 | 655 | ||
| @@ -676,10 +677,10 @@ static int __init mtrr_need_cleanup(void) | |||
| 676 | int i; | 677 | int i; |
| 677 | mtrr_type type; | 678 | mtrr_type type; |
| 678 | unsigned long size; | 679 | unsigned long size; |
| 679 | /* extra one for all 0 */ | 680 | /* Extra one for all 0: */ |
| 680 | int num[MTRR_NUM_TYPES + 1]; | 681 | int num[MTRR_NUM_TYPES + 1]; |
| 681 | 682 | ||
| 682 | /* check entries number */ | 683 | /* Check entries number: */ |
| 683 | memset(num, 0, sizeof(num)); | 684 | memset(num, 0, sizeof(num)); |
| 684 | for (i = 0; i < num_var_ranges; i++) { | 685 | for (i = 0; i < num_var_ranges; i++) { |
| 685 | type = range_state[i].type; | 686 | type = range_state[i].type; |
| @@ -693,88 +694,86 @@ static int __init mtrr_need_cleanup(void) | |||
| 693 | num[type]++; | 694 | num[type]++; |
| 694 | } | 695 | } |
| 695 | 696 | ||
| 696 | /* check if we got UC entries */ | 697 | /* Check if we got UC entries: */ |
| 697 | if (!num[MTRR_TYPE_UNCACHABLE]) | 698 | if (!num[MTRR_TYPE_UNCACHABLE]) |
| 698 | return 0; | 699 | return 0; |
| 699 | 700 | ||
| 700 | /* check if we only had WB and UC */ | 701 | /* Check if we only had WB and UC */ |
| 701 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != | 702 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != |
| 702 | num_var_ranges - num[MTRR_NUM_TYPES]) | 703 | num_var_ranges - num[MTRR_NUM_TYPES]) |
| 703 | return 0; | 704 | return 0; |
| 704 | 705 | ||
| 705 | return 1; | 706 | return 1; |
| 706 | } | 707 | } |
| 707 | 708 | ||
| 708 | static unsigned long __initdata range_sums; | 709 | static unsigned long __initdata range_sums; |
| 709 | static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size, | 710 | |
| 710 | unsigned long extra_remove_base, | 711 | static void __init |
| 711 | unsigned long extra_remove_size, | 712 | mtrr_calc_range_state(u64 chunk_size, u64 gran_size, |
| 712 | int i) | 713 | unsigned long x_remove_base, |
| 714 | unsigned long x_remove_size, int i) | ||
| 713 | { | 715 | { |
| 714 | int num_reg; | ||
| 715 | static struct res_range range_new[RANGE_NUM]; | 716 | static struct res_range range_new[RANGE_NUM]; |
| 716 | static int nr_range_new; | ||
| 717 | unsigned long range_sums_new; | 717 | unsigned long range_sums_new; |
| 718 | static int nr_range_new; | ||
| 719 | int num_reg; | ||
| 718 | 720 | ||
| 719 | /* convert ranges to var ranges state */ | 721 | /* Convert ranges to var ranges state: */ |
| 720 | num_reg = x86_setup_var_mtrrs(range, nr_range, | 722 | num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); |
| 721 | chunk_size, gran_size); | ||
| 722 | 723 | ||
| 723 | /* we got new setting in range_state, check it */ | 724 | /* We got new setting in range_state, check it: */ |
| 724 | memset(range_new, 0, sizeof(range_new)); | 725 | memset(range_new, 0, sizeof(range_new)); |
| 725 | nr_range_new = x86_get_mtrr_mem_range(range_new, 0, | 726 | nr_range_new = x86_get_mtrr_mem_range(range_new, 0, |
| 726 | extra_remove_base, extra_remove_size); | 727 | x_remove_base, x_remove_size); |
| 727 | range_sums_new = sum_ranges(range_new, nr_range_new); | 728 | range_sums_new = sum_ranges(range_new, nr_range_new); |
| 728 | 729 | ||
| 729 | result[i].chunk_sizek = chunk_size >> 10; | 730 | result[i].chunk_sizek = chunk_size >> 10; |
| 730 | result[i].gran_sizek = gran_size >> 10; | 731 | result[i].gran_sizek = gran_size >> 10; |
| 731 | result[i].num_reg = num_reg; | 732 | result[i].num_reg = num_reg; |
| 733 | |||
| 732 | if (range_sums < range_sums_new) { | 734 | if (range_sums < range_sums_new) { |
| 733 | result[i].lose_cover_sizek = | 735 | result[i].lose_cover_sizek = (range_sums_new - range_sums) << PSHIFT; |
| 734 | (range_sums_new - range_sums) << PSHIFT; | ||
| 735 | result[i].bad = 1; | 736 | result[i].bad = 1; |
| 736 | } else | 737 | } else { |
| 737 | result[i].lose_cover_sizek = | 738 | result[i].lose_cover_sizek = (range_sums - range_sums_new) << PSHIFT; |
| 738 | (range_sums - range_sums_new) << PSHIFT; | 739 | } |
| 739 | 740 | ||
| 740 | /* double check it */ | 741 | /* Double check it: */ |
| 741 | if (!result[i].bad && !result[i].lose_cover_sizek) { | 742 | if (!result[i].bad && !result[i].lose_cover_sizek) { |
| 742 | if (nr_range_new != nr_range || | 743 | if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range))) |
| 743 | memcmp(range, range_new, sizeof(range))) | 744 | result[i].bad = 1; |
| 744 | result[i].bad = 1; | ||
| 745 | } | 745 | } |
| 746 | 746 | ||
| 747 | if (!result[i].bad && (range_sums - range_sums_new < | 747 | if (!result[i].bad && (range_sums - range_sums_new < min_loss_pfn[num_reg])) |
| 748 | min_loss_pfn[num_reg])) { | 748 | min_loss_pfn[num_reg] = range_sums - range_sums_new; |
| 749 | min_loss_pfn[num_reg] = | ||
| 750 | range_sums - range_sums_new; | ||
| 751 | } | ||
| 752 | } | 749 | } |
| 753 | 750 | ||
| 754 | static void __init mtrr_print_out_one_result(int i) | 751 | static void __init mtrr_print_out_one_result(int i) |
| 755 | { | 752 | { |
| 756 | char gran_factor, chunk_factor, lose_factor; | ||
| 757 | unsigned long gran_base, chunk_base, lose_base; | 753 | unsigned long gran_base, chunk_base, lose_base; |
| 754 | char gran_factor, chunk_factor, lose_factor; | ||
| 758 | 755 | ||
| 759 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), | 756 | gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), |
| 760 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), | 757 | chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), |
| 761 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), | 758 | lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), |
| 762 | printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t", | 759 | |
| 763 | result[i].bad ? "*BAD*" : " ", | 760 | pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t", |
| 764 | gran_base, gran_factor, chunk_base, chunk_factor); | 761 | result[i].bad ? "*BAD*" : " ", |
| 765 | printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n", | 762 | gran_base, gran_factor, chunk_base, chunk_factor); |
| 766 | result[i].num_reg, result[i].bad ? "-" : "", | 763 | pr_cont("num_reg: %d \tlose cover RAM: %s%ld%c\n", |
| 767 | lose_base, lose_factor); | 764 | result[i].num_reg, result[i].bad ? "-" : "", |
| 765 | lose_base, lose_factor); | ||
| 768 | } | 766 | } |
| 769 | 767 | ||
| 770 | static int __init mtrr_search_optimal_index(void) | 768 | static int __init mtrr_search_optimal_index(void) |
| 771 | { | 769 | { |
| 772 | int i; | ||
| 773 | int num_reg_good; | 770 | int num_reg_good; |
| 774 | int index_good; | 771 | int index_good; |
| 772 | int i; | ||
| 775 | 773 | ||
| 776 | if (nr_mtrr_spare_reg >= num_var_ranges) | 774 | if (nr_mtrr_spare_reg >= num_var_ranges) |
| 777 | nr_mtrr_spare_reg = num_var_ranges - 1; | 775 | nr_mtrr_spare_reg = num_var_ranges - 1; |
| 776 | |||
| 778 | num_reg_good = -1; | 777 | num_reg_good = -1; |
| 779 | for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { | 778 | for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { |
| 780 | if (!min_loss_pfn[i]) | 779 | if (!min_loss_pfn[i]) |
| @@ -796,24 +795,24 @@ static int __init mtrr_search_optimal_index(void) | |||
| 796 | return index_good; | 795 | return index_good; |
| 797 | } | 796 | } |
| 798 | 797 | ||
| 799 | |||
| 800 | int __init mtrr_cleanup(unsigned address_bits) | 798 | int __init mtrr_cleanup(unsigned address_bits) |
| 801 | { | 799 | { |
| 802 | unsigned long extra_remove_base, extra_remove_size; | 800 | unsigned long x_remove_base, x_remove_size; |
| 803 | unsigned long base, size, def, dummy; | 801 | unsigned long base, size, def, dummy; |
| 804 | mtrr_type type; | ||
| 805 | u64 chunk_size, gran_size; | 802 | u64 chunk_size, gran_size; |
| 803 | mtrr_type type; | ||
| 806 | int index_good; | 804 | int index_good; |
| 807 | int i; | 805 | int i; |
| 808 | 806 | ||
| 809 | if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) | 807 | if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) |
| 810 | return 0; | 808 | return 0; |
| 809 | |||
| 811 | rdmsr(MSR_MTRRdefType, def, dummy); | 810 | rdmsr(MSR_MTRRdefType, def, dummy); |
| 812 | def &= 0xff; | 811 | def &= 0xff; |
| 813 | if (def != MTRR_TYPE_UNCACHABLE) | 812 | if (def != MTRR_TYPE_UNCACHABLE) |
| 814 | return 0; | 813 | return 0; |
| 815 | 814 | ||
| 816 | /* get it and store it aside */ | 815 | /* Get it and store it aside: */ |
| 817 | memset(range_state, 0, sizeof(range_state)); | 816 | memset(range_state, 0, sizeof(range_state)); |
| 818 | for (i = 0; i < num_var_ranges; i++) { | 817 | for (i = 0; i < num_var_ranges; i++) { |
| 819 | mtrr_if->get(i, &base, &size, &type); | 818 | mtrr_if->get(i, &base, &size, &type); |
| @@ -822,29 +821,28 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
| 822 | range_state[i].type = type; | 821 | range_state[i].type = type; |
| 823 | } | 822 | } |
| 824 | 823 | ||
| 825 | /* check if we need handle it and can handle it */ | 824 | /* Check if we need to handle it and can handle it: */ |
| 826 | if (!mtrr_need_cleanup()) | 825 | if (!mtrr_need_cleanup()) |
| 827 | return 0; | 826 | return 0; |
| 828 | 827 | ||
| 829 | /* print original var MTRRs at first, for debugging: */ | 828 | /* Print original var MTRRs at first, for debugging: */ |
| 830 | printk(KERN_DEBUG "original variable MTRRs\n"); | 829 | printk(KERN_DEBUG "original variable MTRRs\n"); |
| 831 | print_out_mtrr_range_state(); | 830 | print_out_mtrr_range_state(); |
| 832 | 831 | ||
| 833 | memset(range, 0, sizeof(range)); | 832 | memset(range, 0, sizeof(range)); |
| 834 | extra_remove_size = 0; | 833 | x_remove_size = 0; |
| 835 | extra_remove_base = 1 << (32 - PAGE_SHIFT); | 834 | x_remove_base = 1 << (32 - PAGE_SHIFT); |
| 836 | if (mtrr_tom2) | 835 | if (mtrr_tom2) |
| 837 | extra_remove_size = | 836 | x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; |
| 838 | (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base; | 837 | |
| 839 | nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, | 838 | nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size); |
| 840 | extra_remove_size); | ||
| 841 | /* | 839 | /* |
| 842 | * [0, 1M) should always be coverred by var mtrr with WB | 840 | * [0, 1M) should always be covered by var mtrr with WB |
| 843 | * and fixed mtrrs should take effective before var mtrr for it | 841 | * and fixed mtrrs should take effect before var mtrr for it: |
| 844 | */ | 842 | */ |
| 845 | nr_range = add_range_with_merge(range, nr_range, 0, | 843 | nr_range = add_range_with_merge(range, nr_range, 0, |
| 846 | (1ULL<<(20 - PAGE_SHIFT)) - 1); | 844 | (1ULL<<(20 - PAGE_SHIFT)) - 1); |
| 847 | /* sort the ranges */ | 845 | /* Sort the ranges: */ |
| 848 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); | 846 | sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); |
| 849 | 847 | ||
| 850 | range_sums = sum_ranges(range, nr_range); | 848 | range_sums = sum_ranges(range, nr_range); |
| @@ -854,7 +852,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
| 854 | if (mtrr_chunk_size && mtrr_gran_size) { | 852 | if (mtrr_chunk_size && mtrr_gran_size) { |
| 855 | i = 0; | 853 | i = 0; |
| 856 | mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size, | 854 | mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size, |
| 857 | extra_remove_base, extra_remove_size, i); | 855 | x_remove_base, x_remove_size, i); |
| 858 | 856 | ||
| 859 | mtrr_print_out_one_result(i); | 857 | mtrr_print_out_one_result(i); |
| 860 | 858 | ||
| @@ -880,7 +878,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
| 880 | continue; | 878 | continue; |
| 881 | 879 | ||
| 882 | mtrr_calc_range_state(chunk_size, gran_size, | 880 | mtrr_calc_range_state(chunk_size, gran_size, |
| 883 | extra_remove_base, extra_remove_size, i); | 881 | x_remove_base, x_remove_size, i); |
| 884 | if (debug_print) { | 882 | if (debug_print) { |
| 885 | mtrr_print_out_one_result(i); | 883 | mtrr_print_out_one_result(i); |
| 886 | printk(KERN_INFO "\n"); | 884 | printk(KERN_INFO "\n"); |
| @@ -890,7 +888,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
| 890 | } | 888 | } |
| 891 | } | 889 | } |
| 892 | 890 | ||
| 893 | /* try to find the optimal index */ | 891 | /* Try to find the optimal index: */ |
| 894 | index_good = mtrr_search_optimal_index(); | 892 | index_good = mtrr_search_optimal_index(); |
| 895 | 893 | ||
| 896 | if (index_good != -1) { | 894 | if (index_good != -1) { |
| @@ -898,7 +896,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
| 898 | i = index_good; | 896 | i = index_good; |
| 899 | mtrr_print_out_one_result(i); | 897 | mtrr_print_out_one_result(i); |
| 900 | 898 | ||
| 901 | /* convert ranges to var ranges state */ | 899 | /* Convert ranges to var ranges state: */ |
| 902 | chunk_size = result[i].chunk_sizek; | 900 | chunk_size = result[i].chunk_sizek; |
| 903 | chunk_size <<= 10; | 901 | chunk_size <<= 10; |
| 904 | gran_size = result[i].gran_sizek; | 902 | gran_size = result[i].gran_sizek; |
| @@ -941,8 +939,8 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup); | |||
| 941 | * Note this won't check if the MTRRs < 4GB where the magic bit doesn't | 939 | * Note this won't check if the MTRRs < 4GB where the magic bit doesn't |
| 942 | * apply to are wrong, but so far we don't know of any such case in the wild. | 940 | * apply to are wrong, but so far we don't know of any such case in the wild. |
| 943 | */ | 941 | */ |
| 944 | #define Tom2Enabled (1U << 21) | 942 | #define Tom2Enabled (1U << 21) |
| 945 | #define Tom2ForceMemTypeWB (1U << 22) | 943 | #define Tom2ForceMemTypeWB (1U << 22) |
| 946 | 944 | ||
| 947 | int __init amd_special_default_mtrr(void) | 945 | int __init amd_special_default_mtrr(void) |
| 948 | { | 946 | { |
| @@ -952,7 +950,7 @@ int __init amd_special_default_mtrr(void) | |||
| 952 | return 0; | 950 | return 0; |
| 953 | if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) | 951 | if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) |
| 954 | return 0; | 952 | return 0; |
| 955 | /* In case some hypervisor doesn't pass SYSCFG through */ | 953 | /* In case some hypervisor doesn't pass SYSCFG through: */ |
| 956 | if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) | 954 | if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) |
| 957 | return 0; | 955 | return 0; |
| 958 | /* | 956 | /* |
| @@ -965,19 +963,21 @@ int __init amd_special_default_mtrr(void) | |||
| 965 | return 0; | 963 | return 0; |
| 966 | } | 964 | } |
| 967 | 965 | ||
| 968 | static u64 __init real_trim_memory(unsigned long start_pfn, | 966 | static u64 __init |
| 969 | unsigned long limit_pfn) | 967 | real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn) |
| 970 | { | 968 | { |
| 971 | u64 trim_start, trim_size; | 969 | u64 trim_start, trim_size; |
| 970 | |||
| 972 | trim_start = start_pfn; | 971 | trim_start = start_pfn; |
| 973 | trim_start <<= PAGE_SHIFT; | 972 | trim_start <<= PAGE_SHIFT; |
| 973 | |||
| 974 | trim_size = limit_pfn; | 974 | trim_size = limit_pfn; |
| 975 | trim_size <<= PAGE_SHIFT; | 975 | trim_size <<= PAGE_SHIFT; |
| 976 | trim_size -= trim_start; | 976 | trim_size -= trim_start; |
| 977 | 977 | ||
| 978 | return e820_update_range(trim_start, trim_size, E820_RAM, | 978 | return e820_update_range(trim_start, trim_size, E820_RAM, E820_RESERVED); |
| 979 | E820_RESERVED); | ||
| 980 | } | 979 | } |
| 980 | |||
| 981 | /** | 981 | /** |
| 982 | * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs | 982 | * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs |
| 983 | * @end_pfn: ending page frame number | 983 | * @end_pfn: ending page frame number |
| @@ -985,7 +985,7 @@ static u64 __init real_trim_memory(unsigned long start_pfn, | |||
| 985 | * Some buggy BIOSes don't setup the MTRRs properly for systems with certain | 985 | * Some buggy BIOSes don't setup the MTRRs properly for systems with certain |
| 986 | * memory configurations. This routine checks that the highest MTRR matches | 986 | * memory configurations. This routine checks that the highest MTRR matches |
| 987 | * the end of memory, to make sure the MTRRs having a write back type cover | 987 | * the end of memory, to make sure the MTRRs having a write back type cover |
| 988 | * all of the memory the kernel is intending to use. If not, it'll trim any | 988 | * all of the memory the kernel is intending to use. If not, it'll trim any |
| 989 | * memory off the end by adjusting end_pfn, removing it from the kernel's | 989 | * memory off the end by adjusting end_pfn, removing it from the kernel's |
| 990 | * allocation pools, warning the user with an obnoxious message. | 990 | * allocation pools, warning the user with an obnoxious message. |
| 991 | */ | 991 | */ |
| @@ -994,21 +994,22 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
| 994 | unsigned long i, base, size, highest_pfn = 0, def, dummy; | 994 | unsigned long i, base, size, highest_pfn = 0, def, dummy; |
| 995 | mtrr_type type; | 995 | mtrr_type type; |
| 996 | u64 total_trim_size; | 996 | u64 total_trim_size; |
| 997 | |||
| 998 | /* extra one for all 0 */ | 997 | /* extra one for all 0 */ |
| 999 | int num[MTRR_NUM_TYPES + 1]; | 998 | int num[MTRR_NUM_TYPES + 1]; |
| 999 | |||
| 1000 | /* | 1000 | /* |
| 1001 | * Make sure we only trim uncachable memory on machines that | 1001 | * Make sure we only trim uncachable memory on machines that |
| 1002 | * support the Intel MTRR architecture: | 1002 | * support the Intel MTRR architecture: |
| 1003 | */ | 1003 | */ |
| 1004 | if (!is_cpu(INTEL) || disable_mtrr_trim) | 1004 | if (!is_cpu(INTEL) || disable_mtrr_trim) |
| 1005 | return 0; | 1005 | return 0; |
| 1006 | |||
| 1006 | rdmsr(MSR_MTRRdefType, def, dummy); | 1007 | rdmsr(MSR_MTRRdefType, def, dummy); |
| 1007 | def &= 0xff; | 1008 | def &= 0xff; |
| 1008 | if (def != MTRR_TYPE_UNCACHABLE) | 1009 | if (def != MTRR_TYPE_UNCACHABLE) |
| 1009 | return 0; | 1010 | return 0; |
| 1010 | 1011 | ||
| 1011 | /* get it and store it aside */ | 1012 | /* Get it and store it aside: */ |
| 1012 | memset(range_state, 0, sizeof(range_state)); | 1013 | memset(range_state, 0, sizeof(range_state)); |
| 1013 | for (i = 0; i < num_var_ranges; i++) { | 1014 | for (i = 0; i < num_var_ranges; i++) { |
| 1014 | mtrr_if->get(i, &base, &size, &type); | 1015 | mtrr_if->get(i, &base, &size, &type); |
| @@ -1017,7 +1018,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
| 1017 | range_state[i].type = type; | 1018 | range_state[i].type = type; |
| 1018 | } | 1019 | } |
| 1019 | 1020 | ||
| 1020 | /* Find highest cached pfn */ | 1021 | /* Find highest cached pfn: */ |
| 1021 | for (i = 0; i < num_var_ranges; i++) { | 1022 | for (i = 0; i < num_var_ranges; i++) { |
| 1022 | type = range_state[i].type; | 1023 | type = range_state[i].type; |
| 1023 | if (type != MTRR_TYPE_WRBACK) | 1024 | if (type != MTRR_TYPE_WRBACK) |
| @@ -1028,13 +1029,13 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
| 1028 | highest_pfn = base + size; | 1029 | highest_pfn = base + size; |
| 1029 | } | 1030 | } |
| 1030 | 1031 | ||
| 1031 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ | 1032 | /* kvm/qemu doesn't have mtrr set right, don't trim them all: */ |
| 1032 | if (!highest_pfn) { | 1033 | if (!highest_pfn) { |
| 1033 | printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); | 1034 | printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); |
| 1034 | return 0; | 1035 | return 0; |
| 1035 | } | 1036 | } |
| 1036 | 1037 | ||
| 1037 | /* check entries number */ | 1038 | /* Check entries number: */ |
| 1038 | memset(num, 0, sizeof(num)); | 1039 | memset(num, 0, sizeof(num)); |
| 1039 | for (i = 0; i < num_var_ranges; i++) { | 1040 | for (i = 0; i < num_var_ranges; i++) { |
| 1040 | type = range_state[i].type; | 1041 | type = range_state[i].type; |
| @@ -1046,11 +1047,11 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
| 1046 | num[type]++; | 1047 | num[type]++; |
| 1047 | } | 1048 | } |
| 1048 | 1049 | ||
| 1049 | /* no entry for WB? */ | 1050 | /* No entry for WB? */ |
| 1050 | if (!num[MTRR_TYPE_WRBACK]) | 1051 | if (!num[MTRR_TYPE_WRBACK]) |
| 1051 | return 0; | 1052 | return 0; |
| 1052 | 1053 | ||
| 1053 | /* check if we only had WB and UC */ | 1054 | /* Check if we only had WB and UC: */ |
| 1054 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != | 1055 | if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != |
| 1055 | num_var_ranges - num[MTRR_NUM_TYPES]) | 1056 | num_var_ranges - num[MTRR_NUM_TYPES]) |
| 1056 | return 0; | 1057 | return 0; |
| @@ -1066,31 +1067,31 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
| 1066 | } | 1067 | } |
| 1067 | nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); | 1068 | nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); |
| 1068 | 1069 | ||
| 1070 | /* Check the head: */ | ||
| 1069 | total_trim_size = 0; | 1071 | total_trim_size = 0; |
| 1070 | /* check the head */ | ||
| 1071 | if (range[0].start) | 1072 | if (range[0].start) |
| 1072 | total_trim_size += real_trim_memory(0, range[0].start); | 1073 | total_trim_size += real_trim_memory(0, range[0].start); |
| 1073 | /* check the holes */ | 1074 | |
| 1075 | /* Check the holes: */ | ||
| 1074 | for (i = 0; i < nr_range - 1; i++) { | 1076 | for (i = 0; i < nr_range - 1; i++) { |
| 1075 | if (range[i].end + 1 < range[i+1].start) | 1077 | if (range[i].end + 1 < range[i+1].start) |
| 1076 | total_trim_size += real_trim_memory(range[i].end + 1, | 1078 | total_trim_size += real_trim_memory(range[i].end + 1, |
| 1077 | range[i+1].start); | 1079 | range[i+1].start); |
| 1078 | } | 1080 | } |
| 1079 | /* check the top */ | 1081 | |
| 1082 | /* Check the top: */ | ||
| 1080 | i = nr_range - 1; | 1083 | i = nr_range - 1; |
| 1081 | if (range[i].end + 1 < end_pfn) | 1084 | if (range[i].end + 1 < end_pfn) |
| 1082 | total_trim_size += real_trim_memory(range[i].end + 1, | 1085 | total_trim_size += real_trim_memory(range[i].end + 1, |
| 1083 | end_pfn); | 1086 | end_pfn); |
| 1084 | 1087 | ||
| 1085 | if (total_trim_size) { | 1088 | if (total_trim_size) { |
| 1086 | printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover" | 1089 | pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20); |
| 1087 | " all of memory, losing %lluMB of RAM.\n", | ||
| 1088 | total_trim_size >> 20); | ||
| 1089 | 1090 | ||
| 1090 | if (!changed_by_mtrr_cleanup) | 1091 | if (!changed_by_mtrr_cleanup) |
| 1091 | WARN_ON(1); | 1092 | WARN_ON(1); |
| 1092 | 1093 | ||
| 1093 | printk(KERN_INFO "update e820 for mtrr\n"); | 1094 | pr_info("update e820 for mtrr\n"); |
| 1094 | update_e820(); | 1095 | update_e820(); |
| 1095 | 1096 | ||
| 1096 | return 1; | 1097 | return 1; |
| @@ -1098,4 +1099,3 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
| 1098 | 1099 | ||
| 1099 | return 0; | 1100 | return 0; |
| 1100 | } | 1101 | } |
| 1101 | |||
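Taken together, the cleanup.c hunks above implement a brute-force search: for every power-of-two (gran_size, chunk_size) pair, rebuild a candidate variable-MTRR layout, re-derive the covered ranges, and keep the candidate that loses the least RAM for its register count. A toy skeleton of that scoring loop (layout() is a stub and the struct shape is assumed, not the kernel's):

```c
#include <stdio.h>

struct result {
        unsigned long long gran, chunk;
        int num_reg;
        unsigned long lost;     /* coverage lost, in pages */
};

/* Stub: pretend coarser granularity wastes a little more RAM. */
static int layout(unsigned long long gran, unsigned long long chunk,
                  unsigned long *lost)
{
        *lost = (unsigned long)(gran >> 20);
        return 4 + (chunk > (256ULL << 20));
}

int main(void)
{
        struct result best = { 0, 0, 0, ~0UL };
        unsigned long long gran, chunk;

        /* Same sweep shape as mtrr_cleanup(): powers of two, 64K .. 2G */
        for (gran = 1ULL << 16; gran < (1ULL << 32); gran <<= 1) {
                for (chunk = gran; chunk < (1ULL << 32); chunk <<= 1) {
                        unsigned long lost;
                        int num_reg = layout(gran, chunk, &lost);

                        if (lost < best.lost) {
                                struct result r = { gran, chunk, num_reg, lost };
                                best = r;
                        }
                }
        }
        printf("best: gran=%lluK chunk=%lluK regs=%d lost=%lu pages\n",
               best.gran >> 10, best.chunk >> 10, best.num_reg, best.lost);
        return 0;
}
```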
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c index ff14c320040c..228d982ce09c 100644 --- a/arch/x86/kernel/cpu/mtrr/cyrix.c +++ b/arch/x86/kernel/cpu/mtrr/cyrix.c | |||
| @@ -1,38 +1,40 @@ | |||
| 1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
| 2 | #include <linux/io.h> | ||
| 2 | #include <linux/mm.h> | 3 | #include <linux/mm.h> |
| 3 | #include <asm/mtrr.h> | 4 | |
| 4 | #include <asm/msr.h> | ||
| 5 | #include <asm/io.h> | ||
| 6 | #include <asm/processor-cyrix.h> | 5 | #include <asm/processor-cyrix.h> |
| 7 | #include <asm/processor-flags.h> | 6 | #include <asm/processor-flags.h> |
| 7 | #include <asm/mtrr.h> | ||
| 8 | #include <asm/msr.h> | ||
| 9 | |||
| 8 | #include "mtrr.h" | 10 | #include "mtrr.h" |
| 9 | 11 | ||
| 10 | static void | 12 | static void |
| 11 | cyrix_get_arr(unsigned int reg, unsigned long *base, | 13 | cyrix_get_arr(unsigned int reg, unsigned long *base, |
| 12 | unsigned long *size, mtrr_type * type) | 14 | unsigned long *size, mtrr_type * type) |
| 13 | { | 15 | { |
| 14 | unsigned long flags; | ||
| 15 | unsigned char arr, ccr3, rcr, shift; | 16 | unsigned char arr, ccr3, rcr, shift; |
| 17 | unsigned long flags; | ||
| 16 | 18 | ||
| 17 | arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ | 19 | arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ |
| 18 | 20 | ||
| 19 | /* Save flags and disable interrupts */ | ||
| 20 | local_irq_save(flags); | 21 | local_irq_save(flags); |
| 21 | 22 | ||
| 22 | ccr3 = getCx86(CX86_CCR3); | 23 | ccr3 = getCx86(CX86_CCR3); |
| 23 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | 24 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ |
| 24 | ((unsigned char *) base)[3] = getCx86(arr); | 25 | ((unsigned char *)base)[3] = getCx86(arr); |
| 25 | ((unsigned char *) base)[2] = getCx86(arr + 1); | 26 | ((unsigned char *)base)[2] = getCx86(arr + 1); |
| 26 | ((unsigned char *) base)[1] = getCx86(arr + 2); | 27 | ((unsigned char *)base)[1] = getCx86(arr + 2); |
| 27 | rcr = getCx86(CX86_RCR_BASE + reg); | 28 | rcr = getCx86(CX86_RCR_BASE + reg); |
| 28 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | 29 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ |
| 29 | 30 | ||
| 30 | /* Enable interrupts if it was enabled previously */ | ||
| 31 | local_irq_restore(flags); | 31 | local_irq_restore(flags); |
| 32 | |||
| 32 | shift = ((unsigned char *) base)[1] & 0x0f; | 33 | shift = ((unsigned char *) base)[1] & 0x0f; |
| 33 | *base >>= PAGE_SHIFT; | 34 | *base >>= PAGE_SHIFT; |
| 34 | 35 | ||
| 35 | /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7 | 36 | /* |
| 37 | * Power of two, at least 4K on ARR0-ARR6, 256K on ARR7 | ||
| 36 | * Note: shift==0xf means 4G, this is unsupported. | 38 | * Note: shift==0xf means 4G, this is unsupported. |
| 37 | */ | 39 | */ |
| 38 | if (shift) | 40 | if (shift) |
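The byte-at-a-time pokes above assemble a 32-bit region base from three 8-bit Cyrix configuration registers, after which the bottom nibble of byte 1 doubles as the size shift. Reconstructed with a mocked getCx86() (little-endian layout assumed, as on x86):

```c
#include <stdio.h>

/* Mock of getCx86(): pretend the three ARR bytes read 0x12, 0x34, 0x5c. */
static unsigned char getCx86_mock(int off)
{
        static const unsigned char regs[] = { 0x12, 0x34, 0x5c };
        return regs[off];
}

int main(void)
{
        unsigned long base = 0;
        unsigned char shift;

        /* Same byte-poking as cyrix_get_arr(): fill bytes 3, 2, 1 of base */
        ((unsigned char *)&base)[3] = getCx86_mock(0);
        ((unsigned char *)&base)[2] = getCx86_mock(1);
        ((unsigned char *)&base)[1] = getCx86_mock(2);

        shift = ((unsigned char *)&base)[1] & 0x0f;
        base >>= 12;    /* PAGE_SHIFT: byte address -> page frame number */

        /* A nonzero shift encodes the size: 1 << (shift - 1) pages on ARR0-ARR6 */
        printf("base pfn %#lx, shift %u\n", base, shift);
        return 0;
}
```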
| @@ -76,17 +78,20 @@ cyrix_get_arr(unsigned int reg, unsigned long *base, | |||
| 76 | } | 78 | } |
| 77 | } | 79 | } |
| 78 | 80 | ||
| 81 | /* | ||
| 82 | * cyrix_get_free_region - get a free ARR. | ||
| 83 | * | ||
| 84 | * @base: the starting (base) address of the region. | ||
| 85 | * @size: the size (in bytes) of the region. | ||
| 86 | * | ||
| 87 | * Returns: the index of the region on success, else -1 on error. | ||
| 88 | */ | ||
| 79 | static int | 89 | static int |
| 80 | cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) | 90 | cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) |
| 81 | /* [SUMMARY] Get a free ARR. | ||
| 82 | <base> The starting (base) address of the region. | ||
| 83 | <size> The size (in bytes) of the region. | ||
| 84 | [RETURNS] The index of the region on success, else -1 on error. | ||
| 85 | */ | ||
| 86 | { | 91 | { |
| 87 | int i; | ||
| 88 | mtrr_type ltype; | ||
| 89 | unsigned long lbase, lsize; | 92 | unsigned long lbase, lsize; |
| 93 | mtrr_type ltype; | ||
| 94 | int i; | ||
| 90 | 95 | ||
| 91 | switch (replace_reg) { | 96 | switch (replace_reg) { |
| 92 | case 7: | 97 | case 7: |
| @@ -107,14 +112,17 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) | |||
| 107 | cyrix_get_arr(7, &lbase, &lsize, &ltype); | 112 | cyrix_get_arr(7, &lbase, &lsize, &ltype); |
| 108 | if (lsize == 0) | 113 | if (lsize == 0) |
| 109 | return 7; | 114 | return 7; |
| 110 | /* Else try ARR0-ARR6 first */ | 115 | /* Else try ARR0-ARR6 first */ |
| 111 | } else { | 116 | } else { |
| 112 | for (i = 0; i < 7; i++) { | 117 | for (i = 0; i < 7; i++) { |
| 113 | cyrix_get_arr(i, &lbase, &lsize, &ltype); | 118 | cyrix_get_arr(i, &lbase, &lsize, &ltype); |
| 114 | if (lsize == 0) | 119 | if (lsize == 0) |
| 115 | return i; | 120 | return i; |
| 116 | } | 121 | } |
| 117 | /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */ | 122 | /* |
| 123 | * ARR0-ARR6 isn't free | ||
| 124 | * try ARR7 but its size must be at least 256K | ||
| 125 | */ | ||
| 118 | cyrix_get_arr(i, &lbase, &lsize, &ltype); | 126 | cyrix_get_arr(i, &lbase, &lsize, &ltype); |
| 119 | if ((lsize == 0) && (size >= 0x40)) | 127 | if ((lsize == 0) && (size >= 0x40)) |
| 120 | return i; | 128 | return i; |
| @@ -122,21 +130,22 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) | |||
| 122 | return -ENOSPC; | 130 | return -ENOSPC; |
| 123 | } | 131 | } |
| 124 | 132 | ||
| 125 | static u32 cr4 = 0; | 133 | static u32 cr4, ccr3; |
| 126 | static u32 ccr3; | ||
| 127 | 134 | ||
| 128 | static void prepare_set(void) | 135 | static void prepare_set(void) |
| 129 | { | 136 | { |
| 130 | u32 cr0; | 137 | u32 cr0; |
| 131 | 138 | ||
| 132 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ | 139 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ |
| 133 | if ( cpu_has_pge ) { | 140 | if (cpu_has_pge) { |
| 134 | cr4 = read_cr4(); | 141 | cr4 = read_cr4(); |
| 135 | write_cr4(cr4 & ~X86_CR4_PGE); | 142 | write_cr4(cr4 & ~X86_CR4_PGE); |
| 136 | } | 143 | } |
| 137 | 144 | ||
| 138 | /* Disable and flush caches. Note that wbinvd flushes the TLBs as | 145 | /* |
| 139 | a side-effect */ | 146 | * Disable and flush caches. |
| 147 | * Note that wbinvd flushes the TLBs as a side-effect | ||
| 148 | */ | ||
| 140 | cr0 = read_cr0() | X86_CR0_CD; | 149 | cr0 = read_cr0() | X86_CR0_CD; |
| 141 | wbinvd(); | 150 | wbinvd(); |
| 142 | write_cr0(cr0); | 151 | write_cr0(cr0); |
| @@ -147,22 +156,21 @@ static void prepare_set(void) | |||
| 147 | 156 | ||
| 148 | /* Cyrix ARRs - everything else was excluded at the top */ | 157 | /* Cyrix ARRs - everything else was excluded at the top */ |
| 149 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); | 158 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); |
| 150 | |||
| 151 | } | 159 | } |
| 152 | 160 | ||
| 153 | static void post_set(void) | 161 | static void post_set(void) |
| 154 | { | 162 | { |
| 155 | /* Flush caches and TLBs */ | 163 | /* Flush caches and TLBs */ |
| 156 | wbinvd(); | 164 | wbinvd(); |
| 157 | 165 | ||
| 158 | /* Cyrix ARRs - everything else was excluded at the top */ | 166 | /* Cyrix ARRs - everything else was excluded at the top */ |
| 159 | setCx86(CX86_CCR3, ccr3); | 167 | setCx86(CX86_CCR3, ccr3); |
| 160 | 168 | ||
| 161 | /* Enable caches */ | 169 | /* Enable caches */ |
| 162 | write_cr0(read_cr0() & 0xbfffffff); | 170 | write_cr0(read_cr0() & 0xbfffffff); |
| 163 | 171 | ||
| 164 | /* Restore value of CR4 */ | 172 | /* Restore value of CR4 */ |
| 165 | if ( cpu_has_pge ) | 173 | if (cpu_has_pge) |
| 166 | write_cr4(cr4); | 174 | write_cr4(cr4); |
| 167 | } | 175 | } |
| 168 | 176 | ||
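A note on the magic constant in post_set(): 0xbfffffff clears bit 30 of CR0, the cache-disable bit. Assuming X86_CR0_CD is defined as bit 30 (as in <asm/processor-flags.h>), the same store could be written as:

	write_cr0(read_cr0() & ~X86_CR0_CD); /* CD=0: re-enable caches */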
| @@ -178,7 +186,8 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base, | |||
| 178 | size >>= 6; | 186 | size >>= 6; |
| 179 | 187 | ||
| 180 | size &= 0x7fff; /* make sure arr_size <= 14 */ | 188 | size &= 0x7fff; /* make sure arr_size <= 14 */ |
| 181 | for (arr_size = 0; size; arr_size++, size >>= 1) ; | 189 | for (arr_size = 0; size; arr_size++, size >>= 1) |
| 190 | ; | ||
| 182 | 191 | ||
| 183 | if (reg < 7) { | 192 | if (reg < 7) { |
| 184 | switch (type) { | 193 | switch (type) { |
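The empty-bodied loop above computes the position of the highest set bit plus one. A sketch of an equivalent using the kernel's fls() helper (not what the patch itself uses):

	arr_size = fls(size); /* e.g. size == 0x20 -> arr_size == 6 */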
| @@ -215,18 +224,18 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base, | |||
| 215 | prepare_set(); | 224 | prepare_set(); |
| 216 | 225 | ||
| 217 | base <<= PAGE_SHIFT; | 226 | base <<= PAGE_SHIFT; |
| 218 | setCx86(arr, ((unsigned char *) &base)[3]); | 227 | setCx86(arr + 0, ((unsigned char *)&base)[3]); |
| 219 | setCx86(arr + 1, ((unsigned char *) &base)[2]); | 228 | setCx86(arr + 1, ((unsigned char *)&base)[2]); |
| 220 | setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size); | 229 | setCx86(arr + 2, (((unsigned char *)&base)[1]) | arr_size); |
| 221 | setCx86(CX86_RCR_BASE + reg, arr_type); | 230 | setCx86(CX86_RCR_BASE + reg, arr_type); |
| 222 | 231 | ||
| 223 | post_set(); | 232 | post_set(); |
| 224 | } | 233 | } |
| 225 | 234 | ||
| 226 | typedef struct { | 235 | typedef struct { |
| 227 | unsigned long base; | 236 | unsigned long base; |
| 228 | unsigned long size; | 237 | unsigned long size; |
| 229 | mtrr_type type; | 238 | mtrr_type type; |
| 230 | } arr_state_t; | 239 | } arr_state_t; |
| 231 | 240 | ||
| 232 | static arr_state_t arr_state[8] = { | 241 | static arr_state_t arr_state[8] = { |
| @@ -247,16 +256,17 @@ static void cyrix_set_all(void) | |||
| 247 | setCx86(CX86_CCR0 + i, ccr_state[i]); | 256 | setCx86(CX86_CCR0 + i, ccr_state[i]); |
| 248 | for (; i < 7; i++) | 257 | for (; i < 7; i++) |
| 249 | setCx86(CX86_CCR4 + i, ccr_state[i]); | 258 | setCx86(CX86_CCR4 + i, ccr_state[i]); |
| 250 | for (i = 0; i < 8; i++) | 259 | |
| 251 | cyrix_set_arr(i, arr_state[i].base, | 260 | for (i = 0; i < 8; i++) { |
| 261 | cyrix_set_arr(i, arr_state[i].base, | ||
| 252 | arr_state[i].size, arr_state[i].type); | 262 | arr_state[i].size, arr_state[i].type); |
| 263 | } | ||
| 253 | 264 | ||
| 254 | post_set(); | 265 | post_set(); |
| 255 | } | 266 | } |
| 256 | 267 | ||
| 257 | static struct mtrr_ops cyrix_mtrr_ops = { | 268 | static struct mtrr_ops cyrix_mtrr_ops = { |
| 258 | .vendor = X86_VENDOR_CYRIX, | 269 | .vendor = X86_VENDOR_CYRIX, |
| 259 | // .init = cyrix_arr_init, | ||
| 260 | .set_all = cyrix_set_all, | 270 | .set_all = cyrix_set_all, |
| 261 | .set = cyrix_set_arr, | 271 | .set = cyrix_set_arr, |
| 262 | .get = cyrix_get_arr, | 272 | .get = cyrix_get_arr, |
| @@ -270,5 +280,3 @@ int __init cyrix_init_mtrr(void) | |||
| 270 | set_mtrr_ops(&cyrix_mtrr_ops); | 280 | set_mtrr_ops(&cyrix_mtrr_ops); |
| 271 | return 0; | 281 | return 0; |
| 272 | } | 282 | } |
| 273 | |||
| 274 | //arch_initcall(cyrix_init_mtrr); | ||
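For context, a sketch of how a vendor backend such as cyrix_mtrr_ops is picked up once registered; the lookup lives in main.c and the exact form here is an assumption, not code from this patch:

	/* set_mtrr_ops() stores the ops indexed by vendor; boot code then does: */
	if (mtrr_ops[boot_cpu_data.x86_vendor])
		mtrr_if = mtrr_ops[boot_cpu_data.x86_vendor];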
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 0543f69f0b27..55da0c5f68dd 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
| @@ -1,28 +1,34 @@ | |||
| 1 | /* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong | 1 | /* |
| 2 | because MTRRs can span upto 40 bits (36bits on most modern x86) */ | 2 | * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong |
| 3 | * because MTRRs can span upto 40 bits (36bits on most modern x86) | ||
| 4 | */ | ||
| 5 | #define DEBUG | ||
| 6 | |||
| 7 | #include <linux/module.h> | ||
| 3 | #include <linux/init.h> | 8 | #include <linux/init.h> |
| 4 | #include <linux/slab.h> | 9 | #include <linux/slab.h> |
| 10 | #include <linux/io.h> | ||
| 5 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
| 6 | #include <linux/module.h> | 12 | |
| 7 | #include <asm/io.h> | ||
| 8 | #include <asm/mtrr.h> | ||
| 9 | #include <asm/msr.h> | ||
| 10 | #include <asm/system.h> | ||
| 11 | #include <asm/cpufeature.h> | ||
| 12 | #include <asm/processor-flags.h> | 13 | #include <asm/processor-flags.h> |
| 14 | #include <asm/cpufeature.h> | ||
| 13 | #include <asm/tlbflush.h> | 15 | #include <asm/tlbflush.h> |
| 16 | #include <asm/system.h> | ||
| 17 | #include <asm/mtrr.h> | ||
| 18 | #include <asm/msr.h> | ||
| 14 | #include <asm/pat.h> | 19 | #include <asm/pat.h> |
| 20 | |||
| 15 | #include "mtrr.h" | 21 | #include "mtrr.h" |
| 16 | 22 | ||
| 17 | struct fixed_range_block { | 23 | struct fixed_range_block { |
| 18 | int base_msr; /* start address of an MTRR block */ | 24 | int base_msr; /* start address of an MTRR block */ |
| 19 | int ranges; /* number of MTRRs in this block */ | 25 | int ranges; /* number of MTRRs in this block */ |
| 20 | }; | 26 | }; |
| 21 | 27 | ||
| 22 | static struct fixed_range_block fixed_range_blocks[] = { | 28 | static struct fixed_range_block fixed_range_blocks[] = { |
| 23 | { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */ | 29 | { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */ |
| 24 | { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */ | 30 | { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */ |
| 25 | { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */ | 31 | { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */ |
| 26 | {} | 32 | {} |
| 27 | }; | 33 | }; |
| 28 | 34 | ||
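Layout behind the fixed_range_blocks table, per the Intel SDM: each fixed-range MSR packs eight one-byte memory types, so the table describes (1 + 2 + 8) * 8 = 88 ranges covering the first megabyte:

	/* MTRRfix64K_00000:  8 x 64K ranges, 0x00000-0x7FFFF
	 * MTRRfix16K_*:     16 x 16K ranges, 0x80000-0xBFFFF
	 * MTRRfix4K_*:      64 x  4K ranges, 0xC0000-0xFFFFF */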
| @@ -30,10 +36,10 @@ static unsigned long smp_changes_mask; | |||
| 30 | static int mtrr_state_set; | 36 | static int mtrr_state_set; |
| 31 | u64 mtrr_tom2; | 37 | u64 mtrr_tom2; |
| 32 | 38 | ||
| 33 | struct mtrr_state_type mtrr_state = {}; | 39 | struct mtrr_state_type mtrr_state; |
| 34 | EXPORT_SYMBOL_GPL(mtrr_state); | 40 | EXPORT_SYMBOL_GPL(mtrr_state); |
| 35 | 41 | ||
| 36 | /** | 42 | /* |
| 37 | * BIOS is expected to clear MtrrFixDramModEn bit, see for example | 43 | * BIOS is expected to clear MtrrFixDramModEn bit, see for example |
| 38 | * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD | 44 | * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD |
| 39 | * Opteron Processors" (26094 Rev. 3.30 February 2006), section | 45 | * Opteron Processors" (26094 Rev. 3.30 February 2006), section |
| @@ -104,9 +110,8 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
| 104 | * Look of multiple ranges matching this address and pick type | 110 | * Look of multiple ranges matching this address and pick type |
| 105 | * as per MTRR precedence | 111 | * as per MTRR precedence |
| 106 | */ | 112 | */ |
| 107 | if (!(mtrr_state.enabled & 2)) { | 113 | if (!(mtrr_state.enabled & 2)) |
| 108 | return mtrr_state.def_type; | 114 | return mtrr_state.def_type; |
| 109 | } | ||
| 110 | 115 | ||
| 111 | prev_match = 0xFF; | 116 | prev_match = 0xFF; |
| 112 | for (i = 0; i < num_var_ranges; ++i) { | 117 | for (i = 0; i < num_var_ranges; ++i) { |
| @@ -125,9 +130,8 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
| 125 | if (start_state != end_state) | 130 | if (start_state != end_state) |
| 126 | return 0xFE; | 131 | return 0xFE; |
| 127 | 132 | ||
| 128 | if ((start & mask) != (base & mask)) { | 133 | if ((start & mask) != (base & mask)) |
| 129 | continue; | 134 | continue; |
| 130 | } | ||
| 131 | 135 | ||
| 132 | curr_match = mtrr_state.var_ranges[i].base_lo & 0xff; | 136 | curr_match = mtrr_state.var_ranges[i].base_lo & 0xff; |
| 133 | if (prev_match == 0xFF) { | 137 | if (prev_match == 0xFF) { |
| @@ -148,9 +152,8 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
| 148 | curr_match = MTRR_TYPE_WRTHROUGH; | 152 | curr_match = MTRR_TYPE_WRTHROUGH; |
| 149 | } | 153 | } |
| 150 | 154 | ||
| 151 | if (prev_match != curr_match) { | 155 | if (prev_match != curr_match) |
| 152 | return MTRR_TYPE_UNCACHABLE; | 156 | return MTRR_TYPE_UNCACHABLE; |
| 153 | } | ||
| 154 | } | 157 | } |
| 155 | 158 | ||
| 156 | if (mtrr_tom2) { | 159 | if (mtrr_tom2) { |
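The precedence rules this loop applies (UC wins outright, WT wins over WB, any other conflict degrades to UC) can be summarized in a small helper; this is a sketch for illustration, not code from the patch:

static u8 combine_mtrr_types(u8 a, u8 b)
{
	if (a == b)
		return a;
	if ((a == MTRR_TYPE_WRBACK && b == MTRR_TYPE_WRTHROUGH) ||
	    (b == MTRR_TYPE_WRBACK && a == MTRR_TYPE_WRTHROUGH))
		return MTRR_TYPE_WRTHROUGH;
	return MTRR_TYPE_UNCACHABLE; /* UC, or an unresolvable conflict */
}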
| @@ -164,7 +167,7 @@ u8 mtrr_type_lookup(u64 start, u64 end) | |||
| 164 | return mtrr_state.def_type; | 167 | return mtrr_state.def_type; |
| 165 | } | 168 | } |
| 166 | 169 | ||
| 167 | /* Get the MSR pair relating to a var range */ | 170 | /* Get the MSR pair relating to a var range */ |
| 168 | static void | 171 | static void |
| 169 | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) | 172 | get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) |
| 170 | { | 173 | { |
| @@ -172,7 +175,7 @@ get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) | |||
| 172 | rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); | 175 | rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); |
| 173 | } | 176 | } |
| 174 | 177 | ||
| 175 | /* fill the MSR pair relating to a var range */ | 178 | /* Fill the MSR pair relating to a var range */ |
| 176 | void fill_mtrr_var_range(unsigned int index, | 179 | void fill_mtrr_var_range(unsigned int index, |
| 177 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi) | 180 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi) |
| 178 | { | 181 | { |
| @@ -186,10 +189,9 @@ void fill_mtrr_var_range(unsigned int index, | |||
| 186 | vr[index].mask_hi = mask_hi; | 189 | vr[index].mask_hi = mask_hi; |
| 187 | } | 190 | } |
| 188 | 191 | ||
| 189 | static void | 192 | static void get_fixed_ranges(mtrr_type *frs) |
| 190 | get_fixed_ranges(mtrr_type * frs) | ||
| 191 | { | 193 | { |
| 192 | unsigned int *p = (unsigned int *) frs; | 194 | unsigned int *p = (unsigned int *)frs; |
| 193 | int i; | 195 | int i; |
| 194 | 196 | ||
| 195 | k8_check_syscfg_dram_mod_en(); | 197 | k8_check_syscfg_dram_mod_en(); |
| @@ -217,22 +219,22 @@ static void __init print_fixed_last(void) | |||
| 217 | if (!last_fixed_end) | 219 | if (!last_fixed_end) |
| 218 | return; | 220 | return; |
| 219 | 221 | ||
| 220 | printk(KERN_DEBUG " %05X-%05X %s\n", last_fixed_start, | 222 | pr_debug(" %05X-%05X %s\n", last_fixed_start, |
| 221 | last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); | 223 | last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); |
| 222 | 224 | ||
| 223 | last_fixed_end = 0; | 225 | last_fixed_end = 0; |
| 224 | } | 226 | } |
| 225 | 227 | ||
| 226 | static void __init update_fixed_last(unsigned base, unsigned end, | 228 | static void __init update_fixed_last(unsigned base, unsigned end, |
| 227 | mtrr_type type) | 229 | mtrr_type type) |
| 228 | { | 230 | { |
| 229 | last_fixed_start = base; | 231 | last_fixed_start = base; |
| 230 | last_fixed_end = end; | 232 | last_fixed_end = end; |
| 231 | last_fixed_type = type; | 233 | last_fixed_type = type; |
| 232 | } | 234 | } |
| 233 | 235 | ||
| 234 | static void __init print_fixed(unsigned base, unsigned step, | 236 | static void __init |
| 235 | const mtrr_type *types) | 237 | print_fixed(unsigned base, unsigned step, const mtrr_type *types) |
| 236 | { | 238 | { |
| 237 | unsigned i; | 239 | unsigned i; |
| 238 | 240 | ||
| @@ -259,54 +261,55 @@ static void __init print_mtrr_state(void) | |||
| 259 | unsigned int i; | 261 | unsigned int i; |
| 260 | int high_width; | 262 | int high_width; |
| 261 | 263 | ||
| 262 | printk(KERN_DEBUG "MTRR default type: %s\n", | 264 | pr_debug("MTRR default type: %s\n", |
| 263 | mtrr_attrib_to_str(mtrr_state.def_type)); | 265 | mtrr_attrib_to_str(mtrr_state.def_type)); |
| 264 | if (mtrr_state.have_fixed) { | 266 | if (mtrr_state.have_fixed) { |
| 265 | printk(KERN_DEBUG "MTRR fixed ranges %sabled:\n", | 267 | pr_debug("MTRR fixed ranges %sabled:\n", |
| 266 | mtrr_state.enabled & 1 ? "en" : "dis"); | 268 | mtrr_state.enabled & 1 ? "en" : "dis"); |
| 267 | print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); | 269 | print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); |
| 268 | for (i = 0; i < 2; ++i) | 270 | for (i = 0; i < 2; ++i) |
| 269 | print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8); | 271 | print_fixed(0x80000 + i * 0x20000, 0x04000, |
| 272 | mtrr_state.fixed_ranges + (i + 1) * 8); | ||
| 270 | for (i = 0; i < 8; ++i) | 273 | for (i = 0; i < 8; ++i) |
| 271 | print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8); | 274 | print_fixed(0xC0000 + i * 0x08000, 0x01000, |
| 275 | mtrr_state.fixed_ranges + (i + 3) * 8); | ||
| 272 | 276 | ||
| 273 | /* tail */ | 277 | /* tail */ |
| 274 | print_fixed_last(); | 278 | print_fixed_last(); |
| 275 | } | 279 | } |
| 276 | printk(KERN_DEBUG "MTRR variable ranges %sabled:\n", | 280 | pr_debug("MTRR variable ranges %sabled:\n", |
| 277 | mtrr_state.enabled & 2 ? "en" : "dis"); | 281 | mtrr_state.enabled & 2 ? "en" : "dis"); |
| 278 | if (size_or_mask & 0xffffffffUL) | 282 | if (size_or_mask & 0xffffffffUL) |
| 279 | high_width = ffs(size_or_mask & 0xffffffffUL) - 1; | 283 | high_width = ffs(size_or_mask & 0xffffffffUL) - 1; |
| 280 | else | 284 | else |
| 281 | high_width = ffs(size_or_mask>>32) + 32 - 1; | 285 | high_width = ffs(size_or_mask>>32) + 32 - 1; |
| 282 | high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4; | 286 | high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4; |
| 287 | |||
| 283 | for (i = 0; i < num_var_ranges; ++i) { | 288 | for (i = 0; i < num_var_ranges; ++i) { |
| 284 | if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) | 289 | if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) |
| 285 | printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n", | 290 | pr_debug(" %u base %0*X%05X000 mask %0*X%05X000 %s\n", |
| 286 | i, | 291 | i, |
| 287 | high_width, | 292 | high_width, |
| 288 | mtrr_state.var_ranges[i].base_hi, | 293 | mtrr_state.var_ranges[i].base_hi, |
| 289 | mtrr_state.var_ranges[i].base_lo >> 12, | 294 | mtrr_state.var_ranges[i].base_lo >> 12, |
| 290 | high_width, | 295 | high_width, |
| 291 | mtrr_state.var_ranges[i].mask_hi, | 296 | mtrr_state.var_ranges[i].mask_hi, |
| 292 | mtrr_state.var_ranges[i].mask_lo >> 12, | 297 | mtrr_state.var_ranges[i].mask_lo >> 12, |
| 293 | mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff)); | 298 | mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff)); |
| 294 | else | 299 | else |
| 295 | printk(KERN_DEBUG " %u disabled\n", i); | 300 | pr_debug(" %u disabled\n", i); |
| 296 | } | ||
| 297 | if (mtrr_tom2) { | ||
| 298 | printk(KERN_DEBUG "TOM2: %016llx aka %lldM\n", | ||
| 299 | mtrr_tom2, mtrr_tom2>>20); | ||
| 300 | } | 301 | } |
| 302 | if (mtrr_tom2) | ||
| 303 | pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20); | ||
| 301 | } | 304 | } |
| 302 | 305 | ||
| 303 | /* Grab all of the MTRR state for this CPU into *state */ | 306 | /* Grab all of the MTRR state for this CPU into *state */ |
| 304 | void __init get_mtrr_state(void) | 307 | void __init get_mtrr_state(void) |
| 305 | { | 308 | { |
| 306 | unsigned int i; | ||
| 307 | struct mtrr_var_range *vrs; | 309 | struct mtrr_var_range *vrs; |
| 308 | unsigned lo, dummy; | ||
| 309 | unsigned long flags; | 310 | unsigned long flags; |
| 311 | unsigned lo, dummy; | ||
| 312 | unsigned int i; | ||
| 310 | 313 | ||
| 311 | vrs = mtrr_state.var_ranges; | 314 | vrs = mtrr_state.var_ranges; |
| 312 | 315 | ||
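A worked example for the high_width computation above, using illustrative values (36-bit physical addresses and PAGE_SHIFT == 12 are assumptions, not taken from the patch):

	u64 size_or_mask = ~((1ULL << (36 - 12)) - 1);          /* lowest set bit: 24 */
	int high_width = ffs(size_or_mask & 0xffffffffUL) - 1;  /* 24 bits */
	high_width = (high_width - (32 - 12) + 3) / 4;          /* 1 hex digit */
	/* "%0*X%05X000" then prints 1 + 5 + 3 nibbles = 36 address bits. */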
| @@ -324,6 +327,7 @@ void __init get_mtrr_state(void) | |||
| 324 | 327 | ||
| 325 | if (amd_special_default_mtrr()) { | 328 | if (amd_special_default_mtrr()) { |
| 326 | unsigned low, high; | 329 | unsigned low, high; |
| 330 | |||
| 327 | /* TOP_MEM2 */ | 331 | /* TOP_MEM2 */ |
| 328 | rdmsr(MSR_K8_TOP_MEM2, low, high); | 332 | rdmsr(MSR_K8_TOP_MEM2, low, high); |
| 329 | mtrr_tom2 = high; | 333 | mtrr_tom2 = high; |
| @@ -344,10 +348,9 @@ void __init get_mtrr_state(void) | |||
| 344 | 348 | ||
| 345 | post_set(); | 349 | post_set(); |
| 346 | local_irq_restore(flags); | 350 | local_irq_restore(flags); |
| 347 | |||
| 348 | } | 351 | } |
| 349 | 352 | ||
| 350 | /* Some BIOS's are fucked and don't set all MTRRs the same! */ | 353 | /* Some BIOS's are messed up and don't set all MTRRs the same! */ |
| 351 | void __init mtrr_state_warn(void) | 354 | void __init mtrr_state_warn(void) |
| 352 | { | 355 | { |
| 353 | unsigned long mask = smp_changes_mask; | 356 | unsigned long mask = smp_changes_mask; |
| @@ -355,28 +358,33 @@ void __init mtrr_state_warn(void) | |||
| 355 | if (!mask) | 358 | if (!mask) |
| 356 | return; | 359 | return; |
| 357 | if (mask & MTRR_CHANGE_MASK_FIXED) | 360 | if (mask & MTRR_CHANGE_MASK_FIXED) |
| 358 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n"); | 361 | pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n"); |
| 359 | if (mask & MTRR_CHANGE_MASK_VARIABLE) | 362 | if (mask & MTRR_CHANGE_MASK_VARIABLE) |
| 360 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n"); | 363 | pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n"); |
| 361 | if (mask & MTRR_CHANGE_MASK_DEFTYPE) | 364 | if (mask & MTRR_CHANGE_MASK_DEFTYPE) |
| 362 | printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n"); | 365 | pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n"); |
| 366 | |||
| 363 | printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); | 367 | printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); |
| 364 | printk(KERN_INFO "mtrr: corrected configuration.\n"); | 368 | printk(KERN_INFO "mtrr: corrected configuration.\n"); |
| 365 | } | 369 | } |
| 366 | 370 | ||
| 367 | /* Doesn't attempt to pass an error out to MTRR users | 371 | /* |
| 368 | because it's quite complicated in some cases and probably not | 372 | * Doesn't attempt to pass an error out to MTRR users |
| 369 | worth it because the best error handling is to ignore it. */ | 373 | * because it's quite complicated in some cases and probably not |
| 374 | * worth it because the best error handling is to ignore it. | ||
| 375 | */ | ||
| 370 | void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) | 376 | void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) |
| 371 | { | 377 | { |
| 372 | if (wrmsr_safe(msr, a, b) < 0) | 378 | if (wrmsr_safe(msr, a, b) < 0) { |
| 373 | printk(KERN_ERR | 379 | printk(KERN_ERR |
| 374 | "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", | 380 | "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", |
| 375 | smp_processor_id(), msr, a, b); | 381 | smp_processor_id(), msr, a, b); |
| 382 | } | ||
| 376 | } | 383 | } |
| 377 | 384 | ||
| 378 | /** | 385 | /** |
| 379 | * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have | 386 | * set_fixed_range - checks & updates a fixed-range MTRR if it |
| 387 | * differs from the value it should have | ||
| 380 | * @msr: MSR address of the MTTR which should be checked and updated | 388 | * @msr: MSR address of the MTTR which should be checked and updated |
| 381 | * @changed: pointer which indicates whether the MTRR needed to be changed | 389 | * @changed: pointer which indicates whether the MTRR needed to be changed |
| 382 | * @msrwords: pointer to the MSR values which the MSR should have | 390 | * @msrwords: pointer to the MSR values which the MSR should have |
| @@ -401,20 +409,23 @@ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords) | |||
| 401 | * | 409 | * |
| 402 | * Returns: The index of the region on success, else negative on error. | 410 | * Returns: The index of the region on success, else negative on error. |
| 403 | */ | 411 | */ |
| 404 | int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) | 412 | int |
| 413 | generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) | ||
| 405 | { | 414 | { |
| 406 | int i, max; | ||
| 407 | mtrr_type ltype; | ||
| 408 | unsigned long lbase, lsize; | 415 | unsigned long lbase, lsize; |
| 416 | mtrr_type ltype; | ||
| 417 | int i, max; | ||
| 409 | 418 | ||
| 410 | max = num_var_ranges; | 419 | max = num_var_ranges; |
| 411 | if (replace_reg >= 0 && replace_reg < max) | 420 | if (replace_reg >= 0 && replace_reg < max) |
| 412 | return replace_reg; | 421 | return replace_reg; |
| 422 | |||
| 413 | for (i = 0; i < max; ++i) { | 423 | for (i = 0; i < max; ++i) { |
| 414 | mtrr_if->get(i, &lbase, &lsize, &ltype); | 424 | mtrr_if->get(i, &lbase, &lsize, &ltype); |
| 415 | if (lsize == 0) | 425 | if (lsize == 0) |
| 416 | return i; | 426 | return i; |
| 417 | } | 427 | } |
| 428 | |||
| 418 | return -ENOSPC; | 429 | return -ENOSPC; |
| 419 | } | 430 | } |
| 420 | 431 | ||
| @@ -434,7 +445,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
| 434 | rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); | 445 | rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); |
| 435 | 446 | ||
| 436 | if ((mask_lo & 0x800) == 0) { | 447 | if ((mask_lo & 0x800) == 0) { |
| 437 | /* Invalid (i.e. free) range */ | 448 | /* Invalid (i.e. free) range */ |
| 438 | *base = 0; | 449 | *base = 0; |
| 439 | *size = 0; | 450 | *size = 0; |
| 440 | *type = 0; | 451 | *type = 0; |
| @@ -471,27 +482,31 @@ out_put_cpu: | |||
| 471 | } | 482 | } |
| 472 | 483 | ||
| 473 | /** | 484 | /** |
| 474 | * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set | 485 | * set_fixed_ranges - checks & updates the fixed-range MTRRs if they |
| 486 | * differ from the saved set | ||
| 475 | * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges() | 487 | * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges() |
| 476 | */ | 488 | */ |
| 477 | static int set_fixed_ranges(mtrr_type * frs) | 489 | static int set_fixed_ranges(mtrr_type *frs) |
| 478 | { | 490 | { |
| 479 | unsigned long long *saved = (unsigned long long *) frs; | 491 | unsigned long long *saved = (unsigned long long *)frs; |
| 480 | bool changed = false; | 492 | bool changed = false; |
| 481 | int block=-1, range; | 493 | int block = -1, range; |
| 482 | 494 | ||
| 483 | k8_check_syscfg_dram_mod_en(); | 495 | k8_check_syscfg_dram_mod_en(); |
| 484 | 496 | ||
| 485 | while (fixed_range_blocks[++block].ranges) | 497 | while (fixed_range_blocks[++block].ranges) { |
| 486 | for (range=0; range < fixed_range_blocks[block].ranges; range++) | 498 | for (range = 0; range < fixed_range_blocks[block].ranges; range++) |
| 487 | set_fixed_range(fixed_range_blocks[block].base_msr + range, | 499 | set_fixed_range(fixed_range_blocks[block].base_msr + range, |
| 488 | &changed, (unsigned int *) saved++); | 500 | &changed, (unsigned int *)saved++); |
| 501 | } | ||
| 489 | 502 | ||
| 490 | return changed; | 503 | return changed; |
| 491 | } | 504 | } |
| 492 | 505 | ||
| 493 | /* Set the MSR pair relating to a var range. Returns TRUE if | 506 | /* |
| 494 | changes are made */ | 507 | * Set the MSR pair relating to a var range. |
| 508 | * Returns true if changes are made. | ||
| 509 | */ | ||
| 495 | static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) | 510 | static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) |
| 496 | { | 511 | { |
| 497 | unsigned int lo, hi; | 512 | unsigned int lo, hi; |
| @@ -501,6 +516,7 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) | |||
| 501 | if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) | 516 | if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) |
| 502 | || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != | 517 | || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != |
| 503 | (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { | 518 | (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { |
| 519 | |||
| 504 | mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); | 520 | mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); |
| 505 | changed = true; | 521 | changed = true; |
| 506 | } | 522 | } |
| @@ -526,21 +542,26 @@ static u32 deftype_lo, deftype_hi; | |||
| 526 | */ | 542 | */ |
| 527 | static unsigned long set_mtrr_state(void) | 543 | static unsigned long set_mtrr_state(void) |
| 528 | { | 544 | { |
| 529 | unsigned int i; | ||
| 530 | unsigned long change_mask = 0; | 545 | unsigned long change_mask = 0; |
| 546 | unsigned int i; | ||
| 531 | 547 | ||
| 532 | for (i = 0; i < num_var_ranges; i++) | 548 | for (i = 0; i < num_var_ranges; i++) { |
| 533 | if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) | 549 | if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) |
| 534 | change_mask |= MTRR_CHANGE_MASK_VARIABLE; | 550 | change_mask |= MTRR_CHANGE_MASK_VARIABLE; |
| 551 | } | ||
| 535 | 552 | ||
| 536 | if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) | 553 | if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) |
| 537 | change_mask |= MTRR_CHANGE_MASK_FIXED; | 554 | change_mask |= MTRR_CHANGE_MASK_FIXED; |
| 538 | 555 | ||
| 539 | /* Set_mtrr_restore restores the old value of MTRRdefType, | 556 | /* |
| 540 | so to set it we fiddle with the saved value */ | 557 | * Set_mtrr_restore restores the old value of MTRRdefType, |
| 558 | * so to set it we fiddle with the saved value: | ||
| 559 | */ | ||
| 541 | if ((deftype_lo & 0xff) != mtrr_state.def_type | 560 | if ((deftype_lo & 0xff) != mtrr_state.def_type |
| 542 | || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) { | 561 | || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) { |
| 543 | deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10); | 562 | |
| 563 | deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | | ||
| 564 | (mtrr_state.enabled << 10); | ||
| 544 | change_mask |= MTRR_CHANGE_MASK_DEFTYPE; | 565 | change_mask |= MTRR_CHANGE_MASK_DEFTYPE; |
| 545 | } | 566 | } |
| 546 | 567 | ||
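The 0xcff mask and the "<< 10" follow the MTRRdefType layout in the Intel SDM: bits 7:0 hold the default type, bit 10 the fixed-range enable, bit 11 the global MTRR enable. A sketch with illustrative names (not kernel defines):

	#define MTRR_DEFTYPE_TYPE   0x0ff /* bits 7:0 */
	#define MTRR_DEFTYPE_ENABLE 0xc00 /* bit 11: E, bit 10: FE */

	deftype_lo = (deftype_lo & ~(MTRR_DEFTYPE_TYPE | MTRR_DEFTYPE_ENABLE)) |
		     mtrr_state.def_type | (mtrr_state.enabled << 10);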
| @@ -548,33 +569,36 @@ static unsigned long set_mtrr_state(void) | |||
| 548 | } | 569 | } |
| 549 | 570 | ||
| 550 | 571 | ||
| 551 | static unsigned long cr4 = 0; | 572 | static unsigned long cr4; |
| 552 | static DEFINE_SPINLOCK(set_atomicity_lock); | 573 | static DEFINE_SPINLOCK(set_atomicity_lock); |
| 553 | 574 | ||
| 554 | /* | 575 | /* |
| 555 | * Since we are disabling the cache don't allow any interrupts - they | 576 | * Since we are disabling the cache don't allow any interrupts, |
| 556 | * would run extremely slow and would only increase the pain. The caller must | 577 | * they would run extremely slow and would only increase the pain. |
| 557 | * ensure that local interrupts are disabled and are reenabled after post_set() | 578 | * |
| 558 | * has been called. | 579 | * The caller must ensure that local interrupts are disabled and |
| 580 | * are reenabled after post_set() has been called. | ||
| 559 | */ | 581 | */ |
| 560 | |||
| 561 | static void prepare_set(void) __acquires(set_atomicity_lock) | 582 | static void prepare_set(void) __acquires(set_atomicity_lock) |
| 562 | { | 583 | { |
| 563 | unsigned long cr0; | 584 | unsigned long cr0; |
| 564 | 585 | ||
| 565 | /* Note that this is not ideal, since the cache is only flushed/disabled | 586 | /* |
| 566 | for this CPU while the MTRRs are changed, but changing this requires | 587 | * Note that this is not ideal |
| 567 | more invasive changes to the way the kernel boots */ | 588 | * since the cache is only flushed/disabled for this CPU while the |
| 589 | * MTRRs are changed, but changing this requires more invasive | ||
| 590 | * changes to the way the kernel boots | ||
| 591 | */ | ||
| 568 | 592 | ||
| 569 | spin_lock(&set_atomicity_lock); | 593 | spin_lock(&set_atomicity_lock); |
| 570 | 594 | ||
| 571 | /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ | 595 | /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ |
| 572 | cr0 = read_cr0() | X86_CR0_CD; | 596 | cr0 = read_cr0() | X86_CR0_CD; |
| 573 | write_cr0(cr0); | 597 | write_cr0(cr0); |
| 574 | wbinvd(); | 598 | wbinvd(); |
| 575 | 599 | ||
| 576 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ | 600 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ |
| 577 | if ( cpu_has_pge ) { | 601 | if (cpu_has_pge) { |
| 578 | cr4 = read_cr4(); | 602 | cr4 = read_cr4(); |
| 579 | write_cr4(cr4 & ~X86_CR4_PGE); | 603 | write_cr4(cr4 & ~X86_CR4_PGE); |
| 580 | } | 604 | } |
| @@ -582,26 +606,26 @@ static void prepare_set(void) __acquires(set_atomicity_lock) | |||
| 582 | /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ | 606 | /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ |
| 583 | __flush_tlb(); | 607 | __flush_tlb(); |
| 584 | 608 | ||
| 585 | /* Save MTRR state */ | 609 | /* Save MTRR state */ |
| 586 | rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); | 610 | rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); |
| 587 | 611 | ||
| 588 | /* Disable MTRRs, and set the default type to uncached */ | 612 | /* Disable MTRRs, and set the default type to uncached */ |
| 589 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); | 613 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); |
| 590 | } | 614 | } |
| 591 | 615 | ||
| 592 | static void post_set(void) __releases(set_atomicity_lock) | 616 | static void post_set(void) __releases(set_atomicity_lock) |
| 593 | { | 617 | { |
| 594 | /* Flush TLBs (no need to flush caches - they are disabled) */ | 618 | /* Flush TLBs (no need to flush caches - they are disabled) */ |
| 595 | __flush_tlb(); | 619 | __flush_tlb(); |
| 596 | 620 | ||
| 597 | /* Intel (P6) standard MTRRs */ | 621 | /* Intel (P6) standard MTRRs */ |
| 598 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); | 622 | mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); |
| 599 | 623 | ||
| 600 | /* Enable caches */ | 624 | /* Enable caches */ |
| 601 | write_cr0(read_cr0() & 0xbfffffff); | 625 | write_cr0(read_cr0() & 0xbfffffff); |
| 602 | 626 | ||
| 603 | /* Restore value of CR4 */ | 627 | /* Restore value of CR4 */ |
| 604 | if ( cpu_has_pge ) | 628 | if (cpu_has_pge) |
| 605 | write_cr4(cr4); | 629 | write_cr4(cr4); |
| 606 | spin_unlock(&set_atomicity_lock); | 630 | spin_unlock(&set_atomicity_lock); |
| 607 | } | 631 | } |
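How the two helpers pair up around an update, as the generic_set_mtrr() hunks below show; the function name here is illustrative:

static void set_one_mtrr_example(void)
{
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();  /* no-fill cache mode, MTRRs disabled */
	/* ... rewrite MTRRphysBase/MTRRphysMask or fixed-range MSRs here ... */
	post_set();     /* MTRRs re-enabled, caches back on */
	local_irq_restore(flags);
}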
| @@ -623,24 +647,27 @@ static void generic_set_all(void) | |||
| 623 | post_set(); | 647 | post_set(); |
| 624 | local_irq_restore(flags); | 648 | local_irq_restore(flags); |
| 625 | 649 | ||
| 626 | /* Use the atomic bitops to update the global mask */ | 650 | /* Use the atomic bitops to update the global mask */ |
| 627 | for (count = 0; count < sizeof mask * 8; ++count) { | 651 | for (count = 0; count < sizeof mask * 8; ++count) { |
| 628 | if (mask & 0x01) | 652 | if (mask & 0x01) |
| 629 | set_bit(count, &smp_changes_mask); | 653 | set_bit(count, &smp_changes_mask); |
| 630 | mask >>= 1; | 654 | mask >>= 1; |
| 631 | } | 655 | } |
| 632 | 656 | ||
| 633 | } | 657 | } |
| 634 | 658 | ||
| 659 | /** | ||
| 660 | * generic_set_mtrr - set variable MTRR register on the local CPU. | ||
| 661 | * | ||
| 662 | * @reg: The register to set. | ||
| 663 | * @base: The base address of the region. | ||
| 664 | * @size: The size of the region. If this is 0 the region is disabled. | ||
| 665 | * @type: The type of the region. | ||
| 666 | * | ||
| 667 | * Returns nothing. | ||
| 668 | */ | ||
| 635 | static void generic_set_mtrr(unsigned int reg, unsigned long base, | 669 | static void generic_set_mtrr(unsigned int reg, unsigned long base, |
| 636 | unsigned long size, mtrr_type type) | 670 | unsigned long size, mtrr_type type) |
| 637 | /* [SUMMARY] Set variable MTRR register on the local CPU. | ||
| 638 | <reg> The register to set. | ||
| 639 | <base> The base address of the region. | ||
| 640 | <size> The size of the region. If this is 0 the region is disabled. | ||
| 641 | <type> The type of the region. | ||
| 642 | [RETURNS] Nothing. | ||
| 643 | */ | ||
| 644 | { | 671 | { |
| 645 | unsigned long flags; | 672 | unsigned long flags; |
| 646 | struct mtrr_var_range *vr; | 673 | struct mtrr_var_range *vr; |
| @@ -651,8 +678,10 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base, | |||
| 651 | prepare_set(); | 678 | prepare_set(); |
| 652 | 679 | ||
| 653 | if (size == 0) { | 680 | if (size == 0) { |
| 654 | /* The invalid bit is kept in the mask, so we simply clear the | 681 | /* |
| 655 | relevant mask register to disable a range. */ | 682 | * The invalid bit is kept in the mask, so we simply |
| 683 | * clear the relevant mask register to disable a range. | ||
| 684 | */ | ||
| 656 | mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); | 685 | mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); |
| 657 | memset(vr, 0, sizeof(struct mtrr_var_range)); | 686 | memset(vr, 0, sizeof(struct mtrr_var_range)); |
| 658 | } else { | 687 | } else { |
| @@ -669,46 +698,50 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base, | |||
| 669 | local_irq_restore(flags); | 698 | local_irq_restore(flags); |
| 670 | } | 699 | } |
| 671 | 700 | ||
| 672 | int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type) | 701 | int generic_validate_add_page(unsigned long base, unsigned long size, |
| 702 | unsigned int type) | ||
| 673 | { | 703 | { |
| 674 | unsigned long lbase, last; | 704 | unsigned long lbase, last; |
| 675 | 705 | ||
| 676 | /* For Intel PPro stepping <= 7, must be 4 MiB aligned | 706 | /* |
| 677 | and not touch 0x70000000->0x7003FFFF */ | 707 | * For Intel PPro stepping <= 7 |
| 708 | * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF | ||
| 709 | */ | ||
| 678 | if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && | 710 | if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && |
| 679 | boot_cpu_data.x86_model == 1 && | 711 | boot_cpu_data.x86_model == 1 && |
| 680 | boot_cpu_data.x86_mask <= 7) { | 712 | boot_cpu_data.x86_mask <= 7) { |
| 681 | if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { | 713 | if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { |
| 682 | printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); | 714 | pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); |
| 683 | return -EINVAL; | 715 | return -EINVAL; |
| 684 | } | 716 | } |
| 685 | if (!(base + size < 0x70000 || base > 0x7003F) && | 717 | if (!(base + size < 0x70000 || base > 0x7003F) && |
| 686 | (type == MTRR_TYPE_WRCOMB | 718 | (type == MTRR_TYPE_WRCOMB |
| 687 | || type == MTRR_TYPE_WRBACK)) { | 719 | || type == MTRR_TYPE_WRBACK)) { |
| 688 | printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); | 720 | pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); |
| 689 | return -EINVAL; | 721 | return -EINVAL; |
| 690 | } | 722 | } |
| 691 | } | 723 | } |
| 692 | 724 | ||
| 693 | /* Check upper bits of base and last are equal and lower bits are 0 | 725 | /* |
| 694 | for base and 1 for last */ | 726 | * Check upper bits of base and last are equal and lower bits are 0 |
| 727 | * for base and 1 for last | ||
| 728 | */ | ||
| 695 | last = base + size - 1; | 729 | last = base + size - 1; |
| 696 | for (lbase = base; !(lbase & 1) && (last & 1); | 730 | for (lbase = base; !(lbase & 1) && (last & 1); |
| 697 | lbase = lbase >> 1, last = last >> 1) ; | 731 | lbase = lbase >> 1, last = last >> 1) |
| 732 | ; | ||
| 698 | if (lbase != last) { | 733 | if (lbase != last) { |
| 699 | printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", | 734 | pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size); |
| 700 | base, size); | ||
| 701 | return -EINVAL; | 735 | return -EINVAL; |
| 702 | } | 736 | } |
| 703 | return 0; | 737 | return 0; |
| 704 | } | 738 | } |
| 705 | 739 | ||
| 706 | |||
| 707 | static int generic_have_wrcomb(void) | 740 | static int generic_have_wrcomb(void) |
| 708 | { | 741 | { |
| 709 | unsigned long config, dummy; | 742 | unsigned long config, dummy; |
| 710 | rdmsr(MSR_MTRRcap, config, dummy); | 743 | rdmsr(MSR_MTRRcap, config, dummy); |
| 711 | return (config & (1 << 10)); | 744 | return config & (1 << 10); |
| 712 | } | 745 | } |
| 713 | 746 | ||
| 714 | int positive_have_wrcomb(void) | 747 | int positive_have_wrcomb(void) |
| @@ -716,14 +749,15 @@ int positive_have_wrcomb(void) | |||
| 716 | return 1; | 749 | return 1; |
| 717 | } | 750 | } |
| 718 | 751 | ||
| 719 | /* generic structure... | 752 | /* |
| 753 | * Generic structure... | ||
| 720 | */ | 754 | */ |
| 721 | struct mtrr_ops generic_mtrr_ops = { | 755 | struct mtrr_ops generic_mtrr_ops = { |
| 722 | .use_intel_if = 1, | 756 | .use_intel_if = 1, |
| 723 | .set_all = generic_set_all, | 757 | .set_all = generic_set_all, |
| 724 | .get = generic_get_mtrr, | 758 | .get = generic_get_mtrr, |
| 725 | .get_free_region = generic_get_free_region, | 759 | .get_free_region = generic_get_free_region, |
| 726 | .set = generic_set_mtrr, | 760 | .set = generic_set_mtrr, |
| 727 | .validate_add_page = generic_validate_add_page, | 761 | .validate_add_page = generic_validate_add_page, |
| 728 | .have_wrcomb = generic_have_wrcomb, | 762 | .have_wrcomb = generic_have_wrcomb, |
| 729 | }; | 763 | }; |
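generic_have_wrcomb() above tests bit 10 of IA32_MTRRCAP, the write-combining capability flag per the SDM. The same check with a named bit (the define is illustrative, not a kernel one):

	#define MTRRCAP_WC (1 << 10) /* illustrative name */

	static int have_wc(void)
	{
		unsigned long config, dummy;

		rdmsr(MSR_MTRRcap, config, dummy);
		return !!(config & MTRRCAP_WC);
	}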
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index fb73a52913a4..08b6ea4c62b4 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c | |||
| @@ -1,27 +1,28 @@ | |||
| 1 | #include <linux/init.h> | ||
| 2 | #include <linux/proc_fs.h> | ||
| 3 | #include <linux/capability.h> | 1 | #include <linux/capability.h> |
| 4 | #include <linux/ctype.h> | ||
| 5 | #include <linux/module.h> | ||
| 6 | #include <linux/seq_file.h> | 2 | #include <linux/seq_file.h> |
| 7 | #include <asm/uaccess.h> | 3 | #include <linux/uaccess.h> |
| 4 | #include <linux/proc_fs.h> | ||
| 5 | #include <linux/module.h> | ||
| 6 | #include <linux/ctype.h> | ||
| 7 | #include <linux/init.h> | ||
| 8 | 8 | ||
| 9 | #define LINE_SIZE 80 | 9 | #define LINE_SIZE 80 |
| 10 | 10 | ||
| 11 | #include <asm/mtrr.h> | 11 | #include <asm/mtrr.h> |
| 12 | |||
| 12 | #include "mtrr.h" | 13 | #include "mtrr.h" |
| 13 | 14 | ||
| 14 | #define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private) | 15 | #define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private) |
| 15 | 16 | ||
| 16 | static const char *const mtrr_strings[MTRR_NUM_TYPES] = | 17 | static const char *const mtrr_strings[MTRR_NUM_TYPES] = |
| 17 | { | 18 | { |
| 18 | "uncachable", /* 0 */ | 19 | "uncachable", /* 0 */ |
| 19 | "write-combining", /* 1 */ | 20 | "write-combining", /* 1 */ |
| 20 | "?", /* 2 */ | 21 | "?", /* 2 */ |
| 21 | "?", /* 3 */ | 22 | "?", /* 3 */ |
| 22 | "write-through", /* 4 */ | 23 | "write-through", /* 4 */ |
| 23 | "write-protect", /* 5 */ | 24 | "write-protect", /* 5 */ |
| 24 | "write-back", /* 6 */ | 25 | "write-back", /* 6 */ |
| 25 | }; | 26 | }; |
| 26 | 27 | ||
| 27 | const char *mtrr_attrib_to_str(int x) | 28 | const char *mtrr_attrib_to_str(int x) |
| @@ -35,8 +36,8 @@ static int | |||
| 35 | mtrr_file_add(unsigned long base, unsigned long size, | 36 | mtrr_file_add(unsigned long base, unsigned long size, |
| 36 | unsigned int type, bool increment, struct file *file, int page) | 37 | unsigned int type, bool increment, struct file *file, int page) |
| 37 | { | 38 | { |
| 39 | unsigned int *fcount = FILE_FCOUNT(file); | ||
| 38 | int reg, max; | 40 | int reg, max; |
| 39 | unsigned int *fcount = FILE_FCOUNT(file); | ||
| 40 | 41 | ||
| 41 | max = num_var_ranges; | 42 | max = num_var_ranges; |
| 42 | if (fcount == NULL) { | 43 | if (fcount == NULL) { |
| @@ -61,8 +62,8 @@ static int | |||
| 61 | mtrr_file_del(unsigned long base, unsigned long size, | 62 | mtrr_file_del(unsigned long base, unsigned long size, |
| 62 | struct file *file, int page) | 63 | struct file *file, int page) |
| 63 | { | 64 | { |
| 64 | int reg; | ||
| 65 | unsigned int *fcount = FILE_FCOUNT(file); | 65 | unsigned int *fcount = FILE_FCOUNT(file); |
| 66 | int reg; | ||
| 66 | 67 | ||
| 67 | if (!page) { | 68 | if (!page) { |
| 68 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) | 69 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) |
| @@ -81,13 +82,14 @@ mtrr_file_del(unsigned long base, unsigned long size, | |||
| 81 | return reg; | 82 | return reg; |
| 82 | } | 83 | } |
| 83 | 84 | ||
| 84 | /* RED-PEN: seq_file can seek now. this is ignored. */ | 85 | /* |
| 86 | * seq_file can seek but we ignore it. | ||
| 87 | * | ||
| 88 | * Format of control line: | ||
| 89 | * "base=%Lx size=%Lx type=%s" or "disable=%d" | ||
| 90 | */ | ||
| 85 | static ssize_t | 91 | static ssize_t |
| 86 | mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) | 92 | mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) |
| 87 | /* Format of control line: | ||
| 88 | "base=%Lx size=%Lx type=%s" OR: | ||
| 89 | "disable=%d" | ||
| 90 | */ | ||
| 91 | { | 93 | { |
| 92 | int i, err; | 94 | int i, err; |
| 93 | unsigned long reg; | 95 | unsigned long reg; |
| @@ -100,15 +102,18 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) | |||
| 100 | return -EPERM; | 102 | return -EPERM; |
| 101 | if (!len) | 103 | if (!len) |
| 102 | return -EINVAL; | 104 | return -EINVAL; |
| 105 | |||
| 103 | memset(line, 0, LINE_SIZE); | 106 | memset(line, 0, LINE_SIZE); |
| 104 | if (len > LINE_SIZE) | 107 | if (len > LINE_SIZE) |
| 105 | len = LINE_SIZE; | 108 | len = LINE_SIZE; |
| 106 | if (copy_from_user(line, buf, len - 1)) | 109 | if (copy_from_user(line, buf, len - 1)) |
| 107 | return -EFAULT; | 110 | return -EFAULT; |
| 111 | |||
| 108 | linelen = strlen(line); | 112 | linelen = strlen(line); |
| 109 | ptr = line + linelen - 1; | 113 | ptr = line + linelen - 1; |
| 110 | if (linelen && *ptr == '\n') | 114 | if (linelen && *ptr == '\n') |
| 111 | *ptr = '\0'; | 115 | *ptr = '\0'; |
| 116 | |||
| 112 | if (!strncmp(line, "disable=", 8)) { | 117 | if (!strncmp(line, "disable=", 8)) { |
| 113 | reg = simple_strtoul(line + 8, &ptr, 0); | 118 | reg = simple_strtoul(line + 8, &ptr, 0); |
| 114 | err = mtrr_del_page(reg, 0, 0); | 119 | err = mtrr_del_page(reg, 0, 0); |
| @@ -116,28 +121,35 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) | |||
| 116 | return err; | 121 | return err; |
| 117 | return len; | 122 | return len; |
| 118 | } | 123 | } |
| 124 | |||
| 119 | if (strncmp(line, "base=", 5)) | 125 | if (strncmp(line, "base=", 5)) |
| 120 | return -EINVAL; | 126 | return -EINVAL; |
| 127 | |||
| 121 | base = simple_strtoull(line + 5, &ptr, 0); | 128 | base = simple_strtoull(line + 5, &ptr, 0); |
| 122 | for (; isspace(*ptr); ++ptr) ; | 129 | for (; isspace(*ptr); ++ptr) |
| 130 | ; | ||
| 131 | |||
| 123 | if (strncmp(ptr, "size=", 5)) | 132 | if (strncmp(ptr, "size=", 5)) |
| 124 | return -EINVAL; | 133 | return -EINVAL; |
| 134 | |||
| 125 | size = simple_strtoull(ptr + 5, &ptr, 0); | 135 | size = simple_strtoull(ptr + 5, &ptr, 0); |
| 126 | if ((base & 0xfff) || (size & 0xfff)) | 136 | if ((base & 0xfff) || (size & 0xfff)) |
| 127 | return -EINVAL; | 137 | return -EINVAL; |
| 128 | for (; isspace(*ptr); ++ptr) ; | 138 | for (; isspace(*ptr); ++ptr) |
| 139 | ; | ||
| 140 | |||
| 129 | if (strncmp(ptr, "type=", 5)) | 141 | if (strncmp(ptr, "type=", 5)) |
| 130 | return -EINVAL; | 142 | return -EINVAL; |
| 131 | ptr += 5; | 143 | ptr += 5; |
| 132 | for (; isspace(*ptr); ++ptr) ; | 144 | for (; isspace(*ptr); ++ptr) |
| 145 | ; | ||
| 146 | |||
| 133 | for (i = 0; i < MTRR_NUM_TYPES; ++i) { | 147 | for (i = 0; i < MTRR_NUM_TYPES; ++i) { |
| 134 | if (strcmp(ptr, mtrr_strings[i])) | 148 | if (strcmp(ptr, mtrr_strings[i])) |
| 135 | continue; | 149 | continue; |
| 136 | base >>= PAGE_SHIFT; | 150 | base >>= PAGE_SHIFT; |
| 137 | size >>= PAGE_SHIFT; | 151 | size >>= PAGE_SHIFT; |
| 138 | err = | 152 | err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true); |
| 139 | mtrr_add_page((unsigned long) base, (unsigned long) size, i, | ||
| 140 | true); | ||
| 141 | if (err < 0) | 153 | if (err < 0) |
| 142 | return err; | 154 | return err; |
| 143 | return len; | 155 | return len; |
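A minimal userspace sketch of the control-line format mtrr_write() parses, equivalent to echoing the line into /proc/mtrr as root; the base/size values are examples only:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char line[] =
		"base=0xf8000000 size=0x400000 type=write-combining\n";
	int fd = open("/proc/mtrr", O_WRONLY); /* needs CAP_SYS_ADMIN */

	if (fd < 0)
		return 1;
	if (write(fd, line, sizeof(line) - 1) < 0) {
		close(fd);
		return 1;
	}
	return close(fd) ? 1 : 0;
}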
| @@ -181,7 +193,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
| 181 | case MTRRIOC32_SET_PAGE_ENTRY: | 193 | case MTRRIOC32_SET_PAGE_ENTRY: |
| 182 | case MTRRIOC32_DEL_PAGE_ENTRY: | 194 | case MTRRIOC32_DEL_PAGE_ENTRY: |
| 183 | case MTRRIOC32_KILL_PAGE_ENTRY: { | 195 | case MTRRIOC32_KILL_PAGE_ENTRY: { |
| 184 | struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)__arg; | 196 | struct mtrr_sentry32 __user *s32; |
| 197 | |||
| 198 | s32 = (struct mtrr_sentry32 __user *)__arg; | ||
| 185 | err = get_user(sentry.base, &s32->base); | 199 | err = get_user(sentry.base, &s32->base); |
| 186 | err |= get_user(sentry.size, &s32->size); | 200 | err |= get_user(sentry.size, &s32->size); |
| 187 | err |= get_user(sentry.type, &s32->type); | 201 | err |= get_user(sentry.type, &s32->type); |
| @@ -191,7 +205,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
| 191 | } | 205 | } |
| 192 | case MTRRIOC32_GET_ENTRY: | 206 | case MTRRIOC32_GET_ENTRY: |
| 193 | case MTRRIOC32_GET_PAGE_ENTRY: { | 207 | case MTRRIOC32_GET_PAGE_ENTRY: { |
| 194 | struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg; | 208 | struct mtrr_gentry32 __user *g32; |
| 209 | |||
| 210 | g32 = (struct mtrr_gentry32 __user *)__arg; | ||
| 195 | err = get_user(gentry.regnum, &g32->regnum); | 211 | err = get_user(gentry.regnum, &g32->regnum); |
| 196 | err |= get_user(gentry.base, &g32->base); | 212 | err |= get_user(gentry.base, &g32->base); |
| 197 | err |= get_user(gentry.size, &g32->size); | 213 | err |= get_user(gentry.size, &g32->size); |
| @@ -314,7 +330,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
| 314 | if (err) | 330 | if (err) |
| 315 | return err; | 331 | return err; |
| 316 | 332 | ||
| 317 | switch(cmd) { | 333 | switch (cmd) { |
| 318 | case MTRRIOC_GET_ENTRY: | 334 | case MTRRIOC_GET_ENTRY: |
| 319 | case MTRRIOC_GET_PAGE_ENTRY: | 335 | case MTRRIOC_GET_PAGE_ENTRY: |
| 320 | if (copy_to_user(arg, &gentry, sizeof gentry)) | 336 | if (copy_to_user(arg, &gentry, sizeof gentry)) |
| @@ -323,7 +339,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
| 323 | #ifdef CONFIG_COMPAT | 339 | #ifdef CONFIG_COMPAT |
| 324 | case MTRRIOC32_GET_ENTRY: | 340 | case MTRRIOC32_GET_ENTRY: |
| 325 | case MTRRIOC32_GET_PAGE_ENTRY: { | 341 | case MTRRIOC32_GET_PAGE_ENTRY: { |
| 326 | struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg; | 342 | struct mtrr_gentry32 __user *g32; |
| 343 | |||
| 344 | g32 = (struct mtrr_gentry32 __user *)__arg; | ||
| 327 | err = put_user(gentry.base, &g32->base); | 345 | err = put_user(gentry.base, &g32->base); |
| 328 | err |= put_user(gentry.size, &g32->size); | 346 | err |= put_user(gentry.size, &g32->size); |
| 329 | err |= put_user(gentry.regnum, &g32->regnum); | 347 | err |= put_user(gentry.regnum, &g32->regnum); |
| @@ -335,11 +353,10 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) | |||
| 335 | return err; | 353 | return err; |
| 336 | } | 354 | } |
| 337 | 355 | ||
| 338 | static int | 356 | static int mtrr_close(struct inode *ino, struct file *file) |
| 339 | mtrr_close(struct inode *ino, struct file *file) | ||
| 340 | { | 357 | { |
| 341 | int i, max; | ||
| 342 | unsigned int *fcount = FILE_FCOUNT(file); | 358 | unsigned int *fcount = FILE_FCOUNT(file); |
| 359 | int i, max; | ||
| 343 | 360 | ||
| 344 | if (fcount != NULL) { | 361 | if (fcount != NULL) { |
| 345 | max = num_var_ranges; | 362 | max = num_var_ranges; |
| @@ -359,22 +376,22 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset); | |||
| 359 | 376 | ||
| 360 | static int mtrr_open(struct inode *inode, struct file *file) | 377 | static int mtrr_open(struct inode *inode, struct file *file) |
| 361 | { | 378 | { |
| 362 | if (!mtrr_if) | 379 | if (!mtrr_if) |
| 363 | return -EIO; | 380 | return -EIO; |
| 364 | if (!mtrr_if->get) | 381 | if (!mtrr_if->get) |
| 365 | return -ENXIO; | 382 | return -ENXIO; |
| 366 | return single_open(file, mtrr_seq_show, NULL); | 383 | return single_open(file, mtrr_seq_show, NULL); |
| 367 | } | 384 | } |
| 368 | 385 | ||
| 369 | static const struct file_operations mtrr_fops = { | 386 | static const struct file_operations mtrr_fops = { |
| 370 | .owner = THIS_MODULE, | 387 | .owner = THIS_MODULE, |
| 371 | .open = mtrr_open, | 388 | .open = mtrr_open, |
| 372 | .read = seq_read, | 389 | .read = seq_read, |
| 373 | .llseek = seq_lseek, | 390 | .llseek = seq_lseek, |
| 374 | .write = mtrr_write, | 391 | .write = mtrr_write, |
| 375 | .unlocked_ioctl = mtrr_ioctl, | 392 | .unlocked_ioctl = mtrr_ioctl, |
| 376 | .compat_ioctl = mtrr_ioctl, | 393 | .compat_ioctl = mtrr_ioctl, |
| 377 | .release = mtrr_close, | 394 | .release = mtrr_close, |
| 378 | }; | 395 | }; |
| 379 | 396 | ||
| 380 | static int mtrr_seq_show(struct seq_file *seq, void *offset) | 397 | static int mtrr_seq_show(struct seq_file *seq, void *offset) |
| @@ -388,23 +405,24 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset) | |||
| 388 | max = num_var_ranges; | 405 | max = num_var_ranges; |
| 389 | for (i = 0; i < max; i++) { | 406 | for (i = 0; i < max; i++) { |
| 390 | mtrr_if->get(i, &base, &size, &type); | 407 | mtrr_if->get(i, &base, &size, &type); |
| 391 | if (size == 0) | 408 | if (size == 0) { |
| 392 | mtrr_usage_table[i] = 0; | 409 | mtrr_usage_table[i] = 0; |
| 393 | else { | 410 | continue; |
| 394 | if (size < (0x100000 >> PAGE_SHIFT)) { | ||
| 395 | /* less than 1MB */ | ||
| 396 | factor = 'K'; | ||
| 397 | size <<= PAGE_SHIFT - 10; | ||
| 398 | } else { | ||
| 399 | factor = 'M'; | ||
| 400 | size >>= 20 - PAGE_SHIFT; | ||
| 401 | } | ||
| 402 | /* RED-PEN: base can be > 32bit */ | ||
| 403 | len += seq_printf(seq, | ||
| 404 | "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n", | ||
| 405 | i, base, base >> (20 - PAGE_SHIFT), size, factor, | ||
| 406 | mtrr_usage_table[i], mtrr_attrib_to_str(type)); | ||
| 407 | } | 411 | } |
| 412 | if (size < (0x100000 >> PAGE_SHIFT)) { | ||
| 413 | /* less than 1MB */ | ||
| 414 | factor = 'K'; | ||
| 415 | size <<= PAGE_SHIFT - 10; | ||
| 416 | } else { | ||
| 417 | factor = 'M'; | ||
| 418 | size >>= 20 - PAGE_SHIFT; | ||
| 419 | } | ||
| 420 | /* Base can be > 32bit */ | ||
| 421 | len += seq_printf(seq, "reg%02i: base=0x%06lx000 " | ||
| 422 | "(%5luMB), size=%5lu%cB, count=%d: %s\n", | ||
| 423 | i, base, base >> (20 - PAGE_SHIFT), size, | ||
| 424 | factor, mtrr_usage_table[i], | ||
| 425 | mtrr_attrib_to_str(type)); | ||
| 408 | } | 426 | } |
| 409 | return 0; | 427 | return 0; |
| 410 | } | 428 | } |
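Unit conversion in the reflowed block above, assuming PAGE_SHIFT == 12: size is in pages, so shifting left by PAGE_SHIFT - 10 yields KB and shifting right by 20 - PAGE_SHIFT yields MB. For example:

	unsigned long size = 0x80;   /* 0x80 pages */
	size <<= PAGE_SHIFT - 10;    /* 0x80 << 2 == 512 KB */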
| @@ -422,6 +440,5 @@ static int __init mtrr_if_init(void) | |||
| 422 | proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); | 440 | proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); |
| 423 | return 0; | 441 | return 0; |
| 424 | } | 442 | } |
| 425 | |||
| 426 | arch_initcall(mtrr_if_init); | 443 | arch_initcall(mtrr_if_init); |
| 427 | #endif /* CONFIG_PROC_FS */ | 444 | #endif /* CONFIG_PROC_FS */ |
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 8fc248b5aeaf..84e83de54575 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
| @@ -25,43 +25,49 @@ | |||
| 25 | Operating System Writer's Guide" (Intel document number 242692), | 25 | Operating System Writer's Guide" (Intel document number 242692), |
| 26 | section 11.11.7 | 26 | section 11.11.7 |
| 27 | 27 | ||
| 28 | This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> | 28 | This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> |
| 29 | on 6-7 March 2002. | 29 | on 6-7 March 2002. |
| 30 | Source: Intel Architecture Software Developers Manual, Volume 3: | 30 | Source: Intel Architecture Software Developers Manual, Volume 3: |
| 31 | System Programming Guide; Section 9.11. (1997 edition - PPro). | 31 | System Programming Guide; Section 9.11. (1997 edition - PPro). |
| 32 | */ | 32 | */ |
| 33 | 33 | ||
| 34 | #define DEBUG | ||
| 35 | |||
| 36 | #include <linux/types.h> /* FIXME: kvm_para.h needs this */ | ||
| 37 | |||
| 38 | #include <linux/kvm_para.h> | ||
| 39 | #include <linux/uaccess.h> | ||
| 34 | #include <linux/module.h> | 40 | #include <linux/module.h> |
| 41 | #include <linux/mutex.h> | ||
| 35 | #include <linux/init.h> | 42 | #include <linux/init.h> |
| 43 | #include <linux/sort.h> | ||
| 44 | #include <linux/cpu.h> | ||
| 36 | #include <linux/pci.h> | 45 | #include <linux/pci.h> |
| 37 | #include <linux/smp.h> | 46 | #include <linux/smp.h> |
| 38 | #include <linux/cpu.h> | ||
| 39 | #include <linux/mutex.h> | ||
| 40 | #include <linux/sort.h> | ||
| 41 | 47 | ||
| 48 | #include <asm/processor.h> | ||
| 42 | #include <asm/e820.h> | 49 | #include <asm/e820.h> |
| 43 | #include <asm/mtrr.h> | 50 | #include <asm/mtrr.h> |
| 44 | #include <asm/uaccess.h> | ||
| 45 | #include <asm/processor.h> | ||
| 46 | #include <asm/msr.h> | 51 | #include <asm/msr.h> |
| 47 | #include <asm/kvm_para.h> | 52 | |
| 48 | #include "mtrr.h" | 53 | #include "mtrr.h" |
| 49 | 54 | ||
| 50 | u32 num_var_ranges = 0; | 55 | u32 num_var_ranges; |
| 51 | 56 | ||
| 52 | unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; | 57 | unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; |
| 53 | static DEFINE_MUTEX(mtrr_mutex); | 58 | static DEFINE_MUTEX(mtrr_mutex); |
| 54 | 59 | ||
| 55 | u64 size_or_mask, size_and_mask; | 60 | u64 size_or_mask, size_and_mask; |
| 61 | static bool mtrr_aps_delayed_init; | ||
| 56 | 62 | ||
| 57 | static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {}; | 63 | static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM]; |
| 58 | 64 | ||
| 59 | struct mtrr_ops * mtrr_if = NULL; | 65 | struct mtrr_ops *mtrr_if; |
| 60 | 66 | ||
| 61 | static void set_mtrr(unsigned int reg, unsigned long base, | 67 | static void set_mtrr(unsigned int reg, unsigned long base, |
| 62 | unsigned long size, mtrr_type type); | 68 | unsigned long size, mtrr_type type); |
| 63 | 69 | ||
| 64 | void set_mtrr_ops(struct mtrr_ops * ops) | 70 | void set_mtrr_ops(struct mtrr_ops *ops) |
| 65 | { | 71 | { |
| 66 | if (ops->vendor && ops->vendor < X86_VENDOR_NUM) | 72 | if (ops->vendor && ops->vendor < X86_VENDOR_NUM) |
| 67 | mtrr_ops[ops->vendor] = ops; | 73 | mtrr_ops[ops->vendor] = ops; |
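`set_mtrr_ops()` above registers a vendor implementation in a table indexed by `ops->vendor`, and everything else dispatches through the single `mtrr_if` pointer. A hedged sketch of that ops-table pattern in plain C; the type and function names here are illustrative stand-ins, not the kernel's:

```c
#include <stddef.h>

#define MAX_VENDORS 8	/* illustrative bound, stands in for X86_VENDOR_NUM */

struct cache_ops {
	unsigned int vendor;
	void (*set_all)(void);
};

static struct cache_ops *ops_table[MAX_VENDORS];
static struct cache_ops *active_ops;	/* plays the role of mtrr_if */

/* Same guard as set_mtrr_ops(): vendor 0 and out-of-range ids are ignored. */
static void register_ops(struct cache_ops *ops)
{
	if (ops->vendor && ops->vendor < MAX_VENDORS)
		ops_table[ops->vendor] = ops;
}

static void select_ops(unsigned int vendor)
{
	if (vendor < MAX_VENDORS)
		active_ops = ops_table[vendor];
}

int main(void)
{
	static struct cache_ops intel_ops = { .vendor = 1 /* illustrative id */ };

	register_ops(&intel_ops);
	select_ops(1);
	return active_ops == &intel_ops ? 0 : 1;
}
```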
| @@ -72,30 +78,36 @@ static int have_wrcomb(void) | |||
| 72 | { | 78 | { |
| 73 | struct pci_dev *dev; | 79 | struct pci_dev *dev; |
| 74 | u8 rev; | 80 | u8 rev; |
| 75 | 81 | ||
| 76 | if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) { | 82 | dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL); |
| 77 | /* ServerWorks LE chipsets < rev 6 have problems with write-combining | 83 | if (dev != NULL) { |
| 78 | Don't allow it and leave room for other chipsets to be tagged */ | 84 | /* |
| 85 | * ServerWorks LE chipsets < rev 6 have problems with | ||
| 86 | * write-combining. Don't allow it and leave room for other | ||
| 87 | * chipsets to be tagged | ||
| 88 | */ | ||
| 79 | if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && | 89 | if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && |
| 80 | dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { | 90 | dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { |
| 81 | pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); | 91 | pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); |
| 82 | if (rev <= 5) { | 92 | if (rev <= 5) { |
| 83 | printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n"); | 93 | pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n"); |
| 84 | pci_dev_put(dev); | 94 | pci_dev_put(dev); |
| 85 | return 0; | 95 | return 0; |
| 86 | } | 96 | } |
| 87 | } | 97 | } |
| 88 | /* Intel 450NX errata # 23. Non ascending cacheline evictions to | 98 | /* |
| 89 | write combining memory may resulting in data corruption */ | 99 | * Intel 450NX errata # 23. Non ascending cacheline evictions to |
| 100 | * write combining memory may result in data corruption | ||
| 101 | */ | ||
| 90 | if (dev->vendor == PCI_VENDOR_ID_INTEL && | 102 | if (dev->vendor == PCI_VENDOR_ID_INTEL && |
| 91 | dev->device == PCI_DEVICE_ID_INTEL_82451NX) { | 103 | dev->device == PCI_DEVICE_ID_INTEL_82451NX) { |
| 92 | printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); | 104 | pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); |
| 93 | pci_dev_put(dev); | 105 | pci_dev_put(dev); |
| 94 | return 0; | 106 | return 0; |
| 95 | } | 107 | } |
| 96 | pci_dev_put(dev); | 108 | pci_dev_put(dev); |
| 97 | } | 109 | } |
| 98 | return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0); | 110 | return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0; |
| 99 | } | 111 | } |
| 100 | 112 | ||
| 101 | /* This function returns the number of variable MTRRs */ | 113 | /* This function returns the number of variable MTRRs */ |
| @@ -103,12 +115,13 @@ static void __init set_num_var_ranges(void) | |||
| 103 | { | 115 | { |
| 104 | unsigned long config = 0, dummy; | 116 | unsigned long config = 0, dummy; |
| 105 | 117 | ||
| 106 | if (use_intel()) { | 118 | if (use_intel()) |
| 107 | rdmsr(MSR_MTRRcap, config, dummy); | 119 | rdmsr(MSR_MTRRcap, config, dummy); |
| 108 | } else if (is_cpu(AMD)) | 120 | else if (is_cpu(AMD)) |
| 109 | config = 2; | 121 | config = 2; |
| 110 | else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) | 122 | else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) |
| 111 | config = 8; | 123 | config = 8; |
| 124 | |||
| 112 | num_var_ranges = config & 0xff; | 125 | num_var_ranges = config & 0xff; |
| 113 | } | 126 | } |
| 114 | 127 | ||
| @@ -130,10 +143,12 @@ struct set_mtrr_data { | |||
| 130 | mtrr_type smp_type; | 143 | mtrr_type smp_type; |
| 131 | }; | 144 | }; |
| 132 | 145 | ||
| 146 | /** | ||
| 147 | * ipi_handler - Synchronisation handler. Executed by "other" CPUs. | ||
| 148 | * | ||
| 149 | * Returns nothing. | ||
| 150 | */ | ||
| 133 | static void ipi_handler(void *info) | 151 | static void ipi_handler(void *info) |
| 134 | /* [SUMMARY] Synchronisation handler. Executed by "other" CPUs. | ||
| 135 | [RETURNS] Nothing. | ||
| 136 | */ | ||
| 137 | { | 152 | { |
| 138 | #ifdef CONFIG_SMP | 153 | #ifdef CONFIG_SMP |
| 139 | struct set_mtrr_data *data = info; | 154 | struct set_mtrr_data *data = info; |
| @@ -142,18 +157,22 @@ static void ipi_handler(void *info) | |||
| 142 | local_irq_save(flags); | 157 | local_irq_save(flags); |
| 143 | 158 | ||
| 144 | atomic_dec(&data->count); | 159 | atomic_dec(&data->count); |
| 145 | while(!atomic_read(&data->gate)) | 160 | while (!atomic_read(&data->gate)) |
| 146 | cpu_relax(); | 161 | cpu_relax(); |
| 147 | 162 | ||
| 148 | /* The master has cleared me to execute */ | 163 | /* The master has cleared me to execute */ |
| 149 | if (data->smp_reg != ~0U) | 164 | if (data->smp_reg != ~0U) { |
| 150 | mtrr_if->set(data->smp_reg, data->smp_base, | 165 | mtrr_if->set(data->smp_reg, data->smp_base, |
| 151 | data->smp_size, data->smp_type); | 166 | data->smp_size, data->smp_type); |
| 152 | else | 167 | } else if (mtrr_aps_delayed_init) { |
| 168 | /* | ||
| 169 | * Initialize the MTRRs in addition to the synchronisation. | ||
| 170 | */ | ||
| 153 | mtrr_if->set_all(); | 171 | mtrr_if->set_all(); |
| 172 | } | ||
| 154 | 173 | ||
| 155 | atomic_dec(&data->count); | 174 | atomic_dec(&data->count); |
| 156 | while(atomic_read(&data->gate)) | 175 | while (atomic_read(&data->gate)) |
| 157 | cpu_relax(); | 176 | cpu_relax(); |
| 158 | 177 | ||
| 159 | atomic_dec(&data->count); | 178 | atomic_dec(&data->count); |
| @@ -161,7 +180,8 @@ static void ipi_handler(void *info) | |||
| 161 | #endif | 180 | #endif |
| 162 | } | 181 | } |
| 163 | 182 | ||
| 164 | static inline int types_compatible(mtrr_type type1, mtrr_type type2) { | 183 | static inline int types_compatible(mtrr_type type1, mtrr_type type2) |
| 184 | { | ||
| 165 | return type1 == MTRR_TYPE_UNCACHABLE || | 185 | return type1 == MTRR_TYPE_UNCACHABLE || |
| 166 | type2 == MTRR_TYPE_UNCACHABLE || | 186 | type2 == MTRR_TYPE_UNCACHABLE || |
| 167 | (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) || | 187 | (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) || |
| @@ -176,10 +196,10 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) { | |||
| 176 | * @type: mtrr type | 196 | * @type: mtrr type |
| 177 | * | 197 | * |
| 178 | * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: | 198 | * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: |
| 179 | * | 199 | * |
| 180 | * 1. Send IPI to do the following: | 200 | * 1. Send IPI to do the following: |
| 181 | * 2. Disable Interrupts | 201 | * 2. Disable Interrupts |
| 182 | * 3. Wait for all procs to do so | 202 | * 3. Wait for all procs to do so |
| 183 | * 4. Enter no-fill cache mode | 203 | * 4. Enter no-fill cache mode |
| 184 | * 5. Flush caches | 204 | * 5. Flush caches |
| 185 | * 6. Clear PGE bit | 205 | * 6. Clear PGE bit |
| @@ -189,26 +209,27 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) { | |||
| 189 | * 10. Enable all range registers | 209 | * 10. Enable all range registers |
| 190 | * 11. Flush all TLBs and caches again | 210 | * 11. Flush all TLBs and caches again |
| 191 | * 12. Enter normal cache mode and reenable caching | 211 | * 12. Enter normal cache mode and reenable caching |
| 192 | * 13. Set PGE | 212 | * 13. Set PGE |
| 193 | * 14. Wait for buddies to catch up | 213 | * 14. Wait for buddies to catch up |
| 194 | * 15. Enable interrupts. | 214 | * 15. Enable interrupts. |
| 195 | * | 215 | * |
| 196 | * What does that mean for us? Well, first we set data.count to the number | 216 | * What does that mean for us? Well, first we set data.count to the number |
| 197 | * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait | 217 | * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait |
| 198 | * until it hits 0 and proceed. We set the data.gate flag and reset data.count. | 218 | * until it hits 0 and proceed. We set the data.gate flag and reset data.count. |
| 199 | * Meanwhile, they are waiting for that flag to be set. Once it's set, each | 219 | * Meanwhile, they are waiting for that flag to be set. Once it's set, each |
| 200 | * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it | 220 | * CPU goes through the transition of updating MTRRs. |
| 201 | * differently, so we call mtrr_if->set() callback and let them take care of it. | 221 | * The CPU vendors may each do it differently, |
| 202 | * When they're done, they again decrement data->count and wait for data.gate to | 222 | * so we call mtrr_if->set() callback and let them take care of it. |
| 203 | * be reset. | 223 | * When they're done, they again decrement data->count and wait for data.gate |
| 204 | * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag. | 224 | * to be reset. |
| 225 | * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag | ||
| 205 | * Everyone then enables interrupts and we all continue on. | 226 | * Everyone then enables interrupts and we all continue on. |
| 206 | * | 227 | * |
| 207 | * Note that the mechanism is the same for UP systems, too; all the SMP stuff | 228 | * Note that the mechanism is the same for UP systems, too; all the SMP stuff |
| 208 | * becomes nops. | 229 | * becomes nops. |
| 209 | */ | 230 | */ |
| 210 | static void set_mtrr(unsigned int reg, unsigned long base, | 231 | static void |
| 211 | unsigned long size, mtrr_type type) | 232 | set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) |
| 212 | { | 233 | { |
| 213 | struct set_mtrr_data data; | 234 | struct set_mtrr_data data; |
| 214 | unsigned long flags; | 235 | unsigned long flags; |
| @@ -218,121 +239,124 @@ static void set_mtrr(unsigned int reg, unsigned long base, | |||
| 218 | data.smp_size = size; | 239 | data.smp_size = size; |
| 219 | data.smp_type = type; | 240 | data.smp_type = type; |
| 220 | atomic_set(&data.count, num_booting_cpus() - 1); | 241 | atomic_set(&data.count, num_booting_cpus() - 1); |
| 221 | /* make sure data.count is visible before unleashing other CPUs */ | 242 | |
| 243 | /* Make sure data.count is visible before unleashing other CPUs */ | ||
| 222 | smp_wmb(); | 244 | smp_wmb(); |
| 223 | atomic_set(&data.gate,0); | 245 | atomic_set(&data.gate, 0); |
| 224 | 246 | ||
| 225 | /* Start the ball rolling on other CPUs */ | 247 | /* Start the ball rolling on other CPUs */ |
| 226 | if (smp_call_function(ipi_handler, &data, 0) != 0) | 248 | if (smp_call_function(ipi_handler, &data, 0) != 0) |
| 227 | panic("mtrr: timed out waiting for other CPUs\n"); | 249 | panic("mtrr: timed out waiting for other CPUs\n"); |
| 228 | 250 | ||
| 229 | local_irq_save(flags); | 251 | local_irq_save(flags); |
| 230 | 252 | ||
| 231 | while(atomic_read(&data.count)) | 253 | while (atomic_read(&data.count)) |
| 232 | cpu_relax(); | 254 | cpu_relax(); |
| 233 | 255 | ||
| 234 | /* ok, reset count and toggle gate */ | 256 | /* Ok, reset count and toggle gate */ |
| 235 | atomic_set(&data.count, num_booting_cpus() - 1); | 257 | atomic_set(&data.count, num_booting_cpus() - 1); |
| 236 | smp_wmb(); | 258 | smp_wmb(); |
| 237 | atomic_set(&data.gate,1); | 259 | atomic_set(&data.gate, 1); |
| 238 | 260 | ||
| 239 | /* do our MTRR business */ | 261 | /* Do our MTRR business */ |
| 240 | 262 | ||
| 241 | /* HACK! | 263 | /* |
| 264 | * HACK! | ||
| 242 | * We use this same function to initialize the mtrrs on boot. | 265 | * We use this same function to initialize the mtrrs on boot. |
| 243 | * The state of the boot cpu's mtrrs has been saved, and we want | 266 | * The state of the boot cpu's mtrrs has been saved, and we want |
| 244 | * to replicate across all the APs. | 267 | * to replicate across all the APs. |
| 245 | * If we're doing that @reg is set to something special... | 268 | * If we're doing that @reg is set to something special... |
| 246 | */ | 269 | */ |
| 247 | if (reg != ~0U) | 270 | if (reg != ~0U) |
| 248 | mtrr_if->set(reg,base,size,type); | 271 | mtrr_if->set(reg, base, size, type); |
| 272 | else if (!mtrr_aps_delayed_init) | ||
| 273 | mtrr_if->set_all(); | ||
| 249 | 274 | ||
| 250 | /* wait for the others */ | 275 | /* Wait for the others */ |
| 251 | while(atomic_read(&data.count)) | 276 | while (atomic_read(&data.count)) |
| 252 | cpu_relax(); | 277 | cpu_relax(); |
| 253 | 278 | ||
| 254 | atomic_set(&data.count, num_booting_cpus() - 1); | 279 | atomic_set(&data.count, num_booting_cpus() - 1); |
| 255 | smp_wmb(); | 280 | smp_wmb(); |
| 256 | atomic_set(&data.gate,0); | 281 | atomic_set(&data.gate, 0); |
| 257 | 282 | ||
| 258 | /* | 283 | /* |
| 259 | * Wait here for everyone to have seen the gate change | 284 | * Wait here for everyone to have seen the gate change |
| 260 | * So we're the last ones to touch 'data' | 285 | * So we're the last ones to touch 'data' |
| 261 | */ | 286 | */ |
| 262 | while(atomic_read(&data.count)) | 287 | while (atomic_read(&data.count)) |
| 263 | cpu_relax(); | 288 | cpu_relax(); |
| 264 | 289 | ||
| 265 | local_irq_restore(flags); | 290 | local_irq_restore(flags); |
| 266 | } | 291 | } |
| 267 | 292 | ||
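The data.count/data.gate handshake described above is a small reusable barrier: the master waits for count to drain to zero, resets it, flips the gate, and the other CPUs spin on the gate between phases. A userspace sketch of one full round using C11 atomics, with pthreads standing in for the IPI'd CPUs (the busy-waiting is deliberate, mirroring the cpu_relax() loops):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 3

static atomic_int count;
static atomic_int gate;

static void *worker(void *arg)
{
	/* "I have parked" (interrupts off, in the kernel version) */
	atomic_fetch_sub(&count, 1);
	while (!atomic_load(&gate))	/* wait to be cleared to execute */
		;

	/* ... per-CPU MTRR update would happen here ... */

	atomic_fetch_sub(&count, 1);
	while (atomic_load(&gate))	/* wait for the master to finish */
		;

	atomic_fetch_sub(&count, 1);	/* last touch of the shared data */
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	int i;

	atomic_store(&count, NWORKERS);
	atomic_store(&gate, 0);
	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);

	while (atomic_load(&count))	/* everyone parked? */
		;
	atomic_store(&count, NWORKERS);
	atomic_store(&gate, 1);		/* unleash the workers */

	/* ... master's own MTRR update would happen here ... */

	while (atomic_load(&count))	/* everyone done with the update? */
		;
	atomic_store(&count, NWORKERS);
	atomic_store(&gate, 0);

	while (atomic_load(&count))	/* everyone saw the gate close? */
		;
	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	printf("rendezvous complete\n");
	return 0;
}
```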
| 268 | /** | 293 | /** |
| 269 | * mtrr_add_page - Add a memory type region | 294 | * mtrr_add_page - Add a memory type region |
| 270 | * @base: Physical base address of region in pages (in units of 4 kB!) | 295 | * @base: Physical base address of region in pages (in units of 4 kB!) |
| 271 | * @size: Physical size of region in pages (4 kB) | 296 | * @size: Physical size of region in pages (4 kB) |
| 272 | * @type: Type of MTRR desired | 297 | * @type: Type of MTRR desired |
| 273 | * @increment: If this is true do usage counting on the region | 298 | * @increment: If this is true do usage counting on the region |
| 274 | * | 299 | * |
| 275 | * Memory type region registers control the caching on newer Intel and | 300 | * Memory type region registers control the caching on newer Intel and |
| 276 | * non Intel processors. This function allows drivers to request an | 301 | * non Intel processors. This function allows drivers to request an |
| 277 | * MTRR is added. The details and hardware specifics of each processor's | 302 | * MTRR is added. The details and hardware specifics of each processor's |
| 278 | * implementation are hidden from the caller, but nevertheless the | 303 | * implementation are hidden from the caller, but nevertheless the |
| 279 | * caller should expect to need to provide a power of two size on an | 304 | * caller should expect to need to provide a power of two size on an |
| 280 | * equivalent power of two boundary. | 305 | * equivalent power of two boundary. |
| 281 | * | 306 | * |
| 282 | * If the region cannot be added either because all regions are in use | 307 | * If the region cannot be added either because all regions are in use |
| 283 | * or the CPU cannot support it a negative value is returned. On success | 308 | * or the CPU cannot support it a negative value is returned. On success |
| 284 | * the register number for this entry is returned, but should be treated | 309 | * the register number for this entry is returned, but should be treated |
| 285 | * as a cookie only. | 310 | * as a cookie only. |
| 286 | * | 311 | * |
| 287 | * On a multiprocessor machine the changes are made to all processors. | 312 | * On a multiprocessor machine the changes are made to all processors. |
| 288 | * This is required on x86 by the Intel processors. | 313 | * This is required on x86 by the Intel processors. |
| 289 | * | 314 | * |
| 290 | * The available types are | 315 | * The available types are |
| 291 | * | 316 | * |
| 292 | * %MTRR_TYPE_UNCACHABLE - No caching | 317 | * %MTRR_TYPE_UNCACHABLE - No caching |
| 293 | * | 318 | * |
| 294 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever | 319 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever |
| 295 | * | 320 | * |
| 296 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts | 321 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts |
| 297 | * | 322 | * |
| 298 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes | 323 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes |
| 299 | * | 324 | * |
| 300 | * BUGS: Needs a quiet flag for the cases where drivers do not mind | 325 | * BUGS: Needs a quiet flag for the cases where drivers do not mind |
| 301 | * failures and do not wish system log messages to be sent. | 326 | * failures and do not wish system log messages to be sent. |
| 302 | */ | 327 | */ |
| 303 | 328 | int mtrr_add_page(unsigned long base, unsigned long size, | |
| 304 | int mtrr_add_page(unsigned long base, unsigned long size, | ||
| 305 | unsigned int type, bool increment) | 329 | unsigned int type, bool increment) |
| 306 | { | 330 | { |
| 331 | unsigned long lbase, lsize; | ||
| 307 | int i, replace, error; | 332 | int i, replace, error; |
| 308 | mtrr_type ltype; | 333 | mtrr_type ltype; |
| 309 | unsigned long lbase, lsize; | ||
| 310 | 334 | ||
| 311 | if (!mtrr_if) | 335 | if (!mtrr_if) |
| 312 | return -ENXIO; | 336 | return -ENXIO; |
| 313 | 337 | ||
| 314 | if ((error = mtrr_if->validate_add_page(base,size,type))) | 338 | error = mtrr_if->validate_add_page(base, size, type); |
| 339 | if (error) | ||
| 315 | return error; | 340 | return error; |
| 316 | 341 | ||
| 317 | if (type >= MTRR_NUM_TYPES) { | 342 | if (type >= MTRR_NUM_TYPES) { |
| 318 | printk(KERN_WARNING "mtrr: type: %u invalid\n", type); | 343 | pr_warning("mtrr: type: %u invalid\n", type); |
| 319 | return -EINVAL; | 344 | return -EINVAL; |
| 320 | } | 345 | } |
| 321 | 346 | ||
| 322 | /* If the type is WC, check that this processor supports it */ | 347 | /* If the type is WC, check that this processor supports it */ |
| 323 | if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { | 348 | if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { |
| 324 | printk(KERN_WARNING | 349 | pr_warning("mtrr: your processor doesn't support write-combining\n"); |
| 325 | "mtrr: your processor doesn't support write-combining\n"); | ||
| 326 | return -ENOSYS; | 350 | return -ENOSYS; |
| 327 | } | 351 | } |
| 328 | 352 | ||
| 329 | if (!size) { | 353 | if (!size) { |
| 330 | printk(KERN_WARNING "mtrr: zero sized request\n"); | 354 | pr_warning("mtrr: zero sized request\n"); |
| 331 | return -EINVAL; | 355 | return -EINVAL; |
| 332 | } | 356 | } |
| 333 | 357 | ||
| 334 | if (base & size_or_mask || size & size_or_mask) { | 358 | if (base & size_or_mask || size & size_or_mask) { |
| 335 | printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n"); | 359 | pr_warning("mtrr: base or size exceeds the MTRR width\n"); |
| 336 | return -EINVAL; | 360 | return -EINVAL; |
| 337 | } | 361 | } |
| 338 | 362 | ||
| @@ -341,36 +365,40 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
| 341 | 365 | ||
| 342 | /* No CPU hotplug when we change MTRR entries */ | 366 | /* No CPU hotplug when we change MTRR entries */ |
| 343 | get_online_cpus(); | 367 | get_online_cpus(); |
| 344 | /* Search for existing MTRR */ | 368 | |
| 369 | /* Search for existing MTRR */ | ||
| 345 | mutex_lock(&mtrr_mutex); | 370 | mutex_lock(&mtrr_mutex); |
| 346 | for (i = 0; i < num_var_ranges; ++i) { | 371 | for (i = 0; i < num_var_ranges; ++i) { |
| 347 | mtrr_if->get(i, &lbase, &lsize, <ype); | 372 | mtrr_if->get(i, &lbase, &lsize, <ype); |
| 348 | if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase) | 373 | if (!lsize || base > lbase + lsize - 1 || |
| 374 | base + size - 1 < lbase) | ||
| 349 | continue; | 375 | continue; |
| 350 | /* At this point we know there is some kind of overlap/enclosure */ | 376 | /* |
| 377 | * At this point we know there is some kind of | ||
| 378 | * overlap/enclosure | ||
| 379 | */ | ||
| 351 | if (base < lbase || base + size - 1 > lbase + lsize - 1) { | 380 | if (base < lbase || base + size - 1 > lbase + lsize - 1) { |
| 352 | if (base <= lbase && base + size - 1 >= lbase + lsize - 1) { | 381 | if (base <= lbase && |
| 382 | base + size - 1 >= lbase + lsize - 1) { | ||
| 353 | /* New region encloses an existing region */ | 383 | /* New region encloses an existing region */ |
| 354 | if (type == ltype) { | 384 | if (type == ltype) { |
| 355 | replace = replace == -1 ? i : -2; | 385 | replace = replace == -1 ? i : -2; |
| 356 | continue; | 386 | continue; |
| 357 | } | 387 | } else if (types_compatible(type, ltype)) |
| 358 | else if (types_compatible(type, ltype)) | ||
| 359 | continue; | 388 | continue; |
| 360 | } | 389 | } |
| 361 | printk(KERN_WARNING | 390 | pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing" |
| 362 | "mtrr: 0x%lx000,0x%lx000 overlaps existing" | 391 | " 0x%lx000,0x%lx000\n", base, size, lbase, |
| 363 | " 0x%lx000,0x%lx000\n", base, size, lbase, | 392 | lsize); |
| 364 | lsize); | ||
| 365 | goto out; | 393 | goto out; |
| 366 | } | 394 | } |
| 367 | /* New region is enclosed by an existing region */ | 395 | /* New region is enclosed by an existing region */ |
| 368 | if (ltype != type) { | 396 | if (ltype != type) { |
| 369 | if (types_compatible(type, ltype)) | 397 | if (types_compatible(type, ltype)) |
| 370 | continue; | 398 | continue; |
| 371 | printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", | 399 | pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", |
| 372 | base, size, mtrr_attrib_to_str(ltype), | 400 | base, size, mtrr_attrib_to_str(ltype), |
| 373 | mtrr_attrib_to_str(type)); | 401 | mtrr_attrib_to_str(type)); |
| 374 | goto out; | 402 | goto out; |
| 375 | } | 403 | } |
| 376 | if (increment) | 404 | if (increment) |
| @@ -378,7 +406,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
| 378 | error = i; | 406 | error = i; |
| 379 | goto out; | 407 | goto out; |
| 380 | } | 408 | } |
| 381 | /* Search for an empty MTRR */ | 409 | /* Search for an empty MTRR */ |
| 382 | i = mtrr_if->get_free_region(base, size, replace); | 410 | i = mtrr_if->get_free_region(base, size, replace); |
| 383 | if (i >= 0) { | 411 | if (i >= 0) { |
| 384 | set_mtrr(i, base, size, type); | 412 | set_mtrr(i, base, size, type); |
| @@ -393,8 +421,9 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
| 393 | mtrr_usage_table[replace] = 0; | 421 | mtrr_usage_table[replace] = 0; |
| 394 | } | 422 | } |
| 395 | } | 423 | } |
| 396 | } else | 424 | } else { |
| 397 | printk(KERN_INFO "mtrr: no more MTRRs available\n"); | 425 | pr_info("mtrr: no more MTRRs available\n"); |
| 426 | } | ||
| 398 | error = i; | 427 | error = i; |
| 399 | out: | 428 | out: |
| 400 | mutex_unlock(&mtrr_mutex); | 429 | mutex_unlock(&mtrr_mutex); |
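The search loop in mtrr_add_page() treats every register as the closed interval [lbase, lbase + lsize - 1] and skips it unless the requested [base, base + size - 1] touches it; enclosure is then distinguished from partial overlap by comparing endpoints. A small standalone restatement of those interval tests (the helper names are ours, not the kernel's):

```c
#include <stdbool.h>
#include <stdio.h>

/* Closed intervals [base, base+size-1], sizes in pages, size > 0. */
static bool overlaps(unsigned long base, unsigned long size,
		     unsigned long lbase, unsigned long lsize)
{
	/* Mirrors the continue-condition: disjoint if entirely above or below. */
	return !(base > lbase + lsize - 1 || base + size - 1 < lbase);
}

static bool new_encloses_old(unsigned long base, unsigned long size,
			     unsigned long lbase, unsigned long lsize)
{
	return base <= lbase && base + size - 1 >= lbase + lsize - 1;
}

static bool old_encloses_new(unsigned long base, unsigned long size,
			     unsigned long lbase, unsigned long lsize)
{
	return base >= lbase && base + size - 1 <= lbase + lsize - 1;
}

int main(void)
{
	/* [100, 149] vs [120, 179]: overlap, neither encloses the other */
	printf("%d %d %d\n", overlaps(100, 50, 120, 60),
	       new_encloses_old(100, 50, 120, 60),
	       old_encloses_new(100, 50, 120, 60));
	return 0;
}
```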
| @@ -405,10 +434,8 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
| 405 | static int mtrr_check(unsigned long base, unsigned long size) | 434 | static int mtrr_check(unsigned long base, unsigned long size) |
| 406 | { | 435 | { |
| 407 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { | 436 | if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { |
| 408 | printk(KERN_WARNING | 437 | pr_warning("mtrr: size and base must be multiples of 4 kiB\n"); |
| 409 | "mtrr: size and base must be multiples of 4 kiB\n"); | 438 | pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base); |
| 410 | printk(KERN_DEBUG | ||
| 411 | "mtrr: size: 0x%lx base: 0x%lx\n", size, base); | ||
| 412 | dump_stack(); | 439 | dump_stack(); |
| 413 | return -1; | 440 | return -1; |
| 414 | } | 441 | } |
| @@ -416,66 +443,64 @@ static int mtrr_check(unsigned long base, unsigned long size) | |||
| 416 | } | 443 | } |
| 417 | 444 | ||
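mtrr_check() rejects byte-granular requests unless both base and size are multiples of 4 kB, which is a simple mask test against PAGE_SIZE - 1. For illustration, assuming 4 kB pages:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed */

/* Returns 0 when both values are multiples of 4 kB, like mtrr_check(). */
static int check_alignment(unsigned long base, unsigned long size)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)))
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_alignment(0xd0000000UL, 0x8000000UL)); /*  0: ok  */
	printf("%d\n", check_alignment(0xd0000800UL, 0x8000000UL)); /* -1: bad */
	return 0;
}
```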
| 418 | /** | 445 | /** |
| 419 | * mtrr_add - Add a memory type region | 446 | * mtrr_add - Add a memory type region |
| 420 | * @base: Physical base address of region | 447 | * @base: Physical base address of region |
| 421 | * @size: Physical size of region | 448 | * @size: Physical size of region |
| 422 | * @type: Type of MTRR desired | 449 | * @type: Type of MTRR desired |
| 423 | * @increment: If this is true do usage counting on the region | 450 | * @increment: If this is true do usage counting on the region |
| 424 | * | 451 | * |
| 425 | * Memory type region registers control the caching on newer Intel and | 452 | * Memory type region registers control the caching on newer Intel and |
| 426 | * non Intel processors. This function allows drivers to request an | 453 | * non Intel processors. This function allows drivers to request an |
| 427 | * MTRR is added. The details and hardware specifics of each processor's | 454 | * MTRR is added. The details and hardware specifics of each processor's |
| 428 | * implementation are hidden from the caller, but nevertheless the | 455 | * implementation are hidden from the caller, but nevertheless the |
| 429 | * caller should expect to need to provide a power of two size on an | 456 | * caller should expect to need to provide a power of two size on an |
| 430 | * equivalent power of two boundary. | 457 | * equivalent power of two boundary. |
| 431 | * | 458 | * |
| 432 | * If the region cannot be added either because all regions are in use | 459 | * If the region cannot be added either because all regions are in use |
| 433 | * or the CPU cannot support it a negative value is returned. On success | 460 | * or the CPU cannot support it a negative value is returned. On success |
| 434 | * the register number for this entry is returned, but should be treated | 461 | * the register number for this entry is returned, but should be treated |
| 435 | * as a cookie only. | 462 | * as a cookie only. |
| 436 | * | 463 | * |
| 437 | * On a multiprocessor machine the changes are made to all processors. | 464 | * On a multiprocessor machine the changes are made to all processors. |
| 438 | * This is required on x86 by the Intel processors. | 465 | * This is required on x86 by the Intel processors. |
| 439 | * | 466 | * |
| 440 | * The available types are | 467 | * The available types are |
| 441 | * | 468 | * |
| 442 | * %MTRR_TYPE_UNCACHABLE - No caching | 469 | * %MTRR_TYPE_UNCACHABLE - No caching |
| 443 | * | 470 | * |
| 444 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever | 471 | * %MTRR_TYPE_WRBACK - Write data back in bursts whenever |
| 445 | * | 472 | * |
| 446 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts | 473 | * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts |
| 447 | * | 474 | * |
| 448 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes | 475 | * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes |
| 449 | * | 476 | * |
| 450 | * BUGS: Needs a quiet flag for the cases where drivers do not mind | 477 | * BUGS: Needs a quiet flag for the cases where drivers do not mind |
| 451 | * failures and do not wish system log messages to be sent. | 478 | * failures and do not wish system log messages to be sent. |
| 452 | */ | 479 | */ |
| 453 | 480 | int mtrr_add(unsigned long base, unsigned long size, unsigned int type, | |
| 454 | int | 481 | bool increment) |
| 455 | mtrr_add(unsigned long base, unsigned long size, unsigned int type, | ||
| 456 | bool increment) | ||
| 457 | { | 482 | { |
| 458 | if (mtrr_check(base, size)) | 483 | if (mtrr_check(base, size)) |
| 459 | return -EINVAL; | 484 | return -EINVAL; |
| 460 | return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, | 485 | return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, |
| 461 | increment); | 486 | increment); |
| 462 | } | 487 | } |
| 488 | EXPORT_SYMBOL(mtrr_add); | ||
| 463 | 489 | ||
| 464 | /** | 490 | /** |
| 465 | * mtrr_del_page - delete a memory type region | 491 | * mtrr_del_page - delete a memory type region |
| 466 | * @reg: Register returned by mtrr_add | 492 | * @reg: Register returned by mtrr_add |
| 467 | * @base: Physical base address | 493 | * @base: Physical base address |
| 468 | * @size: Size of region | 494 | * @size: Size of region |
| 469 | * | 495 | * |
| 470 | * If register is supplied then base and size are ignored. This is | 496 | * If register is supplied then base and size are ignored. This is |
| 471 | * how drivers should call it. | 497 | * how drivers should call it. |
| 472 | * | 498 | * |
| 473 | * Releases an MTRR region. If the usage count drops to zero the | 499 | * Releases an MTRR region. If the usage count drops to zero the |
| 474 | * register is freed and the region returns to default state. | 500 | * register is freed and the region returns to default state. |
| 475 | * On success the register is returned, on failure a negative error | 501 | * On success the register is returned, on failure a negative error |
| 476 | * code. | 502 | * code. |
| 477 | */ | 503 | */ |
| 478 | |||
| 479 | int mtrr_del_page(int reg, unsigned long base, unsigned long size) | 504 | int mtrr_del_page(int reg, unsigned long base, unsigned long size) |
| 480 | { | 505 | { |
| 481 | int i, max; | 506 | int i, max; |
| @@ -500,22 +525,22 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) | |||
| 500 | } | 525 | } |
| 501 | } | 526 | } |
| 502 | if (reg < 0) { | 527 | if (reg < 0) { |
| 503 | printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base, | 528 | pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n", |
| 504 | size); | 529 | base, size); |
| 505 | goto out; | 530 | goto out; |
| 506 | } | 531 | } |
| 507 | } | 532 | } |
| 508 | if (reg >= max) { | 533 | if (reg >= max) { |
| 509 | printk(KERN_WARNING "mtrr: register: %d too big\n", reg); | 534 | pr_warning("mtrr: register: %d too big\n", reg); |
| 510 | goto out; | 535 | goto out; |
| 511 | } | 536 | } |
| 512 | mtrr_if->get(reg, &lbase, &lsize, <ype); | 537 | mtrr_if->get(reg, &lbase, &lsize, <ype); |
| 513 | if (lsize < 1) { | 538 | if (lsize < 1) { |
| 514 | printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg); | 539 | pr_warning("mtrr: MTRR %d not used\n", reg); |
| 515 | goto out; | 540 | goto out; |
| 516 | } | 541 | } |
| 517 | if (mtrr_usage_table[reg] < 1) { | 542 | if (mtrr_usage_table[reg] < 1) { |
| 518 | printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg); | 543 | pr_warning("mtrr: reg: %d has count=0\n", reg); |
| 519 | goto out; | 544 | goto out; |
| 520 | } | 545 | } |
| 521 | if (--mtrr_usage_table[reg] < 1) | 546 | if (--mtrr_usage_table[reg] < 1) |
| @@ -526,33 +551,31 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) | |||
| 526 | put_online_cpus(); | 551 | put_online_cpus(); |
| 527 | return error; | 552 | return error; |
| 528 | } | 553 | } |
| 554 | |||
| 529 | /** | 555 | /** |
| 530 | * mtrr_del - delete a memory type region | 556 | * mtrr_del - delete a memory type region |
| 531 | * @reg: Register returned by mtrr_add | 557 | * @reg: Register returned by mtrr_add |
| 532 | * @base: Physical base address | 558 | * @base: Physical base address |
| 533 | * @size: Size of region | 559 | * @size: Size of region |
| 534 | * | 560 | * |
| 535 | * If register is supplied then base and size are ignored. This is | 561 | * If register is supplied then base and size are ignored. This is |
| 536 | * how drivers should call it. | 562 | * how drivers should call it. |
| 537 | * | 563 | * |
| 538 | * Releases an MTRR region. If the usage count drops to zero the | 564 | * Releases an MTRR region. If the usage count drops to zero the |
| 539 | * register is freed and the region returns to default state. | 565 | * register is freed and the region returns to default state. |
| 540 | * On success the register is returned, on failure a negative error | 566 | * On success the register is returned, on failure a negative error |
| 541 | * code. | 567 | * code. |
| 542 | */ | 568 | */ |
| 543 | 569 | int mtrr_del(int reg, unsigned long base, unsigned long size) | |
| 544 | int | ||
| 545 | mtrr_del(int reg, unsigned long base, unsigned long size) | ||
| 546 | { | 570 | { |
| 547 | if (mtrr_check(base, size)) | 571 | if (mtrr_check(base, size)) |
| 548 | return -EINVAL; | 572 | return -EINVAL; |
| 549 | return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); | 573 | return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); |
| 550 | } | 574 | } |
| 551 | |||
| 552 | EXPORT_SYMBOL(mtrr_add); | ||
| 553 | EXPORT_SYMBOL(mtrr_del); | 575 | EXPORT_SYMBOL(mtrr_del); |
| 554 | 576 | ||
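Together, mtrr_add() and mtrr_del() give drivers the usual bracket pattern: request write-combining for an aperture, keep the returned register number purely as a cookie, and hand it back on teardown. A hedged kernel-side sketch of a caller; the function names and the idea of a framebuffer aperture are invented for illustration:

```c
/* Kernel-side sketch; assumes <asm/mtrr.h> and an aperture already mapped. */
static int wc_cookie = -1;

static void fb_enable_wc(unsigned long aperture_base, unsigned long aperture_len)
{
	/* true: keep a usage count so nested add/del pairs balance */
	wc_cookie = mtrr_add(aperture_base, aperture_len,
			     MTRR_TYPE_WRCOMB, true);
	if (wc_cookie < 0)
		pr_info("fb: write-combining unavailable, using UC\n");
}

static void fb_disable_wc(unsigned long aperture_base, unsigned long aperture_len)
{
	if (wc_cookie >= 0)
		mtrr_del(wc_cookie, aperture_base, aperture_len);
}
```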
| 555 | /* HACK ALERT! | 577 | /* |
| 578 | * HACK ALERT! | ||
| 556 | * These should be called implicitly, but we can't yet until all the initcall | 579 | * These should be called implicitly, but we can't yet until all the initcall |
| 557 | * stuff is done... | 580 | * stuff is done... |
| 558 | */ | 581 | */ |
| @@ -576,29 +599,28 @@ struct mtrr_value { | |||
| 576 | 599 | ||
| 577 | static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES]; | 600 | static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES]; |
| 578 | 601 | ||
| 579 | static int mtrr_save(struct sys_device * sysdev, pm_message_t state) | 602 | static int mtrr_save(struct sys_device *sysdev, pm_message_t state) |
| 580 | { | 603 | { |
| 581 | int i; | 604 | int i; |
| 582 | 605 | ||
| 583 | for (i = 0; i < num_var_ranges; i++) { | 606 | for (i = 0; i < num_var_ranges; i++) { |
| 584 | mtrr_if->get(i, | 607 | mtrr_if->get(i, &mtrr_value[i].lbase, |
| 585 | &mtrr_value[i].lbase, | 608 | &mtrr_value[i].lsize, |
| 586 | &mtrr_value[i].lsize, | 609 | &mtrr_value[i].ltype); |
| 587 | &mtrr_value[i].ltype); | ||
| 588 | } | 610 | } |
| 589 | return 0; | 611 | return 0; |
| 590 | } | 612 | } |
| 591 | 613 | ||
| 592 | static int mtrr_restore(struct sys_device * sysdev) | 614 | static int mtrr_restore(struct sys_device *sysdev) |
| 593 | { | 615 | { |
| 594 | int i; | 616 | int i; |
| 595 | 617 | ||
| 596 | for (i = 0; i < num_var_ranges; i++) { | 618 | for (i = 0; i < num_var_ranges; i++) { |
| 597 | if (mtrr_value[i].lsize) | 619 | if (mtrr_value[i].lsize) { |
| 598 | set_mtrr(i, | 620 | set_mtrr(i, mtrr_value[i].lbase, |
| 599 | mtrr_value[i].lbase, | 621 | mtrr_value[i].lsize, |
| 600 | mtrr_value[i].lsize, | 622 | mtrr_value[i].ltype); |
| 601 | mtrr_value[i].ltype); | 623 | } |
| 602 | } | 624 | } |
| 603 | return 0; | 625 | return 0; |
| 604 | } | 626 | } |
| @@ -615,26 +637,29 @@ int __initdata changed_by_mtrr_cleanup; | |||
| 615 | /** | 637 | /** |
| 616 | * mtrr_bp_init - initialize mtrrs on the boot CPU | 638 | * mtrr_bp_init - initialize mtrrs on the boot CPU |
| 617 | * | 639 | * |
| 618 | * This needs to be called early; before any of the other CPUs are | 640 | * This needs to be called early; before any of the other CPUs are |
| 619 | * initialized (i.e. before smp_init()). | 641 | * initialized (i.e. before smp_init()). |
| 620 | * | 642 | * |
| 621 | */ | 643 | */ |
| 622 | void __init mtrr_bp_init(void) | 644 | void __init mtrr_bp_init(void) |
| 623 | { | 645 | { |
| 624 | u32 phys_addr; | 646 | u32 phys_addr; |
| 647 | |||
| 625 | init_ifs(); | 648 | init_ifs(); |
| 626 | 649 | ||
| 627 | phys_addr = 32; | 650 | phys_addr = 32; |
| 628 | 651 | ||
| 629 | if (cpu_has_mtrr) { | 652 | if (cpu_has_mtrr) { |
| 630 | mtrr_if = &generic_mtrr_ops; | 653 | mtrr_if = &generic_mtrr_ops; |
| 631 | size_or_mask = 0xff000000; /* 36 bits */ | 654 | size_or_mask = 0xff000000; /* 36 bits */ |
| 632 | size_and_mask = 0x00f00000; | 655 | size_and_mask = 0x00f00000; |
| 633 | phys_addr = 36; | 656 | phys_addr = 36; |
| 634 | 657 | ||
| 635 | /* This is an AMD specific MSR, but we assume(hope?) that | 658 | /* |
| 636 | Intel will implement it to when they extend the address | 659 | * This is an AMD specific MSR, but we assume(hope?) that |
| 637 | bus of the Xeon. */ | 660 | * Intel will implement it too when they extend the address |
| 661 | * bus of the Xeon. | ||
| 662 | */ | ||
| 638 | if (cpuid_eax(0x80000000) >= 0x80000008) { | 663 | if (cpuid_eax(0x80000000) >= 0x80000008) { |
| 639 | phys_addr = cpuid_eax(0x80000008) & 0xff; | 664 | phys_addr = cpuid_eax(0x80000008) & 0xff; |
| 640 | /* CPUID workaround for Intel 0F33/0F34 CPU */ | 665 | /* CPUID workaround for Intel 0F33/0F34 CPU */ |
| @@ -649,9 +674,11 @@ void __init mtrr_bp_init(void) | |||
| 649 | size_and_mask = ~size_or_mask & 0xfffff00000ULL; | 674 | size_and_mask = ~size_or_mask & 0xfffff00000ULL; |
| 650 | } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && | 675 | } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && |
| 651 | boot_cpu_data.x86 == 6) { | 676 | boot_cpu_data.x86 == 6) { |
| 652 | /* VIA C* family have Intel style MTRRs, but | 677 | /* |
| 653 | don't support PAE */ | 678 | * VIA C* family have Intel style MTRRs, |
| 654 | size_or_mask = 0xfff00000; /* 32 bits */ | 679 | * but don't support PAE |
| 680 | */ | ||
| 681 | size_or_mask = 0xfff00000; /* 32 bits */ | ||
| 655 | size_and_mask = 0; | 682 | size_and_mask = 0; |
| 656 | phys_addr = 32; | 683 | phys_addr = 32; |
| 657 | } | 684 | } |
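mtrr_bp_init() turns the physical-address width reported by `cpuid_eax(0x80000008) & 0xff` into page-granular validity masks: with phys_addr address bits and 4 kB pages, only the low phys_addr - 12 bits of a page number are meaningful, so a base or size with higher bits set "exceeds the MTRR width". A sketch of that relationship; the formula below is our restatement, and its low 32 bits match the 0xff000000 hard-coded above for the 36-bit case:

```c
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4 kB pages */

int main(void)
{
	unsigned int phys_addr = 36;	/* e.g. classic P6 with 36-bit PA */
	unsigned long long size_or_mask;

	/* Bits at or above (phys_addr - PAGE_SHIFT) are out of range. */
	size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);

	/* Low 32 bits come out as 0xff000000, matching the code above. */
	printf("size_or_mask (low 32 bits) = 0x%08llx\n",
	       size_or_mask & 0xffffffffULL);

	/* The check used by mtrr_add_page(): page number 1<<24 is 2^36 B. */
	unsigned long long base_pages = 1ULL << 24;
	printf("in range? %s\n",
	       (base_pages & size_or_mask) ? "no" : "yes");
	return 0;
}
```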
| @@ -694,30 +721,28 @@ void __init mtrr_bp_init(void) | |||
| 694 | changed_by_mtrr_cleanup = 1; | 721 | changed_by_mtrr_cleanup = 1; |
| 695 | mtrr_if->set_all(); | 722 | mtrr_if->set_all(); |
| 696 | } | 723 | } |
| 697 | |||
| 698 | } | 724 | } |
| 699 | } | 725 | } |
| 700 | } | 726 | } |
| 701 | 727 | ||
| 702 | void mtrr_ap_init(void) | 728 | void mtrr_ap_init(void) |
| 703 | { | 729 | { |
| 704 | unsigned long flags; | 730 | if (!use_intel() || mtrr_aps_delayed_init) |
| 705 | |||
| 706 | if (!mtrr_if || !use_intel()) | ||
| 707 | return; | 731 | return; |
| 708 | /* | 732 | /* |
| 709 | * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed, | 733 | * Ideally we should hold mtrr_mutex here to avoid mtrr entries |
| 710 | * but this routine will be called in cpu boot time, holding the lock | 734 | * being changed, but this routine is called at cpu boot time, |
| 711 | * breaks it. This routine is called in two cases: 1.very earily time | 735 | * holding the lock breaks it. |
| 712 | * of software resume, when there absolutely isn't mtrr entry changes; | 736 | * |
| 713 | * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to | 737 | * This routine is called in two cases: |
| 714 | * prevent mtrr entry changes | 738 | * |
| 739 | * 1. very early time of software resume, when there absolutely | ||
| 740 | * aren't any mtrr entry changes; | ||
| 741 | * | ||
| 742 | * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug | ||
| 743 | * lock to prevent mtrr entry changes | ||
| 715 | */ | 744 | */ |
| 716 | local_irq_save(flags); | 745 | set_mtrr(~0U, 0, 0, 0); |
| 717 | |||
| 718 | mtrr_if->set_all(); | ||
| 719 | |||
| 720 | local_irq_restore(flags); | ||
| 721 | } | 746 | } |
| 722 | 747 | ||
| 723 | /** | 748 | /** |
| @@ -728,23 +753,55 @@ void mtrr_save_state(void) | |||
| 728 | smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1); | 753 | smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1); |
| 729 | } | 754 | } |
| 730 | 755 | ||
| 756 | void set_mtrr_aps_delayed_init(void) | ||
| 757 | { | ||
| 758 | if (!use_intel()) | ||
| 759 | return; | ||
| 760 | |||
| 761 | mtrr_aps_delayed_init = true; | ||
| 762 | } | ||
| 763 | |||
| 764 | /* | ||
| 765 | * MTRR initialization for all AP's | ||
| 766 | */ | ||
| 767 | void mtrr_aps_init(void) | ||
| 768 | { | ||
| 769 | if (!use_intel()) | ||
| 770 | return; | ||
| 771 | |||
| 772 | set_mtrr(~0U, 0, 0, 0); | ||
| 773 | mtrr_aps_delayed_init = false; | ||
| 774 | } | ||
| 775 | |||
| 776 | void mtrr_bp_restore(void) | ||
| 777 | { | ||
| 778 | if (!use_intel()) | ||
| 779 | return; | ||
| 780 | |||
| 781 | mtrr_if->set_all(); | ||
| 782 | } | ||
| 783 | |||
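The three new hooks split AP MTRR programming away from CPU bring-up: set_mtrr_aps_delayed_init() arms a flag so each AP's mtrr_ap_init() returns early, mtrr_aps_init() later performs one set_mtrr(~0U, 0, 0, 0) rendezvous that programs all APs at once, and mtrr_bp_restore() replays the saved state on the boot CPU after resume. A sketch of the intended ordering; the caller names below are illustrative, and the real call sites live elsewhere in the tree:

```c
/* Boot/resume-side sketch of the delayed-init protocol. */
static void bring_up_secondary_cpus(void)
{
	set_mtrr_aps_delayed_init();	/* APs skip MTRR work in mtrr_ap_init() */

	/* ... online each AP; per-CPU mtrr_ap_init() calls return early ... */

	mtrr_aps_init();		/* one rendezvous programs all APs */
}

static void resume_boot_cpu(void)
{
	mtrr_bp_restore();		/* replay saved MTRR state on the BP */
}
```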
| 731 | static int __init mtrr_init_finialize(void) | 784 | static int __init mtrr_init_finialize(void) |
| 732 | { | 785 | { |
| 733 | if (!mtrr_if) | 786 | if (!mtrr_if) |
| 734 | return 0; | 787 | return 0; |
| 788 | |||
| 735 | if (use_intel()) { | 789 | if (use_intel()) { |
| 736 | if (!changed_by_mtrr_cleanup) | 790 | if (!changed_by_mtrr_cleanup) |
| 737 | mtrr_state_warn(); | 791 | mtrr_state_warn(); |
| 738 | } else { | 792 | return 0; |
| 739 | /* The CPUs haven't MTRR and seem to not support SMP. They have | ||
| 740 | * specific drivers, we use a tricky method to support | ||
| 741 | * suspend/resume for them. | ||
| 742 | * TBD: is there any system with such CPU which supports | ||
| 743 | * suspend/resume? if no, we should remove the code. | ||
| 744 | */ | ||
| 745 | sysdev_driver_register(&cpu_sysdev_class, | ||
| 746 | &mtrr_sysdev_driver); | ||
| 747 | } | 793 | } |
| 794 | |||
| 795 | /* | ||
| 796 | * These CPUs have no MTRRs and seem not to support SMP. They have | ||
| 797 | * specific drivers; we use a tricky method to support | ||
| 798 | * suspend/resume for them. | ||
| 799 | * | ||
| 800 | * TBD: is there any system with such a CPU which supports | ||
| 801 | * suspend/resume? If not, we should remove the code. | ||
| 802 | */ | ||
| 803 | sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver); | ||
| 804 | |||
| 748 | return 0; | 805 | return 0; |
| 749 | } | 806 | } |
| 750 | subsys_initcall(mtrr_init_finialize); | 807 | subsys_initcall(mtrr_init_finialize); |
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h index 7538b767f206..a501dee9a87a 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * local mtrr defines. | 2 | * local MTRR defines. |
| 3 | */ | 3 | */ |
| 4 | 4 | ||
| 5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
| @@ -14,13 +14,12 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; | |||
| 14 | struct mtrr_ops { | 14 | struct mtrr_ops { |
| 15 | u32 vendor; | 15 | u32 vendor; |
| 16 | u32 use_intel_if; | 16 | u32 use_intel_if; |
| 17 | // void (*init)(void); | ||
| 18 | void (*set)(unsigned int reg, unsigned long base, | 17 | void (*set)(unsigned int reg, unsigned long base, |
| 19 | unsigned long size, mtrr_type type); | 18 | unsigned long size, mtrr_type type); |
| 20 | void (*set_all)(void); | 19 | void (*set_all)(void); |
| 21 | 20 | ||
| 22 | void (*get)(unsigned int reg, unsigned long *base, | 21 | void (*get)(unsigned int reg, unsigned long *base, |
| 23 | unsigned long *size, mtrr_type * type); | 22 | unsigned long *size, mtrr_type *type); |
| 24 | int (*get_free_region)(unsigned long base, unsigned long size, | 23 | int (*get_free_region)(unsigned long base, unsigned long size, |
| 25 | int replace_reg); | 24 | int replace_reg); |
| 26 | int (*validate_add_page)(unsigned long base, unsigned long size, | 25 | int (*validate_add_page)(unsigned long base, unsigned long size, |
| @@ -39,11 +38,11 @@ extern int positive_have_wrcomb(void); | |||
| 39 | 38 | ||
| 40 | /* library functions for processor-specific routines */ | 39 | /* library functions for processor-specific routines */ |
| 41 | struct set_mtrr_context { | 40 | struct set_mtrr_context { |
| 42 | unsigned long flags; | 41 | unsigned long flags; |
| 43 | unsigned long cr4val; | 42 | unsigned long cr4val; |
| 44 | u32 deftype_lo; | 43 | u32 deftype_lo; |
| 45 | u32 deftype_hi; | 44 | u32 deftype_hi; |
| 46 | u32 ccr3; | 45 | u32 ccr3; |
| 47 | }; | 46 | }; |
| 48 | 47 | ||
| 49 | void set_mtrr_done(struct set_mtrr_context *ctxt); | 48 | void set_mtrr_done(struct set_mtrr_context *ctxt); |
| @@ -54,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index, | |||
| 54 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi); | 53 | u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi); |
| 55 | void get_mtrr_state(void); | 54 | void get_mtrr_state(void); |
| 56 | 55 | ||
| 57 | extern void set_mtrr_ops(struct mtrr_ops * ops); | 56 | extern void set_mtrr_ops(struct mtrr_ops *ops); |
| 58 | 57 | ||
| 59 | extern u64 size_or_mask, size_and_mask; | 58 | extern u64 size_or_mask, size_and_mask; |
| 60 | extern struct mtrr_ops * mtrr_if; | 59 | extern struct mtrr_ops *mtrr_if; |
| 61 | 60 | ||
| 62 | #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd) | 61 | #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd) |
| 63 | #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1) | 62 | #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1) |
diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c index 1f5fb1588d1f..dfc80b4e6b0d 100644 --- a/arch/x86/kernel/cpu/mtrr/state.c +++ b/arch/x86/kernel/cpu/mtrr/state.c | |||
| @@ -1,24 +1,25 @@ | |||
| 1 | #include <linux/mm.h> | ||
| 2 | #include <linux/init.h> | 1 | #include <linux/init.h> |
| 3 | #include <asm/io.h> | 2 | #include <linux/io.h> |
| 4 | #include <asm/mtrr.h> | 3 | #include <linux/mm.h> |
| 5 | #include <asm/msr.h> | 4 | |
| 6 | #include <asm/processor-cyrix.h> | 5 | #include <asm/processor-cyrix.h> |
| 7 | #include <asm/processor-flags.h> | 6 | #include <asm/processor-flags.h> |
| 8 | #include "mtrr.h" | 7 | #include <asm/mtrr.h> |
| 8 | #include <asm/msr.h> | ||
| 9 | 9 | ||
| 10 | #include "mtrr.h" | ||
| 10 | 11 | ||
| 11 | /* Put the processor into a state where MTRRs can be safely set */ | 12 | /* Put the processor into a state where MTRRs can be safely set */ |
| 12 | void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) | 13 | void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) |
| 13 | { | 14 | { |
| 14 | unsigned int cr0; | 15 | unsigned int cr0; |
| 15 | 16 | ||
| 16 | /* Disable interrupts locally */ | 17 | /* Disable interrupts locally */ |
| 17 | local_irq_save(ctxt->flags); | 18 | local_irq_save(ctxt->flags); |
| 18 | 19 | ||
| 19 | if (use_intel() || is_cpu(CYRIX)) { | 20 | if (use_intel() || is_cpu(CYRIX)) { |
| 20 | 21 | ||
| 21 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ | 22 | /* Save value of CR4 and clear Page Global Enable (bit 7) */ |
| 22 | if (cpu_has_pge) { | 23 | if (cpu_has_pge) { |
| 23 | ctxt->cr4val = read_cr4(); | 24 | ctxt->cr4val = read_cr4(); |
| 24 | write_cr4(ctxt->cr4val & ~X86_CR4_PGE); | 25 | write_cr4(ctxt->cr4val & ~X86_CR4_PGE); |
| @@ -33,50 +34,61 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) | |||
| 33 | write_cr0(cr0); | 34 | write_cr0(cr0); |
| 34 | wbinvd(); | 35 | wbinvd(); |
| 35 | 36 | ||
| 36 | if (use_intel()) | 37 | if (use_intel()) { |
| 37 | /* Save MTRR state */ | 38 | /* Save MTRR state */ |
| 38 | rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); | 39 | rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); |
| 39 | else | 40 | } else { |
| 40 | /* Cyrix ARRs - everything else were excluded at the top */ | 41 | /* |
| 42 | * Cyrix ARRs - | ||
| 43 | * everything else was excluded at the top | ||
| 44 | */ | ||
| 41 | ctxt->ccr3 = getCx86(CX86_CCR3); | 45 | ctxt->ccr3 = getCx86(CX86_CCR3); |
| 46 | } | ||
| 42 | } | 47 | } |
| 43 | } | 48 | } |
| 44 | 49 | ||
| 45 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) | 50 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) |
| 46 | { | 51 | { |
| 47 | if (use_intel()) | 52 | if (use_intel()) { |
| 48 | /* Disable MTRRs, and set the default type to uncached */ | 53 | /* Disable MTRRs, and set the default type to uncached */ |
| 49 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL, | 54 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL, |
| 50 | ctxt->deftype_hi); | 55 | ctxt->deftype_hi); |
| 51 | else if (is_cpu(CYRIX)) | 56 | } else { |
| 52 | /* Cyrix ARRs - everything else were excluded at the top */ | 57 | if (is_cpu(CYRIX)) { |
| 53 | setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10); | 58 | /* Cyrix ARRs - everything else was excluded at the top */ |
| 59 | setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10); | ||
| 60 | } | ||
| 61 | } | ||
| 54 | } | 62 | } |
| 55 | 63 | ||
| 56 | /* Restore the processor after a set_mtrr_prepare */ | 64 | /* Restore the processor after a set_mtrr_prepare */ |
| 57 | void set_mtrr_done(struct set_mtrr_context *ctxt) | 65 | void set_mtrr_done(struct set_mtrr_context *ctxt) |
| 58 | { | 66 | { |
| 59 | if (use_intel() || is_cpu(CYRIX)) { | 67 | if (use_intel() || is_cpu(CYRIX)) { |
| 60 | 68 | ||
| 61 | /* Flush caches and TLBs */ | 69 | /* Flush caches and TLBs */ |
| 62 | wbinvd(); | 70 | wbinvd(); |
| 63 | 71 | ||
| 64 | /* Restore MTRRdefType */ | 72 | /* Restore MTRRdefType */ |
| 65 | if (use_intel()) | 73 | if (use_intel()) { |
| 66 | /* Intel (P6) standard MTRRs */ | 74 | /* Intel (P6) standard MTRRs */ |
| 67 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); | 75 | mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo, |
| 68 | else | 76 | ctxt->deftype_hi); |
| 69 | /* Cyrix ARRs - everything else was excluded at the top */ | 77 | } else { |
| 78 | /* | ||
| 79 | * Cyrix ARRs - | ||
| 80 | * everything else was excluded at the top | ||
| 81 | */ | ||
| 70 | setCx86(CX86_CCR3, ctxt->ccr3); | 82 | setCx86(CX86_CCR3, ctxt->ccr3); |
| 83 | } | ||
| 71 | 84 | ||
| 72 | /* Enable caches */ | 85 | /* Enable caches */ |
| 73 | write_cr0(read_cr0() & 0xbfffffff); | 86 | write_cr0(read_cr0() & 0xbfffffff); |
| 74 | 87 | ||
| 75 | /* Restore value of CR4 */ | 88 | /* Restore value of CR4 */ |
| 76 | if (cpu_has_pge) | 89 | if (cpu_has_pge) |
| 77 | write_cr4(ctxt->cr4val); | 90 | write_cr4(ctxt->cr4val); |
| 78 | } | 91 | } |
| 79 | /* Re-enable interrupts locally (if enabled previously) */ | 92 | /* Re-enable interrupts locally (if enabled previously) */ |
| 80 | local_irq_restore(ctxt->flags); | 93 | local_irq_restore(ctxt->flags); |
| 81 | } | 94 | } |
| 82 | |||
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index e60ed740d2b3..392bea43b890 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
| @@ -68,16 +68,16 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) | |||
| 68 | /* returns the bit offset of the performance counter register */ | 68 | /* returns the bit offset of the performance counter register */ |
| 69 | switch (boot_cpu_data.x86_vendor) { | 69 | switch (boot_cpu_data.x86_vendor) { |
| 70 | case X86_VENDOR_AMD: | 70 | case X86_VENDOR_AMD: |
| 71 | return (msr - MSR_K7_PERFCTR0); | 71 | return msr - MSR_K7_PERFCTR0; |
| 72 | case X86_VENDOR_INTEL: | 72 | case X86_VENDOR_INTEL: |
| 73 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | 73 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
| 74 | return (msr - MSR_ARCH_PERFMON_PERFCTR0); | 74 | return msr - MSR_ARCH_PERFMON_PERFCTR0; |
| 75 | 75 | ||
| 76 | switch (boot_cpu_data.x86) { | 76 | switch (boot_cpu_data.x86) { |
| 77 | case 6: | 77 | case 6: |
| 78 | return (msr - MSR_P6_PERFCTR0); | 78 | return msr - MSR_P6_PERFCTR0; |
| 79 | case 15: | 79 | case 15: |
| 80 | return (msr - MSR_P4_BPU_PERFCTR0); | 80 | return msr - MSR_P4_BPU_PERFCTR0; |
| 81 | } | 81 | } |
| 82 | } | 82 | } |
| 83 | return 0; | 83 | return 0; |
| @@ -92,16 +92,16 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr) | |||
| 92 | /* returns the bit offset of the event selection register */ | 92 | /* returns the bit offset of the event selection register */ |
| 93 | switch (boot_cpu_data.x86_vendor) { | 93 | switch (boot_cpu_data.x86_vendor) { |
| 94 | case X86_VENDOR_AMD: | 94 | case X86_VENDOR_AMD: |
| 95 | return (msr - MSR_K7_EVNTSEL0); | 95 | return msr - MSR_K7_EVNTSEL0; |
| 96 | case X86_VENDOR_INTEL: | 96 | case X86_VENDOR_INTEL: |
| 97 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | 97 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) |
| 98 | return (msr - MSR_ARCH_PERFMON_EVENTSEL0); | 98 | return msr - MSR_ARCH_PERFMON_EVENTSEL0; |
| 99 | 99 | ||
| 100 | switch (boot_cpu_data.x86) { | 100 | switch (boot_cpu_data.x86) { |
| 101 | case 6: | 101 | case 6: |
| 102 | return (msr - MSR_P6_EVNTSEL0); | 102 | return msr - MSR_P6_EVNTSEL0; |
| 103 | case 15: | 103 | case 15: |
| 104 | return (msr - MSR_P4_BSU_ESCR0); | 104 | return msr - MSR_P4_BSU_ESCR0; |
| 105 | } | 105 | } |
| 106 | } | 106 | } |
| 107 | return 0; | 107 | return 0; |
| @@ -113,7 +113,7 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter) | |||
| 113 | { | 113 | { |
| 114 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | 114 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); |
| 115 | 115 | ||
| 116 | return (!test_bit(counter, perfctr_nmi_owner)); | 116 | return !test_bit(counter, perfctr_nmi_owner); |
| 117 | } | 117 | } |
| 118 | 118 | ||
| 119 | /* checks an msr for availability */ | 119 | /* checks an msr for availability */ |
| @@ -124,7 +124,7 @@ int avail_to_resrv_perfctr_nmi(unsigned int msr) | |||
| 124 | counter = nmi_perfctr_msr_to_bit(msr); | 124 | counter = nmi_perfctr_msr_to_bit(msr); |
| 125 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); | 125 | BUG_ON(counter > NMI_MAX_COUNTER_BITS); |
| 126 | 126 | ||
| 127 | return (!test_bit(counter, perfctr_nmi_owner)); | 127 | return !test_bit(counter, perfctr_nmi_owner); |
| 128 | } | 128 | } |
| 129 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); | 129 | EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); |
| 130 | 130 | ||
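nmi_perfctr_msr_to_bit() flattens each vendor's counter MSR range onto small bit offsets so that one shared bitmap can arbitrate ownership, and avail_to_resrv_perfctr_nmi() is then a plain test_bit(). A non-atomic userspace sketch of the same reserve-by-bitmap idea (the kernel uses atomic bitops; the MSR constant below is illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

#define MSR_PERFCTR0	0x00c1	/* illustrative base, K7-style */

static unsigned long owner_bitmap;	/* bit i set => counter i taken */

static unsigned int msr_to_bit(unsigned int msr)
{
	return msr - MSR_PERFCTR0;	/* contiguous MSRs -> bit offsets */
}

static bool reserve_counter(unsigned int msr)
{
	unsigned int bit = msr_to_bit(msr);

	if (owner_bitmap & (1UL << bit))
		return false;		/* already owned */
	owner_bitmap |= 1UL << bit;
	return true;
}

int main(void)
{
	printf("%d\n", reserve_counter(MSR_PERFCTR0));	/* 1: was free  */
	printf("%d\n", reserve_counter(MSR_PERFCTR0));	/* 0: now taken */
	return 0;
}
```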
| @@ -237,7 +237,7 @@ static unsigned int adjust_for_32bit_ctr(unsigned int hz) | |||
| 237 | */ | 237 | */ |
| 238 | counter_val = (u64)cpu_khz * 1000; | 238 | counter_val = (u64)cpu_khz * 1000; |
| 239 | do_div(counter_val, retval); | 239 | do_div(counter_val, retval); |
| 240 | if (counter_val > 0x7fffffffULL) { | 240 | if (counter_val > 0x7fffffffULL) { |
| 241 | u64 count = (u64)cpu_khz * 1000; | 241 | u64 count = (u64)cpu_khz * 1000; |
| 242 | do_div(count, 0x7fffffffUL); | 242 | do_div(count, 0x7fffffffUL); |
| 243 | retval = count + 1; | 243 | retval = count + 1; |
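adjust_for_32bit_ctr() exists because P6-class counters are written 32 bits at a time, so the per-period count cpu_khz * 1000 / nmi_hz has to fit in 31 bits; when it does not, the NMI rate is raised to the smallest value that fits. Worked numbers for a hypothetical 3 GHz part:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long cpu_khz = 3000000ULL;	/* assumed 3 GHz CPU */
	unsigned long long cycles_per_sec = cpu_khz * 1000;	/* 3e9 */
	unsigned int nmi_hz = 1;

	/* 3e9 > 0x7fffffff (~2.147e9), so one NMI per second won't fit. */
	if (cycles_per_sec / nmi_hz > 0x7fffffffULL)
		nmi_hz = (unsigned int)(cycles_per_sec / 0x7fffffffULL) + 1;

	/* Prints 2: two NMIs per second keeps the count inside 31 bits. */
	printf("nmi_hz = %u (count = %llu)\n", nmi_hz, cycles_per_sec / nmi_hz);
	return 0;
}
```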
| @@ -251,7 +251,7 @@ static void write_watchdog_counter(unsigned int perfctr_msr, | |||
| 251 | u64 count = (u64)cpu_khz * 1000; | 251 | u64 count = (u64)cpu_khz * 1000; |
| 252 | 252 | ||
| 253 | do_div(count, nmi_hz); | 253 | do_div(count, nmi_hz); |
| 254 | if(descr) | 254 | if (descr) |
| 255 | pr_debug("setting %s to -0x%08Lx\n", descr, count); | 255 | pr_debug("setting %s to -0x%08Lx\n", descr, count); |
| 256 | wrmsrl(perfctr_msr, 0 - count); | 256 | wrmsrl(perfctr_msr, 0 - count); |
| 257 | } | 257 | } |
| @@ -262,7 +262,7 @@ static void write_watchdog_counter32(unsigned int perfctr_msr, | |||
| 262 | u64 count = (u64)cpu_khz * 1000; | 262 | u64 count = (u64)cpu_khz * 1000; |
| 263 | 263 | ||
| 264 | do_div(count, nmi_hz); | 264 | do_div(count, nmi_hz); |
| 265 | if(descr) | 265 | if (descr) |
| 266 | pr_debug("setting %s to -0x%08Lx\n", descr, count); | 266 | pr_debug("setting %s to -0x%08Lx\n", descr, count); |
| 267 | wrmsr(perfctr_msr, (u32)(-count), 0); | 267 | wrmsr(perfctr_msr, (u32)(-count), 0); |
| 268 | } | 268 | } |
| @@ -296,7 +296,7 @@ static int setup_k7_watchdog(unsigned nmi_hz) | |||
| 296 | 296 | ||
| 297 | /* setup the timer */ | 297 | /* setup the timer */ |
| 298 | wrmsr(evntsel_msr, evntsel, 0); | 298 | wrmsr(evntsel_msr, evntsel, 0); |
| 299 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); | 299 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz); |
| 300 | 300 | ||
| 301 | /* initialize the wd struct before enabling */ | 301 | /* initialize the wd struct before enabling */ |
| 302 | wd->perfctr_msr = perfctr_msr; | 302 | wd->perfctr_msr = perfctr_msr; |
| @@ -387,7 +387,7 @@ static int setup_p6_watchdog(unsigned nmi_hz) | |||
| 387 | /* setup the timer */ | 387 | /* setup the timer */ |
| 388 | wrmsr(evntsel_msr, evntsel, 0); | 388 | wrmsr(evntsel_msr, evntsel, 0); |
| 389 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | 389 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); |
| 390 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); | 390 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz); |
| 391 | 391 | ||
| 392 | /* initialize the wd struct before enabling */ | 392 | /* initialize the wd struct before enabling */ |
| 393 | wd->perfctr_msr = perfctr_msr; | 393 | wd->perfctr_msr = perfctr_msr; |
| @@ -415,7 +415,7 @@ static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | |||
| 415 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 415 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
| 416 | 416 | ||
| 417 | /* P6/ARCH_PERFMON has 32 bit counter write */ | 417 | /* P6/ARCH_PERFMON has 32 bit counter write */ |
| 418 | write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz); | 418 | write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz); |
| 419 | } | 419 | } |
| 420 | 420 | ||
| 421 | static const struct wd_ops p6_wd_ops = { | 421 | static const struct wd_ops p6_wd_ops = { |
| @@ -490,9 +490,9 @@ static int setup_p4_watchdog(unsigned nmi_hz) | |||
| 490 | if (smp_num_siblings == 2) { | 490 | if (smp_num_siblings == 2) { |
| 491 | unsigned int ebx, apicid; | 491 | unsigned int ebx, apicid; |
| 492 | 492 | ||
| 493 | ebx = cpuid_ebx(1); | 493 | ebx = cpuid_ebx(1); |
| 494 | apicid = (ebx >> 24) & 0xff; | 494 | apicid = (ebx >> 24) & 0xff; |
| 495 | ht_num = apicid & 1; | 495 | ht_num = apicid & 1; |
| 496 | } else | 496 | } else |
| 497 | #endif | 497 | #endif |
| 498 | ht_num = 0; | 498 | ht_num = 0; |
| @@ -544,7 +544,7 @@ static int setup_p4_watchdog(unsigned nmi_hz) | |||
| 544 | } | 544 | } |
| 545 | 545 | ||
| 546 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) | 546 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) |
| 547 | | P4_ESCR_OS | 547 | | P4_ESCR_OS |
| 548 | | P4_ESCR_USR; | 548 | | P4_ESCR_USR; |
| 549 | 549 | ||
| 550 | cccr_val |= P4_CCCR_THRESHOLD(15) | 550 | cccr_val |= P4_CCCR_THRESHOLD(15) |
| @@ -612,7 +612,7 @@ static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | |||
| 612 | { | 612 | { |
| 613 | unsigned dummy; | 613 | unsigned dummy; |
| 614 | /* | 614 | /* |
| 615 | * P4 quirks: | 615 | * P4 quirks: |
| 616 | * - An overflown perfctr will assert its interrupt | 616 | * - An overflown perfctr will assert its interrupt |
| 617 | * until the OVF flag in its CCCR is cleared. | 617 | * until the OVF flag in its CCCR is cleared. |
| 618 | * - LVTPC is masked on interrupt and must be | 618 | * - LVTPC is masked on interrupt and must be |
| @@ -662,7 +662,8 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz) | |||
| 662 | * NOTE: Corresponding bit = 0 in ebx indicates event present. | 662 | * NOTE: Corresponding bit = 0 in ebx indicates event present. |
| 663 | */ | 663 | */ |
| 664 | cpuid(10, &(eax.full), &ebx, &unused, &unused); | 664 | cpuid(10, &(eax.full), &ebx, &unused, &unused); |
| 665 | if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || | 665 | if ((eax.split.mask_length < |
| 666 | (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || | ||
| 666 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) | 667 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) |
| 667 | return 0; | 668 | return 0; |
| 668 | 669 | ||
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index d5e30397246b..1e904346bbf4 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
| @@ -128,7 +128,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
| 128 | if (i < ARRAY_SIZE(x86_power_flags) && | 128 | if (i < ARRAY_SIZE(x86_power_flags) && |
| 129 | x86_power_flags[i]) | 129 | x86_power_flags[i]) |
| 130 | seq_printf(m, "%s%s", | 130 | seq_printf(m, "%s%s", |
| 131 | x86_power_flags[i][0]?" ":"", | 131 | x86_power_flags[i][0] ? " " : "", |
| 132 | x86_power_flags[i]); | 132 | x86_power_flags[i]); |
| 133 | else | 133 | else |
| 134 | seq_printf(m, " [%d]", i); | 134 | seq_printf(m, " [%d]", i); |
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 284c399e3234..bc24f514ec93 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c | |||
| @@ -49,17 +49,17 @@ static inline int __vmware_platform(void) | |||
| 49 | 49 | ||
| 50 | static unsigned long __vmware_get_tsc_khz(void) | 50 | static unsigned long __vmware_get_tsc_khz(void) |
| 51 | { | 51 | { |
| 52 | uint64_t tsc_hz; | 52 | uint64_t tsc_hz; |
| 53 | uint32_t eax, ebx, ecx, edx; | 53 | uint32_t eax, ebx, ecx, edx; |
| 54 | 54 | ||
| 55 | VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); | 55 | VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); |
| 56 | 56 | ||
| 57 | if (ebx == UINT_MAX) | 57 | if (ebx == UINT_MAX) |
| 58 | return 0; | 58 | return 0; |
| 59 | tsc_hz = eax | (((uint64_t)ebx) << 32); | 59 | tsc_hz = eax | (((uint64_t)ebx) << 32); |
| 60 | do_div(tsc_hz, 1000); | 60 | do_div(tsc_hz, 1000); |
| 61 | BUG_ON(tsc_hz >> 32); | 61 | BUG_ON(tsc_hz >> 32); |
| 62 | return tsc_hz; | 62 | return tsc_hz; |
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | /* | 65 | /* |
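For reference, the GETHZ path above reduces to a few lines; this sketch restates the hunk's arithmetic with annotations (eax carries the low 32 bits of the TSC frequency in Hz, ebx the high 32 bits, and ebx == UINT_MAX means the host does not report a rate):

    uint64_t tsc_hz;

    if (ebx == UINT_MAX)
            return 0;                       /* frequency unavailable */
    tsc_hz = eax | (((uint64_t)ebx) << 32); /* reassemble 64-bit Hz value */
    do_div(tsc_hz, 1000);                   /* Hz -> kHz */
    BUG_ON(tsc_hz >> 32);                   /* kHz must fit in 32 bits */
    return tsc_hz;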
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index 48bfe1386038..ef42a038f1a6 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c | |||
| @@ -509,15 +509,15 @@ enum bts_field { | |||
| 509 | bts_escape = ((unsigned long)-1 & ~bts_qual_mask) | 509 | bts_escape = ((unsigned long)-1 & ~bts_qual_mask) |
| 510 | }; | 510 | }; |
| 511 | 511 | ||
| 512 | static inline unsigned long bts_get(const char *base, enum bts_field field) | 512 | static inline unsigned long bts_get(const char *base, unsigned long field) |
| 513 | { | 513 | { |
| 514 | base += (ds_cfg.sizeof_ptr_field * field); | 514 | base += (ds_cfg.sizeof_ptr_field * field); |
| 515 | return *(unsigned long *)base; | 515 | return *(unsigned long *)base; |
| 516 | } | 516 | } |
| 517 | 517 | ||
| 518 | static inline void bts_set(char *base, enum bts_field field, unsigned long val) | 518 | static inline void bts_set(char *base, unsigned long field, unsigned long val) |
| 519 | { | 519 | { |
| 520 | base += (ds_cfg.sizeof_ptr_field * field);; | 520 | base += (ds_cfg.sizeof_ptr_field * field); |
| 521 | (*(unsigned long *)base) = val; | 521 | (*(unsigned long *)base) = val; |
| 522 | } | 522 | } |
| 523 | 523 | ||
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index c8405718a4c3..2d8a371d4339 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | #include <linux/bug.h> | 15 | #include <linux/bug.h> |
| 16 | #include <linux/nmi.h> | 16 | #include <linux/nmi.h> |
| 17 | #include <linux/sysfs.h> | 17 | #include <linux/sysfs.h> |
| 18 | #include <linux/ftrace.h> | ||
| 19 | 18 | ||
| 20 | #include <asm/stacktrace.h> | 19 | #include <asm/stacktrace.h> |
| 21 | 20 | ||
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 3b09634a5153..7d35d0fe2329 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
| @@ -218,7 +218,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) | |||
| 218 | void fixup_irqs(void) | 218 | void fixup_irqs(void) |
| 219 | { | 219 | { |
| 220 | unsigned int irq; | 220 | unsigned int irq; |
| 221 | static int warned; | ||
| 222 | struct irq_desc *desc; | 221 | struct irq_desc *desc; |
| 223 | 222 | ||
| 224 | for_each_irq_desc(irq, desc) { | 223 | for_each_irq_desc(irq, desc) { |
| @@ -236,8 +235,8 @@ void fixup_irqs(void) | |||
| 236 | } | 235 | } |
| 237 | if (desc->chip->set_affinity) | 236 | if (desc->chip->set_affinity) |
| 238 | desc->chip->set_affinity(irq, affinity); | 237 | desc->chip->set_affinity(irq, affinity); |
| 239 | else if (desc->action && !(warned++)) | 238 | else if (desc->action) |
| 240 | printk("Cannot set affinity for irq %i\n", irq); | 239 | printk_once("Cannot set affinity for irq %i\n", irq); |
| 241 | } | 240 | } |
| 242 | 241 | ||
| 243 | #if 0 | 242 | #if 0 |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 63f32d220ef2..eb1f1e6e52b0 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
| @@ -711,6 +711,21 @@ void __init setup_arch(char **cmdline_p) | |||
| 711 | printk(KERN_INFO "Command line: %s\n", boot_command_line); | 711 | printk(KERN_INFO "Command line: %s\n", boot_command_line); |
| 712 | #endif | 712 | #endif |
| 713 | 713 | ||
| 714 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); | ||
| 715 | *cmdline_p = command_line; | ||
| 716 | |||
| 717 | #ifdef CONFIG_X86_64 | ||
| 718 | /* | ||
| 719 | * Must call this twice: once just to detect whether hardware doesn't | ||
| 720 | * support NX (so that the early EHCI debug console setup can safely | ||
| 721 | * call set_fixmap()), and then again after parsing early parameters to | ||
| 722 | * honor the respective command line option. | ||
| 723 | */ | ||
| 724 | check_efer(); | ||
| 725 | #endif | ||
| 726 | |||
| 727 | parse_early_param(); | ||
| 728 | |||
| 714 | /* VMI may relocate the fixmap; do this before touching ioremap area */ | 729 | /* VMI may relocate the fixmap; do this before touching ioremap area */ |
| 715 | vmi_init(); | 730 | vmi_init(); |
| 716 | 731 | ||
| @@ -793,11 +808,6 @@ void __init setup_arch(char **cmdline_p) | |||
| 793 | #endif | 808 | #endif |
| 794 | #endif | 809 | #endif |
| 795 | 810 | ||
| 796 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); | ||
| 797 | *cmdline_p = command_line; | ||
| 798 | |||
| 799 | parse_early_param(); | ||
| 800 | |||
| 801 | #ifdef CONFIG_X86_64 | 811 | #ifdef CONFIG_X86_64 |
| 802 | check_efer(); | 812 | check_efer(); |
| 803 | #endif | 813 | #endif |
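The net effect of this setup.c hunk is an ordering change; a condensed, editorial view of the resulting setup_arch() flow (surrounding calls elided):

    strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
    *cmdline_p = command_line;

    #ifdef CONFIG_X86_64
    check_efer();           /* 1st call: detect missing NX support so the
                             * early EHCI debug console can use set_fixmap() */
    #endif

    parse_early_param();    /* now runs before vmi_init() and fixmap use */

    vmi_init();             /* may relocate the fixmap */
    /* ... */
    #ifdef CONFIG_X86_64
    check_efer();           /* 2nd call: honor the command line option */
    #endif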
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2fecda69ee64..d720b7e0cf3d 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
| @@ -1116,9 +1116,22 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
| 1116 | 1116 | ||
| 1117 | if (is_uv_system()) | 1117 | if (is_uv_system()) |
| 1118 | uv_system_init(); | 1118 | uv_system_init(); |
| 1119 | |||
| 1120 | set_mtrr_aps_delayed_init(); | ||
| 1119 | out: | 1121 | out: |
| 1120 | preempt_enable(); | 1122 | preempt_enable(); |
| 1121 | } | 1123 | } |
| 1124 | |||
| 1125 | void arch_enable_nonboot_cpus_begin(void) | ||
| 1126 | { | ||
| 1127 | set_mtrr_aps_delayed_init(); | ||
| 1128 | } | ||
| 1129 | |||
| 1130 | void arch_enable_nonboot_cpus_end(void) | ||
| 1131 | { | ||
| 1132 | mtrr_aps_init(); | ||
| 1133 | } | ||
| 1134 | |||
| 1122 | /* | 1135 | /* |
| 1123 | * Early setup to make printk work. | 1136 | * Early setup to make printk work. |
| 1124 | */ | 1137 | */ |
| @@ -1140,6 +1153,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus) | |||
| 1140 | setup_ioapic_dest(); | 1153 | setup_ioapic_dest(); |
| 1141 | #endif | 1154 | #endif |
| 1142 | check_nmi_watchdog(); | 1155 | check_nmi_watchdog(); |
| 1156 | mtrr_aps_init(); | ||
| 1143 | } | 1157 | } |
| 1144 | 1158 | ||
| 1145 | static int __initdata setup_possible_cpus = -1; | 1159 | static int __initdata setup_possible_cpus = -1; |
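Taken together with the kernel/cpu.c and power/cpu.c hunks below, these changes bracket bulk CPU bring-up; a comment-only sketch of the intended flow (names as in the diff):

    /* Boot:
     *   native_smp_prepare_cpus()
     *       set_mtrr_aps_delayed_init();  // APs defer per-CPU MTRR init
     *   ... all APs are brought up ...
     *   native_smp_cpus_done()
     *       mtrr_aps_init();              // one rendezvous syncs every AP
     *
     * Resume:
     *   arch_enable_nonboot_cpus_begin() -> set_mtrr_aps_delayed_init()
     *   ... frozen CPUs are brought back ...
     *   arch_enable_nonboot_cpus_end()   -> mtrr_aps_init()
     */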
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 5204332f475d..7e4b1f5dec8e 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
| @@ -76,7 +76,7 @@ char ignore_fpu_irq; | |||
| 76 | * F0 0F bug workaround.. We have a special link segment | 76 | * F0 0F bug workaround.. We have a special link segment |
| 77 | * for this. | 77 | * for this. |
| 78 | */ | 78 | */ |
| 79 | gate_desc idt_table[256] | 79 | gate_desc idt_table[NR_VECTORS] |
| 80 | __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; | 80 | __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; |
| 81 | #endif | 81 | #endif |
| 82 | 82 | ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3d4529011828..633ccc7400a4 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -2297,12 +2297,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr, | |||
| 2297 | unsigned int bytes, | 2297 | unsigned int bytes, |
| 2298 | struct kvm_vcpu *vcpu) | 2298 | struct kvm_vcpu *vcpu) |
| 2299 | { | 2299 | { |
| 2300 | static int reported; | 2300 | printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); |
| 2301 | |||
| 2302 | if (!reported) { | ||
| 2303 | reported = 1; | ||
| 2304 | printk(KERN_WARNING "kvm: emulating exchange as write\n"); | ||
| 2305 | } | ||
| 2306 | #ifndef CONFIG_X86_64 | 2301 | #ifndef CONFIG_X86_64 |
| 2307 | /* guests cmpxchg8b have to be emulated atomically */ | 2302 | /* guests cmpxchg8b have to be emulated atomically */ |
| 2308 | if (bytes == 8) { | 2303 | if (bytes == 8) { |
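This hunk and the irq_32.c one above make the same substitution; side by side, the hand-rolled once-only pattern and its replacement:

    /* Before: a static flag per message, open-coded at the call site */
    static int reported;

    if (!reported) {
            reported = 1;
            printk(KERN_WARNING "kvm: emulating exchange as write\n");
    }

    /* After: printk_once() keeps its own static once-flag per call site */
    printk_once(KERN_WARNING "kvm: emulating exchange as write\n");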
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index fe6f84ca121e..84e236ce76ba 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c | |||
| @@ -21,7 +21,7 @@ | |||
| 21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| 22 | #include <linux/highmem.h> | 22 | #include <linux/highmem.h> |
| 23 | 23 | ||
| 24 | int is_io_mapping_possible(resource_size_t base, unsigned long size) | 24 | static int is_io_mapping_possible(resource_size_t base, unsigned long size) |
| 25 | { | 25 | { |
| 26 | #if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT) | 26 | #if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT) |
| 27 | /* There is no way to map greater than 1 << 32 address without PAE */ | 27 | /* There is no way to map greater than 1 << 32 address without PAE */ |
| @@ -30,7 +30,30 @@ int is_io_mapping_possible(resource_size_t base, unsigned long size) | |||
| 30 | #endif | 30 | #endif |
| 31 | return 1; | 31 | return 1; |
| 32 | } | 32 | } |
| 33 | EXPORT_SYMBOL_GPL(is_io_mapping_possible); | 33 | |
| 34 | int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot) | ||
| 35 | { | ||
| 36 | unsigned long flag = _PAGE_CACHE_WC; | ||
| 37 | int ret; | ||
| 38 | |||
| 39 | if (!is_io_mapping_possible(base, size)) | ||
| 40 | return -EINVAL; | ||
| 41 | |||
| 42 | ret = io_reserve_memtype(base, base + size, &flag); | ||
| 43 | if (ret) | ||
| 44 | return ret; | ||
| 45 | |||
| 46 | *prot = __pgprot(__PAGE_KERNEL | flag); | ||
| 47 | return 0; | ||
| 48 | } | ||
| 49 | EXPORT_SYMBOL_GPL(iomap_create_wc); | ||
| 50 | |||
| 51 | void | ||
| 52 | iomap_free(resource_size_t base, unsigned long size) | ||
| 53 | { | ||
| 54 | io_free_memtype(base, base + size); | ||
| 55 | } | ||
| 56 | EXPORT_SYMBOL_GPL(iomap_free); | ||
| 34 | 57 | ||
| 35 | void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) | 58 | void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) |
| 36 | { | 59 | { |
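A brief usage sketch of the two helpers introduced here, assuming a caller that already knows base and size (the io-mapping.h hunk later in this diff is the real consumer):

    pgprot_t prot;

    if (iomap_create_wc(base, size, &prot))
            return NULL;            /* range rejected or memtype busy */

    /* ... map pfns with `prot` via kmap_atomic_prot_pfn() ... */

    iomap_free(base, size);         /* releases the reserved memtype */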
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 8a450930834f..2a76eba9da21 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
| @@ -228,24 +228,14 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, | |||
| 228 | retval = reserve_memtype(phys_addr, (u64)phys_addr + size, | 228 | retval = reserve_memtype(phys_addr, (u64)phys_addr + size, |
| 229 | prot_val, &new_prot_val); | 229 | prot_val, &new_prot_val); |
| 230 | if (retval) { | 230 | if (retval) { |
| 231 | pr_debug("Warning: reserve_memtype returned %d\n", retval); | 231 | printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval); |
| 232 | return NULL; | 232 | return NULL; |
| 233 | } | 233 | } |
| 234 | 234 | ||
| 235 | if (prot_val != new_prot_val) { | 235 | if (prot_val != new_prot_val) { |
| 236 | /* | 236 | if (!is_new_memtype_allowed(phys_addr, size, |
| 237 | * Do not fallback to certain memory types with certain | 237 | prot_val, new_prot_val)) { |
| 238 | * requested type: | 238 | printk(KERN_ERR |
| 239 | * - request is uc-, return cannot be write-back | ||
| 240 | * - request is uc-, return cannot be write-combine | ||
| 241 | * - request is write-combine, return cannot be write-back | ||
| 242 | */ | ||
| 243 | if ((prot_val == _PAGE_CACHE_UC_MINUS && | ||
| 244 | (new_prot_val == _PAGE_CACHE_WB || | ||
| 245 | new_prot_val == _PAGE_CACHE_WC)) || | ||
| 246 | (prot_val == _PAGE_CACHE_WC && | ||
| 247 | new_prot_val == _PAGE_CACHE_WB)) { | ||
| 248 | pr_debug( | ||
| 249 | "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n", | 239 | "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n", |
| 250 | (unsigned long long)phys_addr, | 240 | (unsigned long long)phys_addr, |
| 251 | (unsigned long long)(phys_addr + size), | 241 | (unsigned long long)(phys_addr + size), |
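The deleted comment spelled out the fallback rules that is_new_memtype_allowed() now centralizes; a sketch of those rules as a predicate (the function body here is illustrative, reconstructed from the removed comment):

    static bool sketch_new_memtype_allowed(unsigned long req, unsigned long got)
    {
            /* request uc-: result may not be write-back or write-combine */
            if (req == _PAGE_CACHE_UC_MINUS &&
                (got == _PAGE_CACHE_WB || got == _PAGE_CACHE_WC))
                    return false;
            /* request write-combine: result may not be write-back */
            if (req == _PAGE_CACHE_WC && got == _PAGE_CACHE_WB)
                    return false;
            return true;
    }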
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 7e600c1962db..e245775ec856 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
| @@ -822,6 +822,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, | |||
| 822 | { | 822 | { |
| 823 | struct cpa_data cpa; | 823 | struct cpa_data cpa; |
| 824 | int ret, cache, checkalias; | 824 | int ret, cache, checkalias; |
| 825 | unsigned long baddr = 0; | ||
| 825 | 826 | ||
| 826 | /* | 827 | /* |
| 827 | * Check, if we are requested to change a not supported | 828 | * Check, if we are requested to change a not supported |
| @@ -853,6 +854,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, | |||
| 853 | */ | 854 | */ |
| 854 | WARN_ON_ONCE(1); | 855 | WARN_ON_ONCE(1); |
| 855 | } | 856 | } |
| 857 | /* | ||
| 858 | * Save address for cache flush. *addr is modified in the call | ||
| 859 | * to __change_page_attr_set_clr() below. | ||
| 860 | */ | ||
| 861 | baddr = *addr; | ||
| 856 | } | 862 | } |
| 857 | 863 | ||
| 858 | /* Must avoid aliasing mappings in the highmem code */ | 864 | /* Must avoid aliasing mappings in the highmem code */ |
| @@ -900,7 +906,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, | |||
| 900 | cpa_flush_array(addr, numpages, cache, | 906 | cpa_flush_array(addr, numpages, cache, |
| 901 | cpa.flags, pages); | 907 | cpa.flags, pages); |
| 902 | } else | 908 | } else |
| 903 | cpa_flush_range(*addr, numpages, cache); | 909 | cpa_flush_range(baddr, numpages, cache); |
| 904 | } else | 910 | } else |
| 905 | cpa_flush_all(cache); | 911 | cpa_flush_all(cache); |
| 906 | 912 | ||
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 352aa9e927e2..9b647f679389 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/gfp.h> | 15 | #include <linux/gfp.h> |
| 16 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
| 17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
| 18 | #include <linux/rbtree.h> | ||
| 18 | 19 | ||
| 19 | #include <asm/cacheflush.h> | 20 | #include <asm/cacheflush.h> |
| 20 | #include <asm/processor.h> | 21 | #include <asm/processor.h> |
| @@ -148,11 +149,10 @@ static char *cattr_name(unsigned long flags) | |||
| 148 | * areas). All the aliases have the same cache attributes of course. | 149 | * areas). All the aliases have the same cache attributes of course. |
| 149 | * Zero attributes are represented as holes. | 150 | * Zero attributes are represented as holes. |
| 150 | * | 151 | * |
| 151 | * Currently the data structure is a list because the number of mappings | 152 | * The data structure is a list that is also organized as an rbtree |
| 152 | * are expected to be relatively small. If this should be a problem | 153 | * sorted on the start address of memtype range. |
| 153 | * it could be changed to a rbtree or similar. | ||
| 154 | * | 154 | * |
| 155 | * memtype_lock protects the whole list. | 155 | * memtype_lock protects both the linear list and rbtree. |
| 156 | */ | 156 | */ |
| 157 | 157 | ||
| 158 | struct memtype { | 158 | struct memtype { |
| @@ -160,11 +160,53 @@ struct memtype { | |||
| 160 | u64 end; | 160 | u64 end; |
| 161 | unsigned long type; | 161 | unsigned long type; |
| 162 | struct list_head nd; | 162 | struct list_head nd; |
| 163 | struct rb_node rb; | ||
| 163 | }; | 164 | }; |
| 164 | 165 | ||
| 166 | static struct rb_root memtype_rbroot = RB_ROOT; | ||
| 165 | static LIST_HEAD(memtype_list); | 167 | static LIST_HEAD(memtype_list); |
| 166 | static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */ | 168 | static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */ |
| 167 | 169 | ||
| 170 | static struct memtype *memtype_rb_search(struct rb_root *root, u64 start) | ||
| 171 | { | ||
| 172 | struct rb_node *node = root->rb_node; | ||
| 173 | struct memtype *last_lower = NULL; | ||
| 174 | |||
| 175 | while (node) { | ||
| 176 | struct memtype *data = container_of(node, struct memtype, rb); | ||
| 177 | |||
| 178 | if (data->start < start) { | ||
| 179 | last_lower = data; | ||
| 180 | node = node->rb_right; | ||
| 181 | } else if (data->start > start) { | ||
| 182 | node = node->rb_left; | ||
| 183 | } else | ||
| 184 | return data; | ||
| 185 | } | ||
| 186 | |||
| 187 | /* Will return NULL if there is no entry whose start is <= the given start */ | ||
| 188 | return last_lower; | ||
| 189 | } | ||
| 190 | |||
| 191 | static void memtype_rb_insert(struct rb_root *root, struct memtype *data) | ||
| 192 | { | ||
| 193 | struct rb_node **new = &(root->rb_node); | ||
| 194 | struct rb_node *parent = NULL; | ||
| 195 | |||
| 196 | while (*new) { | ||
| 197 | struct memtype *this = container_of(*new, struct memtype, rb); | ||
| 198 | |||
| 199 | parent = *new; | ||
| 200 | if (data->start <= this->start) | ||
| 201 | new = &((*new)->rb_left); | ||
| 202 | else if (data->start > this->start) | ||
| 203 | new = &((*new)->rb_right); | ||
| 204 | } | ||
| 205 | |||
| 206 | rb_link_node(&data->rb, parent, new); | ||
| 207 | rb_insert_color(&data->rb, root); | ||
| 208 | } | ||
| 209 | |||
| 168 | /* | 210 | /* |
| 169 | * Does intersection of PAT memory type and MTRR memory type and returns | 211 | * Does intersection of PAT memory type and MTRR memory type and returns |
| 170 | * the resulting memory type as PAT understands it. | 212 | * the resulting memory type as PAT understands it. |
| @@ -218,9 +260,6 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type) | |||
| 218 | return -EBUSY; | 260 | return -EBUSY; |
| 219 | } | 261 | } |
| 220 | 262 | ||
| 221 | static struct memtype *cached_entry; | ||
| 222 | static u64 cached_start; | ||
| 223 | |||
| 224 | static int pat_pagerange_is_ram(unsigned long start, unsigned long end) | 263 | static int pat_pagerange_is_ram(unsigned long start, unsigned long end) |
| 225 | { | 264 | { |
| 226 | int ram_page = 0, not_rampage = 0; | 265 | int ram_page = 0, not_rampage = 0; |
| @@ -249,63 +288,61 @@ static int pat_pagerange_is_ram(unsigned long start, unsigned long end) | |||
| 249 | } | 288 | } |
| 250 | 289 | ||
| 251 | /* | 290 | /* |
| 252 | * For RAM pages, mark the pages as non WB memory type using | 291 | * For RAM pages, we use page flags to mark the pages with appropriate type. |
| 253 | * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or | 292 | * Here we do two pass: |
| 254 | * set_memory_wc() on a RAM page at a time before marking it as WB again. | 293 | * - Find the memtype of all the pages in the range, look for any conflicts |
| 255 | * This is ok, because only one driver will be owning the page and | 294 | * - In case of no conflicts, set the new memtype for pages in the range |
| 256 | * doing set_memory_*() calls. | ||
| 257 | * | 295 | * |
| 258 | * For now, we use PageNonWB to track that the RAM page is being mapped | 296 | * Caller must hold memtype_lock for atomicity. |
| 259 | * as non WB. In future, we will have to use one more flag | ||
| 260 | * (or some other mechanism in page_struct) to distinguish between | ||
| 261 | * UC and WC mapping. | ||
| 262 | */ | 297 | */ |
| 263 | static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type, | 298 | static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type, |
| 264 | unsigned long *new_type) | 299 | unsigned long *new_type) |
| 265 | { | 300 | { |
| 266 | struct page *page; | 301 | struct page *page; |
| 267 | u64 pfn, end_pfn; | 302 | u64 pfn; |
| 303 | |||
| 304 | if (req_type == _PAGE_CACHE_UC) { | ||
| 305 | /* We do not support strong UC */ | ||
| 306 | WARN_ON_ONCE(1); | ||
| 307 | req_type = _PAGE_CACHE_UC_MINUS; | ||
| 308 | } | ||
| 268 | 309 | ||
| 269 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { | 310 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { |
| 270 | page = pfn_to_page(pfn); | 311 | unsigned long type; |
| 271 | if (page_mapped(page) || PageNonWB(page)) | ||
| 272 | goto out; | ||
| 273 | 312 | ||
| 274 | SetPageNonWB(page); | 313 | page = pfn_to_page(pfn); |
| 314 | type = get_page_memtype(page); | ||
| 315 | if (type != -1) { | ||
| 316 | printk(KERN_INFO "reserve_ram_pages_type failed " | ||
| 317 | "0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n", | ||
| 318 | start, end, type, req_type); | ||
| 319 | if (new_type) | ||
| 320 | *new_type = type; | ||
| 321 | |||
| 322 | return -EBUSY; | ||
| 323 | } | ||
| 275 | } | 324 | } |
| 276 | return 0; | ||
| 277 | 325 | ||
| 278 | out: | 326 | if (new_type) |
| 279 | end_pfn = pfn; | 327 | *new_type = req_type; |
| 280 | for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) { | 328 | |
| 329 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { | ||
| 281 | page = pfn_to_page(pfn); | 330 | page = pfn_to_page(pfn); |
| 282 | ClearPageNonWB(page); | 331 | set_page_memtype(page, req_type); |
| 283 | } | 332 | } |
| 284 | 333 | return 0; | |
| 285 | return -EINVAL; | ||
| 286 | } | 334 | } |
| 287 | 335 | ||
| 288 | static int free_ram_pages_type(u64 start, u64 end) | 336 | static int free_ram_pages_type(u64 start, u64 end) |
| 289 | { | 337 | { |
| 290 | struct page *page; | 338 | struct page *page; |
| 291 | u64 pfn, end_pfn; | 339 | u64 pfn; |
| 292 | 340 | ||
| 293 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { | 341 | for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { |
| 294 | page = pfn_to_page(pfn); | 342 | page = pfn_to_page(pfn); |
| 295 | if (page_mapped(page) || !PageNonWB(page)) | 343 | set_page_memtype(page, -1); |
| 296 | goto out; | ||
| 297 | |||
| 298 | ClearPageNonWB(page); | ||
| 299 | } | 344 | } |
| 300 | return 0; | 345 | return 0; |
| 301 | |||
| 302 | out: | ||
| 303 | end_pfn = pfn; | ||
| 304 | for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) { | ||
| 305 | page = pfn_to_page(pfn); | ||
| 306 | SetPageNonWB(page); | ||
| 307 | } | ||
| 308 | return -EINVAL; | ||
| 309 | } | 346 | } |
| 310 | 347 | ||
| 311 | /* | 348 | /* |
| @@ -339,6 +376,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
| 339 | if (new_type) { | 376 | if (new_type) { |
| 340 | if (req_type == -1) | 377 | if (req_type == -1) |
| 341 | *new_type = _PAGE_CACHE_WB; | 378 | *new_type = _PAGE_CACHE_WB; |
| 379 | else if (req_type == _PAGE_CACHE_WC) | ||
| 380 | *new_type = _PAGE_CACHE_UC_MINUS; | ||
| 342 | else | 381 | else |
| 343 | *new_type = req_type & _PAGE_CACHE_MASK; | 382 | *new_type = req_type & _PAGE_CACHE_MASK; |
| 344 | } | 383 | } |
| @@ -364,11 +403,16 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
| 364 | *new_type = actual_type; | 403 | *new_type = actual_type; |
| 365 | 404 | ||
| 366 | is_range_ram = pat_pagerange_is_ram(start, end); | 405 | is_range_ram = pat_pagerange_is_ram(start, end); |
| 367 | if (is_range_ram == 1) | 406 | if (is_range_ram == 1) { |
| 368 | return reserve_ram_pages_type(start, end, req_type, | 407 | |
| 369 | new_type); | 408 | spin_lock(&memtype_lock); |
| 370 | else if (is_range_ram < 0) | 409 | err = reserve_ram_pages_type(start, end, req_type, new_type); |
| 410 | spin_unlock(&memtype_lock); | ||
| 411 | |||
| 412 | return err; | ||
| 413 | } else if (is_range_ram < 0) { | ||
| 371 | return -EINVAL; | 414 | return -EINVAL; |
| 415 | } | ||
| 372 | 416 | ||
| 373 | new = kmalloc(sizeof(struct memtype), GFP_KERNEL); | 417 | new = kmalloc(sizeof(struct memtype), GFP_KERNEL); |
| 374 | if (!new) | 418 | if (!new) |
| @@ -380,17 +424,11 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
| 380 | 424 | ||
| 381 | spin_lock(&memtype_lock); | 425 | spin_lock(&memtype_lock); |
| 382 | 426 | ||
| 383 | if (cached_entry && start >= cached_start) | ||
| 384 | entry = cached_entry; | ||
| 385 | else | ||
| 386 | entry = list_entry(&memtype_list, struct memtype, nd); | ||
| 387 | |||
| 388 | /* Search for existing mapping that overlaps the current range */ | 427 | /* Search for existing mapping that overlaps the current range */ |
| 389 | where = NULL; | 428 | where = NULL; |
| 390 | list_for_each_entry_continue(entry, &memtype_list, nd) { | 429 | list_for_each_entry(entry, &memtype_list, nd) { |
| 391 | if (end <= entry->start) { | 430 | if (end <= entry->start) { |
| 392 | where = entry->nd.prev; | 431 | where = entry->nd.prev; |
| 393 | cached_entry = list_entry(where, struct memtype, nd); | ||
| 394 | break; | 432 | break; |
| 395 | } else if (start <= entry->start) { /* end > entry->start */ | 433 | } else if (start <= entry->start) { /* end > entry->start */ |
| 396 | err = chk_conflict(new, entry, new_type); | 434 | err = chk_conflict(new, entry, new_type); |
| @@ -398,8 +436,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
| 398 | dprintk("Overlap at 0x%Lx-0x%Lx\n", | 436 | dprintk("Overlap at 0x%Lx-0x%Lx\n", |
| 399 | entry->start, entry->end); | 437 | entry->start, entry->end); |
| 400 | where = entry->nd.prev; | 438 | where = entry->nd.prev; |
| 401 | cached_entry = list_entry(where, | ||
| 402 | struct memtype, nd); | ||
| 403 | } | 439 | } |
| 404 | break; | 440 | break; |
| 405 | } else if (start < entry->end) { /* start > entry->start */ | 441 | } else if (start < entry->end) { /* start > entry->start */ |
| @@ -407,8 +443,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
| 407 | if (!err) { | 443 | if (!err) { |
| 408 | dprintk("Overlap at 0x%Lx-0x%Lx\n", | 444 | dprintk("Overlap at 0x%Lx-0x%Lx\n", |
| 409 | entry->start, entry->end); | 445 | entry->start, entry->end); |
| 410 | cached_entry = list_entry(entry->nd.prev, | ||
| 411 | struct memtype, nd); | ||
| 412 | 446 | ||
| 413 | /* | 447 | /* |
| 414 | * Move to right position in the linked | 448 | * Move to right position in the linked |
| @@ -436,13 +470,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
| 436 | return err; | 470 | return err; |
| 437 | } | 471 | } |
| 438 | 472 | ||
| 439 | cached_start = start; | ||
| 440 | |||
| 441 | if (where) | 473 | if (where) |
| 442 | list_add(&new->nd, where); | 474 | list_add(&new->nd, where); |
| 443 | else | 475 | else |
| 444 | list_add_tail(&new->nd, &memtype_list); | 476 | list_add_tail(&new->nd, &memtype_list); |
| 445 | 477 | ||
| 478 | memtype_rb_insert(&memtype_rbroot, new); | ||
| 479 | |||
| 446 | spin_unlock(&memtype_lock); | 480 | spin_unlock(&memtype_lock); |
| 447 | 481 | ||
| 448 | dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n", | 482 | dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n", |
| @@ -454,7 +488,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, | |||
| 454 | 488 | ||
| 455 | int free_memtype(u64 start, u64 end) | 489 | int free_memtype(u64 start, u64 end) |
| 456 | { | 490 | { |
| 457 | struct memtype *entry; | 491 | struct memtype *entry, *saved_entry; |
| 458 | int err = -EINVAL; | 492 | int err = -EINVAL; |
| 459 | int is_range_ram; | 493 | int is_range_ram; |
| 460 | 494 | ||
| @@ -466,23 +500,58 @@ int free_memtype(u64 start, u64 end) | |||
| 466 | return 0; | 500 | return 0; |
| 467 | 501 | ||
| 468 | is_range_ram = pat_pagerange_is_ram(start, end); | 502 | is_range_ram = pat_pagerange_is_ram(start, end); |
| 469 | if (is_range_ram == 1) | 503 | if (is_range_ram == 1) { |
| 470 | return free_ram_pages_type(start, end); | 504 | |
| 471 | else if (is_range_ram < 0) | 505 | spin_lock(&memtype_lock); |
| 506 | err = free_ram_pages_type(start, end); | ||
| 507 | spin_unlock(&memtype_lock); | ||
| 508 | |||
| 509 | return err; | ||
| 510 | } else if (is_range_ram < 0) { | ||
| 472 | return -EINVAL; | 511 | return -EINVAL; |
| 512 | } | ||
| 473 | 513 | ||
| 474 | spin_lock(&memtype_lock); | 514 | spin_lock(&memtype_lock); |
| 475 | list_for_each_entry(entry, &memtype_list, nd) { | 515 | |
| 516 | entry = memtype_rb_search(&memtype_rbroot, start); | ||
| 517 | if (unlikely(entry == NULL)) | ||
| 518 | goto unlock_ret; | ||
| 519 | |||
| 520 | /* | ||
| 521 | * The saved entry points to an entry whose start is the same as or | ||
| 522 | * less than what we searched for. Now go through the list in both | ||
| 523 | * directions to look for the entry that matches both start and end, | ||
| 524 | * since the list is sorted by start address. | ||
| 525 | */ | ||
| 526 | saved_entry = entry; | ||
| 527 | list_for_each_entry_from(entry, &memtype_list, nd) { | ||
| 476 | if (entry->start == start && entry->end == end) { | 528 | if (entry->start == start && entry->end == end) { |
| 477 | if (cached_entry == entry || cached_start == start) | 529 | rb_erase(&entry->rb, &memtype_rbroot); |
| 478 | cached_entry = NULL; | 530 | list_del(&entry->nd); |
| 531 | kfree(entry); | ||
| 532 | err = 0; | ||
| 533 | break; | ||
| 534 | } else if (entry->start > start) { | ||
| 535 | break; | ||
| 536 | } | ||
| 537 | } | ||
| 538 | |||
| 539 | if (!err) | ||
| 540 | goto unlock_ret; | ||
| 479 | 541 | ||
| 542 | entry = saved_entry; | ||
| 543 | list_for_each_entry_reverse(entry, &memtype_list, nd) { | ||
| 544 | if (entry->start == start && entry->end == end) { | ||
| 545 | rb_erase(&entry->rb, &memtype_rbroot); | ||
| 480 | list_del(&entry->nd); | 546 | list_del(&entry->nd); |
| 481 | kfree(entry); | 547 | kfree(entry); |
| 482 | err = 0; | 548 | err = 0; |
| 483 | break; | 549 | break; |
| 550 | } else if (entry->start < start) { | ||
| 551 | break; | ||
| 484 | } | 552 | } |
| 485 | } | 553 | } |
| 554 | unlock_ret: | ||
| 486 | spin_unlock(&memtype_lock); | 555 | spin_unlock(&memtype_lock); |
| 487 | 556 | ||
| 488 | if (err) { | 557 | if (err) { |
| @@ -496,6 +565,101 @@ int free_memtype(u64 start, u64 end) | |||
| 496 | } | 565 | } |
| 497 | 566 | ||
| 498 | 567 | ||
| 568 | /** | ||
| 569 | * lookup_memtype - Looks up the memory type for a physical address | ||
| 570 | * @paddr: physical address whose memory type needs to be looked up | ||
| 571 | * | ||
| 572 | * Only to be called when PAT is enabled | ||
| 573 | * | ||
| 574 | * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or | ||
| 575 | * _PAGE_CACHE_UC | ||
| 576 | */ | ||
| 577 | static unsigned long lookup_memtype(u64 paddr) | ||
| 578 | { | ||
| 579 | int rettype = _PAGE_CACHE_WB; | ||
| 580 | struct memtype *entry; | ||
| 581 | |||
| 582 | if (is_ISA_range(paddr, paddr + PAGE_SIZE - 1)) | ||
| 583 | return rettype; | ||
| 584 | |||
| 585 | if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) { | ||
| 586 | struct page *page; | ||
| 587 | spin_lock(&memtype_lock); | ||
| 588 | page = pfn_to_page(paddr >> PAGE_SHIFT); | ||
| 589 | rettype = get_page_memtype(page); | ||
| 590 | spin_unlock(&memtype_lock); | ||
| 591 | /* | ||
| 592 | * -1 from get_page_memtype() implies RAM page is in its | ||
| 593 | * default state and not reserved, and hence of type WB | ||
| 594 | */ | ||
| 595 | if (rettype == -1) | ||
| 596 | rettype = _PAGE_CACHE_WB; | ||
| 597 | |||
| 598 | return rettype; | ||
| 599 | } | ||
| 600 | |||
| 601 | spin_lock(&memtype_lock); | ||
| 602 | |||
| 603 | entry = memtype_rb_search(&memtype_rbroot, paddr); | ||
| 604 | if (entry != NULL) | ||
| 605 | rettype = entry->type; | ||
| 606 | else | ||
| 607 | rettype = _PAGE_CACHE_UC_MINUS; | ||
| 608 | |||
| 609 | spin_unlock(&memtype_lock); | ||
| 610 | return rettype; | ||
| 611 | } | ||
| 612 | |||
| 613 | /** | ||
| 614 | * io_reserve_memtype - Request a memory type mapping for a region of memory | ||
| 615 | * @start: start (physical address) of the region | ||
| 616 | * @end: end (physical address) of the region | ||
| 617 | * @type: A pointer to the requested memtype. On success, it is updated | ||
| 618 | * with the requested type or another compatible type available for the region | ||
| 619 | * | ||
| 620 | * On success, returns 0 | ||
| 621 | * On failure, returns non-zero | ||
| 622 | */ | ||
| 623 | int io_reserve_memtype(resource_size_t start, resource_size_t end, | ||
| 624 | unsigned long *type) | ||
| 625 | { | ||
| 626 | resource_size_t size = end - start; | ||
| 627 | unsigned long req_type = *type; | ||
| 628 | unsigned long new_type; | ||
| 629 | int ret; | ||
| 630 | |||
| 631 | WARN_ON_ONCE(iomem_map_sanity_check(start, size)); | ||
| 632 | |||
| 633 | ret = reserve_memtype(start, end, req_type, &new_type); | ||
| 634 | if (ret) | ||
| 635 | goto out_err; | ||
| 636 | |||
| 637 | if (!is_new_memtype_allowed(start, size, req_type, new_type)) | ||
| 638 | goto out_free; | ||
| 639 | |||
| 640 | if (kernel_map_sync_memtype(start, size, new_type) < 0) | ||
| 641 | goto out_free; | ||
| 642 | |||
| 643 | *type = new_type; | ||
| 644 | return 0; | ||
| 645 | |||
| 646 | out_free: | ||
| 647 | free_memtype(start, end); | ||
| 648 | ret = -EBUSY; | ||
| 649 | out_err: | ||
| 650 | return ret; | ||
| 651 | } | ||
| 652 | |||
| 653 | /** | ||
| 654 | * io_free_memtype - Release a memory type mapping for a region of memory | ||
| 655 | * @start: start (physical address) of the region | ||
| 656 | * @end: end (physical address) of the region | ||
| 657 | */ | ||
| 658 | void io_free_memtype(resource_size_t start, resource_size_t end) | ||
| 659 | { | ||
| 660 | free_memtype(start, end); | ||
| 661 | } | ||
| 662 | |||
| 499 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | 663 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
| 500 | unsigned long size, pgprot_t vma_prot) | 664 | unsigned long size, pgprot_t vma_prot) |
| 501 | { | 665 | { |
| @@ -577,7 +741,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) | |||
| 577 | { | 741 | { |
| 578 | unsigned long id_sz; | 742 | unsigned long id_sz; |
| 579 | 743 | ||
| 580 | if (!pat_enabled || base >= __pa(high_memory)) | 744 | if (base >= __pa(high_memory)) |
| 581 | return 0; | 745 | return 0; |
| 582 | 746 | ||
| 583 | id_sz = (__pa(high_memory) < base + size) ? | 747 | id_sz = (__pa(high_memory) < base + size) ? |
| @@ -612,11 +776,29 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, | |||
| 612 | is_ram = pat_pagerange_is_ram(paddr, paddr + size); | 776 | is_ram = pat_pagerange_is_ram(paddr, paddr + size); |
| 613 | 777 | ||
| 614 | /* | 778 | /* |
| 615 | * reserve_pfn_range() doesn't support RAM pages. Maintain the current | 779 | * reserve_pfn_range() for RAM pages. We do not refcount to keep |
| 616 | * behavior with RAM pages by returning success. | 780 | * track of the number of mappings of RAM pages. We can assert that |
| 781 | * the type requested matches the type of the first page in the range. | ||
| 617 | */ | 782 | */ |
| 618 | if (is_ram != 0) | 783 | if (is_ram) { |
| 784 | if (!pat_enabled) | ||
| 785 | return 0; | ||
| 786 | |||
| 787 | flags = lookup_memtype(paddr); | ||
| 788 | if (want_flags != flags) { | ||
| 789 | printk(KERN_WARNING | ||
| 790 | "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n", | ||
| 791 | current->comm, current->pid, | ||
| 792 | cattr_name(want_flags), | ||
| 793 | (unsigned long long)paddr, | ||
| 794 | (unsigned long long)(paddr + size), | ||
| 795 | cattr_name(flags)); | ||
| 796 | *vma_prot = __pgprot((pgprot_val(*vma_prot) & | ||
| 797 | (~_PAGE_CACHE_MASK)) | | ||
| 798 | flags); | ||
| 799 | } | ||
| 619 | return 0; | 800 | return 0; |
| 801 | } | ||
| 620 | 802 | ||
| 621 | ret = reserve_memtype(paddr, paddr + size, want_flags, &flags); | 803 | ret = reserve_memtype(paddr, paddr + size, want_flags, &flags); |
| 622 | if (ret) | 804 | if (ret) |
| @@ -678,14 +860,6 @@ int track_pfn_vma_copy(struct vm_area_struct *vma) | |||
| 678 | unsigned long vma_size = vma->vm_end - vma->vm_start; | 860 | unsigned long vma_size = vma->vm_end - vma->vm_start; |
| 679 | pgprot_t pgprot; | 861 | pgprot_t pgprot; |
| 680 | 862 | ||
| 681 | if (!pat_enabled) | ||
| 682 | return 0; | ||
| 683 | |||
| 684 | /* | ||
| 685 | * For now, only handle remap_pfn_range() vmas where | ||
| 686 | * is_linear_pfn_mapping() == TRUE. Handling of | ||
| 687 | * vm_insert_pfn() is TBD. | ||
| 688 | */ | ||
| 689 | if (is_linear_pfn_mapping(vma)) { | 863 | if (is_linear_pfn_mapping(vma)) { |
| 690 | /* | 864 | /* |
| 691 | * reserve the whole chunk covered by vma. We need the | 865 | * reserve the whole chunk covered by vma. We need the |
| @@ -713,23 +887,24 @@ int track_pfn_vma_copy(struct vm_area_struct *vma) | |||
| 713 | int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, | 887 | int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, |
| 714 | unsigned long pfn, unsigned long size) | 888 | unsigned long pfn, unsigned long size) |
| 715 | { | 889 | { |
| 890 | unsigned long flags; | ||
| 716 | resource_size_t paddr; | 891 | resource_size_t paddr; |
| 717 | unsigned long vma_size = vma->vm_end - vma->vm_start; | 892 | unsigned long vma_size = vma->vm_end - vma->vm_start; |
| 718 | 893 | ||
| 719 | if (!pat_enabled) | ||
| 720 | return 0; | ||
| 721 | |||
| 722 | /* | ||
| 723 | * For now, only handle remap_pfn_range() vmas where | ||
| 724 | * is_linear_pfn_mapping() == TRUE. Handling of | ||
| 725 | * vm_insert_pfn() is TBD. | ||
| 726 | */ | ||
| 727 | if (is_linear_pfn_mapping(vma)) { | 894 | if (is_linear_pfn_mapping(vma)) { |
| 728 | /* reserve the whole chunk starting from vm_pgoff */ | 895 | /* reserve the whole chunk starting from vm_pgoff */ |
| 729 | paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; | 896 | paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; |
| 730 | return reserve_pfn_range(paddr, vma_size, prot, 0); | 897 | return reserve_pfn_range(paddr, vma_size, prot, 0); |
| 731 | } | 898 | } |
| 732 | 899 | ||
| 900 | if (!pat_enabled) | ||
| 901 | return 0; | ||
| 902 | |||
| 903 | /* for vm_insert_pfn and friends, we set prot based on lookup */ | ||
| 904 | flags = lookup_memtype(pfn << PAGE_SHIFT); | ||
| 905 | *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | | ||
| 906 | flags); | ||
| 907 | |||
| 733 | return 0; | 908 | return 0; |
| 734 | } | 909 | } |
| 735 | 910 | ||
| @@ -744,14 +919,6 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, | |||
| 744 | resource_size_t paddr; | 919 | resource_size_t paddr; |
| 745 | unsigned long vma_size = vma->vm_end - vma->vm_start; | 920 | unsigned long vma_size = vma->vm_end - vma->vm_start; |
| 746 | 921 | ||
| 747 | if (!pat_enabled) | ||
| 748 | return; | ||
| 749 | |||
| 750 | /* | ||
| 751 | * For now, only handle remap_pfn_range() vmas where | ||
| 752 | * is_linear_pfn_mapping() == TRUE. Handling of | ||
| 753 | * vm_insert_pfn() is TBD. | ||
| 754 | */ | ||
| 755 | if (is_linear_pfn_mapping(vma)) { | 922 | if (is_linear_pfn_mapping(vma)) { |
| 756 | /* free the whole chunk starting from vm_pgoff */ | 923 | /* free the whole chunk starting from vm_pgoff */ |
| 757 | paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; | 924 | paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; |
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index b3d20b9cac63..417c9f5b4afa 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c | |||
| @@ -242,7 +242,7 @@ static void __restore_processor_state(struct saved_context *ctxt) | |||
| 242 | fix_processor_context(); | 242 | fix_processor_context(); |
| 243 | 243 | ||
| 244 | do_fpu_end(); | 244 | do_fpu_end(); |
| 245 | mtrr_ap_init(); | 245 | mtrr_bp_restore(); |
| 246 | 246 | ||
| 247 | #ifdef CONFIG_X86_OLD_MCE | 247 | #ifdef CONFIG_X86_OLD_MCE |
| 248 | mcheck_init(&boot_cpu_data); | 248 | mcheck_init(&boot_cpu_data); |
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 0adb0f91568c..97eb928b4924 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h | |||
| @@ -49,23 +49,30 @@ static inline struct io_mapping * | |||
| 49 | io_mapping_create_wc(resource_size_t base, unsigned long size) | 49 | io_mapping_create_wc(resource_size_t base, unsigned long size) |
| 50 | { | 50 | { |
| 51 | struct io_mapping *iomap; | 51 | struct io_mapping *iomap; |
| 52 | 52 | pgprot_t prot; | |
| 53 | if (!is_io_mapping_possible(base, size)) | ||
| 54 | return NULL; | ||
| 55 | 53 | ||
| 56 | iomap = kmalloc(sizeof(*iomap), GFP_KERNEL); | 54 | iomap = kmalloc(sizeof(*iomap), GFP_KERNEL); |
| 57 | if (!iomap) | 55 | if (!iomap) |
| 58 | return NULL; | 56 | goto out_err; |
| 57 | |||
| 58 | if (iomap_create_wc(base, size, &prot)) | ||
| 59 | goto out_free; | ||
| 59 | 60 | ||
| 60 | iomap->base = base; | 61 | iomap->base = base; |
| 61 | iomap->size = size; | 62 | iomap->size = size; |
| 62 | iomap->prot = pgprot_writecombine(__pgprot(__PAGE_KERNEL)); | 63 | iomap->prot = prot; |
| 63 | return iomap; | 64 | return iomap; |
| 65 | |||
| 66 | out_free: | ||
| 67 | kfree(iomap); | ||
| 68 | out_err: | ||
| 69 | return NULL; | ||
| 64 | } | 70 | } |
| 65 | 71 | ||
| 66 | static inline void | 72 | static inline void |
| 67 | io_mapping_free(struct io_mapping *mapping) | 73 | io_mapping_free(struct io_mapping *mapping) |
| 68 | { | 74 | { |
| 75 | iomap_free(mapping->base, mapping->size); | ||
| 69 | kfree(mapping); | 76 | kfree(mapping); |
| 70 | } | 77 | } |
| 71 | 78 | ||
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index e2e5ce543595..2b87acfc5f87 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
| @@ -99,7 +99,7 @@ enum pageflags { | |||
| 99 | #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT | 99 | #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT |
| 100 | PG_mlocked, /* Page is vma mlocked */ | 100 | PG_mlocked, /* Page is vma mlocked */ |
| 101 | #endif | 101 | #endif |
| 102 | #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR | 102 | #ifdef CONFIG_ARCH_USES_PG_UNCACHED |
| 103 | PG_uncached, /* Page has been mapped as uncached */ | 103 | PG_uncached, /* Page has been mapped as uncached */ |
| 104 | #endif | 104 | #endif |
| 105 | __NR_PAGEFLAGS, | 105 | __NR_PAGEFLAGS, |
| @@ -257,7 +257,7 @@ PAGEFLAG_FALSE(Mlocked) | |||
| 257 | SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked) | 257 | SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked) |
| 258 | #endif | 258 | #endif |
| 259 | 259 | ||
| 260 | #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR | 260 | #ifdef CONFIG_ARCH_USES_PG_UNCACHED |
| 261 | PAGEFLAG(Uncached, uncached) | 261 | PAGEFLAG(Uncached, uncached) |
| 262 | #else | 262 | #else |
| 263 | PAGEFLAG_FALSE(Uncached) | 263 | PAGEFLAG_FALSE(Uncached) |
diff --git a/kernel/cpu.c b/kernel/cpu.c index 8ce10043e4ac..f5f9485b8c0f 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
| @@ -413,6 +413,14 @@ int disable_nonboot_cpus(void) | |||
| 413 | return error; | 413 | return error; |
| 414 | } | 414 | } |
| 415 | 415 | ||
| 416 | void __weak arch_enable_nonboot_cpus_begin(void) | ||
| 417 | { | ||
| 418 | } | ||
| 419 | |||
| 420 | void __weak arch_enable_nonboot_cpus_end(void) | ||
| 421 | { | ||
| 422 | } | ||
| 423 | |||
| 416 | void __ref enable_nonboot_cpus(void) | 424 | void __ref enable_nonboot_cpus(void) |
| 417 | { | 425 | { |
| 418 | int cpu, error; | 426 | int cpu, error; |
| @@ -424,6 +432,9 @@ void __ref enable_nonboot_cpus(void) | |||
| 424 | goto out; | 432 | goto out; |
| 425 | 433 | ||
| 426 | printk("Enabling non-boot CPUs ...\n"); | 434 | printk("Enabling non-boot CPUs ...\n"); |
| 435 | |||
| 436 | arch_enable_nonboot_cpus_begin(); | ||
| 437 | |||
| 427 | for_each_cpu(cpu, frozen_cpus) { | 438 | for_each_cpu(cpu, frozen_cpus) { |
| 428 | error = _cpu_up(cpu, 1); | 439 | error = _cpu_up(cpu, 1); |
| 429 | if (!error) { | 440 | if (!error) { |
| @@ -432,6 +443,9 @@ void __ref enable_nonboot_cpus(void) | |||
| 432 | } | 443 | } |
| 433 | printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error); | 444 | printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error); |
| 434 | } | 445 | } |
| 446 | |||
| 447 | arch_enable_nonboot_cpus_end(); | ||
| 448 | |||
| 435 | cpumask_clear(frozen_cpus); | 449 | cpumask_clear(frozen_cpus); |
| 436 | out: | 450 | out: |
| 437 | cpu_maps_update_done(); | 451 | cpu_maps_update_done(); |
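The __weak definitions make the new hooks safe to call from generic code on every architecture; x86 overrides them in smpboot.c above. The pattern in miniature, with both definitions shown for contrast:

    /* kernel/cpu.c: weak default, a no-op for arches that don't care */
    void __weak arch_enable_nonboot_cpus_begin(void)
    {
    }

    /* arch/x86/kernel/smpboot.c: the strong definition silently wins at
     * link time and switches APs to delayed MTRR init */
    void arch_enable_nonboot_cpus_begin(void)
    {
            set_mtrr_aps_delayed_init();
    }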
diff --git a/kernel/smp.c b/kernel/smp.c index 94188b8ecc33..8e218500ab14 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
| @@ -177,6 +177,11 @@ void generic_smp_call_function_interrupt(void) | |||
| 177 | int cpu = get_cpu(); | 177 | int cpu = get_cpu(); |
| 178 | 178 | ||
| 179 | /* | 179 | /* |
| 180 | * Shouldn't receive this interrupt on a cpu that is not yet online. | ||
| 181 | */ | ||
| 182 | WARN_ON_ONCE(!cpu_online(cpu)); | ||
| 183 | |||
| 184 | /* | ||
| 180 | * Ensure entry is visible on call_function_queue after we have | 185 | * Ensure entry is visible on call_function_queue after we have |
| 181 | * entered the IPI. See comment in smp_call_function_many. | 186 | * entered the IPI. See comment in smp_call_function_many. |
| 182 | * If we don't have this, then we may miss an entry on the list | 187 | * If we don't have this, then we may miss an entry on the list |
| @@ -230,6 +235,11 @@ void generic_smp_call_function_single_interrupt(void) | |||
| 230 | unsigned int data_flags; | 235 | unsigned int data_flags; |
| 231 | LIST_HEAD(list); | 236 | LIST_HEAD(list); |
| 232 | 237 | ||
| 238 | /* | ||
| 239 | * Shouldn't receive this interrupt on a cpu that is not yet online. | ||
| 240 | */ | ||
| 241 | WARN_ON_ONCE(!cpu_online(smp_processor_id())); | ||
| 242 | |||
| 233 | spin_lock(&q->lock); | 243 | spin_lock(&q->lock); |
| 234 | list_replace_init(&q->list, &list); | 244 | list_replace_init(&q->list, &list); |
| 235 | spin_unlock(&q->lock); | 245 | spin_unlock(&q->lock); |
| @@ -285,8 +295,14 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
| 285 | */ | 295 | */ |
| 286 | this_cpu = get_cpu(); | 296 | this_cpu = get_cpu(); |
| 287 | 297 | ||
| 288 | /* Can deadlock when called with interrupts disabled */ | 298 | /* |
| 289 | WARN_ON_ONCE(irqs_disabled() && !oops_in_progress); | 299 | * Can deadlock when called with interrupts disabled. |
| 300 | * We allow cpus that are not yet online though, as no one else can | ||
| 301 | * send an smp call function interrupt to this cpu and as such | ||
| 302 | * deadlocks can't happen. | ||
| 303 | */ | ||
| 304 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() | ||
| 305 | && !oops_in_progress); | ||
| 290 | 306 | ||
| 291 | if (cpu == this_cpu) { | 307 | if (cpu == this_cpu) { |
| 292 | local_irq_save(flags); | 308 | local_irq_save(flags); |
| @@ -329,8 +345,14 @@ void __smp_call_function_single(int cpu, struct call_single_data *data, | |||
| 329 | { | 345 | { |
| 330 | csd_lock(data); | 346 | csd_lock(data); |
| 331 | 347 | ||
| 332 | /* Can deadlock when called with interrupts disabled */ | 348 | /* |
| 333 | WARN_ON_ONCE(wait && irqs_disabled() && !oops_in_progress); | 349 | * Can deadlock when called with interrupts disabled. |
| 350 | * We allow cpus that are not yet online though, as no one else can | ||
| 351 | * send an smp call function interrupt to this cpu and as such | ||
| 352 | * deadlocks can't happen. | ||
| 353 | */ | ||
| 354 | WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled() | ||
| 355 | && !oops_in_progress); | ||
| 334 | 356 | ||
| 335 | generic_exec_single(cpu, data, wait); | 357 | generic_exec_single(cpu, data, wait); |
| 336 | } | 358 | } |
| @@ -365,8 +387,14 @@ void smp_call_function_many(const struct cpumask *mask, | |||
| 365 | unsigned long flags; | 387 | unsigned long flags; |
| 366 | int cpu, next_cpu, this_cpu = smp_processor_id(); | 388 | int cpu, next_cpu, this_cpu = smp_processor_id(); |
| 367 | 389 | ||
| 368 | /* Can deadlock when called with interrupts disabled */ | 390 | /* |
| 369 | WARN_ON_ONCE(irqs_disabled() && !oops_in_progress); | 391 | * Can deadlock when called with interrupts disabled. |
| 392 | * We allow cpus that are not yet online though, as no one else can | ||
| 393 | * send an smp call function interrupt to this cpu and as such | ||
| 394 | * deadlocks can't happen. | ||
| 395 | */ | ||
| 396 | WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() | ||
| 397 | && !oops_in_progress); | ||
| 370 | 398 | ||
| 371 | /* So, what's a CPU they want? Ignoring this one. */ | 399 | /* So, what's a CPU they want? Ignoring this one. */ |
| 372 | cpu = cpumask_first_and(mask, cpu_online_mask); | 400 | cpu = cpumask_first_and(mask, cpu_online_mask); |
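All three smp.c call sites gain the same relaxed assertion; the reasoning in code form: waiting with interrupts disabled can only deadlock if this CPU is able to receive the smp-call IPI itself, i.e. if it is already online.

    /* Multi-target variant: */
    WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                 && !oops_in_progress);

    /* The __smp_call_function_single() variant additionally gates on
     * `wait`: a fire-and-forget call never blocks, so it cannot
     * deadlock either. */
    WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
                 && !oops_in_progress);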
diff --git a/mm/Kconfig b/mm/Kconfig index fe5f674d7a7d..3aa519f52e18 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
| @@ -153,7 +153,7 @@ config MEMORY_HOTREMOVE | |||
| 153 | # | 153 | # |
| 154 | config PAGEFLAGS_EXTENDED | 154 | config PAGEFLAGS_EXTENDED |
| 155 | def_bool y | 155 | def_bool y |
| 156 | depends on 64BIT || SPARSEMEM_VMEMMAP || !NUMA || !SPARSEMEM | 156 | depends on 64BIT || SPARSEMEM_VMEMMAP || !SPARSEMEM |
| 157 | 157 | ||
| 158 | # Heavily threaded applications may benefit from splitting the mm-wide | 158 | # Heavily threaded applications may benefit from splitting the mm-wide |
| 159 | # page_table_lock, so that faults on different parts of the user address | 159 | # page_table_lock, so that faults on different parts of the user address |
