 Documentation/ABI/testing/sysfs-devices-cache_disable |  18
 arch/x86/include/asm/cpu_debug.h                       | 101
 arch/x86/include/asm/k8.h                              |  13
 arch/x86/kernel/cpu/cpu_debug.c                        | 417
 arch/x86/kernel/cpu/intel_cacheinfo.c                  | 153
 arch/x86/kernel/quirks.c                               |  37
 6 files changed, 237 insertions(+), 502 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-devices-cache_disable b/Documentation/ABI/testing/sysfs-devices-cache_disable
new file mode 100644
index 000000000000..175bb4f70512
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-cache_disable
@@ -0,0 +1,18 @@
+What:		/sys/devices/system/cpu/cpu*/cache/index*/cache_disable_X
+Date:		August 2008
+KernelVersion:	2.6.27
+Contact:	mark.langsdorf@amd.com
+Description:	These files exist in every cpu's cache index directories.
+		There are currently 2 cache_disable_# files in each
+		directory.  Reading from these files on a supported
+		processor will return that cache disable index value
+		for that processor and node.  Writing to one of these
+		files will cause the specified cache index to be disabled.
+
+		Currently, only AMD Family 10h Processors support cache index
+		disable, and only for their L3 caches.  See the BIOS and
+		Kernel Developer's Guide at
+		http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/31116-Public-GH-BKDG_3.20_2-4-09.pdf
+		for formatting information and other details on the
+		cache index disable.
+Users:		joachim.deguara@amd.com
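For context, a minimal userspace sketch of how these attributes would be exercised. The index3 path and the value 12 below are placeholders: the right cache index directory and a valid disable value depend on the machine and on the BKDG formatting referenced above.

/* Hypothetical example: read cpu0's L3 cache_disable_0, then write a
 * made-up index into it.  Needs root and a Family 10h processor.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0";
	char line[64];
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		printf("current setting: %s", line);
	fclose(f);

	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "12\n");	/* placeholder cache index to disable */
	return fclose(f) ? 1 : 0;
}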
diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h
index 222802029fa6..d96c1ee3a95c 100644
--- a/arch/x86/include/asm/cpu_debug.h
+++ b/arch/x86/include/asm/cpu_debug.h
@@ -86,105 +86,7 @@ enum cpu_file_bit {
 	CPU_VALUE_BIT, /* value */
 };
 
 #define CPU_FILE_VALUE (1 << CPU_VALUE_BIT)
-
-/*
- * DisplayFamily_DisplayModel Processor Families/Processor Number Series
- * -------------------------- ------------------------------------------
- * 05_01, 05_02, 05_04 Pentium, Pentium with MMX
- *
- * 06_01 Pentium Pro
- * 06_03, 06_05 Pentium II Xeon, Pentium II
- * 06_07, 06_08, 06_0A, 06_0B Pentium III Xeon, Pentum III
- *
- * 06_09, 060D Pentium M
- *
- * 06_0E Core Duo, Core Solo
- *
- * 06_0F Xeon 3000, 3200, 5100, 5300, 7300 series,
- * Core 2 Quad, Core 2 Extreme, Core 2 Duo,
- * Pentium dual-core
- * 06_17 Xeon 5200, 5400 series, Core 2 Quad Q9650
- *
- * 06_1C Atom
- *
- * 0F_00, 0F_01, 0F_02 Xeon, Xeon MP, Pentium 4
- * 0F_03, 0F_04 Xeon, Xeon MP, Pentium 4, Pentium D
- *
- * 0F_06 Xeon 7100, 5000 Series, Xeon MP,
- * Pentium 4, Pentium D
- */
-
-/* Register processors bits */
-enum cpu_processor_bit {
-	CPU_NONE,
-	/* Intel */
-	CPU_INTEL_PENTIUM_BIT,
-	CPU_INTEL_P6_BIT,
-	CPU_INTEL_PENTIUM_M_BIT,
-	CPU_INTEL_CORE_BIT,
-	CPU_INTEL_CORE2_BIT,
-	CPU_INTEL_ATOM_BIT,
-	CPU_INTEL_XEON_P4_BIT,
-	CPU_INTEL_XEON_MP_BIT,
-	/* AMD */
-	CPU_AMD_K6_BIT,
-	CPU_AMD_K7_BIT,
-	CPU_AMD_K8_BIT,
-	CPU_AMD_0F_BIT,
-	CPU_AMD_10_BIT,
-	CPU_AMD_11_BIT,
-};
-
-#define CPU_INTEL_PENTIUM (1 << CPU_INTEL_PENTIUM_BIT)
-#define CPU_INTEL_P6 (1 << CPU_INTEL_P6_BIT)
-#define CPU_INTEL_PENTIUM_M (1 << CPU_INTEL_PENTIUM_M_BIT)
-#define CPU_INTEL_CORE (1 << CPU_INTEL_CORE_BIT)
-#define CPU_INTEL_CORE2 (1 << CPU_INTEL_CORE2_BIT)
-#define CPU_INTEL_ATOM (1 << CPU_INTEL_ATOM_BIT)
-#define CPU_INTEL_XEON_P4 (1 << CPU_INTEL_XEON_P4_BIT)
-#define CPU_INTEL_XEON_MP (1 << CPU_INTEL_XEON_MP_BIT)
-
-#define CPU_INTEL_PX (CPU_INTEL_P6 | CPU_INTEL_PENTIUM_M)
-#define CPU_INTEL_COREX (CPU_INTEL_CORE | CPU_INTEL_CORE2)
-#define CPU_INTEL_XEON (CPU_INTEL_XEON_P4 | CPU_INTEL_XEON_MP)
-#define CPU_CO_AT (CPU_INTEL_CORE | CPU_INTEL_ATOM)
-#define CPU_C2_AT (CPU_INTEL_CORE2 | CPU_INTEL_ATOM)
-#define CPU_CX_AT (CPU_INTEL_COREX | CPU_INTEL_ATOM)
-#define CPU_CX_XE (CPU_INTEL_COREX | CPU_INTEL_XEON)
-#define CPU_P6_XE (CPU_INTEL_P6 | CPU_INTEL_XEON)
-#define CPU_PM_CO_AT (CPU_INTEL_PENTIUM_M | CPU_CO_AT)
-#define CPU_C2_AT_XE (CPU_C2_AT | CPU_INTEL_XEON)
-#define CPU_CX_AT_XE (CPU_CX_AT | CPU_INTEL_XEON)
-#define CPU_P6_CX_AT (CPU_INTEL_P6 | CPU_CX_AT)
-#define CPU_P6_CX_XE (CPU_P6_XE | CPU_INTEL_COREX)
-#define CPU_P6_CX_AT_XE (CPU_INTEL_P6 | CPU_CX_AT_XE)
-#define CPU_PM_CX_AT_XE (CPU_INTEL_PENTIUM_M | CPU_CX_AT_XE)
-#define CPU_PM_CX_AT (CPU_INTEL_PENTIUM_M | CPU_CX_AT)
-#define CPU_PM_CX_XE (CPU_INTEL_PENTIUM_M | CPU_CX_XE)
-#define CPU_PX_CX_AT (CPU_INTEL_PX | CPU_CX_AT)
-#define CPU_PX_CX_AT_XE (CPU_INTEL_PX | CPU_CX_AT_XE)
-
-/* Select all supported Intel CPUs */
-#define CPU_INTEL_ALL (CPU_INTEL_PENTIUM | CPU_PX_CX_AT_XE)
-
-#define CPU_AMD_K6 (1 << CPU_AMD_K6_BIT)
-#define CPU_AMD_K7 (1 << CPU_AMD_K7_BIT)
-#define CPU_AMD_K8 (1 << CPU_AMD_K8_BIT)
-#define CPU_AMD_0F (1 << CPU_AMD_0F_BIT)
-#define CPU_AMD_10 (1 << CPU_AMD_10_BIT)
-#define CPU_AMD_11 (1 << CPU_AMD_11_BIT)
-
-#define CPU_K10_PLUS (CPU_AMD_10 | CPU_AMD_11)
-#define CPU_K0F_PLUS (CPU_AMD_0F | CPU_K10_PLUS)
-#define CPU_K8_PLUS (CPU_AMD_K8 | CPU_K0F_PLUS)
-#define CPU_K7_PLUS (CPU_AMD_K7 | CPU_K8_PLUS)
-
-/* Select all supported AMD CPUs */
-#define CPU_AMD_ALL (CPU_AMD_K6 | CPU_K7_PLUS)
-
-/* Select all supported CPUs */
-#define CPU_ALL (CPU_INTEL_ALL | CPU_AMD_ALL)
 
 #define MAX_CPU_FILES 512
 
@@ -220,7 +122,6 @@ struct cpu_debug_range {
 	unsigned min; /* Register range min */
 	unsigned max; /* Register range max */
 	unsigned flag; /* Supported flags */
-	unsigned model; /* Supported models */
 };
 
 #endif /* _ASM_X86_CPU_DEBUG_H */
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/k8.h
index 54c8cc53b24d..c2d1f3b58e5f 100644
--- a/arch/x86/include/asm/k8.h
+++ b/arch/x86/include/asm/k8.h
@@ -12,4 +12,17 @@ extern int cache_k8_northbridges(void);
 extern void k8_flush_garts(void);
 extern int k8_scan_nodes(unsigned long start, unsigned long end);
 
+#ifdef CONFIG_K8_NB
+static inline struct pci_dev *node_to_k8_nb_misc(int node)
+{
+	return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
+}
+#else
+static inline struct pci_dev *node_to_k8_nb_misc(int node)
+{
+	return NULL;
+}
+#endif
+
+
 #endif /* _ASM_X86_K8_H */
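The new helper simply indexes the k8_northbridges[] array that cache_k8_northbridges() populates at boot, so a caller can go from a NUMA node straight to the misc (function 3) PCI device of that node's northbridge. A rough sketch of the calling pattern the cacheinfo changes below rely on; read_l3_slot() is an illustrative name, not a kernel function:

/* Illustrative only: fetch one of the two L3 cache-disable slots of the
 * northbridge that owns 'cpu'.  Mirrors what show_cache_disable() does
 * further down in this diff.
 */
static u32 read_l3_slot(unsigned int cpu, unsigned int slot)
{
	struct pci_dev *misc = node_to_k8_nb_misc(cpu_to_node(cpu));
	u32 reg = 0;

	if (misc)
		pci_read_config_dword(misc, 0x1BC + slot * 4, &reg);

	return reg;
}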
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index 2fc4f6bb9ca5..6b2a52dd0403 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -32,9 +32,7 @@
 
 static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
 static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
-static DEFINE_PER_CPU(unsigned, cpu_modelflag);
 static DEFINE_PER_CPU(int, cpu_priv_count);
-static DEFINE_PER_CPU(unsigned, cpu_model);
 
 static DEFINE_MUTEX(cpu_debug_lock);
 
@@ -80,302 +78,102 @@ static struct cpu_file_base cpu_file[] = {
 	{ "value", CPU_REG_ALL, 1 },
 };
 
-/* Intel Registers Range */
-static struct cpu_debug_range cpu_intel_range[] = {
-	{ 0x00000000, 0x00000001, CPU_MC, CPU_INTEL_ALL },
-	{ 0x00000006, 0x00000007, CPU_MONITOR, CPU_CX_AT_XE },
-	{ 0x00000010, 0x00000010, CPU_TIME, CPU_INTEL_ALL },
-	{ 0x00000011, 0x00000013, CPU_PMC, CPU_INTEL_PENTIUM },
-	{ 0x00000017, 0x00000017, CPU_PLATFORM, CPU_PX_CX_AT_XE },
-	{ 0x0000001B, 0x0000001B, CPU_APIC, CPU_P6_CX_AT_XE },
-
-	{ 0x0000002A, 0x0000002A, CPU_POWERON, CPU_PX_CX_AT_XE },
-	{ 0x0000002B, 0x0000002B, CPU_POWERON, CPU_INTEL_XEON },
-	{ 0x0000002C, 0x0000002C, CPU_FREQ, CPU_INTEL_XEON },
-	{ 0x0000003A, 0x0000003A, CPU_CONTROL, CPU_CX_AT_XE },
-
-	{ 0x00000040, 0x00000043, CPU_LBRANCH, CPU_PM_CX_AT_XE },
-	{ 0x00000044, 0x00000047, CPU_LBRANCH, CPU_PM_CO_AT },
-	{ 0x00000060, 0x00000063, CPU_LBRANCH, CPU_C2_AT },
-	{ 0x00000064, 0x00000067, CPU_LBRANCH, CPU_INTEL_ATOM },
-
-	{ 0x00000079, 0x00000079, CPU_BIOS, CPU_P6_CX_AT_XE },
-	{ 0x00000088, 0x0000008A, CPU_CACHE, CPU_INTEL_P6 },
-	{ 0x0000008B, 0x0000008B, CPU_BIOS, CPU_P6_CX_AT_XE },
-	{ 0x0000009B, 0x0000009B, CPU_MONITOR, CPU_INTEL_XEON },
-
-	{ 0x000000C1, 0x000000C2, CPU_PMC, CPU_P6_CX_AT },
-	{ 0x000000CD, 0x000000CD, CPU_FREQ, CPU_CX_AT },
-	{ 0x000000E7, 0x000000E8, CPU_PERF, CPU_CX_AT },
-	{ 0x000000FE, 0x000000FE, CPU_MTRR, CPU_P6_CX_XE },
-
-	{ 0x00000116, 0x00000116, CPU_CACHE, CPU_INTEL_P6 },
-	{ 0x00000118, 0x00000118, CPU_CACHE, CPU_INTEL_P6 },
-	{ 0x00000119, 0x00000119, CPU_CACHE, CPU_INTEL_PX },
-	{ 0x0000011A, 0x0000011B, CPU_CACHE, CPU_INTEL_P6 },
-	{ 0x0000011E, 0x0000011E, CPU_CACHE, CPU_PX_CX_AT },
+/* CPU Registers Range */
+static struct cpu_debug_range cpu_reg_range[] = {
+	{ 0x00000000, 0x00000001, CPU_MC, },
+	{ 0x00000006, 0x00000007, CPU_MONITOR, },
+	{ 0x00000010, 0x00000010, CPU_TIME, },
+	{ 0x00000011, 0x00000013, CPU_PMC, },
+	{ 0x00000017, 0x00000017, CPU_PLATFORM, },
+	{ 0x0000001B, 0x0000001B, CPU_APIC, },
+	{ 0x0000002A, 0x0000002B, CPU_POWERON, },
+	{ 0x0000002C, 0x0000002C, CPU_FREQ, },
+	{ 0x0000003A, 0x0000003A, CPU_CONTROL, },
+	{ 0x00000040, 0x00000047, CPU_LBRANCH, },
+	{ 0x00000060, 0x00000067, CPU_LBRANCH, },
+	{ 0x00000079, 0x00000079, CPU_BIOS, },
+	{ 0x00000088, 0x0000008A, CPU_CACHE, },
+	{ 0x0000008B, 0x0000008B, CPU_BIOS, },
+	{ 0x0000009B, 0x0000009B, CPU_MONITOR, },
+	{ 0x000000C1, 0x000000C4, CPU_PMC, },
+	{ 0x000000CD, 0x000000CD, CPU_FREQ, },
+	{ 0x000000E7, 0x000000E8, CPU_PERF, },
+	{ 0x000000FE, 0x000000FE, CPU_MTRR, },
+
+	{ 0x00000116, 0x0000011E, CPU_CACHE, },
+	{ 0x00000174, 0x00000176, CPU_SYSENTER, },
+	{ 0x00000179, 0x0000017B, CPU_MC, },
+	{ 0x00000186, 0x00000189, CPU_PMC, },
+	{ 0x00000198, 0x00000199, CPU_PERF, },
+	{ 0x0000019A, 0x0000019A, CPU_TIME, },
+	{ 0x0000019B, 0x0000019D, CPU_THERM, },
+	{ 0x000001A0, 0x000001A0, CPU_MISC, },
+	{ 0x000001C9, 0x000001C9, CPU_LBRANCH, },
+	{ 0x000001D7, 0x000001D8, CPU_LBRANCH, },
+	{ 0x000001D9, 0x000001D9, CPU_DEBUG, },
+	{ 0x000001DA, 0x000001E0, CPU_LBRANCH, },
 
-	{ 0x00000174, 0x00000176, CPU_SYSENTER, CPU_P6_CX_AT_XE },
-	{ 0x00000179, 0x0000017A, CPU_MC, CPU_PX_CX_AT_XE },
-	{ 0x0000017B, 0x0000017B, CPU_MC, CPU_P6_XE },
-	{ 0x00000186, 0x00000187, CPU_PMC, CPU_P6_CX_AT },
-	{ 0x00000198, 0x00000199, CPU_PERF, CPU_PM_CX_AT_XE },
-	{ 0x0000019A, 0x0000019A, CPU_TIME, CPU_PM_CX_AT_XE },
-	{ 0x0000019B, 0x0000019D, CPU_THERM, CPU_PM_CX_AT_XE },
-	{ 0x000001A0, 0x000001A0, CPU_MISC, CPU_PM_CX_AT_XE },
-
-	{ 0x000001C9, 0x000001C9, CPU_LBRANCH, CPU_PM_CX_AT },
-	{ 0x000001D7, 0x000001D8, CPU_LBRANCH, CPU_INTEL_XEON },
-	{ 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_CX_AT_XE },
-	{ 0x000001DA, 0x000001DA, CPU_LBRANCH, CPU_INTEL_XEON },
-	{ 0x000001DB, 0x000001DB, CPU_LBRANCH, CPU_P6_XE },
-	{ 0x000001DC, 0x000001DC, CPU_LBRANCH, CPU_INTEL_P6 },
-	{ 0x000001DD, 0x000001DE, CPU_LBRANCH, CPU_PX_CX_AT_XE },
-	{ 0x000001E0, 0x000001E0, CPU_LBRANCH, CPU_INTEL_P6 },
-
-	{ 0x00000200, 0x0000020F, CPU_MTRR, CPU_P6_CX_XE },
-	{ 0x00000250, 0x00000250, CPU_MTRR, CPU_P6_CX_XE },
-	{ 0x00000258, 0x00000259, CPU_MTRR, CPU_P6_CX_XE },
-	{ 0x00000268, 0x0000026F, CPU_MTRR, CPU_P6_CX_XE },
-	{ 0x00000277, 0x00000277, CPU_PAT, CPU_C2_AT_XE },
-	{ 0x000002FF, 0x000002FF, CPU_MTRR, CPU_P6_CX_XE },
+	{ 0x00000200, 0x0000020F, CPU_MTRR, },
+	{ 0x00000250, 0x00000250, CPU_MTRR, },
+	{ 0x00000258, 0x00000259, CPU_MTRR, },
+	{ 0x00000268, 0x0000026F, CPU_MTRR, },
+	{ 0x00000277, 0x00000277, CPU_PAT, },
+	{ 0x000002FF, 0x000002FF, CPU_MTRR, },
+
+	{ 0x00000300, 0x00000311, CPU_PMC, },
+	{ 0x00000345, 0x00000345, CPU_PMC, },
+	{ 0x00000360, 0x00000371, CPU_PMC, },
+	{ 0x0000038D, 0x00000390, CPU_PMC, },
+	{ 0x000003A0, 0x000003BE, CPU_PMC, },
+	{ 0x000003C0, 0x000003CD, CPU_PMC, },
+	{ 0x000003E0, 0x000003E1, CPU_PMC, },
+	{ 0x000003F0, 0x000003F2, CPU_PMC, },
+
+	{ 0x00000400, 0x00000417, CPU_MC, },
+	{ 0x00000480, 0x0000048B, CPU_VMX, },
+
+	{ 0x00000600, 0x00000600, CPU_DEBUG, },
+	{ 0x00000680, 0x0000068F, CPU_LBRANCH, },
+	{ 0x000006C0, 0x000006CF, CPU_LBRANCH, },
+
+	{ 0x000107CC, 0x000107D3, CPU_PMC, },
 
-	{ 0x00000300, 0x00000308, CPU_PMC, CPU_INTEL_XEON },
-	{ 0x00000309, 0x0000030B, CPU_PMC, CPU_C2_AT_XE },
-	{ 0x0000030C, 0x00000311, CPU_PMC, CPU_INTEL_XEON },
-	{ 0x00000345, 0x00000345, CPU_PMC, CPU_C2_AT },
-	{ 0x00000360, 0x00000371, CPU_PMC, CPU_INTEL_XEON },
-	{ 0x0000038D, 0x00000390, CPU_PMC, CPU_C2_AT },
-	{ 0x000003A0, 0x000003BE, CPU_PMC, CPU_INTEL_XEON },
-	{ 0x000003C0, 0x000003CD, CPU_PMC, CPU_INTEL_XEON },
-	{ 0x000003E0, 0x000003E1, CPU_PMC, CPU_INTEL_XEON },
-	{ 0x000003F0, 0x000003F0, CPU_PMC, CPU_INTEL_XEON },
-	{ 0x000003F1, 0x000003F1, CPU_PMC, CPU_C2_AT_XE },
-	{ 0x000003F2, 0x000003F2, CPU_PMC, CPU_INTEL_XEON },
-
-	{ 0x00000400, 0x00000402, CPU_MC, CPU_PM_CX_AT_XE },
-	{ 0x00000403, 0x00000403, CPU_MC, CPU_INTEL_XEON },
-	{ 0x00000404, 0x00000406, CPU_MC, CPU_PM_CX_AT_XE },
-	{ 0x00000407, 0x00000407, CPU_MC, CPU_INTEL_XEON },
-	{ 0x00000408, 0x0000040A, CPU_MC, CPU_PM_CX_AT_XE },
-	{ 0x0000040B, 0x0000040B, CPU_MC, CPU_INTEL_XEON },
-	{ 0x0000040C, 0x0000040E, CPU_MC, CPU_PM_CX_XE },
-	{ 0x0000040F, 0x0000040F, CPU_MC, CPU_INTEL_XEON },
-	{ 0x00000410, 0x00000412, CPU_MC, CPU_PM_CX_AT_XE },
-	{ 0x00000413, 0x00000417, CPU_MC, CPU_CX_AT_XE },
-	{ 0x00000480, 0x0000048B, CPU_VMX, CPU_CX_AT_XE },
-
-	{ 0x00000600, 0x00000600, CPU_DEBUG, CPU_PM_CX_AT_XE },
-	{ 0x00000680, 0x0000068F, CPU_LBRANCH, CPU_INTEL_XEON },
-	{ 0x000006C0, 0x000006CF, CPU_LBRANCH, CPU_INTEL_XEON },
-
-	{ 0x000107CC, 0x000107D3, CPU_PMC, CPU_INTEL_XEON_MP },
-
-	{ 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_INTEL_XEON },
-	{ 0xC0000081, 0xC0000082, CPU_CALL, CPU_INTEL_XEON },
-	{ 0xC0000084, 0xC0000084, CPU_CALL, CPU_INTEL_XEON },
-	{ 0xC0000100, 0xC0000102, CPU_BASE, CPU_INTEL_XEON },
+	{ 0xC0000080, 0xC0000080, CPU_FEATURES, },
+	{ 0xC0000081, 0xC0000084, CPU_CALL, },
+	{ 0xC0000100, 0xC0000102, CPU_BASE, },
+	{ 0xC0000103, 0xC0000103, CPU_TIME, },
+
+	{ 0xC0010000, 0xC0010007, CPU_PMC, },
+	{ 0xC0010010, 0xC0010010, CPU_CONF, },
+	{ 0xC0010015, 0xC0010015, CPU_CONF, },
+	{ 0xC0010016, 0xC001001A, CPU_MTRR, },
+	{ 0xC001001D, 0xC001001D, CPU_MTRR, },
+	{ 0xC001001F, 0xC001001F, CPU_CONF, },
+	{ 0xC0010030, 0xC0010035, CPU_BIOS, },
+	{ 0xC0010044, 0xC0010048, CPU_MC, },
+	{ 0xC0010050, 0xC0010056, CPU_SMM, },
+	{ 0xC0010058, 0xC0010058, CPU_CONF, },
+	{ 0xC0010060, 0xC0010060, CPU_CACHE, },
+	{ 0xC0010061, 0xC0010068, CPU_SMM, },
+	{ 0xC0010069, 0xC001006B, CPU_SMM, },
+	{ 0xC0010070, 0xC0010071, CPU_SMM, },
+	{ 0xC0010111, 0xC0010113, CPU_SMM, },
+	{ 0xC0010114, 0xC0010118, CPU_SVM, },
+	{ 0xC0010140, 0xC0010141, CPU_OSVM, },
+	{ 0xC0011022, 0xC0011023, CPU_CONF, },
 };
 
-/* AMD Registers Range */
-static struct cpu_debug_range cpu_amd_range[] = {
-	{ 0x00000000, 0x00000001, CPU_MC, CPU_K10_PLUS, },
-	{ 0x00000010, 0x00000010, CPU_TIME, CPU_K8_PLUS, },
-	{ 0x0000001B, 0x0000001B, CPU_APIC, CPU_K8_PLUS, },
-	{ 0x0000002A, 0x0000002A, CPU_POWERON, CPU_K7_PLUS },
-	{ 0x0000008B, 0x0000008B, CPU_VER, CPU_K8_PLUS },
-	{ 0x000000FE, 0x000000FE, CPU_MTRR, CPU_K8_PLUS, },
-
-	{ 0x00000174, 0x00000176, CPU_SYSENTER, CPU_K8_PLUS, },
-	{ 0x00000179, 0x0000017B, CPU_MC, CPU_K8_PLUS, },
-	{ 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_K8_PLUS, },
-	{ 0x000001DB, 0x000001DE, CPU_LBRANCH, CPU_K8_PLUS, },
-
-	{ 0x00000200, 0x0000020F, CPU_MTRR, CPU_K8_PLUS, },
-	{ 0x00000250, 0x00000250, CPU_MTRR, CPU_K8_PLUS, },
-	{ 0x00000258, 0x00000259, CPU_MTRR, CPU_K8_PLUS, },
-	{ 0x00000268, 0x0000026F, CPU_MTRR, CPU_K8_PLUS, },
-	{ 0x00000277, 0x00000277, CPU_PAT, CPU_K8_PLUS, },
-	{ 0x000002FF, 0x000002FF, CPU_MTRR, CPU_K8_PLUS, },
-
-	{ 0x00000400, 0x00000413, CPU_MC, CPU_K8_PLUS, },
-
-	{ 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_AMD_ALL, },
-	{ 0xC0000081, 0xC0000084, CPU_CALL, CPU_K8_PLUS, },
-	{ 0xC0000100, 0xC0000102, CPU_BASE, CPU_K8_PLUS, },
-	{ 0xC0000103, 0xC0000103, CPU_TIME, CPU_K10_PLUS, },
-
-	{ 0xC0010000, 0xC0010007, CPU_PMC, CPU_K8_PLUS, },
-	{ 0xC0010010, 0xC0010010, CPU_CONF, CPU_K7_PLUS, },
-	{ 0xC0010015, 0xC0010015, CPU_CONF, CPU_K7_PLUS, },
-	{ 0xC0010016, 0xC001001A, CPU_MTRR, CPU_K8_PLUS, },
-	{ 0xC001001D, 0xC001001D, CPU_MTRR, CPU_K8_PLUS, },
-	{ 0xC001001F, 0xC001001F, CPU_CONF, CPU_K8_PLUS, },
-	{ 0xC0010030, 0xC0010035, CPU_BIOS, CPU_K8_PLUS, },
-	{ 0xC0010044, 0xC0010048, CPU_MC, CPU_K8_PLUS, },
-	{ 0xC0010050, 0xC0010056, CPU_SMM, CPU_K0F_PLUS, },
-	{ 0xC0010058, 0xC0010058, CPU_CONF, CPU_K10_PLUS, },
-	{ 0xC0010060, 0xC0010060, CPU_CACHE, CPU_AMD_11, },
-	{ 0xC0010061, 0xC0010068, CPU_SMM, CPU_K10_PLUS, },
-	{ 0xC0010069, 0xC001006B, CPU_SMM, CPU_AMD_11, },
-	{ 0xC0010070, 0xC0010071, CPU_SMM, CPU_K10_PLUS, },
-	{ 0xC0010111, 0xC0010113, CPU_SMM, CPU_K8_PLUS, },
-	{ 0xC0010114, 0xC0010118, CPU_SVM, CPU_K10_PLUS, },
-	{ 0xC0010140, 0xC0010141, CPU_OSVM, CPU_K10_PLUS, },
-	{ 0xC0011022, 0xC0011023, CPU_CONF, CPU_K10_PLUS, },
-};
-
-
-/* Intel */
-static int get_intel_modelflag(unsigned model)
-{
-	int flag;
-
-	switch (model) {
-	case 0x0501:
-	case 0x0502:
-	case 0x0504:
-		flag = CPU_INTEL_PENTIUM;
-		break;
-	case 0x0601:
-	case 0x0603:
-	case 0x0605:
-	case 0x0607:
-	case 0x0608:
-	case 0x060A:
-	case 0x060B:
-		flag = CPU_INTEL_P6;
-		break;
-	case 0x0609:
-	case 0x060D:
-		flag = CPU_INTEL_PENTIUM_M;
-		break;
-	case 0x060E:
-		flag = CPU_INTEL_CORE;
-		break;
-	case 0x060F:
-	case 0x0617:
-		flag = CPU_INTEL_CORE2;
-		break;
-	case 0x061C:
-		flag = CPU_INTEL_ATOM;
-		break;
-	case 0x0F00:
-	case 0x0F01:
-	case 0x0F02:
-	case 0x0F03:
-	case 0x0F04:
-		flag = CPU_INTEL_XEON_P4;
-		break;
-	case 0x0F06:
-		flag = CPU_INTEL_XEON_MP;
-		break;
-	default:
-		flag = CPU_NONE;
-		break;
-	}
-
-	return flag;
-}
-
-/* AMD */
-static int get_amd_modelflag(unsigned model)
-{
-	int flag;
-
-	switch (model >> 8) {
-	case 0x6:
-		flag = CPU_AMD_K6;
-		break;
-	case 0x7:
-		flag = CPU_AMD_K7;
-		break;
-	case 0x8:
-		flag = CPU_AMD_K8;
-		break;
-	case 0xf:
-		flag = CPU_AMD_0F;
-		break;
-	case 0x10:
-		flag = CPU_AMD_10;
-		break;
-	case 0x11:
-		flag = CPU_AMD_11;
-		break;
-	default:
-		flag = CPU_NONE;
-		break;
-	}
-
-	return flag;
-}
-
-static int get_cpu_modelflag(unsigned cpu)
-{
-	int flag;
-
-	flag = per_cpu(cpu_model, cpu);
-
-	switch (flag >> 16) {
-	case X86_VENDOR_INTEL:
-		flag = get_intel_modelflag(flag);
-		break;
-	case X86_VENDOR_AMD:
-		flag = get_amd_modelflag(flag & 0xffff);
-		break;
-	default:
-		flag = CPU_NONE;
-		break;
-	}
-
-	return flag;
-}
-
-static int get_cpu_range_count(unsigned cpu)
-{
-	int index;
-
-	switch (per_cpu(cpu_model, cpu) >> 16) {
-	case X86_VENDOR_INTEL:
-		index = ARRAY_SIZE(cpu_intel_range);
-		break;
-	case X86_VENDOR_AMD:
-		index = ARRAY_SIZE(cpu_amd_range);
-		break;
-	default:
-		index = 0;
-		break;
-	}
-
-	return index;
-}
-
 static int is_typeflag_valid(unsigned cpu, unsigned flag)
 {
-	unsigned vendor, modelflag;
-	int i, index;
+	int i;
 
 	/* Standard Registers should be always valid */
 	if (flag >= CPU_TSS)
 		return 1;
 
-	modelflag = per_cpu(cpu_modelflag, cpu);
-	vendor = per_cpu(cpu_model, cpu) >> 16;
-	index = get_cpu_range_count(cpu);
-
-	for (i = 0; i < index; i++) {
-		switch (vendor) {
-		case X86_VENDOR_INTEL:
-			if ((cpu_intel_range[i].model & modelflag) &&
-			    (cpu_intel_range[i].flag & flag))
-				return 1;
-			break;
-		case X86_VENDOR_AMD:
-			if ((cpu_amd_range[i].model & modelflag) &&
-			    (cpu_amd_range[i].flag & flag))
-				return 1;
-			break;
-		}
+	for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
+		if (cpu_reg_range[i].flag == flag)
+			return 1;
 	}
 
 	/* Invalid */
@@ -385,26 +183,11 @@ static int is_typeflag_valid(unsigned cpu, unsigned flag)
 static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
 			      int index, unsigned flag)
 {
-	unsigned modelflag;
-
-	modelflag = per_cpu(cpu_modelflag, cpu);
-	*max = 0;
-	switch (per_cpu(cpu_model, cpu) >> 16) {
-	case X86_VENDOR_INTEL:
-		if ((cpu_intel_range[index].model & modelflag) &&
-		    (cpu_intel_range[index].flag & flag)) {
-			*min = cpu_intel_range[index].min;
-			*max = cpu_intel_range[index].max;
-		}
-		break;
-	case X86_VENDOR_AMD:
-		if ((cpu_amd_range[index].model & modelflag) &&
-		    (cpu_amd_range[index].flag & flag)) {
-			*min = cpu_amd_range[index].min;
-			*max = cpu_amd_range[index].max;
-		}
-		break;
-	}
+	if (cpu_reg_range[index].flag == flag) {
+		*min = cpu_reg_range[index].min;
+		*max = cpu_reg_range[index].max;
+	} else
+		*max = 0;
 
 	return *max;
 }
@@ -434,7 +217,7 @@ static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
 	unsigned msr, msr_min, msr_max;
 	struct cpu_private *priv;
 	u32 low, high;
-	int i, range;
+	int i;
 
 	if (seq) {
 		priv = seq->private;
@@ -446,9 +229,7 @@ static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
 		}
 	}
 
-	range = get_cpu_range_count(cpu);
-
-	for (i = 0; i < range; i++) {
+	for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
 		if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
 			continue;
 
@@ -800,13 +581,11 @@ static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
 {
 	struct dentry *cpu_dentry = NULL;
 	unsigned reg, reg_min, reg_max;
-	int i, range, err = 0;
+	int i, err = 0;
 	char reg_dir[12];
 	u32 low, high;
 
-	range = get_cpu_range_count(cpu);
-
-	for (i = 0; i < range; i++) {
+	for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
 		if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
 				   cpu_base[type].flag))
 			continue;
@@ -862,10 +641,6 @@ static int cpu_init_cpu(void)
 		cpui = &cpu_data(cpu);
 		if (!cpu_has(cpui, X86_FEATURE_MSR))
 			continue;
-		per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) |
-					   (cpui->x86 << 8) |
-					   (cpui->x86_model));
-		per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu);
 
 		sprintf(cpu_dir, "cpu%d", cpu);
 		cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
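With the per-vendor tables and model flags gone, every lookup in cpu_debug.c is a flat walk over cpu_reg_range[]. A condensed sketch of the pattern under that assumption; print_one_msr() is a stand-in name for the file's real per-register output helper, not an actual function in the tree:

/* Sketch: iterate the unified register table for one flag. */
static void dump_flag(struct seq_file *seq, unsigned int cpu, unsigned int flag)
{
	unsigned int min, max, msr;
	int i;

	for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
		if (!get_cpu_range(cpu, &min, &max, i, flag))
			continue;	/* entry belongs to a different flag */

		for (msr = min; msr <= max; msr++)
			print_one_msr(seq, cpu, msr);
	}
}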
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 483eda96e102..789efe217e1a 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -17,6 +17,7 @@
 
 #include <asm/processor.h>
 #include <asm/smp.h>
+#include <asm/k8.h>
 
 #define LVL_1_INST 1
 #define LVL_1_DATA 2
@@ -159,14 +160,6 @@ struct _cpuid4_info_regs {
 	unsigned long can_disable;
 };
 
-#if defined(CONFIG_PCI) && defined(CONFIG_SYSFS)
-static struct pci_device_id k8_nb_id[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
-	{}
-};
-#endif
-
 unsigned short num_cache_leaves;
 
 /* AMD doesn't have CPUID4. Emulate it here to report the same
@@ -207,10 +200,17 @@ union l3_cache {
 };
 
 static const unsigned short __cpuinitconst assocs[] = {
-	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
-	[8] = 16, [0xa] = 32, [0xb] = 48,
+	[1] = 1,
+	[2] = 2,
+	[4] = 4,
+	[6] = 8,
+	[8] = 16,
+	[0xa] = 32,
+	[0xb] = 48,
 	[0xc] = 64,
-	[0xf] = 0xffff // ??
+	[0xd] = 96,
+	[0xe] = 128,
+	[0xf] = 0xffff /* fully associative - no way to show this currently */
 };
 
 static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
@@ -271,7 +271,8 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 	eax->split.type = types[leaf];
 	eax->split.level = levels[leaf];
 	if (leaf == 3)
-		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
+		eax->split.num_threads_sharing =
+			current_cpu_data.x86_max_cores - 1;
 	else
 		eax->split.num_threads_sharing = 0;
 	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
@@ -291,6 +292,14 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
 	if (index < 3)
 		return;
+
+	if (boot_cpu_data.x86 == 0x11)
+		return;
+
+	/* see erratum #382 */
+	if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
+		return;
+
 	this_leaf->can_disable = 1;
 }
 
@@ -696,97 +705,75 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
 #define to_object(k) container_of(k, struct _index_kobject, kobj)
 #define to_attr(a) container_of(a, struct _cache_attr, attr)
 
-#ifdef CONFIG_PCI
-static struct pci_dev *get_k8_northbridge(int node)
-{
-	struct pci_dev *dev = NULL;
-	int i;
-
-	for (i = 0; i <= node; i++) {
-		do {
-			dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
-			if (!dev)
-				break;
-		} while (!pci_match_id(&k8_nb_id[0], dev));
-		if (!dev)
-			break;
-	}
-	return dev;
-}
-#else
-static struct pci_dev *get_k8_northbridge(int node)
-{
-	return NULL;
-}
-#endif
-
-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
+static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
+				  unsigned int index)
 {
-	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
-	int node = cpu_to_node(cpumask_first(mask));
-	struct pci_dev *dev = NULL;
-	ssize_t ret = 0;
-	int i;
+	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+	int node = cpu_to_node(cpu);
+	struct pci_dev *dev = node_to_k8_nb_misc(node);
+	unsigned int reg = 0;
 
 	if (!this_leaf->can_disable)
-		return sprintf(buf, "Feature not enabled\n");
-
-	dev = get_k8_northbridge(node);
-	if (!dev) {
-		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
 		return -EINVAL;
-	}
 
-	for (i = 0; i < 2; i++) {
-		unsigned int reg;
+	if (!dev)
+		return -EINVAL;
 
-		pci_read_config_dword(dev, 0x1BC + i * 4, &reg);
+	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+	return sprintf(buf, "%x\n", reg);
+}
 
-		ret += sprintf(buf, "%sEntry: %d\n", buf, i);
-		ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n",
-			buf,
-			reg & 0x80000000 ? "Disabled" : "Allowed",
-			reg & 0x40000000 ? "Disabled" : "Allowed");
-		ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
-			buf, (reg & 0x30000) >> 16, reg & 0xfff);
-	}
-	return ret;
+#define SHOW_CACHE_DISABLE(index) \
+static ssize_t \
+show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
+{ \
+	return show_cache_disable(this_leaf, buf, index); \
 }
+SHOW_CACHE_DISABLE(0)
+SHOW_CACHE_DISABLE(1)
 
-static ssize_t
-store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
-		    size_t count)
+static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
+	const char *buf, size_t count, unsigned int index)
 {
-	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
-	int node = cpu_to_node(cpumask_first(mask));
-	struct pci_dev *dev = NULL;
-	unsigned int ret, index, val;
+	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+	int node = cpu_to_node(cpu);
+	struct pci_dev *dev = node_to_k8_nb_misc(node);
+	unsigned long val = 0;
+	unsigned int scrubber = 0;
 
 	if (!this_leaf->can_disable)
-		return 0;
-
-	if (strlen(buf) > 15)
 		return -EINVAL;
 
-	ret = sscanf(buf, "%x %x", &index, &val);
-	if (ret != 2)
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (!dev)
 		return -EINVAL;
-	if (index > 1)
+
+	if (strict_strtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	val |= 0xc0000000;
-	dev = get_k8_northbridge(node);
-	if (!dev) {
-		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
-		return -EINVAL;
-	}
+
+	pci_read_config_dword(dev, 0x58, &scrubber);
+	scrubber &= ~0x1f000000;
+	pci_write_config_dword(dev, 0x58, scrubber);
 
 	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
 	wbinvd();
 	pci_write_config_dword(dev, 0x1BC + index * 4, val);
+	return count;
+}
 
-	return 1;
+#define STORE_CACHE_DISABLE(index) \
+static ssize_t \
+store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
+			    const char *buf, size_t count) \
+{ \
+	return store_cache_disable(this_leaf, buf, count, index); \
 }
+STORE_CACHE_DISABLE(0)
+STORE_CACHE_DISABLE(1)
 
 struct _cache_attr {
 	struct attribute attr;
@@ -808,7 +795,10 @@ define_one_ro(size);
 define_one_ro(shared_cpu_map);
 define_one_ro(shared_cpu_list);
 
-static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);
+static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
+		show_cache_disable_0, store_cache_disable_0);
+static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
+		show_cache_disable_1, store_cache_disable_1);
 
 static struct attribute * default_attrs[] = {
 	&type.attr,
@@ -820,7 +810,8 @@ static struct attribute * default_attrs[] = {
 	&size.attr,
 	&shared_cpu_map.attr,
 	&shared_cpu_list.attr,
-	&cache_disable.attr,
+	&cache_disable_0.attr,
+	&cache_disable_1.attr,
 	NULL
 };
 
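Since the per-index sysfs boilerplate is now macro-generated, it may help to see what one instantiation produces. Roughly, SHOW_CACHE_DISABLE(0) and STORE_CACHE_DISABLE(0) expand to:

/* Approximate preprocessor expansion of the index-0 pair above. */
static ssize_t show_cache_disable_0(struct _cpuid4_info *this_leaf, char *buf)
{
	return show_cache_disable(this_leaf, buf, 0);
}

static ssize_t store_cache_disable_0(struct _cpuid4_info *this_leaf,
				     const char *buf, size_t count)
{
	return store_cache_disable(this_leaf, buf, count, 0);
}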
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 7563b31b4f03..af71d06624bf 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -491,5 +491,42 @@ void force_hpet_resume(void)
 		break;
 	}
 }
+#endif
+
+#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
+/* Set correct numa_node information for AMD NB functions */
+static void __init quirk_amd_nb_node(struct pci_dev *dev)
+{
+	struct pci_dev *nb_ht;
+	unsigned int devfn;
+	u32 val;
+
+	devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
+	nb_ht = pci_get_slot(dev->bus, devfn);
+	if (!nb_ht)
+		return;
+
+	pci_read_config_dword(nb_ht, 0x60, &val);
+	set_dev_node(&dev->dev, val & 7);
+	pci_dev_put(dev);
+}
 
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
+			quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
+			quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
+			quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
+			quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
+			quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
+			quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
+			quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
+			quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
+			quirk_amd_nb_node);
 #endif
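Register 0x60 of the HT configuration function carries the node ID in its low bits, so once these fixups have run, dev_to_node() on any of the listed northbridge functions reports the right node and node-local allocations follow automatically. A hedged sketch of the kind of caller that benefits; alloc_nb_scratch() and nb_misc are illustrative names, not part of the patch:

/* Sketch only: with the quirk applied, this buffer lands on the
 * northbridge's own NUMA node rather than the node that probed it.
 */
static void *alloc_nb_scratch(struct pci_dev *nb_misc)
{
	return kmalloc_node(PAGE_SIZE, GFP_KERNEL, dev_to_node(&nb_misc->dev));
}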
