 arch/x86/include/asm/amd_nb.h         |  16
 arch/x86/kernel/amd_nb.c              | 100
 arch/x86/kernel/cpu/amd.c             |  10
 arch/x86/kernel/cpu/intel_cacheinfo.c |  76
 arch/x86/kernel/smpboot.c             |   1
 arch/x86/pci/amd_bus.c                |   2
 include/linux/pci_ids.h               |   1
 7 files changed, 169 insertions(+), 37 deletions(-)
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 64dc82ee19f0..527fb966ab5c 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -9,15 +9,17 @@ struct amd_nb_bus_dev_range {
 	u8 dev_limit;
 };
 
-extern struct pci_device_id amd_nb_misc_ids[];
+extern const struct pci_device_id amd_nb_misc_ids[];
 extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
 struct bootnode;
 
-extern int early_is_amd_nb(u32 value);
+extern bool early_is_amd_nb(u32 value);
 extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
 extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
 extern int amd_scan_nodes(void);
+extern int amd_get_subcaches(int);
+extern int amd_set_subcaches(int, int);
 
 #ifdef CONFIG_NUMA_EMU
 extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
@@ -26,6 +28,7 @@ extern void amd_get_nodes(struct bootnode *nodes);
 
 struct amd_northbridge {
 	struct pci_dev *misc;
+	struct pci_dev *link;
 };
 
 struct amd_northbridge_info {
@@ -35,17 +38,18 @@ struct amd_northbridge_info {
 };
 extern struct amd_northbridge_info amd_northbridges;
 
-#define AMD_NB_GART			0x1
-#define AMD_NB_L3_INDEX_DISABLE		0x2
+#define AMD_NB_GART			BIT(0)
+#define AMD_NB_L3_INDEX_DISABLE		BIT(1)
+#define AMD_NB_L3_PARTITIONING		BIT(2)
 
 #ifdef CONFIG_AMD_NB
 
-static inline int amd_nb_num(void)
+static inline u16 amd_nb_num(void)
 {
 	return amd_northbridges.num;
 }
 
-static inline int amd_nb_has_feature(int feature)
+static inline bool amd_nb_has_feature(unsigned feature)
 {
 	return ((amd_northbridges.flags & feature) == feature);
 }
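The BIT() conversion in the hunk above is purely mechanical: in the kernel, BIT(n) expands to (1UL << n), so AMD_NB_GART and AMD_NB_L3_INDEX_DISABLE keep their old values 0x1 and 0x2, and the new partitioning flag slots in as 0x4. A standalone C sketch of the feature test with a hypothetical flag state, for illustration only:

#include <stdio.h>

/* Userspace re-creation of the header's feature-flag test; BIT() is
 * written out here since we are outside the kernel. */
#define BIT(n)				(1UL << (n))
#define AMD_NB_GART			BIT(0)
#define AMD_NB_L3_INDEX_DISABLE		BIT(1)
#define AMD_NB_L3_PARTITIONING		BIT(2)

static unsigned long flags;

static int amd_nb_has_feature(unsigned feature)
{
	return (flags & feature) == feature;
}

int main(void)
{
	/* hypothetical flag state resembling a family 0x15 part */
	flags = AMD_NB_GART | AMD_NB_L3_INDEX_DISABLE | AMD_NB_L3_PARTITIONING;

	printf("GART: %d, index disable: %d, L3 partitioning: %d\n",
	       amd_nb_has_feature(AMD_NB_GART),
	       amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE),
	       amd_nb_has_feature(AMD_NB_L3_PARTITIONING));
	return 0;
}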
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 0a99f7198bc3..65634190ffd6 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -12,7 +12,7 @@
 
 static u32 *flush_words;
 
-struct pci_device_id amd_nb_misc_ids[] = {
+const struct pci_device_id amd_nb_misc_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
@@ -20,6 +20,11 @@ struct pci_device_id amd_nb_misc_ids[] = {
 };
 EXPORT_SYMBOL(amd_nb_misc_ids);
 
+static struct pci_device_id amd_nb_link_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) },
+	{}
+};
+
 const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
 	{ 0x00, 0x18, 0x20 },
 	{ 0xff, 0x00, 0x20 },
@@ -31,7 +36,7 @@ struct amd_northbridge_info amd_northbridges;
 EXPORT_SYMBOL(amd_northbridges);
 
 static struct pci_dev *next_northbridge(struct pci_dev *dev,
-					struct pci_device_id *ids)
+					const struct pci_device_id *ids)
 {
 	do {
 		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
@@ -43,9 +48,9 @@ static struct pci_dev *next_northbridge(struct pci_dev *dev,
 
 int amd_cache_northbridges(void)
 {
-	int i = 0;
+	u16 i = 0;
 	struct amd_northbridge *nb;
-	struct pci_dev *misc;
+	struct pci_dev *misc, *link;
 
 	if (amd_nb_num())
 		return 0;
@@ -64,10 +69,12 @@ int amd_cache_northbridges(void)
 	amd_northbridges.nb = nb;
 	amd_northbridges.num = i;
 
-	misc = NULL;
+	link = misc = NULL;
 	for (i = 0; i != amd_nb_num(); i++) {
 		node_to_amd_nb(i)->misc = misc =
 			next_northbridge(misc, amd_nb_misc_ids);
+		node_to_amd_nb(i)->link = link =
+			next_northbridge(link, amd_nb_link_ids);
 	}
 
 	/* some CPU families (e.g. family 0x11) do not support GART */
@@ -85,26 +92,95 @@ int amd_cache_northbridges(void)
 	     boot_cpu_data.x86_mask >= 0x1))
 		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
 
+	if (boot_cpu_data.x86 == 0x15)
+		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
+
+	/* L3 cache partitioning is supported on family 0x15 */
+	if (boot_cpu_data.x86 == 0x15)
+		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(amd_cache_northbridges);
 
-/* Ignores subdevice/subvendor but as far as I can figure out
-   they're useless anyways */
-int __init early_is_amd_nb(u32 device)
+/*
+ * Ignores subdevice/subvendor but as far as I can figure out
+ * they're useless anyways
+ */
+bool __init early_is_amd_nb(u32 device)
 {
-	struct pci_device_id *id;
+	const struct pci_device_id *id;
 	u32 vendor = device & 0xffff;
+
 	device >>= 16;
 	for (id = amd_nb_misc_ids; id->vendor; id++)
 		if (vendor == id->vendor && device == id->device)
-			return 1;
+			return true;
+	return false;
+}
+
+int amd_get_subcaches(int cpu)
+{
+	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
+	unsigned int mask;
+	int cuid = 0;
+
+	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+		return 0;
+
+	pci_read_config_dword(link, 0x1d4, &mask);
+
+#ifdef CONFIG_SMP
+	cuid = cpu_data(cpu).compute_unit_id;
+#endif
+	return (mask >> (4 * cuid)) & 0xf;
+}
+
+int amd_set_subcaches(int cpu, int mask)
+{
+	static unsigned int reset, ban;
+	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
+	unsigned int reg;
+	int cuid = 0;
+
+	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
+		return -EINVAL;
+
+	/* if necessary, collect reset state of L3 partitioning and BAN mode */
+	if (reset == 0) {
+		pci_read_config_dword(nb->link, 0x1d4, &reset);
+		pci_read_config_dword(nb->misc, 0x1b8, &ban);
+		ban &= 0x180000;
+	}
+
+	/* deactivate BAN mode if any subcaches are to be disabled */
+	if (mask != 0xf) {
+		pci_read_config_dword(nb->misc, 0x1b8, &reg);
+		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
+	}
+
+#ifdef CONFIG_SMP
+	cuid = cpu_data(cpu).compute_unit_id;
+#endif
+	mask <<= 4 * cuid;
+	mask |= (0xf ^ (1 << cuid)) << 26;
+
+	pci_write_config_dword(nb->link, 0x1d4, mask);
+
+	/* reset BAN mode if L3 partitioning returned to reset state */
+	pci_read_config_dword(nb->link, 0x1d4, &reg);
+	if (reg == reset) {
+		pci_read_config_dword(nb->misc, 0x1b8, &reg);
+		reg &= ~0x180000;
+		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
+	}
+
 	return 0;
 }
 
-int amd_cache_gart(void)
+static int amd_cache_gart(void)
 {
-	int i;
+	u16 i;
 
 	if (!amd_nb_has_feature(AMD_NB_GART))
 		return 0;
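For reference: amd_get_subcaches()/amd_set_subcaches() above treat register 0x1d4 of the link device as an array of 4-bit subcache-enable masks, one nibble per compute unit. The following standalone sketch re-creates just the decode step on a made-up register value; the PCI config access itself is omitted:

#include <stdio.h>

/* Decode the per-compute-unit subcache mask from a raw 0x1d4 dword,
 * mirroring the shift/mask logic in amd_get_subcaches(). */
static unsigned int decode_subcaches(unsigned int reg, int cuid)
{
	return (reg >> (4 * cuid)) & 0xf;
}

int main(void)
{
	unsigned int reg = 0x0000a5f3;	/* hypothetical register value */
	int cuid;

	/* prints masks 0x3, 0xf, 0x5, 0xa for compute units 0..3 */
	for (cuid = 0; cuid < 4; cuid++)
		printf("compute unit %d: subcache mask %x\n",
		       cuid, decode_subcaches(reg, cuid));
	return 0;
}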
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 48eaa1b6fc46..58f1b012e1c8 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -261,7 +261,7 @@ static int __cpuinit nearby_node(int apicid)
 #ifdef CONFIG_X86_HT
 static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
 {
-	u32 nodes;
+	u32 nodes, cores_per_cu = 1;
 	u8 node_id;
 	int cpu = smp_processor_id();
 
@@ -276,6 +276,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
 		/* get compute unit information */
 		smp_num_siblings = ((ebx >> 8) & 3) + 1;
 		c->compute_unit_id = ebx & 0xff;
+		cores_per_cu += ((ebx >> 8) & 3);
 	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
 		u64 value;
 
@@ -288,15 +289,18 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
 	/* fixup multi-node processor information */
 	if (nodes > 1) {
 		u32 cores_per_node;
+		u32 cus_per_node;
 
 		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
 		cores_per_node = c->x86_max_cores / nodes;
+		cus_per_node = cores_per_node / cores_per_cu;
 
 		/* store NodeID, use llc_shared_map to store sibling info */
 		per_cpu(cpu_llc_id, cpu) = node_id;
 
-		/* core id to be in range from 0 to (cores_per_node - 1) */
-		c->cpu_core_id = c->cpu_core_id % cores_per_node;
+		/* core id has to be in the [0 .. cores_per_node - 1] range */
+		c->cpu_core_id %= cores_per_node;
+		c->compute_unit_id %= cus_per_node;
 	}
 }
 #endif
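To make the new topology arithmetic concrete, consider a hypothetical two-node, 16-core part with two cores per compute unit (values chosen for illustration, not taken from the patch): cores_per_node comes out to 8 and cus_per_node to 4, so a node-global compute_unit_id of 5 folds down to 1. A standalone sketch of the same fixup:

#include <stdio.h>

int main(void)
{
	/* hypothetical two-node, 16-core part, 2 cores per compute unit */
	unsigned int nodes = 2, x86_max_cores = 16, cores_per_cu = 2;
	unsigned int cores_per_node = x86_max_cores / nodes;	  /* 8 */
	unsigned int cus_per_node = cores_per_node / cores_per_cu; /* 4 */
	unsigned int cpu_core_id = 13, compute_unit_id = 5;

	/* the same modulo fixups as amd_get_topology() */
	cpu_core_id %= cores_per_node;		/* 13 -> 5 */
	compute_unit_id %= cus_per_node;	/*  5 -> 1 */

	printf("core id %u, compute unit id %u\n",
	       cpu_core_id, compute_unit_id);
	return 0;
}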
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index ec2c19a7b8ef..90cc675ac746 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -304,8 +304,9 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 
 struct _cache_attr {
 	struct attribute attr;
-	ssize_t (*show)(struct _cpuid4_info *, char *);
-	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
+	ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
+	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
+			 unsigned int);
 };
 
 #ifdef CONFIG_AMD_NB
@@ -400,7 +401,8 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
 
 #define SHOW_CACHE_DISABLE(slot)					\
 static ssize_t								\
-show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf)	\
+show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,	\
+			  unsigned int cpu)				\
 {									\
 	return show_cache_disable(this_leaf, buf, slot);		\
 }
@@ -512,7 +514,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 #define STORE_CACHE_DISABLE(slot)					\
 static ssize_t								\
 store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
-			   const char *buf, size_t count)		\
+			   const char *buf, size_t count,		\
+			   unsigned int cpu)				\
 {									\
 	return store_cache_disable(this_leaf, buf, count, slot);	\
 }
@@ -524,6 +527,39 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
 static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
 		show_cache_disable_1, store_cache_disable_1);
 
+static ssize_t
+show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
+{
+	if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+		return -EINVAL;
+
+	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
+}
+
+static ssize_t
+store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
+		unsigned int cpu)
+{
+	unsigned long val;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+		return -EINVAL;
+
+	if (strict_strtoul(buf, 16, &val) < 0)
+		return -EINVAL;
+
+	if (amd_set_subcaches(cpu, val))
+		return -EINVAL;
+
+	return count;
+}
+
+static struct _cache_attr subcaches =
+	__ATTR(subcaches, 0644, show_subcaches, store_subcaches);
+
 #else	/* CONFIG_AMD_NB */
 #define amd_init_l3_cache(x, y)
 #endif /* CONFIG_AMD_NB */
@@ -532,9 +568,9 @@ static int
 __cpuinit cpuid4_cache_lookup_regs(int index,
 				   struct _cpuid4_info_regs *this_leaf)
 {
-	union _cpuid4_leaf_eax 	eax;
-	union _cpuid4_leaf_ebx 	ebx;
-	union _cpuid4_leaf_ecx 	ecx;
+	union _cpuid4_leaf_eax	eax;
+	union _cpuid4_leaf_ebx	ebx;
+	union _cpuid4_leaf_ecx	ecx;
 	unsigned		edx;
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
@@ -870,8 +906,8 @@ static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
 #define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val)				\
-static ssize_t show_##file_name						\
-			(struct _cpuid4_info *this_leaf, char *buf)	\
+static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
+				unsigned int cpu)			\
 {									\
 	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
 }
@@ -882,7 +918,8 @@ show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
 show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
 show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
 
-static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
+static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
+			 unsigned int cpu)
 {
 	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
 }
@@ -906,17 +943,20 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 	return n;
 }
 
-static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
+static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
+					  unsigned int cpu)
 {
 	return show_shared_cpu_map_func(leaf, 0, buf);
 }
 
-static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
+static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
+					   unsigned int cpu)
 {
 	return show_shared_cpu_map_func(leaf, 1, buf);
 }
 
-static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
+static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
+			 unsigned int cpu)
 {
 	switch (this_leaf->eax.split.type) {
 	case CACHE_TYPE_DATA:
@@ -974,6 +1014,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
 	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
 		n += 2;
 
+	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+		n += 1;
+
 	attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
 	if (attrs == NULL)
 		return attrs = default_attrs;
@@ -986,6 +1029,9 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
 		attrs[n++] = &cache_disable_1.attr;
 	}
 
+	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+		attrs[n++] = &subcaches.attr;
+
 	return attrs;
 }
 #endif
@@ -998,7 +1044,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 
 	ret = fattr->show ?
 		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
-			buf) :
+			buf, this_leaf->cpu) :
 		0;
 	return ret;
 }
@@ -1012,7 +1058,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 
 	ret = fattr->store ?
 		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
-			buf, count) :
+			buf, count, this_leaf->cpu) :
 		0;
 	return ret;
 }
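Since the new attribute is registered alongside the existing cache sysfs files, it should surface as a per-cache-index file; the exact path below is an assumption based on where these attributes normally land (the L3 leaf is usually index3 under /sys/devices/system/cpu/cpuN/cache/). A small user-space sketch that reads and then rewrites cpu0's subcache mask:

#include <stdio.h>

int main(void)
{
	/* hypothetical path; index3 is wherever the L3 leaf lands */
	const char *path =
		"/sys/devices/system/cpu/cpu0/cache/index3/subcaches";
	unsigned int mask;
	FILE *f = fopen(path, "r+");

	if (!f) {
		/* needs family 0x15 hardware; writing needs CAP_SYS_ADMIN */
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%x", &mask) == 1)
		printf("current subcache mask: %x\n", mask);

	rewind(f);
	fprintf(f, "%x\n", 0xf);	/* re-enable all four subcaches */
	fclose(f);
	return 0;
}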
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 08776a953487..1bfb1c615a62 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -414,6 +414,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 
 		if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
 			if (c->phys_proc_id == o->phys_proc_id &&
+			    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i) &&
 			    c->compute_unit_id == o->compute_unit_id)
 				link_thread_siblings(cpu, i);
 		} else if (c->phys_proc_id == o->phys_proc_id &&
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index e27dffbbb1a7..026e4931d162 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -350,7 +350,7 @@ static int __init early_fill_mp_bus_info(void)
 
 #define ENABLE_CF8_EXT_CFG	(1ULL << 46)
 
-static void enable_pci_io_ecs(void *unused)
+static void __cpuinit enable_pci_io_ecs(void *unused)
 {
 	u64 reg;
 	rdmsrl(MSR_AMD64_NB_CFG, reg);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3adb06ebf841..580de67f318b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -518,6 +518,7 @@
 #define PCI_DEVICE_ID_AMD_11H_NB_MISC	0x1303
 #define PCI_DEVICE_ID_AMD_11H_NB_LINK	0x1304
 #define PCI_DEVICE_ID_AMD_15H_NB_MISC	0x1603
+#define PCI_DEVICE_ID_AMD_15H_NB_LINK	0x1604
 #define PCI_DEVICE_ID_AMD_CNB17H_F3	0x1703
 #define PCI_DEVICE_ID_AMD_LANCE		0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME	0x2001