Diffstat:
-rw-r--r--  arch/x86/Kconfig                                                        |  4
-rw-r--r--  arch/x86/include/asm/amd_nb.h (renamed from arch/x86/include/asm/k8.h)  | 21
-rw-r--r--  arch/x86/include/asm/cpufeature.h                                       | 13
-rw-r--r--  arch/x86/include/asm/processor.h                                        |  2
-rw-r--r--  arch/x86/kernel/Makefile                                                |  2
-rw-r--r--  arch/x86/kernel/amd_nb.c (renamed from arch/x86/kernel/k8.c)            | 56
-rw-r--r--  arch/x86/kernel/aperture_64.c                                           |  2
-rw-r--r--  arch/x86/kernel/cpu/amd.c                                               | 75
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c                                   | 14
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cleanup.c                                      |  2
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c                                  |  9
-rw-r--r--  arch/x86/kernel/cpu/scattered.c                                         |  6
-rw-r--r--  arch/x86/kernel/pci-gart_64.c                                           | 29
-rw-r--r--  arch/x86/kernel/setup.c                                                 |  2
-rw-r--r--  arch/x86/kernel/smpboot.c                                               | 28
-rw-r--r--  arch/x86/kernel/tsc.c                                                   | 58
-rw-r--r--  arch/x86/kvm/x86.c                                                      |  7
-rw-r--r--  arch/x86/mm/k8topology_64.c                                             |  2
-rw-r--r--  arch/x86/mm/numa_64.c                                                   |  2
-rw-r--r--  drivers/char/agp/Kconfig                                                |  2
-rw-r--r--  drivers/char/agp/amd64-agp.c                                            | 35
-rw-r--r--  drivers/edac/Kconfig                                                    |  2
-rw-r--r--  drivers/edac/amd64_edac.c                                               |  4
-rw-r--r--  include/linux/pci_ids.h                                                 |  1

24 files changed, 208 insertions(+), 170 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 89b88e3a56e9..a1bd569f6c5a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -674,7 +674,7 @@ config GART_IOMMU
674 bool "GART IOMMU support" if EMBEDDED 674 bool "GART IOMMU support" if EMBEDDED
675 default y 675 default y
676 select SWIOTLB 676 select SWIOTLB
677 depends on X86_64 && PCI && K8_NB 677 depends on X86_64 && PCI && AMD_NB
678 ---help--- 678 ---help---
679 Support for full DMA access of devices with 32bit memory access only 679 Support for full DMA access of devices with 32bit memory access only
680 on systems with more than 3GB. This is usually needed for USB, 680 on systems with more than 3GB. This is usually needed for USB,
@@ -2091,7 +2091,7 @@ config OLPC_OPENFIRMWARE
 
 endif # X86_32
 
-config K8_NB
+config AMD_NB
 	def_bool y
 	depends on CPU_SUP_AMD && PCI
 
diff --git a/arch/x86/include/asm/k8.h b/arch/x86/include/asm/amd_nb.h
index af00bd1d2089..c8517f81b21e 100644
--- a/arch/x86/include/asm/k8.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_K8_H
-#define _ASM_X86_K8_H
+#ifndef _ASM_X86_AMD_NB_H
+#define _ASM_X86_AMD_NB_H
 
 #include <linux/pci.h>
 
@@ -7,24 +7,27 @@ extern struct pci_device_id k8_nb_ids[];
 struct bootnode;
 
 extern int early_is_k8_nb(u32 value);
-extern struct pci_dev **k8_northbridges;
-extern int num_k8_northbridges;
 extern int cache_k8_northbridges(void);
 extern void k8_flush_garts(void);
 extern int k8_get_nodes(struct bootnode *nodes);
 extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
 extern int k8_scan_nodes(void);
 
-#ifdef CONFIG_K8_NB
-extern int num_k8_northbridges;
+struct k8_northbridge_info {
+	u16 num;
+	u8 gart_supported;
+	struct pci_dev **nb_misc;
+};
+extern struct k8_northbridge_info k8_northbridges;
+
+#ifdef CONFIG_AMD_NB
 
 static inline struct pci_dev *node_to_k8_nb_misc(int node)
 {
-	return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
+	return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL;
 }
 
 #else
-#define num_k8_northbridges 0
 
 static inline struct pci_dev *node_to_k8_nb_misc(int node)
 {
@@ -33,4 +36,4 @@ static inline struct pci_dev *node_to_k8_nb_misc(int node)
 #endif
 
 
-#endif /* _ASM_X86_K8_H */
+#endif /* _ASM_X86_AMD_NB_H */
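
A minimal usage sketch (reviewer addition, not part of the patch): callers now go through the k8_northbridges struct instead of the removed k8_northbridges array and num_k8_northbridges counter. The helper name example_read_flush_word is hypothetical; register 0x9c is the GART flush-word register used elsewhere in this patch.

#include <asm/amd_nb.h>

/* Hypothetical caller of the new struct-based API: check GART support
 * once, then fetch the cached "misc" PCI function for a node. */
static int example_read_flush_word(int node, u32 *val)
{
	struct pci_dev *misc;

	if (!k8_northbridges.gart_supported)
		return -ENODEV;

	misc = node_to_k8_nb_misc(node);
	if (!misc)
		return -ENODEV;

	return pci_read_config_dword(misc, 0x9c, val);
}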
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 3f76523589af..220e2ea08e80 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -152,10 +152,14 @@
 #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
 #define X86_FEATURE_OSVW	(6*32+ 9) /* OS Visible Workaround */
 #define X86_FEATURE_IBS		(6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_SSE5	(6*32+11) /* SSE-5 */
+#define X86_FEATURE_XOP		(6*32+11) /* extended AVX instructions */
 #define X86_FEATURE_SKINIT	(6*32+12) /* SKINIT/STGI instructions */
 #define X86_FEATURE_WDT		(6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP		(6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4	(6*32+16) /* 4 operands MAC instructions */
 #define X86_FEATURE_NODEID_MSR	(6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM		(6*32+21) /* trailing bit manipulations */
+#define X86_FEATURE_TOPOEXT	(6*32+22) /* topology extensions CPUID leafs */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
@@ -180,6 +184,13 @@
 #define X86_FEATURE_LBRV	(8*32+ 6) /* AMD LBR Virtualization support */
 #define X86_FEATURE_SVML	(8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
 #define X86_FEATURE_NRIPS	(8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR	(8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN	(8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID	(8*32+11) /* AMD flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER	(8*32+13) /* AMD filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD	(8*32+14) /* AMD pause filter threshold */
+
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
 #define X86_FEATURE_FSGSBASE	(9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
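
Aside (reviewer addition, not part of the patch): word 6 maps to CPUID leaf 0x80000001 ECX, so once these bits are populated they are queried with the usual helpers. A one-line sketch, with an illustrative message string:

	if (boot_cpu_has(X86_FEATURE_TOPOEXT))
		printk(KERN_INFO "topology extensions CPUID leafs available\n");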
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 325b7bdbebaa..69e80c2ec6c2 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -110,6 +110,8 @@ struct cpuinfo_x86 {
 	u16			phys_proc_id;
 	/* Core id: */
 	u16			cpu_core_id;
+	/* Compute unit id */
+	u8			compute_unit_id;
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 #endif
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 7490bf8d1459..81f37703379c 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -90,7 +90,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 obj-$(CONFIG_HPET_TIMER)	+= hpet.o
 obj-$(CONFIG_APB_TIMER)		+= apb_timer.o
 
-obj-$(CONFIG_K8_NB)		+= k8.o
+obj-$(CONFIG_AMD_NB)		+= amd_nb.o
 obj-$(CONFIG_DEBUG_RODATA_TEST)	+= test_rodata.o
 obj-$(CONFIG_DEBUG_NX_TEST)	+= test_nx.o
 
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/amd_nb.c
index 0f7bc20cfcde..8f6463d8ed0d 100644
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -8,21 +8,19 @@
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
-#include <asm/k8.h>
-
-int num_k8_northbridges;
-EXPORT_SYMBOL(num_k8_northbridges);
+#include <asm/amd_nb.h>
 
 static u32 *flush_words;
 
 struct pci_device_id k8_nb_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
 	{}
 };
 EXPORT_SYMBOL(k8_nb_ids);
 
-struct pci_dev **k8_northbridges;
+struct k8_northbridge_info k8_northbridges;
 EXPORT_SYMBOL(k8_northbridges);
 
 static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
@@ -40,36 +38,45 @@ int cache_k8_northbridges(void)
 	int i;
 	struct pci_dev *dev;
 
-	if (num_k8_northbridges)
+	if (k8_northbridges.num)
 		return 0;
 
 	dev = NULL;
 	while ((dev = next_k8_northbridge(dev)) != NULL)
-		num_k8_northbridges++;
+		k8_northbridges.num++;
+
+	/* some CPU families (e.g. family 0x11) do not support GART */
+	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
+	    boot_cpu_data.x86 == 0x15)
+		k8_northbridges.gart_supported = 1;
 
-	k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *),
-				  GFP_KERNEL);
-	if (!k8_northbridges)
+	k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) *
+					  sizeof(void *), GFP_KERNEL);
+	if (!k8_northbridges.nb_misc)
 		return -ENOMEM;
 
-	if (!num_k8_northbridges) {
-		k8_northbridges[0] = NULL;
+	if (!k8_northbridges.num) {
+		k8_northbridges.nb_misc[0] = NULL;
 		return 0;
 	}
 
-	flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL);
-	if (!flush_words) {
-		kfree(k8_northbridges);
-		return -ENOMEM;
+	if (k8_northbridges.gart_supported) {
+		flush_words = kmalloc(k8_northbridges.num * sizeof(u32),
+				      GFP_KERNEL);
+		if (!flush_words) {
+			kfree(k8_northbridges.nb_misc);
+			return -ENOMEM;
+		}
 	}
 
 	dev = NULL;
 	i = 0;
 	while ((dev = next_k8_northbridge(dev)) != NULL) {
-		k8_northbridges[i] = dev;
-		pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
+		k8_northbridges.nb_misc[i] = dev;
+		if (k8_northbridges.gart_supported)
+			pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
 	}
-	k8_northbridges[i] = NULL;
+	k8_northbridges.nb_misc[i] = NULL;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(cache_k8_northbridges);
@@ -93,22 +100,25 @@ void k8_flush_garts(void)
 	unsigned long flags;
 	static DEFINE_SPINLOCK(gart_lock);
 
+	if (!k8_northbridges.gart_supported)
+		return;
+
 	/* Avoid races between AGP and IOMMU. In theory it's not needed
 	   but I'm not sure if the hardware won't lose flush requests
 	   when another is pending. This whole thing is so expensive anyways
 	   that it doesn't matter to serialize more. -AK */
 	spin_lock_irqsave(&gart_lock, flags);
 	flushed = 0;
-	for (i = 0; i < num_k8_northbridges; i++) {
-		pci_write_config_dword(k8_northbridges[i], 0x9c,
+	for (i = 0; i < k8_northbridges.num; i++) {
+		pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c,
 				       flush_words[i]|1);
 		flushed++;
 	}
-	for (i = 0; i < num_k8_northbridges; i++) {
+	for (i = 0; i < k8_northbridges.num; i++) {
 		u32 w;
 		/* Make sure the hardware actually executed the flush*/
 		for (;;) {
-			pci_read_config_dword(k8_northbridges[i],
+			pci_read_config_dword(k8_northbridges.nb_misc[i],
 					      0x9c, &w);
 			if (!(w & 1))
 				break;
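
Sketch (reviewer addition, not part of the patch) of the per-northbridge flush handshake that k8_flush_garts() performs above, in isolation. The helper name flush_one_nb is hypothetical; register 0x9c and the bit-0 write/poll protocol are taken from the code itself.

static void flush_one_nb(struct pci_dev *nb_misc, u32 flush_word)
{
	u32 w;

	/* Setting bit 0 requests a GART TLB flush on this northbridge. */
	pci_write_config_dword(nb_misc, 0x9c, flush_word | 1);

	/* The hardware clears bit 0 once the flush has been executed. */
	do {
		pci_read_config_dword(nb_misc, 0x9c, &w);
	} while (w & 1);
}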
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index c9cb17368448..377f5db3b8b4 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -27,7 +27,7 @@
 #include <asm/gart.h>
 #include <asm/pci-direct.h>
 #include <asm/dma.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 #include <asm/x86_init.h>
 
 int gart_iommu_aperture;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ba5f62f45f01..70168ab88b7f 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -253,37 +253,51 @@ static int __cpuinit nearby_node(int apicid)
 #endif
 
 /*
- * Fixup core topology information for AMD multi-node processors.
- * Assumption: Number of cores in each internal node is the same.
+ * Fixup core topology information for
+ * (1) AMD multi-node processors
+ *     Assumption: Number of cores in each internal node is the same.
+ * (2) AMD processors supporting compute units
  */
 #ifdef CONFIG_X86_HT
-static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
+static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
 {
-	unsigned long long value;
-	u32 nodes, cores_per_node;
+	u32 nodes;
+	u8 node_id;
 	int cpu = smp_processor_id();
 
-	if (!cpu_has(c, X86_FEATURE_NODEID_MSR))
-		return;
+	/* get information required for multi-node processors */
+	if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
+		u32 eax, ebx, ecx, edx;
 
-	/* fixup topology information only once for a core */
-	if (cpu_has(c, X86_FEATURE_AMD_DCM))
-		return;
+		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+		nodes = ((ecx >> 8) & 7) + 1;
+		node_id = ecx & 7;
 
-	rdmsrl(MSR_FAM10H_NODE_ID, value);
+		/* get compute unit information */
+		smp_num_siblings = ((ebx >> 8) & 3) + 1;
+		c->compute_unit_id = ebx & 0xff;
+	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+		u64 value;
 
-	nodes = ((value >> 3) & 7) + 1;
-	if (nodes == 1)
+		rdmsrl(MSR_FAM10H_NODE_ID, value);
+		nodes = ((value >> 3) & 7) + 1;
+		node_id = value & 7;
+	} else
 		return;
 
-	set_cpu_cap(c, X86_FEATURE_AMD_DCM);
-	cores_per_node = c->x86_max_cores / nodes;
+	/* fixup multi-node processor information */
+	if (nodes > 1) {
+		u32 cores_per_node;
+
+		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
+		cores_per_node = c->x86_max_cores / nodes;
 
-	/* store NodeID, use llc_shared_map to store sibling info */
-	per_cpu(cpu_llc_id, cpu) = value & 7;
+		/* store NodeID, use llc_shared_map to store sibling info */
+		per_cpu(cpu_llc_id, cpu) = node_id;
 
-	/* fixup core id to be in range from 0 to (cores_per_node - 1) */
-	c->cpu_core_id = c->cpu_core_id % cores_per_node;
+		/* core id to be in range from 0 to (cores_per_node - 1) */
+		c->cpu_core_id = c->cpu_core_id % cores_per_node;
+	}
 }
 #endif
 
@@ -304,9 +318,7 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
 	c->phys_proc_id = c->initial_apicid >> bits;
 	/* use socket ID also for last level cache */
 	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
-	/* fixup topology information on multi-node processors */
-	if ((c->x86 == 0x10) && (c->x86_model == 9))
-		amd_fixup_dcm(c);
+	amd_get_topology(c);
 #endif
 }
 
@@ -412,6 +424,23 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 	}
 #endif
+
+	/* We need to do the following only once */
+	if (c != &boot_cpu_data)
+		return;
+
+	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
+
+		if (c->x86 > 0x10 ||
+		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
+			u64 val;
+
+			rdmsrl(MSR_K7_HWCR, val);
+			if (!(val & BIT(24)))
+				printk(KERN_WARNING FW_BUG "TSC doesn't count "
+					"with P0 frequency!\n");
+		}
+	}
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
@@ -523,7 +552,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 #endif
 
 	if (c->extended_cpuid_level >= 0x80000006) {
-		if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
+		if (cpuid_edx(0x80000006) & 0xf000)
 			num_cache_leaves = 4;
 		else
 			num_cache_leaves = 3;
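
Standalone sketch (reviewer addition, not part of the patch) of the CPUID leaf 0x8000001e decoding that amd_get_topology() now performs; the field positions are taken from the hunk above, and the local variable names are illustrative.

	u32 eax, ebx, ecx, edx;
	u32 nodes, siblings;
	u8 node_id, compute_unit_id;

	cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

	/* ECX[10:8] = nodes per processor - 1, ECX[2:0] = this node's ID */
	nodes   = ((ecx >> 8) & 7) + 1;
	node_id = ecx & 7;

	/* EBX[9:8] + 1 becomes smp_num_siblings, EBX[7:0] = compute unit ID */
	siblings        = ((ebx >> 8) & 3) + 1;
	compute_unit_id = ebx & 0xff;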
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 898c2f4eab88..12cd823c8d03 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -17,7 +17,7 @@
 
 #include <asm/processor.h>
 #include <linux/smp.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 #include <asm/smp.h>
 
 #define LVL_1_INST	1
@@ -306,7 +306,7 @@ struct _cache_attr {
 	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
 };
 
-#ifdef CONFIG_CPU_SUP_AMD
+#ifdef CONFIG_AMD_NB
 
 /*
  * L3 cache descriptors
@@ -369,7 +369,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
 		return;
 
 	/* not in virtualized environments */
-	if (num_k8_northbridges == 0)
+	if (k8_northbridges.num == 0)
 		return;
 
 	/*
@@ -377,7 +377,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
 	 * never freed but this is done only on shutdown so it doesn't matter.
 	 */
 	if (!l3_caches) {
-		int size = num_k8_northbridges * sizeof(struct amd_l3_cache *);
+		int size = k8_northbridges.num * sizeof(struct amd_l3_cache *);
 
 		l3_caches = kzalloc(size, GFP_ATOMIC);
 		if (!l3_caches)
@@ -556,12 +556,12 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
 static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
 		show_cache_disable_1, store_cache_disable_1);
 
-#else	/* CONFIG_CPU_SUP_AMD */
+#else	/* CONFIG_AMD_NB */
 static void __cpuinit
 amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index)
 {
 };
-#endif /* CONFIG_CPU_SUP_AMD */
+#endif /* CONFIG_AMD_NB */
 
 static int
 __cpuinit cpuid4_cache_lookup_regs(int index,
@@ -1000,7 +1000,7 @@ static struct attribute *default_attrs[] = {
 
 static struct attribute *default_l3_attrs[] = {
 	DEFAULT_SYSFS_CACHE_ATTRS,
-#ifdef CONFIG_CPU_SUP_AMD
+#ifdef CONFIG_AMD_NB
 	&cache_disable_0.attr,
 	&cache_disable_1.attr,
 #endif
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index c5f59d071425..ac140c7be396 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -827,7 +827,7 @@ int __init amd_special_default_mtrr(void)
 
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
 		return 0;
-	if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+	if (boot_cpu_data.x86 < 0xf)
 		return 0;
 	/* In case some hypervisor doesn't pass SYSCFG through: */
 	if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index fb329e9f8494..d9f4ff8fcd69 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -700,11 +700,10 @@ static void probe_nmi_watchdog(void)
 {
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
-		if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
-		    boot_cpu_data.x86 != 16 && boot_cpu_data.x86 != 17)
-			return;
-		wd_ops = &k7_wd_ops;
-		break;
+		if (boot_cpu_data.x86 == 6 ||
+		    (boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x15))
+			wd_ops = &k7_wd_ops;
+		return;
 	case X86_VENDOR_INTEL:
 		/* Work around where perfctr1 doesn't have a working enable
 		 * bit as described in the following errata:
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index d49079515122..c7f64e6f537a 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -44,6 +44,12 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 		{ X86_FEATURE_LBRV,		CR_EDX, 1, 0x8000000a, 0 },
 		{ X86_FEATURE_SVML,		CR_EDX, 2, 0x8000000a, 0 },
 		{ X86_FEATURE_NRIPS,		CR_EDX, 3, 0x8000000a, 0 },
+		{ X86_FEATURE_TSCRATEMSR,	CR_EDX, 4, 0x8000000a, 0 },
+		{ X86_FEATURE_VMCBCLEAN,	CR_EDX, 5, 0x8000000a, 0 },
+		{ X86_FEATURE_FLUSHBYASID,	CR_EDX, 6, 0x8000000a, 0 },
+		{ X86_FEATURE_DECODEASSISTS,	CR_EDX, 7, 0x8000000a, 0 },
+		{ X86_FEATURE_PAUSEFILTER,	CR_EDX,10, 0x8000000a, 0 },
+		{ X86_FEATURE_PFTHRESHOLD,	CR_EDX,12, 0x8000000a, 0 },
 		{ 0, 0, 0, 0, 0 }
 	};
 
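
Sketch (reviewer addition, not part of the patch; simplified, with one entry hard-coded) of how init_scattered_cpuid_features() consumes a table entry like those added above: read the entry's CPUID leaf, test the bit in the named register, and set the capability if present.

static void __cpuinit check_pausefilter_bit(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* X86_FEATURE_PAUSEFILTER: leaf 0x8000000a, CR_EDX, bit 10 */
	cpuid_count(0x8000000a, 0, &eax, &ebx, &ecx, &edx);
	if (edx & (1 << 10))
		set_cpu_cap(c, X86_FEATURE_PAUSEFILTER);
}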
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 6015ee13e22b..c562207b1b3d 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -39,7 +39,7 @@
 #include <asm/cacheflush.h>
 #include <asm/swiotlb.h>
 #include <asm/dma.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 #include <asm/x86_init.h>
 
 static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
@@ -560,8 +560,11 @@ static void enable_gart_translations(void)
 {
 	int i;
 
-	for (i = 0; i < num_k8_northbridges; i++) {
-		struct pci_dev *dev = k8_northbridges[i];
+	if (!k8_northbridges.gart_supported)
+		return;
+
+	for (i = 0; i < k8_northbridges.num; i++) {
+		struct pci_dev *dev = k8_northbridges.nb_misc[i];
 
 		enable_gart_translation(dev, __pa(agp_gatt_table));
 	}
@@ -592,10 +595,13 @@ static void gart_fixup_northbridges(struct sys_device *dev)
 	if (!fix_up_north_bridges)
 		return;
 
+	if (!k8_northbridges.gart_supported)
+		return;
+
 	pr_info("PCI-DMA: Restoring GART aperture settings\n");
 
-	for (i = 0; i < num_k8_northbridges; i++) {
-		struct pci_dev *dev = k8_northbridges[i];
+	for (i = 0; i < k8_northbridges.num; i++) {
+		struct pci_dev *dev = k8_northbridges.nb_misc[i];
 
 		/*
 		 * Don't enable translations just yet. That is the next
@@ -649,8 +655,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
 	aper_size = aper_base = info->aper_size = 0;
 	dev = NULL;
-	for (i = 0; i < num_k8_northbridges; i++) {
-		dev = k8_northbridges[i];
+	for (i = 0; i < k8_northbridges.num; i++) {
+		dev = k8_northbridges.nb_misc[i];
 		new_aper_base = read_aperture(dev, &new_aper_size);
 		if (!new_aper_base)
 			goto nommu;
@@ -718,10 +724,13 @@ static void gart_iommu_shutdown(void)
 	if (!no_agp)
 		return;
 
-	for (i = 0; i < num_k8_northbridges; i++) {
+	if (!k8_northbridges.gart_supported)
+		return;
+
+	for (i = 0; i < k8_northbridges.num; i++) {
 		u32 ctl;
 
-		dev = k8_northbridges[i];
+		dev = k8_northbridges.nb_misc[i];
 		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
 
 		ctl &= ~GARTEN;
@@ -739,7 +748,7 @@ int __init gart_iommu_init(void)
 	unsigned long scratch;
 	long i;
 
-	if (num_k8_northbridges == 0)
+	if (!k8_northbridges.gart_supported)
 		return 0;
 
 #ifndef CONFIG_AGP_AMD64
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 00e167870f71..df770ad99b3e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -107,7 +107,7 @@
 #include <asm/percpu.h>
 #include <asm/topology.h>
 #include <asm/apicdef.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 #ifdef CONFIG_X86_64
 #include <asm/numa_64.h>
 #endif
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8b3bfc4dd708..bc2cc444844a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -397,6 +397,19 @@ void __cpuinit smp_store_cpu_info(int id)
 		identify_secondary_cpu(c);
 }
 
+static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
+{
+	struct cpuinfo_x86 *c1 = &cpu_data(cpu1);
+	struct cpuinfo_x86 *c2 = &cpu_data(cpu2);
+
+	cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
+	cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1));
+	cpumask_set_cpu(cpu1, cpu_core_mask(cpu2));
+	cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
+	cpumask_set_cpu(cpu1, c2->llc_shared_map);
+	cpumask_set_cpu(cpu2, c1->llc_shared_map);
+}
+
 
 void __cpuinit set_cpu_sibling_map(int cpu)
 {
@@ -409,14 +422,13 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		for_each_cpu(i, cpu_sibling_setup_mask) {
 			struct cpuinfo_x86 *o = &cpu_data(i);
 
-			if (c->phys_proc_id == o->phys_proc_id &&
-			    c->cpu_core_id == o->cpu_core_id) {
-				cpumask_set_cpu(i, cpu_sibling_mask(cpu));
-				cpumask_set_cpu(cpu, cpu_sibling_mask(i));
-				cpumask_set_cpu(i, cpu_core_mask(cpu));
-				cpumask_set_cpu(cpu, cpu_core_mask(i));
-				cpumask_set_cpu(i, c->llc_shared_map);
-				cpumask_set_cpu(cpu, o->llc_shared_map);
+			if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
+				if (c->phys_proc_id == o->phys_proc_id &&
+				    c->compute_unit_id == o->compute_unit_id)
+					link_thread_siblings(cpu, i);
+			} else if (c->phys_proc_id == o->phys_proc_id &&
+				   c->cpu_core_id == o->cpu_core_id) {
+				link_thread_siblings(cpu, i);
 			}
 		}
 	} else {
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index a1c2cd768538..0c40d8b72416 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -897,60 +897,6 @@ static void __init init_tsc_clocksource(void)
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
 }
 
-#ifdef CONFIG_X86_64
-/*
- * calibrate_cpu is used on systems with fixed rate TSCs to determine
- * processor frequency
- */
-#define TICK_COUNT 100000000
-static unsigned long __init calibrate_cpu(void)
-{
-	int tsc_start, tsc_now;
-	int i, no_ctr_free;
-	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
-	unsigned long flags;
-
-	for (i = 0; i < 4; i++)
-		if (avail_to_resrv_perfctr_nmi_bit(i))
-			break;
-	no_ctr_free = (i == 4);
-	if (no_ctr_free) {
-		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
-		     "cpu_khz value may be incorrect.\n");
-		i = 3;
-		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		rdmsrl(MSR_K7_PERFCTR3, pmc3);
-	} else {
-		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-	local_irq_save(flags);
-	/* start measuring cycles, incrementing from 0 */
-	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
-	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
-	rdtscl(tsc_start);
-	do {
-		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-		tsc_now = get_cycles();
-	} while ((tsc_now - tsc_start) < TICK_COUNT);
-
-	local_irq_restore(flags);
-	if (no_ctr_free) {
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		wrmsrl(MSR_K7_PERFCTR3, pmc3);
-		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
-	} else {
-		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-
-	return pmc_now * tsc_khz / (tsc_now - tsc_start);
-}
-#else
-static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
-#endif
-
 void __init tsc_init(void)
 {
 	u64 lpj;
@@ -969,10 +915,6 @@ void __init tsc_init(void)
 		return;
 	}
 
-	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
-			(boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
-		cpu_khz = calibrate_cpu();
-
 	printk("Detected %lu.%03lu MHz processor.\n",
 			(unsigned long)cpu_khz / 1000,
 			(unsigned long)cpu_khz % 1000);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3a09c625d526..6c2ecf0a806d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1991,13 +1991,14 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
 		0 /* Reserved, DCA */ | F(XMM4_1) |
 		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
-		0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
+		0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
+		F(F16C);
 	/* cpuid 0x80000001.ecx */
 	const u32 kvm_supported_word6_x86_features =
 		F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
 		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
-		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
-		0 /* SKINIT */ | 0 /* WDT */;
+		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
+		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
 
 	/* all calls to cpuid_count() should be made on the same cpu */
 	get_cpu();
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c
index 970ed579d4e4..ab75b181812f 100644
--- a/arch/x86/mm/k8topology_64.c
+++ b/arch/x86/mm/k8topology_64.c
@@ -22,7 +22,7 @@
 #include <asm/numa.h>
 #include <asm/mpspec.h>
 #include <asm/apic.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 
 static struct bootnode __initdata nodes[8];
 static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE;
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index a7bcc23ef96c..4962f1aeda6f 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -18,7 +18,7 @@
 #include <asm/dma.h>
 #include <asm/numa.h>
 #include <asm/acpi.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 4b66c69eaf57..5ddf67e76f8b 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -57,7 +57,7 @@ config AGP_AMD
 
 config AGP_AMD64
 	tristate "AMD Opteron/Athlon64 on-CPU GART support"
-	depends on AGP && X86 && K8_NB
+	depends on AGP && X86 && AMD_NB
 	help
 	  This option gives you AGP support for the GLX component of
 	  X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 564808a5c3c0..42396df55556 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -15,7 +15,7 @@
 #include <linux/mmzone.h>
 #include <asm/page.h>		/* PAGE_SIZE */
 #include <asm/e820.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 #include <asm/gart.h>
 #include "agp.h"
 
@@ -124,7 +124,7 @@ static int amd64_fetch_size(void)
 	u32 temp;
 	struct aper_size_info_32 *values;
 
-	dev = k8_northbridges[0];
+	dev = k8_northbridges.nb_misc[0];
 	if (dev==NULL)
 		return 0;
 
@@ -181,10 +181,14 @@ static int amd_8151_configure(void)
 	unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
 	int i;
 
+	if (!k8_northbridges.gart_supported)
+		return 0;
+
 	/* Configure AGP regs in each x86-64 host bridge. */
-	for (i = 0; i < num_k8_northbridges; i++) {
+	for (i = 0; i < k8_northbridges.num; i++) {
 		agp_bridge->gart_bus_addr =
-				amd64_configure(k8_northbridges[i], gatt_bus);
+				amd64_configure(k8_northbridges.nb_misc[i],
+						gatt_bus);
 	}
 	k8_flush_garts();
 	return 0;
@@ -195,8 +199,12 @@ static void amd64_cleanup(void)
 {
 	u32 tmp;
 	int i;
-	for (i = 0; i < num_k8_northbridges; i++) {
-		struct pci_dev *dev = k8_northbridges[i];
+
+	if (!k8_northbridges.gart_supported)
+		return;
+
+	for (i = 0; i < k8_northbridges.num; i++) {
+		struct pci_dev *dev = k8_northbridges.nb_misc[i];
 		/* disable gart translation */
 		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
 		tmp &= ~GARTEN;
@@ -319,16 +327,19 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
 	return 0;
 }
 
-static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
+static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
 {
 	int i;
 
 	if (cache_k8_northbridges() < 0)
 		return -ENODEV;
 
+	if (!k8_northbridges.gart_supported)
+		return -ENODEV;
+
 	i = 0;
-	for (i = 0; i < num_k8_northbridges; i++) {
-		struct pci_dev *dev = k8_northbridges[i];
+	for (i = 0; i < k8_northbridges.num; i++) {
+		struct pci_dev *dev = k8_northbridges.nb_misc[i];
 		if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
 			dev_err(&dev->dev, "no usable aperture found\n");
 #ifdef __x86_64__
@@ -405,7 +416,8 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
 	}
 
 	/* shadow x86-64 registers into ULi registers */
-	pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
+	pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+			       &httfea);
 
 	/* if x86-64 aperture base is beyond 4G, exit here */
 	if ((httfea & 0x7fff) >> (32 - 25)) {
@@ -472,7 +484,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
 	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
 
 	/* shadow x86-64 registers into NVIDIA registers */
-	pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &apbase);
+	pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+			       &apbase);
 
 	/* if x86-64 aperture base is beyond 4G, exit here */
 	if ( (apbase & 0x7fff) >> (32 - 25) ) {
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 70bb350de996..734e2e062374 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -66,7 +66,7 @@ config EDAC_MCE
 
 config EDAC_AMD64
 	tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
-	depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE
+	depends on EDAC_MM_EDAC && AMD_NB && X86_64 && PCI && EDAC_DECODE_MCE
 	help
 	  Support for error detection and correction on the AMD 64
 	  Families of Memory Controllers (K8, F10h and F11h)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index e7d5d6b5dcf6..09fcc5282327 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,5 +1,5 @@
1#include "amd64_edac.h" 1#include "amd64_edac.h"
2#include <asm/k8.h> 2#include <asm/amd_nb.h>
3 3
4static struct edac_pci_ctl_info *amd64_ctl_pci; 4static struct edac_pci_ctl_info *amd64_ctl_pci;
5 5
@@ -2927,7 +2927,7 @@ static int __init amd64_edac_init(void)
 	 * to finish initialization of the MC instances.
 	 */
 	err = -ENODEV;
-	for (nb = 0; nb < num_k8_northbridges; nb++) {
+	for (nb = 0; nb < k8_northbridges.num; nb++) {
 		if (!pvt_lookup[nb])
 			continue;
 
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 570fddeb0388..2615c37c8fe5 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -517,6 +517,7 @@
 #define PCI_DEVICE_ID_AMD_11H_NB_DRAM	0x1302
 #define PCI_DEVICE_ID_AMD_11H_NB_MISC	0x1303
 #define PCI_DEVICE_ID_AMD_11H_NB_LINK	0x1304
+#define PCI_DEVICE_ID_AMD_15H_NB_MISC	0x1603
 #define PCI_DEVICE_ID_AMD_LANCE		0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME	0x2001
 #define PCI_DEVICE_ID_AMD_SCSI		0x2020