 arch/x86/Kconfig                                                    | 12
 arch/x86/include/asm/amd_nb.h                                       | 24
 arch/x86/kernel/amd_nb.c                                            | 72
 arch/x86/kernel/aperture_64.c                                       | 10
 arch/x86/kernel/cpu/intel_cacheinfo.c                               |  6
 arch/x86/kernel/pci-gart_64.c                                       | 34
 arch/x86/kernel/setup.c                                             |  8
 arch/x86/mm/Makefile                                                |  2
 arch/x86/mm/amdtopology_64.c (renamed from arch/x86/mm/k8topology_64.c) | 12
 arch/x86/mm/numa_64.c                                               | 22
 drivers/char/agp/amd64-agp.c                                        | 32
 drivers/edac/amd64_edac.c                                           |  4
 12 files changed, 119 insertions(+), 119 deletions(-)
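
Everything below is a mechanical rename of the old k8_* northbridge interfaces to vendor-neutral amd_* names; no functional change is intended. As a point of reference, here is a minimal sketch of how a consumer uses the renamed API (the init function is hypothetical; only the amd_* symbols come from this patch):

	#include <linux/pci.h>
	#include <asm/amd_nb.h>

	/* Sketch: enumerate the cached AMD northbridges after PCI init. */
	static int __init nb_consumer_init(void)
	{
		int i;

		if (cache_amd_northbridges() < 0)
			return -ENODEV;		/* no AMD northbridge present */

		for (i = 0; i < amd_northbridges.num; i++) {
			struct pci_dev *misc = amd_northbridges.nb_misc[i];

			pr_info("NB %d: %04x:%04x\n", i, misc->vendor, misc->device);
		}
		return 0;
	}
	/* must run after the PCI subsystem, like init_amd_nbs() below */
	fs_initcall(nb_consumer_init);
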
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e8327686d3c5..08993a38b119 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1141,16 +1141,16 @@ config NUMA
 comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
 	depends on X86_32 && X86_SUMMIT && (!HIGHMEM64G || !ACPI)
 
-config K8_NUMA
+config AMD_NUMA
 	def_bool y
 	prompt "Old style AMD Opteron NUMA detection"
 	depends on X86_64 && NUMA && PCI
 	---help---
-	  Enable K8 NUMA node topology detection. You should say Y here if
-	  you have a multi processor AMD K8 system. This uses an old
-	  method to read the NUMA configuration directly from the builtin
-	  Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
-	  instead, which also takes priority if both are compiled in.
+	  Enable AMD NUMA node topology detection. You should say Y here if
+	  you have a multi processor AMD system. This uses an old method to
+	  read the NUMA configuration directly from the builtin Northbridge
+	  of Opteron. It is recommended to use X86_64_ACPI_NUMA instead,
+	  which also takes priority if both are compiled in.
 
 config X86_64_ACPI_NUMA
 	def_bool y
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index c8517f81b21e..35b17a821e34 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -3,33 +3,33 @@
 
 #include <linux/pci.h>
 
-extern struct pci_device_id k8_nb_ids[];
+extern struct pci_device_id amd_nb_ids[];
 struct bootnode;
 
-extern int early_is_k8_nb(u32 value);
-extern int cache_k8_northbridges(void);
-extern void k8_flush_garts(void);
-extern int k8_get_nodes(struct bootnode *nodes);
-extern int k8_numa_init(unsigned long start_pfn, unsigned long end_pfn);
-extern int k8_scan_nodes(void);
+extern int early_is_amd_nb(u32 value);
+extern int cache_amd_northbridges(void);
+extern void amd_flush_garts(void);
+extern int amd_get_nodes(struct bootnode *nodes);
+extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
+extern int amd_scan_nodes(void);
 
-struct k8_northbridge_info {
+struct amd_northbridge_info {
 	u16 num;
 	u8 gart_supported;
 	struct pci_dev **nb_misc;
 };
-extern struct k8_northbridge_info k8_northbridges;
+extern struct amd_northbridge_info amd_northbridges;
 
 #ifdef CONFIG_AMD_NB
 
-static inline struct pci_dev *node_to_k8_nb_misc(int node)
+static inline struct pci_dev *node_to_amd_nb_misc(int node)
 {
-	return (node < k8_northbridges.num) ? k8_northbridges.nb_misc[node] : NULL;
+	return (node < amd_northbridges.num) ? amd_northbridges.nb_misc[node] : NULL;
 }
 
 #else
 
-static inline struct pci_dev *node_to_k8_nb_misc(int node)
+static inline struct pci_dev *node_to_amd_nb_misc(int node)
 {
 	return NULL;
 }
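
node_to_amd_nb_misc() keeps the bounds check of its k8_* predecessor: it returns NULL for out-of-range nodes, and unconditionally when CONFIG_AMD_NB is off. A hedged sketch of a caller, modeled on the L3-cache code later in this patch (the helper itself is hypothetical):

	/* Sketch: read a config dword from a node's NB misc function. */
	static u32 nb_misc_read(int node, int reg)
	{
		struct pci_dev *misc = node_to_amd_nb_misc(node);
		u32 val = 0;

		if (misc)	/* NULL if node >= amd_northbridges.num */
			pci_read_config_dword(misc, reg, &val);
		return val;
	}
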
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 8f6463d8ed0d..c46df406a2a9 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -12,95 +12,95 @@
 
 static u32 *flush_words;
 
-struct pci_device_id k8_nb_ids[] = {
+struct pci_device_id amd_nb_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
 	{}
 };
-EXPORT_SYMBOL(k8_nb_ids);
+EXPORT_SYMBOL(amd_nb_ids);
 
-struct k8_northbridge_info k8_northbridges;
-EXPORT_SYMBOL(k8_northbridges);
+struct amd_northbridge_info amd_northbridges;
+EXPORT_SYMBOL(amd_northbridges);
 
-static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
+static struct pci_dev *next_amd_northbridge(struct pci_dev *dev)
 {
 	do {
 		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
 		if (!dev)
 			break;
-	} while (!pci_match_id(&k8_nb_ids[0], dev));
+	} while (!pci_match_id(&amd_nb_ids[0], dev));
 	return dev;
 }
 
-int cache_k8_northbridges(void)
+int cache_amd_northbridges(void)
 {
 	int i;
 	struct pci_dev *dev;
 
-	if (k8_northbridges.num)
+	if (amd_northbridges.num)
 		return 0;
 
 	dev = NULL;
-	while ((dev = next_k8_northbridge(dev)) != NULL)
-		k8_northbridges.num++;
+	while ((dev = next_amd_northbridge(dev)) != NULL)
+		amd_northbridges.num++;
 
 	/* some CPU families (e.g. family 0x11) do not support GART */
 	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
 	    boot_cpu_data.x86 == 0x15)
-		k8_northbridges.gart_supported = 1;
+		amd_northbridges.gart_supported = 1;
 
-	k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) *
+	amd_northbridges.nb_misc = kmalloc((amd_northbridges.num + 1) *
 					  sizeof(void *), GFP_KERNEL);
-	if (!k8_northbridges.nb_misc)
+	if (!amd_northbridges.nb_misc)
 		return -ENOMEM;
 
-	if (!k8_northbridges.num) {
-		k8_northbridges.nb_misc[0] = NULL;
+	if (!amd_northbridges.num) {
+		amd_northbridges.nb_misc[0] = NULL;
 		return 0;
 	}
 
-	if (k8_northbridges.gart_supported) {
-		flush_words = kmalloc(k8_northbridges.num * sizeof(u32),
+	if (amd_northbridges.gart_supported) {
+		flush_words = kmalloc(amd_northbridges.num * sizeof(u32),
 				      GFP_KERNEL);
 		if (!flush_words) {
-			kfree(k8_northbridges.nb_misc);
+			kfree(amd_northbridges.nb_misc);
 			return -ENOMEM;
 		}
 	}
 
 	dev = NULL;
 	i = 0;
-	while ((dev = next_k8_northbridge(dev)) != NULL) {
-		k8_northbridges.nb_misc[i] = dev;
-		if (k8_northbridges.gart_supported)
+	while ((dev = next_amd_northbridge(dev)) != NULL) {
+		amd_northbridges.nb_misc[i] = dev;
+		if (amd_northbridges.gart_supported)
 			pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
 	}
-	k8_northbridges.nb_misc[i] = NULL;
+	amd_northbridges.nb_misc[i] = NULL;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cache_k8_northbridges);
+EXPORT_SYMBOL_GPL(cache_amd_northbridges);
 
 /* Ignores subdevice/subvendor but as far as I can figure out
    they're useless anyways */
-int __init early_is_k8_nb(u32 device)
+int __init early_is_amd_nb(u32 device)
 {
 	struct pci_device_id *id;
 	u32 vendor = device & 0xffff;
 	device >>= 16;
-	for (id = k8_nb_ids; id->vendor; id++)
+	for (id = amd_nb_ids; id->vendor; id++)
 		if (vendor == id->vendor && device == id->device)
 			return 1;
 	return 0;
 }
 
-void k8_flush_garts(void)
+void amd_flush_garts(void)
 {
 	int flushed, i;
 	unsigned long flags;
 	static DEFINE_SPINLOCK(gart_lock);
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
 	/* Avoid races between AGP and IOMMU. In theory it's not needed
@@ -109,16 +109,16 @@ void k8_flush_garts(void)
 	   that it doesn't matter to serialize more. -AK */
 	spin_lock_irqsave(&gart_lock, flags);
 	flushed = 0;
-	for (i = 0; i < k8_northbridges.num; i++) {
-		pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c,
+	for (i = 0; i < amd_northbridges.num; i++) {
+		pci_write_config_dword(amd_northbridges.nb_misc[i], 0x9c,
 				       flush_words[i]|1);
 		flushed++;
 	}
-	for (i = 0; i < k8_northbridges.num; i++) {
+	for (i = 0; i < amd_northbridges.num; i++) {
 		u32 w;
 		/* Make sure the hardware actually executed the flush*/
 		for (;;) {
-			pci_read_config_dword(k8_northbridges.nb_misc[i],
+			pci_read_config_dword(amd_northbridges.nb_misc[i],
 					      0x9c, &w);
 			if (!(w & 1))
 				break;
@@ -129,19 +129,19 @@ void k8_flush_garts(void)
 	if (!flushed)
 		printk("nothing to flush?\n");
 }
-EXPORT_SYMBOL_GPL(k8_flush_garts);
+EXPORT_SYMBOL_GPL(amd_flush_garts);
 
-static __init int init_k8_nbs(void)
+static __init int init_amd_nbs(void)
 {
 	int err = 0;
 
-	err = cache_k8_northbridges();
+	err = cache_amd_northbridges();
 
 	if (err < 0)
-		printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
+		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");
 
 	return err;
 }
 
 /* This has to go after the PCI subsystem */
-fs_initcall(init_k8_nbs);
+fs_initcall(init_amd_nbs);
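
The flush handshake itself is untouched by the rename: amd_flush_garts() writes each northbridge's cached flush word with bit 0 set to the GART cache-control register at config offset 0x9c, then spins until the hardware clears that bit. Condensed to a single-northbridge sketch (the function name is hypothetical; the registers are those used above):

	/* Sketch: flush one NB's GART TLB and wait for completion. */
	static void flush_one_gart(struct pci_dev *misc, u32 flush_word)
	{
		u32 w;

		pci_write_config_dword(misc, 0x9c, flush_word | 1);
		do {
			pci_read_config_dword(misc, 0x9c, &w);
		} while (w & 1);	/* HW clears bit 0 when done */
	}
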
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index b3a16e8f0703..dcd7c83e1659 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -206,7 +206,7 @@ static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order)
  * Do an PCI bus scan by hand because we're running before the PCI
  * subsystem.
  *
- * All K8 AGP bridges are AGPv3 compliant, so we can do this scan
+ * All AMD AGP bridges are AGPv3 compliant, so we can do this scan
  * generically. It's probably overkill to always scan all slots because
  * the AGP bridges should be always an own bus on the HT hierarchy,
  * but do it here for future safety.
@@ -303,7 +303,7 @@ void __init early_gart_iommu_check(void)
 		dev_limit = bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
@@ -358,7 +358,7 @@ void __init early_gart_iommu_check(void)
 		dev_limit = bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
@@ -400,7 +400,7 @@ int __init gart_iommu_hole_init(void)
 		dev_limit = bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			iommu_detected = 1;
@@ -518,7 +518,7 @@ out:
 		dev_base = bus_dev_ranges[i].dev_base;
 		dev_limit = bus_dev_ranges[i].dev_limit;
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
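
All four call sites feed early_is_amd_nb() the raw dword from config offset 0x00 of device function 3, which packs the vendor ID into the low 16 bits and the device ID into the high 16 bits. A sketch of that decode against the ID table exported by amd_nb.c (the helper name is hypothetical; the logic mirrors early_is_amd_nb() above):

	/* Sketch: match a packed vendor/device dword against amd_nb_ids[]. */
	static int is_nb_id(u32 value)
	{
		u16 vendor = value & 0xffff;	/* low half: vendor ID */
		u16 device = value >> 16;	/* high half: device ID */
		struct pci_device_id *id;

		for (id = amd_nb_ids; id->vendor; id++)
			if (vendor == id->vendor && device == id->device)
				return 1;
		return 0;
	}
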
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 17ad03366211..92512ed380e7 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -333,7 +333,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
 {
 	struct amd_l3_cache *l3;
-	struct pci_dev *dev = node_to_k8_nb_misc(node);
+	struct pci_dev *dev = node_to_amd_nb_misc(node);
 
 	l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
 	if (!l3) {
@@ -370,7 +370,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
 		return;
 
 	/* not in virtualized environments */
-	if (k8_northbridges.num == 0)
+	if (amd_northbridges.num == 0)
 		return;
 
 	/*
@@ -378,7 +378,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
 	 * never freed but this is done only on shutdown so it doesn't matter.
 	 */
 	if (!l3_caches) {
-		int size = k8_northbridges.num * sizeof(struct amd_l3_cache *);
+		int size = amd_northbridges.num * sizeof(struct amd_l3_cache *);
 
 		l3_caches = kzalloc(size, GFP_ATOMIC);
 		if (!l3_caches)
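
The L3 code leans on the cached northbridge count twice: a zero count doubles as a "running virtualized" check, and the count sizes the per-node L3 table, since these parts have one L3 (and one NB misc device) per node. A condensed sketch of that allocation pattern (the helper name is hypothetical):

	/* Sketch: per-node L3 table sized by the NB count. */
	static struct amd_l3_cache **alloc_l3_table(void)
	{
		if (amd_northbridges.num == 0)	/* e.g. virtualized guest */
			return NULL;
		return kzalloc(amd_northbridges.num * sizeof(struct amd_l3_cache *),
			       GFP_ATOMIC);
	}
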
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index ba0f0ca9f280..63317c5694d7 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -143,7 +143,7 @@ static void flush_gart(void)
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	if (need_flush) {
-		k8_flush_garts();
+		amd_flush_garts();
 		need_flush = false;
 	}
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
@@ -561,17 +561,17 @@ static void enable_gart_translations(void)
 {
 	int i;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
-	for (i = 0; i < k8_northbridges.num; i++) {
-		struct pci_dev *dev = k8_northbridges.nb_misc[i];
+	for (i = 0; i < amd_northbridges.num; i++) {
+		struct pci_dev *dev = amd_northbridges.nb_misc[i];
 
 		enable_gart_translation(dev, __pa(agp_gatt_table));
 	}
 
 	/* Flush the GART-TLB to remove stale entries */
-	k8_flush_garts();
+	amd_flush_garts();
 }
 
 /*
@@ -596,13 +596,13 @@ static void gart_fixup_northbridges(struct sys_device *dev)
 	if (!fix_up_north_bridges)
 		return;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
 	pr_info("PCI-DMA: Restoring GART aperture settings\n");
 
-	for (i = 0; i < k8_northbridges.num; i++) {
-		struct pci_dev *dev = k8_northbridges.nb_misc[i];
+	for (i = 0; i < amd_northbridges.num; i++) {
+		struct pci_dev *dev = amd_northbridges.nb_misc[i];
 
 		/*
 		 * Don't enable translations just yet. That is the next
@@ -644,7 +644,7 @@ static struct sys_device device_gart = {
  * Private Northbridge GATT initialization in case we cannot use the
  * AGP driver for some reason.
  */
-static __init int init_k8_gatt(struct agp_kern_info *info)
+static __init int init_amd_gatt(struct agp_kern_info *info)
 {
 	unsigned aper_size, gatt_size, new_aper_size;
 	unsigned aper_base, new_aper_base;
@@ -656,8 +656,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
 	aper_size = aper_base = info->aper_size = 0;
 	dev = NULL;
-	for (i = 0; i < k8_northbridges.num; i++) {
-		dev = k8_northbridges.nb_misc[i];
+	for (i = 0; i < amd_northbridges.num; i++) {
+		dev = amd_northbridges.nb_misc[i];
 		new_aper_base = read_aperture(dev, &new_aper_size);
 		if (!new_aper_base)
 			goto nommu;
@@ -725,13 +725,13 @@ static void gart_iommu_shutdown(void)
 	if (!no_agp)
 		return;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
-	for (i = 0; i < k8_northbridges.num; i++) {
+	for (i = 0; i < amd_northbridges.num; i++) {
 		u32 ctl;
 
-		dev = k8_northbridges.nb_misc[i];
+		dev = amd_northbridges.nb_misc[i];
 		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
 
 		ctl &= ~GARTEN;
@@ -749,14 +749,14 @@ int __init gart_iommu_init(void)
 	unsigned long scratch;
 	long i;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return 0;
 
 #ifndef CONFIG_AGP_AMD64
 	no_agp = 1;
 #else
 	/* Makefile puts PCI initialization via subsys_initcall first. */
-	/* Add other K8 AGP bridge drivers here */
+	/* Add other AMD AGP bridge drivers here */
 	no_agp = no_agp ||
 		(agp_amd64_init() < 0) ||
 		(agp_copy_info(agp_bridge, &info) < 0);
@@ -765,7 +765,7 @@ int __init gart_iommu_init(void)
 	if (no_iommu ||
 	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
 	    !gart_iommu_aperture ||
-	    (no_agp && init_k8_gatt(&info) < 0)) {
+	    (no_agp && init_amd_gatt(&info) < 0)) {
 		if (max_pfn > MAX_DMA32_PFN) {
 			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
 			pr_warning("falling back to iommu=soft.\n");
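
On the shutdown side, gart_iommu_shutdown() disables translation with a read-modify-write of AMD64_GARTAPERTURECTL on every northbridge, clearing GARTEN so the GART stops translating before the kernel goes down. The per-device step, as a sketch (the function name is hypothetical):

	/* Sketch: turn off GART translation on one northbridge. */
	static void gart_disable_one(struct pci_dev *misc)
	{
		u32 ctl;

		pci_read_config_dword(misc, AMD64_GARTAPERTURECTL, &ctl);
		ctl &= ~GARTEN;		/* clear the enable bit */
		pci_write_config_dword(misc, AMD64_GARTAPERTURECTL, ctl);
	}
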
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 21c6746338af..df172c1e8238 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -694,7 +694,7 @@ static u64 __init get_max_mapped(void)
 void __init setup_arch(char **cmdline_p)
 {
 	int acpi = 0;
-	int k8 = 0;
+	int amd = 0;
 	unsigned long flags;
 
 #ifdef CONFIG_X86_32
@@ -981,12 +981,12 @@ void __init setup_arch(char **cmdline_p)
 	acpi = acpi_numa_init();
 #endif
 
-#ifdef CONFIG_K8_NUMA
+#ifdef CONFIG_AMD_NUMA
 	if (!acpi)
-		k8 = !k8_numa_init(0, max_pfn);
+		amd = !amd_numa_init(0, max_pfn);
 #endif
 
-	initmem_init(0, max_pfn, acpi, k8);
+	initmem_init(0, max_pfn, acpi, amd);
 	memblock_find_dma_reserve();
 	dma32_reserve_bootmem();
 
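
The detection order in setup_arch() is unchanged by the rename: ACPI/SRAT parsing runs first, the AMD northbridge scan is attempted only when ACPI did not produce a topology, and both flags are then handed to initmem_init(). Schematically (a condensation of the hunk above, not new code):

	acpi = acpi_numa_init();
	#ifdef CONFIG_AMD_NUMA
	if (!acpi)		/* fall back to the NB scan */
		amd = !amd_numa_init(0, max_pfn);	/* returns 0 on success */
	#endif
	initmem_init(0, max_pfn, acpi, amd);
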
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 55543397a8a7..09df2f9a3d69 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -23,7 +23,7 @@ mmiotrace-y := kmmio.o pf_in.o mmio-mod.o
 obj-$(CONFIG_MMIOTRACE_TEST)	+= testmmiotrace.o
 
 obj-$(CONFIG_NUMA)		+= numa.o numa_$(BITS).o
-obj-$(CONFIG_K8_NUMA)		+= k8topology_64.o
+obj-$(CONFIG_AMD_NUMA)		+= amdtopology_64.o
 obj-$(CONFIG_ACPI_NUMA)	+= srat_$(BITS).o
 
 obj-$(CONFIG_HAVE_MEMBLOCK)	+= memblock.o
diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/amdtopology_64.c
index 804a3b6c6e14..51fae9cfdecb 100644
--- a/arch/x86/mm/k8topology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -1,8 +1,8 @@
 /*
- * AMD K8 NUMA support.
+ * AMD NUMA support.
  * Discover the memory map and associated nodes.
  *
- * This version reads it directly from the K8 northbridge.
+ * This version reads it directly from the AMD northbridge.
  *
  * Copyright 2002,2003 Andi Kleen, SuSE Labs.
  */
@@ -57,7 +57,7 @@ static __init void early_get_boot_cpu_id(void)
 {
 	/*
 	 * need to get the APIC ID of the BSP so can use that to
-	 * create apicid_to_node in k8_scan_nodes()
+	 * create apicid_to_node in amd_scan_nodes()
 	 */
 #ifdef CONFIG_X86_MPPARSE
 	/*
@@ -69,7 +69,7 @@ static __init void early_get_boot_cpu_id(void)
 	early_init_lapic_mapping();
 }
 
-int __init k8_get_nodes(struct bootnode *physnodes)
+int __init amd_get_nodes(struct bootnode *physnodes)
 {
 	int i;
 	int ret = 0;
@@ -82,7 +82,7 @@ int __init k8_get_nodes(struct bootnode *physnodes)
 	return ret;
 }
 
-int __init k8_numa_init(unsigned long start_pfn, unsigned long end_pfn)
+int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long start = PFN_PHYS(start_pfn);
 	unsigned long end = PFN_PHYS(end_pfn);
@@ -194,7 +194,7 @@ int __init k8_numa_init(unsigned long start_pfn, unsigned long end_pfn)
 	return 0;
 }
 
-int __init k8_scan_nodes(void)
+int __init amd_scan_nodes(void)
 {
 	unsigned int bits;
 	unsigned int cores;
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 7ffc9b727efd..7762a517d69d 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -264,7 +264,7 @@ static struct bootnode physnodes[MAX_NUMNODES] __initdata;
 static char *cmdline __initdata;
 
 static int __init setup_physnodes(unsigned long start, unsigned long end,
-				  int acpi, int k8)
+				  int acpi, int amd)
 {
 	int nr_nodes = 0;
 	int ret = 0;
@@ -274,13 +274,13 @@ static int __init setup_physnodes(unsigned long start, unsigned long end,
 	if (acpi)
 		nr_nodes = acpi_get_nodes(physnodes);
 #endif
-#ifdef CONFIG_K8_NUMA
-	if (k8)
-		nr_nodes = k8_get_nodes(physnodes);
+#ifdef CONFIG_AMD_NUMA
+	if (amd)
+		nr_nodes = amd_get_nodes(physnodes);
 #endif
 	/*
 	 * Basic sanity checking on the physical node map: there may be errors
-	 * if the SRAT or K8 incorrectly reported the topology or the mem=
+	 * if the SRAT or AMD code incorrectly reported the topology or the mem=
 	 * kernel parameter is used.
 	 */
 	for (i = 0; i < nr_nodes; i++) {
@@ -549,7 +549,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
  * numa=fake command-line option.
  */
 static int __init numa_emulation(unsigned long start_pfn,
-				 unsigned long last_pfn, int acpi, int k8)
+				 unsigned long last_pfn, int acpi, int amd)
 {
 	u64 addr = start_pfn << PAGE_SHIFT;
 	u64 max_addr = last_pfn << PAGE_SHIFT;
@@ -557,7 +557,7 @@ static int __init numa_emulation(unsigned long start_pfn,
 	int num_nodes;
 	int i;
 
-	num_phys_nodes = setup_physnodes(addr, max_addr, acpi, k8);
+	num_phys_nodes = setup_physnodes(addr, max_addr, acpi, amd);
 	/*
 	 * If the numa=fake command-line contains a 'M' or 'G', it represents
 	 * the fixed node size. Otherwise, if it is just a single number N,
@@ -602,7 +602,7 @@ static int __init numa_emulation(unsigned long start_pfn,
 #endif /* CONFIG_NUMA_EMU */
 
 void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
-			 int acpi, int k8)
+			 int acpi, int amd)
 {
 	int i;
 
@@ -610,7 +610,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
 	nodes_clear(node_online_map);
 
 #ifdef CONFIG_NUMA_EMU
-	if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, k8))
+	if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
 		return;
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);
@@ -624,8 +624,8 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
 	nodes_clear(node_online_map);
 #endif
 
-#ifdef CONFIG_K8_NUMA
-	if (!numa_off && k8 && !k8_scan_nodes())
+#ifdef CONFIG_AMD_NUMA
+	if (!numa_off && amd && !amd_scan_nodes())
 		return;
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 42396df55556..b1f8bb53941a 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -38,7 +38,7 @@ static int agp_bridges_found;
 
 static void amd64_tlbflush(struct agp_memory *temp)
 {
-	k8_flush_garts();
+	amd_flush_garts();
 }
 
 static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
@@ -124,7 +124,7 @@ static int amd64_fetch_size(void)
 	u32 temp;
 	struct aper_size_info_32 *values;
 
-	dev = k8_northbridges.nb_misc[0];
+	dev = amd_northbridges.nb_misc[0];
 	if (dev==NULL)
 		return 0;
 
@@ -181,16 +181,16 @@ static int amd_8151_configure(void)
 	unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
 	int i;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return 0;
 
 	/* Configure AGP regs in each x86-64 host bridge. */
-	for (i = 0; i < k8_northbridges.num; i++) {
+	for (i = 0; i < amd_northbridges.num; i++) {
 		agp_bridge->gart_bus_addr =
-				amd64_configure(k8_northbridges.nb_misc[i],
+				amd64_configure(amd_northbridges.nb_misc[i],
 						gatt_bus);
 	}
-	k8_flush_garts();
+	amd_flush_garts();
 	return 0;
 }
 
@@ -200,11 +200,11 @@ static void amd64_cleanup(void)
 	u32 tmp;
 	int i;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
-	for (i = 0; i < k8_northbridges.num; i++) {
-		struct pci_dev *dev = k8_northbridges.nb_misc[i];
+	for (i = 0; i < amd_northbridges.num; i++) {
+		struct pci_dev *dev = amd_northbridges.nb_misc[i];
 		/* disable gart translation */
 		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
 		tmp &= ~GARTEN;
@@ -331,15 +331,15 @@ static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
 {
 	int i;
 
-	if (cache_k8_northbridges() < 0)
+	if (cache_amd_northbridges() < 0)
 		return -ENODEV;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return -ENODEV;
 
 	i = 0;
-	for (i = 0; i < k8_northbridges.num; i++) {
-		struct pci_dev *dev = k8_northbridges.nb_misc[i];
+	for (i = 0; i < amd_northbridges.num; i++) {
+		struct pci_dev *dev = amd_northbridges.nb_misc[i];
 		if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
 			dev_err(&dev->dev, "no usable aperture found\n");
 #ifdef __x86_64__
@@ -416,7 +416,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
 	}
 
 	/* shadow x86-64 registers into ULi registers */
-	pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+	pci_read_config_dword (amd_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
 			       &httfea);
 
 	/* if x86-64 aperture base is beyond 4G, exit here */
@@ -484,7 +484,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
 	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
 
 	/* shadow x86-64 registers into NVIDIA registers */
-	pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+	pci_read_config_dword (amd_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
 			       &apbase);
 
 	/* if x86-64 aperture base is beyond 4G, exit here */
@@ -778,7 +778,7 @@ int __init agp_amd64_init(void)
 	}
 
 	/* First check that we have at least one AMD64 NB */
-	if (!pci_dev_present(k8_nb_ids))
+	if (!pci_dev_present(amd_nb_ids))
 		return -ENODEV;
 
 	/* Look for any AGP bridge */
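
agp_amd64_init() keeps its cheap front-door check: pci_dev_present() against the exported amd_nb_ids[] table answers "is there any AMD northbridge here?" without a manual bus walk. The same gate in a dependent driver, as a sketch (the init function is hypothetical):

	/* Sketch: bail out early on systems without an AMD northbridge. */
	static int __init amd_gated_init(void)
	{
		if (!pci_dev_present(amd_nb_ids))
			return -ENODEV;
		/* ... driver setup ... */
		return 0;
	}
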
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 8521401bbd75..8b144ccf08aa 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2917,7 +2917,7 @@ static int __init amd64_edac_init(void)
 
 	opstate_init();
 
-	if (cache_k8_northbridges() < 0)
+	if (cache_amd_northbridges() < 0)
 		goto err_ret;
 
 	msrs = msrs_alloc();
@@ -2934,7 +2934,7 @@ static int __init amd64_edac_init(void)
 	 * to finish initialization of the MC instances.
 	 */
 	err = -ENODEV;
-	for (nb = 0; nb < k8_northbridges.num; nb++) {
+	for (nb = 0; nb < amd_northbridges.num; nb++) {
 		if (!pvt_lookup[nb])
 			continue;
 