Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/amd_nb.c               |  72
-rw-r--r--  arch/x86/kernel/aperture_64.c          |  10
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c  |   6
-rw-r--r--  arch/x86/kernel/pci-gart_64.c          |  34
-rw-r--r--  arch/x86/kernel/setup.c                |   8
5 files changed, 65 insertions(+), 65 deletions(-)
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 8f6463d8ed0d..c46df406a2a9 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -12,95 +12,95 @@
 
 static u32 *flush_words;
 
-struct pci_device_id k8_nb_ids[] = {
+struct pci_device_id amd_nb_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
 	{}
 };
-EXPORT_SYMBOL(k8_nb_ids);
+EXPORT_SYMBOL(amd_nb_ids);
 
-struct k8_northbridge_info k8_northbridges;
-EXPORT_SYMBOL(k8_northbridges);
+struct amd_northbridge_info amd_northbridges;
+EXPORT_SYMBOL(amd_northbridges);
 
-static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
+static struct pci_dev *next_amd_northbridge(struct pci_dev *dev)
 {
 	do {
 		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
 		if (!dev)
 			break;
-	} while (!pci_match_id(&k8_nb_ids[0], dev));
+	} while (!pci_match_id(&amd_nb_ids[0], dev));
 	return dev;
 }
 
-int cache_k8_northbridges(void)
+int cache_amd_northbridges(void)
 {
 	int i;
 	struct pci_dev *dev;
 
-	if (k8_northbridges.num)
+	if (amd_northbridges.num)
 		return 0;
 
 	dev = NULL;
-	while ((dev = next_k8_northbridge(dev)) != NULL)
-		k8_northbridges.num++;
+	while ((dev = next_amd_northbridge(dev)) != NULL)
+		amd_northbridges.num++;
 
 	/* some CPU families (e.g. family 0x11) do not support GART */
 	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
 	    boot_cpu_data.x86 == 0x15)
-		k8_northbridges.gart_supported = 1;
+		amd_northbridges.gart_supported = 1;
 
-	k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) *
+	amd_northbridges.nb_misc = kmalloc((amd_northbridges.num + 1) *
 					  sizeof(void *), GFP_KERNEL);
-	if (!k8_northbridges.nb_misc)
+	if (!amd_northbridges.nb_misc)
 		return -ENOMEM;
 
-	if (!k8_northbridges.num) {
-		k8_northbridges.nb_misc[0] = NULL;
+	if (!amd_northbridges.num) {
+		amd_northbridges.nb_misc[0] = NULL;
 		return 0;
 	}
 
-	if (k8_northbridges.gart_supported) {
-		flush_words = kmalloc(k8_northbridges.num * sizeof(u32),
+	if (amd_northbridges.gart_supported) {
+		flush_words = kmalloc(amd_northbridges.num * sizeof(u32),
 				      GFP_KERNEL);
 		if (!flush_words) {
-			kfree(k8_northbridges.nb_misc);
+			kfree(amd_northbridges.nb_misc);
 			return -ENOMEM;
 		}
 	}
 
 	dev = NULL;
 	i = 0;
-	while ((dev = next_k8_northbridge(dev)) != NULL) {
-		k8_northbridges.nb_misc[i] = dev;
-		if (k8_northbridges.gart_supported)
+	while ((dev = next_amd_northbridge(dev)) != NULL) {
+		amd_northbridges.nb_misc[i] = dev;
+		if (amd_northbridges.gart_supported)
 			pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
 	}
-	k8_northbridges.nb_misc[i] = NULL;
+	amd_northbridges.nb_misc[i] = NULL;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cache_k8_northbridges);
+EXPORT_SYMBOL_GPL(cache_amd_northbridges);
 
 /* Ignores subdevice/subvendor but as far as I can figure out
    they're useless anyways */
-int __init early_is_k8_nb(u32 device)
+int __init early_is_amd_nb(u32 device)
 {
 	struct pci_device_id *id;
 	u32 vendor = device & 0xffff;
 	device >>= 16;
-	for (id = k8_nb_ids; id->vendor; id++)
+	for (id = amd_nb_ids; id->vendor; id++)
 		if (vendor == id->vendor && device == id->device)
 			return 1;
 	return 0;
 }
 
-void k8_flush_garts(void)
+void amd_flush_garts(void)
 {
 	int flushed, i;
 	unsigned long flags;
 	static DEFINE_SPINLOCK(gart_lock);
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
 	/* Avoid races between AGP and IOMMU. In theory it's not needed
@@ -109,16 +109,16 @@ void k8_flush_garts(void)
 	   that it doesn't matter to serialize more. -AK */
 	spin_lock_irqsave(&gart_lock, flags);
 	flushed = 0;
-	for (i = 0; i < k8_northbridges.num; i++) {
-		pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c,
+	for (i = 0; i < amd_northbridges.num; i++) {
+		pci_write_config_dword(amd_northbridges.nb_misc[i], 0x9c,
 				       flush_words[i]|1);
 		flushed++;
 	}
-	for (i = 0; i < k8_northbridges.num; i++) {
+	for (i = 0; i < amd_northbridges.num; i++) {
 		u32 w;
 		/* Make sure the hardware actually executed the flush*/
 		for (;;) {
-			pci_read_config_dword(k8_northbridges.nb_misc[i],
+			pci_read_config_dword(amd_northbridges.nb_misc[i],
 					      0x9c, &w);
 			if (!(w & 1))
 				break;
@@ -129,19 +129,19 @@ void k8_flush_garts(void)
 	if (!flushed)
 		printk("nothing to flush?\n");
 }
-EXPORT_SYMBOL_GPL(k8_flush_garts);
+EXPORT_SYMBOL_GPL(amd_flush_garts);
 
-static __init int init_k8_nbs(void)
+static __init int init_amd_nbs(void)
 {
 	int err = 0;
 
-	err = cache_k8_northbridges();
+	err = cache_amd_northbridges();
 
 	if (err < 0)
-		printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
+		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");
 
 	return err;
 }
 
 /* This has to go after the PCI subsystem */
-fs_initcall(init_k8_nbs);
+fs_initcall(init_amd_nbs);
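Note: amd_flush_garts() drives a two-phase handshake with the Misc Control function of every northbridge: first write the flush word cached at boot back to config register 0x9c with bit 0 set, then poll each device until the hardware clears that bit. Issuing all the writes before any polling lets the flushes proceed in parallel. A minimal userspace sketch of the handshake for a single simulated northbridge; the mock_pci_* helpers are hypothetical stand-ins for the kernel's pci_write_config_dword()/pci_read_config_dword():

/*
 * Sketch of the GART flush handshake. Real hardware clears bit 0 of
 * register 0x9c once the flush has actually been executed; the mock
 * below signals completion immediately.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t nb_reg_9c;      /* simulated Misc Control register 0x9c */

static void mock_pci_write_config_dword(int reg, uint32_t val)
{
        (void)reg;
        nb_reg_9c = val & ~1u;  /* latch the request, then signal done */
}

static void mock_pci_read_config_dword(int reg, uint32_t *val)
{
        (void)reg;
        *val = nb_reg_9c;
}

int main(void)
{
        uint32_t flush_word = 0;        /* cached from 0x9c at boot */
        uint32_t w;

        /* Phase 1: request a flush (bit 0 set). */
        mock_pci_write_config_dword(0x9c, flush_word | 1);

        /* Phase 2: wait until the hardware clears the flush-pending bit. */
        do {
                mock_pci_read_config_dword(0x9c, &w);
        } while (w & 1);

        printf("GART flush complete\n");
        return 0;
}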
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index b3a16e8f0703..dcd7c83e1659 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -206,7 +206,7 @@ static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order)
  * Do an PCI bus scan by hand because we're running before the PCI
  * subsystem.
  *
- * All K8 AGP bridges are AGPv3 compliant, so we can do this scan
+ * All AMD AGP bridges are AGPv3 compliant, so we can do this scan
  * generically. It's probably overkill to always scan all slots because
  * the AGP bridges should be always an own bus on the HT hierarchy,
  * but do it here for future safety.
@@ -303,7 +303,7 @@ void __init early_gart_iommu_check(void)
 		dev_limit = bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
@@ -358,7 +358,7 @@ void __init early_gart_iommu_check(void)
 		dev_limit = bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL);
@@ -400,7 +400,7 @@ int __init gart_iommu_hole_init(void)
 		dev_limit = bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			iommu_detected = 1;
@@ -518,7 +518,7 @@ out:
 		dev_base = bus_dev_ranges[i].dev_base;
 		dev_limit = bus_dev_ranges[i].dev_limit;
 		for (slot = dev_base; slot < dev_limit; slot++) {
-			if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00)))
+			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
 
 			write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
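Note: every early_is_amd_nb() call site above feeds it the first config-space dword of function 3 of a candidate slot, with the vendor ID packed into bits 15:0 and the device ID into bits 31:16. A self-contained sketch of that unpack-and-match step; the two IDs are real AMD northbridge misc-function values, but the table is deliberately trimmed:

/*
 * Standalone illustration of early_is_amd_nb()'s input format.
 */
#include <stdint.h>
#include <stdio.h>

#define PCI_VENDOR_ID_AMD               0x1022
#define PCI_DEVICE_ID_AMD_K8_NB_MISC    0x1103
#define PCI_DEVICE_ID_AMD_10H_NB_MISC   0x1203

struct nb_id { uint16_t vendor, device; };

static const struct nb_id nb_ids[] = {
        { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC },
        { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC },
        { 0, 0 }                        /* terminator, as in amd_nb_ids[] */
};

static int is_amd_nb(uint32_t dword)
{
        uint16_t vendor = dword & 0xffff;
        uint16_t device = dword >> 16;
        const struct nb_id *id;

        for (id = nb_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return 1;
        return 0;
}

int main(void)
{
        /* A family-10h NB misc function reads back as 0x12031022. */
        uint32_t dword = (PCI_DEVICE_ID_AMD_10H_NB_MISC << 16) | PCI_VENDOR_ID_AMD;

        printf("is northbridge: %d\n", is_amd_nb(dword));
        return 0;
}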
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 17ad03366211..92512ed380e7 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -333,7 +333,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
 {
 	struct amd_l3_cache *l3;
-	struct pci_dev *dev = node_to_k8_nb_misc(node);
+	struct pci_dev *dev = node_to_amd_nb_misc(node);
 
 	l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
 	if (!l3) {
@@ -370,7 +370,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
 		return;
 
 	/* not in virtualized environments */
-	if (k8_northbridges.num == 0)
+	if (amd_northbridges.num == 0)
 		return;
 
 	/*
@@ -378,7 +378,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
 	 * never freed but this is done only on shutdown so it doesn't matter.
	 */
 	if (!l3_caches) {
-		int size = k8_northbridges.num * sizeof(struct amd_l3_cache *);
+		int size = amd_northbridges.num * sizeof(struct amd_l3_cache *);
 
 		l3_caches = kzalloc(size, GFP_ATOMIC);
 		if (!l3_caches)
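Note: amd_check_l3_disable() sizes the l3_caches pointer array by amd_northbridges.num and populates it lazily, one entry per node, the first time a CPU on that node asks. A rough userspace analogue of that lazy per-node allocation pattern; every name here is an illustrative stand-in rather than the kernel's own definition:

/*
 * Lazy per-node L3 bookkeeping, modeled on amd_check_l3_disable().
 * nb_num plays the role of amd_northbridges.num.
 */
#include <stdio.h>
#include <stdlib.h>

struct l3_cache { unsigned int indices; };

static struct l3_cache **l3_caches;
static const int nb_num = 4;            /* one slot per northbridge/node */

static struct l3_cache *init_l3_cache(int node)
{
        struct l3_cache *l3 = calloc(1, sizeof(*l3));

        (void)node;
        if (l3)
                l3->indices = 4096;     /* placeholder for amd_calc_l3_indices() */
        return l3;
}

static struct l3_cache *get_l3_cache(int node)
{
        if (!l3_caches) {               /* first caller allocates the array */
                l3_caches = calloc(nb_num, sizeof(*l3_caches));
                if (!l3_caches)
                        return NULL;
        }
        if (!l3_caches[node])           /* each node filled on first use */
                l3_caches[node] = init_l3_cache(node);
        return l3_caches[node];
}

int main(void)
{
        struct l3_cache *l3 = get_l3_cache(0);

        printf("node 0 L3 indices: %u\n", l3 ? l3->indices : 0);
        return 0;
}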
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index ba0f0ca9f280..63317c5694d7 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -143,7 +143,7 @@ static void flush_gart(void)
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	if (need_flush) {
-		k8_flush_garts();
+		amd_flush_garts();
 		need_flush = false;
 	}
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
@@ -561,17 +561,17 @@ static void enable_gart_translations(void)
 {
 	int i;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
-	for (i = 0; i < k8_northbridges.num; i++) {
-		struct pci_dev *dev = k8_northbridges.nb_misc[i];
+	for (i = 0; i < amd_northbridges.num; i++) {
+		struct pci_dev *dev = amd_northbridges.nb_misc[i];
 
 		enable_gart_translation(dev, __pa(agp_gatt_table));
 	}
 
 	/* Flush the GART-TLB to remove stale entries */
-	k8_flush_garts();
+	amd_flush_garts();
 }
 
 /*
@@ -596,13 +596,13 @@ static void gart_fixup_northbridges(struct sys_device *dev)
 	if (!fix_up_north_bridges)
 		return;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
 	pr_info("PCI-DMA: Restoring GART aperture settings\n");
 
-	for (i = 0; i < k8_northbridges.num; i++) {
-		struct pci_dev *dev = k8_northbridges.nb_misc[i];
+	for (i = 0; i < amd_northbridges.num; i++) {
+		struct pci_dev *dev = amd_northbridges.nb_misc[i];
 
 		/*
 		 * Don't enable translations just yet. That is the next
@@ -644,7 +644,7 @@ static struct sys_device device_gart = {
  * Private Northbridge GATT initialization in case we cannot use the
  * AGP driver for some reason.
  */
-static __init int init_k8_gatt(struct agp_kern_info *info)
+static __init int init_amd_gatt(struct agp_kern_info *info)
 {
 	unsigned aper_size, gatt_size, new_aper_size;
 	unsigned aper_base, new_aper_base;
@@ -656,8 +656,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
 	aper_size = aper_base = info->aper_size = 0;
 	dev = NULL;
-	for (i = 0; i < k8_northbridges.num; i++) {
-		dev = k8_northbridges.nb_misc[i];
+	for (i = 0; i < amd_northbridges.num; i++) {
+		dev = amd_northbridges.nb_misc[i];
 		new_aper_base = read_aperture(dev, &new_aper_size);
 		if (!new_aper_base)
 			goto nommu;
@@ -725,13 +725,13 @@ static void gart_iommu_shutdown(void)
 	if (!no_agp)
 		return;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
-	for (i = 0; i < k8_northbridges.num; i++) {
+	for (i = 0; i < amd_northbridges.num; i++) {
 		u32 ctl;
 
-		dev = k8_northbridges.nb_misc[i];
+		dev = amd_northbridges.nb_misc[i];
 		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
 
 		ctl &= ~GARTEN;
@@ -749,14 +749,14 @@ int __init gart_iommu_init(void)
 	unsigned long scratch;
 	long i;
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return 0;
 
 #ifndef CONFIG_AGP_AMD64
 	no_agp = 1;
 #else
 	/* Makefile puts PCI initialization via subsys_initcall first. */
-	/* Add other K8 AGP bridge drivers here */
+	/* Add other AMD AGP bridge drivers here */
 	no_agp = no_agp ||
 		(agp_amd64_init() < 0) ||
 		(agp_copy_info(agp_bridge, &info) < 0);
@@ -765,7 +765,7 @@ int __init gart_iommu_init(void)
 	if (no_iommu ||
 	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
 	    !gart_iommu_aperture ||
-	    (no_agp && init_k8_gatt(&info) < 0)) {
+	    (no_agp && init_amd_gatt(&info) < 0)) {
 		if (max_pfn > MAX_DMA32_PFN) {
 			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
 			pr_warning("falling back to iommu=soft.\n");
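Note: gart_iommu_shutdown() turns translation off with a plain read-modify-write of each northbridge's aperture control register. A mocked sketch of that sequence; the 0x90 offset and the bit-0 GARTEN value follow the definitions in the kernel's gart.h, and the config space and its accessors are simulated:

/*
 * Read-modify-write sketch of the per-northbridge shutdown step.
 */
#include <stdint.h>
#include <stdio.h>

#define AMD64_GARTAPERTURECTL   0x90
#define GARTEN                  (1u << 0)

static uint32_t cfg[64] = {
        [AMD64_GARTAPERTURECTL / 4] = 0x6 | GARTEN,     /* enabled at entry */
};

static void mock_read(int reg, uint32_t *v)  { *v = cfg[reg / 4]; }
static void mock_write(int reg, uint32_t v)  { cfg[reg / 4] = v; }

int main(void)
{
        uint32_t ctl;

        mock_read(AMD64_GARTAPERTURECTL, &ctl);
        ctl &= ~GARTEN;                         /* disable translation */
        mock_write(AMD64_GARTAPERTURECTL, ctl);

        mock_read(AMD64_GARTAPERTURECTL, &ctl);
        printf("GARTEN is now %s\n", (ctl & GARTEN) ? "set" : "clear");
        return 0;
}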
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 21c6746338af..df172c1e8238 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -694,7 +694,7 @@ static u64 __init get_max_mapped(void)
 void __init setup_arch(char **cmdline_p)
 {
 	int acpi = 0;
-	int k8 = 0;
+	int amd = 0;
 	unsigned long flags;
 
 #ifdef CONFIG_X86_32
@@ -981,12 +981,12 @@ void __init setup_arch(char **cmdline_p)
 	acpi = acpi_numa_init();
 #endif
 
-#ifdef CONFIG_K8_NUMA
+#ifdef CONFIG_AMD_NUMA
 	if (!acpi)
-		k8 = !k8_numa_init(0, max_pfn);
+		amd = !amd_numa_init(0, max_pfn);
 #endif
 
-	initmem_init(0, max_pfn, acpi, k8);
+	initmem_init(0, max_pfn, acpi, amd);
 	memblock_find_dma_reserve();
 	dma32_reserve_bootmem();
 
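Note: the setup.c hunks keep the NUMA-detection order intact: ACPI is consulted first, the AMD northbridge scan runs only when ACPI yields nothing, and initmem_init() is told which method succeeded. A control-flow sketch under assumed return conventions; all three stubs are hypothetical, and the negation of amd_numa_stub() mirrors the code's amd = !amd_numa_init(0, max_pfn), a presumed 0-on-success convention:

/*
 * Control-flow sketch of the NUMA fallback in setup_arch().
 * acpi_numa_stub() is assumed to return nonzero when ACPI provided
 * a topology; amd_numa_stub() to return 0 on success.
 */
#include <stdio.h>

static int acpi_numa_stub(void) { return 0; }   /* pretend: no SRAT */
static int amd_numa_stub(void)  { return 0; }   /* pretend: NB scan worked */

static void initmem_init_stub(int acpi, int amd)
{
        printf("NUMA topology via %s\n",
               acpi ? "ACPI" : amd ? "AMD northbridge" : "none (flat)");
}

int main(void)
{
        int acpi = 0, amd = 0;

        acpi = acpi_numa_stub();
        if (!acpi)                      /* ACPI found nothing: try the NB scan */
                amd = !amd_numa_stub();

        initmem_init_stub(acpi, amd);
        return 0;
}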