about | summary | refs | log | tree | commit | diff | stats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
authorAndreas Herrmann <andreas.herrmann3@amd.com>2010-09-17 12:02:54 -0400
committerH. Peter Anvin <hpa@linux.intel.com>2010-09-17 16:26:21 -0400
commit900f9ac9f12dc3dd6fc8e33e16df172eafcaead6 (patch)
tree7fb7bf3a150f8a3cc513e1bf6bd842e4ad213473 /arch/x86/kernel
parent3518dd14ca888085797ca8d3a9e11c8ef9e7ae68 (diff)
x86, k8-gart: Decouple handling of garts and northbridges
So far we only provide num_k8_northbridges. This is required in different areas (e.g. L3 cache index disable, GART). But not all AMD CPUs provide a GART. Thus it is useful to split off the GART handling from the generic caching of AMD northbridge misc devices. Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com> LKML-Reference: <20100917160254.GC4958@loge.amd.com> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c |  4
-rw-r--r--  arch/x86/kernel/k8.c                  | 52
-rw-r--r--  arch/x86/kernel/pci-gart_64.c         | 27
3 files changed, 50 insertions(+), 33 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 2521cdcb877e..6fdfb0b20f8c 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -369,7 +369,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
369 return; 369 return;
370 370
371 /* not in virtualized environments */ 371 /* not in virtualized environments */
372 if (num_k8_northbridges == 0) 372 if (k8_northbridges.num == 0)
373 return; 373 return;
374 374
375 /* 375 /*
@@ -377,7 +377,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
377 * never freed but this is done only on shutdown so it doesn't matter. 377 * never freed but this is done only on shutdown so it doesn't matter.
378 */ 378 */
379 if (!l3_caches) { 379 if (!l3_caches) {
380 int size = num_k8_northbridges * sizeof(struct amd_l3_cache *); 380 int size = k8_northbridges.num * sizeof(struct amd_l3_cache *);
381 381
382 l3_caches = kzalloc(size, GFP_ATOMIC); 382 l3_caches = kzalloc(size, GFP_ATOMIC);
383 if (!l3_caches) 383 if (!l3_caches)
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
index 0f7bc20cfcde..5de1b6b39639 100644
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/k8.c
@@ -10,9 +10,6 @@
10#include <linux/spinlock.h> 10#include <linux/spinlock.h>
11#include <asm/k8.h> 11#include <asm/k8.h>
12 12
13int num_k8_northbridges;
14EXPORT_SYMBOL(num_k8_northbridges);
15
16static u32 *flush_words; 13static u32 *flush_words;
17 14
18struct pci_device_id k8_nb_ids[] = { 15struct pci_device_id k8_nb_ids[] = {
@@ -22,7 +19,7 @@ struct pci_device_id k8_nb_ids[] = {
22}; 19};
23EXPORT_SYMBOL(k8_nb_ids); 20EXPORT_SYMBOL(k8_nb_ids);
24 21
25struct pci_dev **k8_northbridges; 22struct k8_northbridge_info k8_northbridges;
26EXPORT_SYMBOL(k8_northbridges); 23EXPORT_SYMBOL(k8_northbridges);
27 24
28static struct pci_dev *next_k8_northbridge(struct pci_dev *dev) 25static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
@@ -40,36 +37,44 @@ int cache_k8_northbridges(void)
40 int i; 37 int i;
41 struct pci_dev *dev; 38 struct pci_dev *dev;
42 39
43 if (num_k8_northbridges) 40 if (k8_northbridges.num)
44 return 0; 41 return 0;
45 42
46 dev = NULL; 43 dev = NULL;
47 while ((dev = next_k8_northbridge(dev)) != NULL) 44 while ((dev = next_k8_northbridge(dev)) != NULL)
48 num_k8_northbridges++; 45 k8_northbridges.num++;
46
47 /* some CPU families (e.g. family 0x11) do not support GART */
48 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10)
49 k8_northbridges.gart_supported = 1;
49 50
50 k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *), 51 k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) *
51 GFP_KERNEL); 52 sizeof(void *), GFP_KERNEL);
52 if (!k8_northbridges) 53 if (!k8_northbridges.nb_misc)
53 return -ENOMEM; 54 return -ENOMEM;
54 55
55 if (!num_k8_northbridges) { 56 if (!k8_northbridges.num) {
56 k8_northbridges[0] = NULL; 57 k8_northbridges.nb_misc[0] = NULL;
57 return 0; 58 return 0;
58 } 59 }
59 60
60 flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL); 61 if (k8_northbridges.gart_supported) {
61 if (!flush_words) { 62 flush_words = kmalloc(k8_northbridges.num * sizeof(u32),
62 kfree(k8_northbridges); 63 GFP_KERNEL);
63 return -ENOMEM; 64 if (!flush_words) {
65 kfree(k8_northbridges.nb_misc);
66 return -ENOMEM;
67 }
64 } 68 }
65 69
66 dev = NULL; 70 dev = NULL;
67 i = 0; 71 i = 0;
68 while ((dev = next_k8_northbridge(dev)) != NULL) { 72 while ((dev = next_k8_northbridge(dev)) != NULL) {
69 k8_northbridges[i] = dev; 73 k8_northbridges.nb_misc[i] = dev;
70 pci_read_config_dword(dev, 0x9c, &flush_words[i++]); 74 if (k8_northbridges.gart_supported)
75 pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
71 } 76 }
72 k8_northbridges[i] = NULL; 77 k8_northbridges.nb_misc[i] = NULL;
73 return 0; 78 return 0;
74} 79}
75EXPORT_SYMBOL_GPL(cache_k8_northbridges); 80EXPORT_SYMBOL_GPL(cache_k8_northbridges);
@@ -93,22 +98,25 @@ void k8_flush_garts(void)
93 unsigned long flags; 98 unsigned long flags;
94 static DEFINE_SPINLOCK(gart_lock); 99 static DEFINE_SPINLOCK(gart_lock);
95 100
101 if (!k8_northbridges.gart_supported)
102 return;
103
96 /* Avoid races between AGP and IOMMU. In theory it's not needed 104 /* Avoid races between AGP and IOMMU. In theory it's not needed
97 but I'm not sure if the hardware won't lose flush requests 105 but I'm not sure if the hardware won't lose flush requests
98 when another is pending. This whole thing is so expensive anyways 106 when another is pending. This whole thing is so expensive anyways
99 that it doesn't matter to serialize more. -AK */ 107 that it doesn't matter to serialize more. -AK */
100 spin_lock_irqsave(&gart_lock, flags); 108 spin_lock_irqsave(&gart_lock, flags);
101 flushed = 0; 109 flushed = 0;
102 for (i = 0; i < num_k8_northbridges; i++) { 110 for (i = 0; i < k8_northbridges.num; i++) {
103 pci_write_config_dword(k8_northbridges[i], 0x9c, 111 pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c,
104 flush_words[i]|1); 112 flush_words[i]|1);
105 flushed++; 113 flushed++;
106 } 114 }
107 for (i = 0; i < num_k8_northbridges; i++) { 115 for (i = 0; i < k8_northbridges.num; i++) {
108 u32 w; 116 u32 w;
109 /* Make sure the hardware actually executed the flush*/ 117 /* Make sure the hardware actually executed the flush*/
110 for (;;) { 118 for (;;) {
111 pci_read_config_dword(k8_northbridges[i], 119 pci_read_config_dword(k8_northbridges.nb_misc[i],
112 0x9c, &w); 120 0x9c, &w);
113 if (!(w & 1)) 121 if (!(w & 1))
114 break; 122 break;
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 0f7f130caa67..8f214a2643fa 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -560,8 +560,11 @@ static void enable_gart_translations(void)
560{ 560{
561 int i; 561 int i;
562 562
563 for (i = 0; i < num_k8_northbridges; i++) { 563 if (!k8_northbridges.gart_supported)
564 struct pci_dev *dev = k8_northbridges[i]; 564 return;
565
566 for (i = 0; i < k8_northbridges.num; i++) {
567 struct pci_dev *dev = k8_northbridges.nb_misc[i];
565 568
566 enable_gart_translation(dev, __pa(agp_gatt_table)); 569 enable_gart_translation(dev, __pa(agp_gatt_table));
567 } 570 }
@@ -592,10 +595,13 @@ static void gart_fixup_northbridges(struct sys_device *dev)
592 if (!fix_up_north_bridges) 595 if (!fix_up_north_bridges)
593 return; 596 return;
594 597
598 if (!k8_northbridges.gart_supported)
599 return;
600
595 pr_info("PCI-DMA: Restoring GART aperture settings\n"); 601 pr_info("PCI-DMA: Restoring GART aperture settings\n");
596 602
597 for (i = 0; i < num_k8_northbridges; i++) { 603 for (i = 0; i < k8_northbridges.num; i++) {
598 struct pci_dev *dev = k8_northbridges[i]; 604 struct pci_dev *dev = k8_northbridges.nb_misc[i];
599 605
600 /* 606 /*
601 * Don't enable translations just yet. That is the next 607 * Don't enable translations just yet. That is the next
@@ -649,8 +655,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
649 655
650 aper_size = aper_base = info->aper_size = 0; 656 aper_size = aper_base = info->aper_size = 0;
651 dev = NULL; 657 dev = NULL;
652 for (i = 0; i < num_k8_northbridges; i++) { 658 for (i = 0; i < k8_northbridges.num; i++) {
653 dev = k8_northbridges[i]; 659 dev = k8_northbridges.nb_misc[i];
654 new_aper_base = read_aperture(dev, &new_aper_size); 660 new_aper_base = read_aperture(dev, &new_aper_size);
655 if (!new_aper_base) 661 if (!new_aper_base)
656 goto nommu; 662 goto nommu;
@@ -718,10 +724,13 @@ static void gart_iommu_shutdown(void)
718 if (!no_agp) 724 if (!no_agp)
719 return; 725 return;
720 726
721 for (i = 0; i < num_k8_northbridges; i++) { 727 if (!k8_northbridges.gart_supported)
728 return;
729
730 for (i = 0; i < k8_northbridges.num; i++) {
722 u32 ctl; 731 u32 ctl;
723 732
724 dev = k8_northbridges[i]; 733 dev = k8_northbridges.nb_misc[i];
725 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); 734 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
726 735
727 ctl &= ~GARTEN; 736 ctl &= ~GARTEN;
@@ -739,7 +748,7 @@ int __init gart_iommu_init(void)
739 unsigned long scratch; 748 unsigned long scratch;
740 long i; 749 long i;
741 750
742 if (num_k8_northbridges == 0) 751 if (!k8_northbridges.gart_supported)
743 return 0; 752 return 0;
744 753
745#ifndef CONFIG_AGP_AMD64 754#ifndef CONFIG_AGP_AMD64