about summary refs log tree commit diff stats
path: root/arch/x86/kernel/k8.c
diff options
context:
space:
mode:
authorAndreas Herrmann <andreas.herrmann3@amd.com>2010-09-17 12:02:54 -0400
committerH. Peter Anvin <hpa@linux.intel.com>2010-09-17 16:26:21 -0400
commit900f9ac9f12dc3dd6fc8e33e16df172eafcaead6 (patch)
tree7fb7bf3a150f8a3cc513e1bf6bd842e4ad213473 /arch/x86/kernel/k8.c
parent3518dd14ca888085797ca8d3a9e11c8ef9e7ae68 (diff)
x86, k8-gart: Decouple handling of garts and northbridges
So far we only provide num_k8_northbridges. This is required in different areas (e.g. L3 cache index disable, GART). But not all AMD CPUs provide a GART. Thus it is useful to split off the GART handling from the generic caching of AMD northbridge misc devices. Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com> LKML-Reference: <20100917160254.GC4958@loge.amd.com> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/kernel/k8.c')
-rw-r--r--arch/x86/kernel/k8.c52
1 files changed, 30 insertions, 22 deletions
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
index 0f7bc20cfcde..5de1b6b39639 100644
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/k8.c
@@ -10,9 +10,6 @@
10#include <linux/spinlock.h> 10#include <linux/spinlock.h>
11#include <asm/k8.h> 11#include <asm/k8.h>
12 12
13int num_k8_northbridges;
14EXPORT_SYMBOL(num_k8_northbridges);
15
16static u32 *flush_words; 13static u32 *flush_words;
17 14
18struct pci_device_id k8_nb_ids[] = { 15struct pci_device_id k8_nb_ids[] = {
@@ -22,7 +19,7 @@ struct pci_device_id k8_nb_ids[] = {
22}; 19};
23EXPORT_SYMBOL(k8_nb_ids); 20EXPORT_SYMBOL(k8_nb_ids);
24 21
25struct pci_dev **k8_northbridges; 22struct k8_northbridge_info k8_northbridges;
26EXPORT_SYMBOL(k8_northbridges); 23EXPORT_SYMBOL(k8_northbridges);
27 24
28static struct pci_dev *next_k8_northbridge(struct pci_dev *dev) 25static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
@@ -40,36 +37,44 @@ int cache_k8_northbridges(void)
40 int i; 37 int i;
41 struct pci_dev *dev; 38 struct pci_dev *dev;
42 39
43 if (num_k8_northbridges) 40 if (k8_northbridges.num)
44 return 0; 41 return 0;
45 42
46 dev = NULL; 43 dev = NULL;
47 while ((dev = next_k8_northbridge(dev)) != NULL) 44 while ((dev = next_k8_northbridge(dev)) != NULL)
48 num_k8_northbridges++; 45 k8_northbridges.num++;
46
47 /* some CPU families (e.g. family 0x11) do not support GART */
48 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10)
49 k8_northbridges.gart_supported = 1;
49 50
50 k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *), 51 k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) *
51 GFP_KERNEL); 52 sizeof(void *), GFP_KERNEL);
52 if (!k8_northbridges) 53 if (!k8_northbridges.nb_misc)
53 return -ENOMEM; 54 return -ENOMEM;
54 55
55 if (!num_k8_northbridges) { 56 if (!k8_northbridges.num) {
56 k8_northbridges[0] = NULL; 57 k8_northbridges.nb_misc[0] = NULL;
57 return 0; 58 return 0;
58 } 59 }
59 60
60 flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL); 61 if (k8_northbridges.gart_supported) {
61 if (!flush_words) { 62 flush_words = kmalloc(k8_northbridges.num * sizeof(u32),
62 kfree(k8_northbridges); 63 GFP_KERNEL);
63 return -ENOMEM; 64 if (!flush_words) {
65 kfree(k8_northbridges.nb_misc);
66 return -ENOMEM;
67 }
64 } 68 }
65 69
66 dev = NULL; 70 dev = NULL;
67 i = 0; 71 i = 0;
68 while ((dev = next_k8_northbridge(dev)) != NULL) { 72 while ((dev = next_k8_northbridge(dev)) != NULL) {
69 k8_northbridges[i] = dev; 73 k8_northbridges.nb_misc[i] = dev;
70 pci_read_config_dword(dev, 0x9c, &flush_words[i++]); 74 if (k8_northbridges.gart_supported)
75 pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
71 } 76 }
72 k8_northbridges[i] = NULL; 77 k8_northbridges.nb_misc[i] = NULL;
73 return 0; 78 return 0;
74} 79}
75EXPORT_SYMBOL_GPL(cache_k8_northbridges); 80EXPORT_SYMBOL_GPL(cache_k8_northbridges);
@@ -93,22 +98,25 @@ void k8_flush_garts(void)
93 unsigned long flags; 98 unsigned long flags;
94 static DEFINE_SPINLOCK(gart_lock); 99 static DEFINE_SPINLOCK(gart_lock);
95 100
101 if (!k8_northbridges.gart_supported)
102 return;
103
96 /* Avoid races between AGP and IOMMU. In theory it's not needed 104 /* Avoid races between AGP and IOMMU. In theory it's not needed
97 but I'm not sure if the hardware won't lose flush requests 105 but I'm not sure if the hardware won't lose flush requests
98 when another is pending. This whole thing is so expensive anyways 106 when another is pending. This whole thing is so expensive anyways
99 that it doesn't matter to serialize more. -AK */ 107 that it doesn't matter to serialize more. -AK */
100 spin_lock_irqsave(&gart_lock, flags); 108 spin_lock_irqsave(&gart_lock, flags);
101 flushed = 0; 109 flushed = 0;
102 for (i = 0; i < num_k8_northbridges; i++) { 110 for (i = 0; i < k8_northbridges.num; i++) {
103 pci_write_config_dword(k8_northbridges[i], 0x9c, 111 pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c,
104 flush_words[i]|1); 112 flush_words[i]|1);
105 flushed++; 113 flushed++;
106 } 114 }
107 for (i = 0; i < num_k8_northbridges; i++) { 115 for (i = 0; i < k8_northbridges.num; i++) {
108 u32 w; 116 u32 w;
109 /* Make sure the hardware actually executed the flush*/ 117 /* Make sure the hardware actually executed the flush*/
110 for (;;) { 118 for (;;) {
111 pci_read_config_dword(k8_northbridges[i], 119 pci_read_config_dword(k8_northbridges.nb_misc[i],
112 0x9c, &w); 120 0x9c, &w);
113 if (!(w & 1)) 121 if (!(w & 1))
114 break; 122 break;