Diffstat (limited to 'arch/x86/kernel/amd_nb.c')
-rw-r--r--   arch/x86/kernel/amd_nb.c | 72
1 file changed, 36 insertions(+), 36 deletions(-)
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 8f6463d8ed0..c46df406a2a 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -12,95 +12,95 @@
 
 static u32 *flush_words;
 
-struct pci_device_id k8_nb_ids[] = {
+struct pci_device_id amd_nb_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
 	{}
 };
-EXPORT_SYMBOL(k8_nb_ids);
+EXPORT_SYMBOL(amd_nb_ids);
 
-struct k8_northbridge_info k8_northbridges;
-EXPORT_SYMBOL(k8_northbridges);
+struct amd_northbridge_info amd_northbridges;
+EXPORT_SYMBOL(amd_northbridges);
 
-static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
+static struct pci_dev *next_amd_northbridge(struct pci_dev *dev)
 {
 	do {
 		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
 		if (!dev)
 			break;
-	} while (!pci_match_id(&k8_nb_ids[0], dev));
+	} while (!pci_match_id(&amd_nb_ids[0], dev));
 	return dev;
 }
 
-int cache_k8_northbridges(void)
+int cache_amd_northbridges(void)
 {
 	int i;
 	struct pci_dev *dev;
 
-	if (k8_northbridges.num)
+	if (amd_northbridges.num)
 		return 0;
 
 	dev = NULL;
-	while ((dev = next_k8_northbridge(dev)) != NULL)
-		k8_northbridges.num++;
+	while ((dev = next_amd_northbridge(dev)) != NULL)
+		amd_northbridges.num++;
 
 	/* some CPU families (e.g. family 0x11) do not support GART */
 	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
 	    boot_cpu_data.x86 == 0x15)
-		k8_northbridges.gart_supported = 1;
+		amd_northbridges.gart_supported = 1;
 
-	k8_northbridges.nb_misc = kmalloc((k8_northbridges.num + 1) *
+	amd_northbridges.nb_misc = kmalloc((amd_northbridges.num + 1) *
 					  sizeof(void *), GFP_KERNEL);
-	if (!k8_northbridges.nb_misc)
+	if (!amd_northbridges.nb_misc)
 		return -ENOMEM;
 
-	if (!k8_northbridges.num) {
-		k8_northbridges.nb_misc[0] = NULL;
+	if (!amd_northbridges.num) {
+		amd_northbridges.nb_misc[0] = NULL;
 		return 0;
 	}
 
-	if (k8_northbridges.gart_supported) {
-		flush_words = kmalloc(k8_northbridges.num * sizeof(u32),
+	if (amd_northbridges.gart_supported) {
+		flush_words = kmalloc(amd_northbridges.num * sizeof(u32),
 				      GFP_KERNEL);
 		if (!flush_words) {
-			kfree(k8_northbridges.nb_misc);
+			kfree(amd_northbridges.nb_misc);
 			return -ENOMEM;
 		}
 	}
 
 	dev = NULL;
 	i = 0;
-	while ((dev = next_k8_northbridge(dev)) != NULL) {
-		k8_northbridges.nb_misc[i] = dev;
-		if (k8_northbridges.gart_supported)
+	while ((dev = next_amd_northbridge(dev)) != NULL) {
+		amd_northbridges.nb_misc[i] = dev;
+		if (amd_northbridges.gart_supported)
 			pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
 	}
-	k8_northbridges.nb_misc[i] = NULL;
+	amd_northbridges.nb_misc[i] = NULL;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(cache_k8_northbridges);
+EXPORT_SYMBOL_GPL(cache_amd_northbridges);
 
 /* Ignores subdevice/subvendor but as far as I can figure out
    they're useless anyways */
-int __init early_is_k8_nb(u32 device)
+int __init early_is_amd_nb(u32 device)
 {
 	struct pci_device_id *id;
 	u32 vendor = device & 0xffff;
 	device >>= 16;
-	for (id = k8_nb_ids; id->vendor; id++)
+	for (id = amd_nb_ids; id->vendor; id++)
 		if (vendor == id->vendor && device == id->device)
 			return 1;
 	return 0;
 }
 
-void k8_flush_garts(void)
+void amd_flush_garts(void)
 {
 	int flushed, i;
 	unsigned long flags;
 	static DEFINE_SPINLOCK(gart_lock);
 
-	if (!k8_northbridges.gart_supported)
+	if (!amd_northbridges.gart_supported)
 		return;
 
 	/* Avoid races between AGP and IOMMU. In theory it's not needed
@@ -109,16 +109,16 @@ void k8_flush_garts(void)
 	   that it doesn't matter to serialize more. -AK */
 	spin_lock_irqsave(&gart_lock, flags);
 	flushed = 0;
-	for (i = 0; i < k8_northbridges.num; i++) {
-		pci_write_config_dword(k8_northbridges.nb_misc[i], 0x9c,
+	for (i = 0; i < amd_northbridges.num; i++) {
+		pci_write_config_dword(amd_northbridges.nb_misc[i], 0x9c,
 				       flush_words[i]|1);
 		flushed++;
 	}
-	for (i = 0; i < k8_northbridges.num; i++) {
+	for (i = 0; i < amd_northbridges.num; i++) {
 		u32 w;
 		/* Make sure the hardware actually executed the flush*/
 		for (;;) {
-			pci_read_config_dword(k8_northbridges.nb_misc[i],
+			pci_read_config_dword(amd_northbridges.nb_misc[i],
 					      0x9c, &w);
 			if (!(w & 1))
 				break;
@@ -129,19 +129,19 @@ void k8_flush_garts(void)
 	if (!flushed)
 		printk("nothing to flush?\n");
 }
-EXPORT_SYMBOL_GPL(k8_flush_garts);
+EXPORT_SYMBOL_GPL(amd_flush_garts);
 
-static __init int init_k8_nbs(void)
+static __init int init_amd_nbs(void)
 {
 	int err = 0;
 
-	err = cache_k8_northbridges();
+	err = cache_amd_northbridges();
 
 	if (err < 0)
-		printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
+		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");
 
 	return err;
 }
 
 /* This has to go after the PCI subsystem */
-fs_initcall(init_k8_nbs);
+fs_initcall(init_amd_nbs);