Diffstat (limited to 'arch/x86/kernel/aperture_64.c')
-rw-r--r-- | arch/x86/kernel/aperture_64.c | 307
1 file changed, 196 insertions, 111 deletions
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 479926d9e004..e819362c7068 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -35,6 +35,18 @@ int fallback_aper_force __initdata; | |||
35 | 35 | ||
36 | int fix_aperture __initdata = 1; | 36 | int fix_aperture __initdata = 1; |
37 | 37 | ||
38 | struct bus_dev_range { | ||
39 | int bus; | ||
40 | int dev_base; | ||
41 | int dev_limit; | ||
42 | }; | ||
43 | |||
44 | static struct bus_dev_range bus_dev_ranges[] __initdata = { | ||
45 | { 0x00, 0x18, 0x20}, | ||
46 | { 0xff, 0x00, 0x20}, | ||
47 | { 0xfe, 0x00, 0x20} | ||
48 | }; | ||
49 | |||
38 | static struct resource gart_resource = { | 50 | static struct resource gart_resource = { |
39 | .name = "GART", | 51 | .name = "GART", |
40 | .flags = IORESOURCE_MEM, | 52 | .flags = IORESOURCE_MEM, |
@@ -55,8 +67,9 @@ static u32 __init allocate_aperture(void) | |||
55 | u32 aper_size; | 67 | u32 aper_size; |
56 | void *p; | 68 | void *p; |
57 | 69 | ||
58 | if (fallback_aper_order > 7) | 70 | /* aper_size should be <= 1G */ |
59 | fallback_aper_order = 7; | 71 | if (fallback_aper_order > 5) |
72 | fallback_aper_order = 5; | ||
60 | aper_size = (32 * 1024 * 1024) << fallback_aper_order; | 73 | aper_size = (32 * 1024 * 1024) << fallback_aper_order; |
61 | 74 | ||
62 | /* | 75 | /* |
@@ -65,7 +78,20 @@ static u32 __init allocate_aperture(void) | |||
65 | * memory. Unfortunately we cannot move it up because that would | 78 | * memory. Unfortunately we cannot move it up because that would |
66 | * make the IOMMU useless. | 79 | * make the IOMMU useless. |
67 | */ | 80 | */ |
68 | p = __alloc_bootmem_nopanic(aper_size, aper_size, 0); | 81 | /* |
82 | * Use 512M as the goal, in case kexec will load a big kernel | ||
83 | * that does in-place decompression and could overlap the | ||
84 | * position where the gart is in use. | ||
85 | * Sequence: | ||
86 | * kernel_small | ||
87 | * ==> kexec (with kdump trigger path or previous kernel didn't shut down gart) | ||
88 | * ==> kernel_small (gart area becomes e820_reserved) | ||
89 | * ==> kexec (with kdump trigger path or previous kernel didn't shut down gart) | ||
90 | * ==> kernel_big (uncompressed size will be bigger than 64M or 128M) | ||
91 | * So don't use the range below 512M for the gart iommu; leave that | ||
92 | * space for kernel code, to be safe. | ||
93 | */ | ||
94 | p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); | ||
69 | if (!p || __pa(p)+aper_size > 0xffffffff) { | 95 | if (!p || __pa(p)+aper_size > 0xffffffff) { |
70 | printk(KERN_ERR | 96 | printk(KERN_ERR |
71 | "Cannot allocate aperture memory hole (%p,%uK)\n", | 97 | "Cannot allocate aperture memory hole (%p,%uK)\n", |
@@ -83,69 +109,53 @@ static u32 __init allocate_aperture(void) | |||
83 | return (u32)__pa(p); | 109 | return (u32)__pa(p); |
84 | } | 110 | } |
85 | 111 | ||
86 | static int __init aperture_valid(u64 aper_base, u32 aper_size) | ||
87 | { | ||
88 | if (!aper_base) | ||
89 | return 0; | ||
90 | |||
91 | if (aper_base + aper_size > 0x100000000UL) { | ||
92 | printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n"); | ||
93 | return 0; | ||
94 | } | ||
95 | if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) { | ||
96 | printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n"); | ||
97 | return 0; | ||
98 | } | ||
99 | if (aper_size < 64*1024*1024) { | ||
100 | printk(KERN_ERR "Aperture too small (%d MB)\n", aper_size>>20); | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | return 1; | ||
105 | } | ||
106 | 112 | ||
107 | /* Find a PCI capability */ | 113 | /* Find a PCI capability */ |
108 | static __u32 __init find_cap(int num, int slot, int func, int cap) | 114 | static u32 __init find_cap(int bus, int slot, int func, int cap) |
109 | { | 115 | { |
110 | int bytes; | 116 | int bytes; |
111 | u8 pos; | 117 | u8 pos; |
112 | 118 | ||
113 | if (!(read_pci_config_16(num, slot, func, PCI_STATUS) & | 119 | if (!(read_pci_config_16(bus, slot, func, PCI_STATUS) & |
114 | PCI_STATUS_CAP_LIST)) | 120 | PCI_STATUS_CAP_LIST)) |
115 | return 0; | 121 | return 0; |
116 | 122 | ||
117 | pos = read_pci_config_byte(num, slot, func, PCI_CAPABILITY_LIST); | 123 | pos = read_pci_config_byte(bus, slot, func, PCI_CAPABILITY_LIST); |
118 | for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) { | 124 | for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) { |
119 | u8 id; | 125 | u8 id; |
120 | 126 | ||
121 | pos &= ~3; | 127 | pos &= ~3; |
122 | id = read_pci_config_byte(num, slot, func, pos+PCI_CAP_LIST_ID); | 128 | id = read_pci_config_byte(bus, slot, func, pos+PCI_CAP_LIST_ID); |
123 | if (id == 0xff) | 129 | if (id == 0xff) |
124 | break; | 130 | break; |
125 | if (id == cap) | 131 | if (id == cap) |
126 | return pos; | 132 | return pos; |
127 | pos = read_pci_config_byte(num, slot, func, | 133 | pos = read_pci_config_byte(bus, slot, func, |
128 | pos+PCI_CAP_LIST_NEXT); | 134 | pos+PCI_CAP_LIST_NEXT); |
129 | } | 135 | } |
130 | return 0; | 136 | return 0; |
131 | } | 137 | } |
132 | 138 | ||
133 | /* Read a standard AGPv3 bridge header */ | 139 | /* Read a standard AGPv3 bridge header */ |
134 | static __u32 __init read_agp(int num, int slot, int func, int cap, u32 *order) | 140 | static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order) |
135 | { | 141 | { |
136 | u32 apsize; | 142 | u32 apsize; |
137 | u32 apsizereg; | 143 | u32 apsizereg; |
138 | int nbits; | 144 | int nbits; |
139 | u32 aper_low, aper_hi; | 145 | u32 aper_low, aper_hi; |
140 | u64 aper; | 146 | u64 aper; |
147 | u32 old_order; | ||
141 | 148 | ||
142 | printk(KERN_INFO "AGP bridge at %02x:%02x:%02x\n", num, slot, func); | 149 | printk(KERN_INFO "AGP bridge at %02x:%02x:%02x\n", bus, slot, func); |
143 | apsizereg = read_pci_config_16(num, slot, func, cap + 0x14); | 150 | apsizereg = read_pci_config_16(bus, slot, func, cap + 0x14); |
144 | if (apsizereg == 0xffffffff) { | 151 | if (apsizereg == 0xffffffff) { |
145 | printk(KERN_ERR "APSIZE in AGP bridge unreadable\n"); | 152 | printk(KERN_ERR "APSIZE in AGP bridge unreadable\n"); |
146 | return 0; | 153 | return 0; |
147 | } | 154 | } |
148 | 155 | ||
156 | /* old_order could be the value from the NB gart setting */ | ||
157 | old_order = *order; | ||
158 | |||
149 | apsize = apsizereg & 0xfff; | 159 | apsize = apsizereg & 0xfff; |
150 | /* Some BIOS use weird encodings not in the AGPv3 table. */ | 160 | /* Some BIOS use weird encodings not in the AGPv3 table. */ |
151 | if (apsize & 0xff) | 161 | if (apsize & 0xff) |
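A minimal standalone sketch of the capability-list walk find_cap() performs above, run against a made-up config-space byte array instead of the early-PCI accessors (hosted C; the fake device contents are assumptions, and PCI_STATUS is really a 16-bit register, but the CAP_LIST bit sits in its low byte):

#include <stdint.h>
#include <stdio.h>

#define PCI_STATUS		0x06
#define PCI_STATUS_CAP_LIST	0x10	/* capability list present */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_LIST_ID		0
#define PCI_CAP_LIST_NEXT	1
#define PCI_CAP_ID_AGP		0x02

static uint8_t cfg[256];	/* fake config space for one function */

static uint8_t find_cap(uint8_t cap)
{
	uint8_t pos;
	int bytes;

	if (!(cfg[PCI_STATUS] & PCI_STATUS_CAP_LIST))
		return 0;

	pos = cfg[PCI_CAPABILITY_LIST];
	for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) {
		pos &= ~3;	/* capability pointers are dword aligned */
		if (cfg[pos + PCI_CAP_LIST_ID] == 0xff)
			break;
		if (cfg[pos + PCI_CAP_LIST_ID] == cap)
			return pos;
		pos = cfg[pos + PCI_CAP_LIST_NEXT];
	}
	return 0;
}

int main(void)
{
	cfg[PCI_STATUS] = PCI_STATUS_CAP_LIST;
	cfg[PCI_CAPABILITY_LIST] = 0x40;	/* first capability at 0x40 */
	cfg[0x40 + PCI_CAP_LIST_ID] = PCI_CAP_ID_AGP;
	cfg[0x40 + PCI_CAP_LIST_NEXT] = 0x00;	/* end of list */

	printf("AGP capability found at offset 0x%02x\n", find_cap(PCI_CAP_ID_AGP));
	return 0;
}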
@@ -155,14 +165,26 @@ static __u32 __init read_agp(int num, int slot, int func, int cap, u32 *order) | |||
155 | if ((int)*order < 0) /* < 32MB */ | 165 | if ((int)*order < 0) /* < 32MB */ |
156 | *order = 0; | 166 | *order = 0; |
157 | 167 | ||
158 | aper_low = read_pci_config(num, slot, func, 0x10); | 168 | aper_low = read_pci_config(bus, slot, func, 0x10); |
159 | aper_hi = read_pci_config(num, slot, func, 0x14); | 169 | aper_hi = read_pci_config(bus, slot, func, 0x14); |
160 | aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32); | 170 | aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32); |
161 | 171 | ||
172 | /* | ||
173 | * On some sick chips, APSIZE is 0. That means it wants 4G, | ||
174 | * so let's double-check that order, and trust the AMD NB settings: | ||
175 | */ | ||
176 | printk(KERN_INFO "Aperture from AGP @ %Lx old size %u MB\n", | ||
177 | aper, 32 << old_order); | ||
178 | if (aper + (32ULL<<(20 + *order)) > 0x100000000ULL) { | ||
179 | printk(KERN_INFO "Aperture size %u MB (APSIZE %x) is not right, using settings from NB\n", | ||
180 | 32 << *order, apsizereg); | ||
181 | *order = old_order; | ||
182 | } | ||
183 | |||
162 | printk(KERN_INFO "Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n", | 184 | printk(KERN_INFO "Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n", |
163 | aper, 32 << *order, apsizereg); | 185 | aper, 32 << *order, apsizereg); |
164 | 186 | ||
165 | if (!aperture_valid(aper, (32*1024*1024) << *order)) | 187 | if (!aperture_valid(aper, (32*1024*1024) << *order, 32<<20)) |
166 | return 0; | 188 | return 0; |
167 | return (u32)aper; | 189 | return (u32)aper; |
168 | } | 190 | } |
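A minimal standalone sketch of the aperture-BAR decode and the new 4 GB sanity check in read_agp() above (hosted C; the register values are assumptions, not real config-space reads):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t aper_low  = 0xd0000008;	/* assumed BAR at 0x10 */
	uint32_t aper_hi   = 0x00000000;	/* assumed BAR at 0x14 */
	uint32_t order     = 7;			/* APSIZE claimed 4 GB */
	uint32_t old_order = 1;			/* NB registers said 64 MB */

	/* base is 4 MB aligned; the low bits hold BAR flags */
	uint64_t aper = (aper_low & ~((1u << 22) - 1)) | ((uint64_t)aper_hi << 32);

	/* if base + size would cross 4 GB, distrust APSIZE and keep the NB order */
	if (aper + (32ull << (20 + order)) > 0x100000000ull)
		order = old_order;

	printf("aperture @ %#llx, size %u MB\n",
	       (unsigned long long)aper, 32u << order);
	return 0;
}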
@@ -180,17 +202,17 @@ static __u32 __init read_agp(int num, int slot, int func, int cap, u32 *order) | |||
180 | * the AGP bridges should be always an own bus on the HT hierarchy, | 202 | * the AGP bridges should be always an own bus on the HT hierarchy, |
181 | * but do it here for future safety. | 203 | * but do it here for future safety. |
182 | */ | 204 | */ |
183 | static __u32 __init search_agp_bridge(u32 *order, int *valid_agp) | 205 | static u32 __init search_agp_bridge(u32 *order, int *valid_agp) |
184 | { | 206 | { |
185 | int num, slot, func; | 207 | int bus, slot, func; |
186 | 208 | ||
187 | /* Poor man's PCI discovery */ | 209 | /* Poor man's PCI discovery */ |
188 | for (num = 0; num < 256; num++) { | 210 | for (bus = 0; bus < 256; bus++) { |
189 | for (slot = 0; slot < 32; slot++) { | 211 | for (slot = 0; slot < 32; slot++) { |
190 | for (func = 0; func < 8; func++) { | 212 | for (func = 0; func < 8; func++) { |
191 | u32 class, cap; | 213 | u32 class, cap; |
192 | u8 type; | 214 | u8 type; |
193 | class = read_pci_config(num, slot, func, | 215 | class = read_pci_config(bus, slot, func, |
194 | PCI_CLASS_REVISION); | 216 | PCI_CLASS_REVISION); |
195 | if (class == 0xffffffff) | 217 | if (class == 0xffffffff) |
196 | break; | 218 | break; |
@@ -199,17 +221,17 @@ static __u32 __init search_agp_bridge(u32 *order, int *valid_agp) | |||
199 | case PCI_CLASS_BRIDGE_HOST: | 221 | case PCI_CLASS_BRIDGE_HOST: |
200 | case PCI_CLASS_BRIDGE_OTHER: /* needed? */ | 222 | case PCI_CLASS_BRIDGE_OTHER: /* needed? */ |
201 | /* AGP bridge? */ | 223 | /* AGP bridge? */ |
202 | cap = find_cap(num, slot, func, | 224 | cap = find_cap(bus, slot, func, |
203 | PCI_CAP_ID_AGP); | 225 | PCI_CAP_ID_AGP); |
204 | if (!cap) | 226 | if (!cap) |
205 | break; | 227 | break; |
206 | *valid_agp = 1; | 228 | *valid_agp = 1; |
207 | return read_agp(num, slot, func, cap, | 229 | return read_agp(bus, slot, func, cap, |
208 | order); | 230 | order); |
209 | } | 231 | } |
210 | 232 | ||
211 | /* No multi-function device? */ | 233 | /* No multi-function device? */ |
212 | type = read_pci_config_byte(num, slot, func, | 234 | type = read_pci_config_byte(bus, slot, func, |
213 | PCI_HEADER_TYPE); | 235 | PCI_HEADER_TYPE); |
214 | if (!(type & 0x80)) | 236 | if (!(type & 0x80)) |
215 | break; | 237 | break; |
@@ -249,36 +271,50 @@ void __init early_gart_iommu_check(void) | |||
249 | * or BIOS forget to put that in reserved. | 271 | * or BIOS forget to put that in reserved. |
250 | * try to update e820 to make that region as reserved. | 272 | * try to update e820 to make that region as reserved. |
251 | */ | 273 | */ |
252 | int fix, num; | 274 | int i, fix, slot; |
253 | u32 ctl; | 275 | u32 ctl; |
254 | u32 aper_size = 0, aper_order = 0, last_aper_order = 0; | 276 | u32 aper_size = 0, aper_order = 0, last_aper_order = 0; |
255 | u64 aper_base = 0, last_aper_base = 0; | 277 | u64 aper_base = 0, last_aper_base = 0; |
256 | int aper_enabled = 0, last_aper_enabled = 0; | 278 | int aper_enabled = 0, last_aper_enabled = 0, last_valid = 0; |
257 | 279 | ||
258 | if (!early_pci_allowed()) | 280 | if (!early_pci_allowed()) |
259 | return; | 281 | return; |
260 | 282 | ||
283 | /* This is mostly a duplicate of gart_iommu_hole_init() */ | ||
261 | fix = 0; | 284 | fix = 0; |
262 | for (num = 24; num < 32; num++) { | 285 | for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { |
263 | if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00))) | 286 | int bus; |
264 | continue; | 287 | int dev_base, dev_limit; |
265 | 288 | ||
266 | ctl = read_pci_config(0, num, 3, 0x90); | 289 | bus = bus_dev_ranges[i].bus; |
267 | aper_enabled = ctl & 1; | 290 | dev_base = bus_dev_ranges[i].dev_base; |
268 | aper_order = (ctl >> 1) & 7; | 291 | dev_limit = bus_dev_ranges[i].dev_limit; |
269 | aper_size = (32 * 1024 * 1024) << aper_order; | 292 | |
270 | aper_base = read_pci_config(0, num, 3, 0x94) & 0x7fff; | 293 | for (slot = dev_base; slot < dev_limit; slot++) { |
271 | aper_base <<= 25; | 294 | if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) |
272 | 295 | continue; | |
273 | if ((last_aper_order && aper_order != last_aper_order) || | 296 | |
274 | (last_aper_base && aper_base != last_aper_base) || | 297 | ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); |
275 | (last_aper_enabled && aper_enabled != last_aper_enabled)) { | 298 | aper_enabled = ctl & AMD64_GARTEN; |
276 | fix = 1; | 299 | aper_order = (ctl >> 1) & 7; |
277 | break; | 300 | aper_size = (32 * 1024 * 1024) << aper_order; |
301 | aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; | ||
302 | aper_base <<= 25; | ||
303 | |||
304 | if (last_valid) { | ||
305 | if ((aper_order != last_aper_order) || | ||
306 | (aper_base != last_aper_base) || | ||
307 | (aper_enabled != last_aper_enabled)) { | ||
308 | fix = 1; | ||
309 | break; | ||
310 | } | ||
311 | } | ||
312 | |||
313 | last_aper_order = aper_order; | ||
314 | last_aper_base = aper_base; | ||
315 | last_aper_enabled = aper_enabled; | ||
316 | last_valid = 1; | ||
278 | } | 317 | } |
279 | last_aper_order = aper_order; | ||
280 | last_aper_base = aper_base; | ||
281 | last_aper_enabled = aper_enabled; | ||
282 | } | 318 | } |
283 | 319 | ||
284 | if (!fix && !aper_enabled) | 320 | if (!fix && !aper_enabled) |
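A minimal standalone sketch of how the loop above decodes the K8 northbridge GART registers (hosted C; the AMD64_GARTAPERTURECTL and AMD64_GARTAPERTUREBASE values are assumptions, not real hardware reads):

#include <stdint.h>
#include <stdio.h>

#define AMD64_GARTEN	(1u << 0)	/* GART enable bit in the control register */

int main(void)
{
	uint32_t ctl      = 0x00000007;	/* assumed AMD64_GARTAPERTURECTL value */
	uint32_t base_reg = 0x00000040;	/* assumed AMD64_GARTAPERTUREBASE value */

	int      enabled   = !!(ctl & AMD64_GARTEN);
	uint32_t order     = (ctl >> 1) & 7;			/* bits 3:1 */
	uint32_t aper_size = (32u * 1024 * 1024) << order;	/* 32 MB << order */
	uint64_t aper_base = (uint64_t)(base_reg & 0x7fff) << 25; /* 32 MB units */

	printf("GART %s, base %#llx, size %u MB\n",
	       enabled ? "enabled" : "disabled",
	       (unsigned long long)aper_base, aper_size >> 20);
	return 0;
}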
@@ -290,32 +326,46 @@ void __init early_gart_iommu_check(void) | |||
290 | if (gart_fix_e820 && !fix && aper_enabled) { | 326 | if (gart_fix_e820 && !fix && aper_enabled) { |
291 | if (e820_any_mapped(aper_base, aper_base + aper_size, | 327 | if (e820_any_mapped(aper_base, aper_base + aper_size, |
292 | E820_RAM)) { | 328 | E820_RAM)) { |
293 | /* reserved it, so we can resuse it in second kernel */ | 329 | /* reserve it, so we can reuse it in second kernel */ |
294 | printk(KERN_INFO "update e820 for GART\n"); | 330 | printk(KERN_INFO "update e820 for GART\n"); |
295 | add_memory_region(aper_base, aper_size, E820_RESERVED); | 331 | add_memory_region(aper_base, aper_size, E820_RESERVED); |
296 | update_e820(); | 332 | update_e820(); |
297 | } | 333 | } |
298 | return; | ||
299 | } | 334 | } |
300 | 335 | ||
336 | if (!fix) | ||
337 | return; | ||
338 | |||
301 | /* different nodes have different setting, disable them all at first*/ | 339 | /* different nodes have different setting, disable them all at first*/ |
302 | for (num = 24; num < 32; num++) { | 340 | for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { |
303 | if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00))) | 341 | int bus; |
304 | continue; | 342 | int dev_base, dev_limit; |
343 | |||
344 | bus = bus_dev_ranges[i].bus; | ||
345 | dev_base = bus_dev_ranges[i].dev_base; | ||
346 | dev_limit = bus_dev_ranges[i].dev_limit; | ||
347 | |||
348 | for (slot = dev_base; slot < dev_limit; slot++) { | ||
349 | if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) | ||
350 | continue; | ||
305 | 351 | ||
306 | ctl = read_pci_config(0, num, 3, 0x90); | 352 | ctl = read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL); |
307 | ctl &= ~1; | 353 | ctl &= ~AMD64_GARTEN; |
308 | write_pci_config(0, num, 3, 0x90, ctl); | 354 | write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl); |
355 | } | ||
309 | } | 356 | } |
310 | 357 | ||
311 | } | 358 | } |
312 | 359 | ||
360 | static int __initdata printed_gart_size_msg; | ||
361 | |||
313 | void __init gart_iommu_hole_init(void) | 362 | void __init gart_iommu_hole_init(void) |
314 | { | 363 | { |
364 | u32 agp_aper_base = 0, agp_aper_order = 0; | ||
315 | u32 aper_size, aper_alloc = 0, aper_order = 0, last_aper_order = 0; | 365 | u32 aper_size, aper_alloc = 0, aper_order = 0, last_aper_order = 0; |
316 | u64 aper_base, last_aper_base = 0; | 366 | u64 aper_base, last_aper_base = 0; |
317 | int fix, num, valid_agp = 0; | 367 | int fix, slot, valid_agp = 0; |
318 | int node; | 368 | int i, node; |
319 | 369 | ||
320 | if (gart_iommu_aperture_disabled || !fix_aperture || | 370 | if (gart_iommu_aperture_disabled || !fix_aperture || |
321 | !early_pci_allowed()) | 371 | !early_pci_allowed()) |
@@ -323,38 +373,63 @@ void __init gart_iommu_hole_init(void) | |||
323 | 373 | ||
324 | printk(KERN_INFO "Checking aperture...\n"); | 374 | printk(KERN_INFO "Checking aperture...\n"); |
325 | 375 | ||
376 | if (!fallback_aper_force) | ||
377 | agp_aper_base = search_agp_bridge(&agp_aper_order, &valid_agp); | ||
378 | |||
326 | fix = 0; | 379 | fix = 0; |
327 | node = 0; | 380 | node = 0; |
328 | for (num = 24; num < 32; num++) { | 381 | for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { |
329 | if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00))) | 382 | int bus; |
330 | continue; | 383 | int dev_base, dev_limit; |
331 | 384 | ||
332 | iommu_detected = 1; | 385 | bus = bus_dev_ranges[i].bus; |
333 | gart_iommu_aperture = 1; | 386 | dev_base = bus_dev_ranges[i].dev_base; |
334 | 387 | dev_limit = bus_dev_ranges[i].dev_limit; | |
335 | aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7; | 388 | |
336 | aper_size = (32 * 1024 * 1024) << aper_order; | 389 | for (slot = dev_base; slot < dev_limit; slot++) { |
337 | aper_base = read_pci_config(0, num, 3, 0x94) & 0x7fff; | 390 | if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) |
338 | aper_base <<= 25; | 391 | continue; |
339 | 392 | ||
340 | printk(KERN_INFO "Node %d: aperture @ %Lx size %u MB\n", | 393 | iommu_detected = 1; |
341 | node, aper_base, aper_size >> 20); | 394 | gart_iommu_aperture = 1; |
342 | node++; | 395 | |
343 | 396 | aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7; | |
344 | if (!aperture_valid(aper_base, aper_size)) { | 397 | aper_size = (32 * 1024 * 1024) << aper_order; |
345 | fix = 1; | 398 | aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; |
346 | break; | 399 | aper_base <<= 25; |
347 | } | 400 | |
401 | printk(KERN_INFO "Node %d: aperture @ %Lx size %u MB\n", | ||
402 | node, aper_base, aper_size >> 20); | ||
403 | node++; | ||
404 | |||
405 | if (!aperture_valid(aper_base, aper_size, 64<<20)) { | ||
406 | if (valid_agp && agp_aper_base && | ||
407 | agp_aper_base == aper_base && | ||
408 | agp_aper_order == aper_order) { | ||
409 | /* the same setting from both NB and AGP */ | ||
410 | if (!no_iommu && end_pfn > MAX_DMA32_PFN && !printed_gart_size_msg) { | ||
411 | printk(KERN_ERR "you are using iommu with agp, but GART size is less than 64M\n"); | ||
412 | printk(KERN_ERR "please increase GART size in your BIOS setup\n"); | ||
413 | printk(KERN_ERR "if BIOS doesn't have that option, contact your HW vendor!\n"); | ||
414 | printed_gart_size_msg = 1; | ||
415 | } | ||
416 | } else { | ||
417 | fix = 1; | ||
418 | goto out; | ||
419 | } | ||
420 | } | ||
348 | 421 | ||
349 | if ((last_aper_order && aper_order != last_aper_order) || | 422 | if ((last_aper_order && aper_order != last_aper_order) || |
350 | (last_aper_base && aper_base != last_aper_base)) { | 423 | (last_aper_base && aper_base != last_aper_base)) { |
351 | fix = 1; | 424 | fix = 1; |
352 | break; | 425 | goto out; |
426 | } | ||
427 | last_aper_order = aper_order; | ||
428 | last_aper_base = aper_base; | ||
353 | } | 429 | } |
354 | last_aper_order = aper_order; | ||
355 | last_aper_base = aper_base; | ||
356 | } | 430 | } |
357 | 431 | ||
432 | out: | ||
358 | if (!fix && !fallback_aper_force) { | 433 | if (!fix && !fallback_aper_force) { |
359 | if (last_aper_base) { | 434 | if (last_aper_base) { |
360 | unsigned long n = (32 * 1024 * 1024) << last_aper_order; | 435 | unsigned long n = (32 * 1024 * 1024) << last_aper_order; |
@@ -364,8 +439,10 @@ void __init gart_iommu_hole_init(void) | |||
364 | return; | 439 | return; |
365 | } | 440 | } |
366 | 441 | ||
367 | if (!fallback_aper_force) | 442 | if (!fallback_aper_force) { |
368 | aper_alloc = search_agp_bridge(&aper_order, &valid_agp); | 443 | aper_alloc = agp_aper_base; |
444 | aper_order = agp_aper_order; | ||
445 | } | ||
369 | 446 | ||
370 | if (aper_alloc) { | 447 | if (aper_alloc) { |
371 | /* Got the aperture from the AGP bridge */ | 448 | /* Got the aperture from the AGP bridge */ |
@@ -401,16 +478,24 @@ void __init gart_iommu_hole_init(void) | |||
401 | } | 478 | } |
402 | 479 | ||
403 | /* Fix up the north bridges */ | 480 | /* Fix up the north bridges */ |
404 | for (num = 24; num < 32; num++) { | 481 | for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) { |
405 | if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00))) | 482 | int bus; |
406 | continue; | 483 | int dev_base, dev_limit; |
407 | 484 | ||
408 | /* | 485 | bus = bus_dev_ranges[i].bus; |
409 | * Don't enable translation yet. That is done later. | 486 | dev_base = bus_dev_ranges[i].dev_base; |
410 | * Assume this BIOS didn't initialise the GART so | 487 | dev_limit = bus_dev_ranges[i].dev_limit; |
411 | * just overwrite all previous bits | 488 | for (slot = dev_base; slot < dev_limit; slot++) { |
412 | */ | 489 | if (!early_is_k8_nb(read_pci_config(bus, slot, 3, 0x00))) |
413 | write_pci_config(0, num, 3, 0x90, aper_order<<1); | 490 | continue; |
414 | write_pci_config(0, num, 3, 0x94, aper_alloc>>25); | 491 | |
492 | /* Don't enable translation yet. That is done later. | ||
493 | Assume this BIOS didn't initialise the GART so | ||
494 | just overwrite all previous bits */ | ||
495 | write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, aper_order << 1); | ||
496 | write_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE, aper_alloc >> 25); | ||
497 | } | ||
415 | } | 498 | } |
499 | |||
500 | set_up_gart_resume(aper_order, aper_alloc); | ||
416 | } | 501 | } |
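And the inverse encoding used by the fixup loop above: the order goes into bits 3:1 of the control register with GARTEN left clear (translation is enabled later), and the base register holds the physical address shifted down by 25 bits. A minimal sketch with assumed values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t aper_order = 1;		/* 64 MB aperture */
	uint32_t aper_alloc = 0x80000000;	/* assumed aperture physical base */

	uint32_t ctl  = aper_order << 1;	/* GARTEN (bit 0) left clear */
	uint32_t base = aper_alloc >> 25;	/* base register holds phys >> 25 */

	printf("write AMD64_GARTAPERTURECTL=%#x AMD64_GARTAPERTUREBASE=%#x\n",
	       ctl, base);
	return 0;
}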