path: root/drivers/char/agp/intel-agp.c
author	Daniel Vetter <daniel.vetter@ffwll.ch>	2010-04-13 18:29:52 -0400
committer	Eric Anholt <eric@anholt.net>	2010-04-18 20:35:47 -0400
commit	f51b76621137c18501f6d21a995d36a8bcb49999
tree	ea146954362dd400b4582e4fb89242b075aaf556 /drivers/char/agp/intel-agp.c
parent	ff7cdd691a0c4925c1803bf89a4c08ccda2d7658
agp/intel: split out the GTT support
intel-agp.c actually contains two different drivers: an AGP driver for
_physical_ AGP slots and the GTT driver that is used by the Intel drm
modules. Split them to prevent any further confusion. This patch just
moves the code and #includes intel-gtt.c in intel-agp.c.

Later patches will untangle these two drivers further.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Eric Anholt <eric@anholt.net>
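Note for readers skimming the diff below: the mechanical trick here is plain
textual inclusion. The GTT half moves into its own file, and the AGP half
pulls it back in with #include, so the two halves still build as a single
translation unit and static symbols stay visible across the split. A minimal
sketch of the pattern, with illustrative file and function names rather than
the actual driver code:

	/* gtt-half.c -- the code that was split out (illustrative) */
	static int gtt_half_setup(void)
	{
		/* GTT-only initialization would live here */
		return 0;
	}

	/* agp-half.c -- the remaining file textually includes the other half */
	#include "gtt-half.c"	/* still one compilation unit after the split */

	int agp_half_init(void)
	{
		return gtt_half_setup();	/* static symbol still reachable */
	}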
Diffstat (limited to 'drivers/char/agp/intel-agp.c')
-rw-r--r--	drivers/char/agp/intel-agp.c	1525
1 file changed, 2 insertions(+), 1523 deletions(-)
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 154bb9256961..6a22aa9783b8 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,1375 +12,11 @@
12#include "agp.h" 12#include "agp.h"
13#include "intel-agp.h" 13#include "intel-agp.h"
14 14
15#include "intel-gtt.c"
16
15int intel_agp_enabled; 17int intel_agp_enabled;
16EXPORT_SYMBOL(intel_agp_enabled); 18EXPORT_SYMBOL(intel_agp_enabled);
17 19
18/*
19 * If we have Intel graphics, we're not going to have anything other than
20 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
21 * on the Intel IOMMU support (CONFIG_DMAR).
22 * Only newer chipsets need to bother with this, of course.
23 */
24#ifdef CONFIG_DMAR
25#define USE_PCI_DMA_API 1
26#endif
27
28extern int agp_memory_reserved;
29
30
31static const struct aper_size_info_fixed intel_i810_sizes[] =
32{
33 {64, 16384, 4},
34 /* The 32M mode still requires a 64k gatt */
35 {32, 8192, 4}
36};
37
38#define AGP_DCACHE_MEMORY 1
39#define AGP_PHYS_MEMORY 2
40#define INTEL_AGP_CACHED_MEMORY 3
41
42static struct gatt_mask intel_i810_masks[] =
43{
44 {.mask = I810_PTE_VALID, .type = 0},
45 {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
46 {.mask = I810_PTE_VALID, .type = 0},
47 {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
48 .type = INTEL_AGP_CACHED_MEMORY}
49};
50
51static struct _intel_private {
52 struct pci_dev *pcidev; /* device one */
53 u8 __iomem *registers;
54 u32 __iomem *gtt; /* I915G */
55 int num_dcache_entries;
56 /* gtt_entries is the number of gtt entries that are already mapped
57 * to stolen memory. Stolen memory is larger than the memory mapped
58 * through gtt_entries, as it includes some reserved space for the BIOS
59 * popup and for the GTT.
60 */
61 int gtt_entries; /* i830+ */
62 int gtt_total_size;
63 union {
64 void __iomem *i9xx_flush_page;
65 void *i8xx_flush_page;
66 };
67 struct page *i8xx_page;
68 struct resource ifp_resource;
69 int resource_valid;
70} intel_private;
71
72#ifdef USE_PCI_DMA_API
73static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
74{
75 *ret = pci_map_page(intel_private.pcidev, page, 0,
76 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
77 if (pci_dma_mapping_error(intel_private.pcidev, *ret))
78 return -EINVAL;
79 return 0;
80}
81
82static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
83{
84 pci_unmap_page(intel_private.pcidev, dma,
85 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
86}
87
88static void intel_agp_free_sglist(struct agp_memory *mem)
89{
90 struct sg_table st;
91
92 st.sgl = mem->sg_list;
93 st.orig_nents = st.nents = mem->page_count;
94
95 sg_free_table(&st);
96
97 mem->sg_list = NULL;
98 mem->num_sg = 0;
99}
100
101static int intel_agp_map_memory(struct agp_memory *mem)
102{
103 struct sg_table st;
104 struct scatterlist *sg;
105 int i;
106
107 DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
108
109 if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
110 return -ENOMEM;
111
112 mem->sg_list = sg = st.sgl;
113
114 for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
115 sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
116
117 mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
118 mem->page_count, PCI_DMA_BIDIRECTIONAL);
119 if (unlikely(!mem->num_sg)) {
120 intel_agp_free_sglist(mem);
121 return -ENOMEM;
122 }
123 return 0;
124}
125
126static void intel_agp_unmap_memory(struct agp_memory *mem)
127{
128 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
129
130 pci_unmap_sg(intel_private.pcidev, mem->sg_list,
131 mem->page_count, PCI_DMA_BIDIRECTIONAL);
132 intel_agp_free_sglist(mem);
133}
134
135static void intel_agp_insert_sg_entries(struct agp_memory *mem,
136 off_t pg_start, int mask_type)
137{
138 struct scatterlist *sg;
139 int i, j;
140
141 j = pg_start;
142
143 WARN_ON(!mem->num_sg);
144
145 if (mem->num_sg == mem->page_count) {
146 for_each_sg(mem->sg_list, sg, mem->page_count, i) {
147 writel(agp_bridge->driver->mask_memory(agp_bridge,
148 sg_dma_address(sg), mask_type),
149 intel_private.gtt+j);
150 j++;
151 }
152 } else {
153 /* sg may merge pages, but we have to separate
154 * per-page addr for GTT */
155 unsigned int len, m;
156
157 for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
158 len = sg_dma_len(sg) / PAGE_SIZE;
159 for (m = 0; m < len; m++) {
160 writel(agp_bridge->driver->mask_memory(agp_bridge,
161 sg_dma_address(sg) + m * PAGE_SIZE,
162 mask_type),
163 intel_private.gtt+j);
164 j++;
165 }
166 }
167 }
168 readl(intel_private.gtt+j-1);
169}
170
171#else
172
173static void intel_agp_insert_sg_entries(struct agp_memory *mem,
174 off_t pg_start, int mask_type)
175{
176 int i, j;
177 u32 cache_bits = 0;
178
179 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
180 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
181 {
182 cache_bits = I830_PTE_SYSTEM_CACHED;
183 }
184
185 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
186 writel(agp_bridge->driver->mask_memory(agp_bridge,
187 page_to_phys(mem->pages[i]), mask_type),
188 intel_private.gtt+j);
189 }
190
191 readl(intel_private.gtt+j-1);
192}
193
194#endif
195
196static int intel_i810_fetch_size(void)
197{
198 u32 smram_miscc;
199 struct aper_size_info_fixed *values;
200
201 pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
202 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
203
204 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
205 dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
206 return 0;
207 }
208 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
209 agp_bridge->previous_size =
210 agp_bridge->current_size = (void *) (values + 1);
211 agp_bridge->aperture_size_idx = 1;
212 return values[1].size;
213 } else {
214 agp_bridge->previous_size =
215 agp_bridge->current_size = (void *) (values);
216 agp_bridge->aperture_size_idx = 0;
217 return values[0].size;
218 }
219
220 return 0;
221}
222
223static int intel_i810_configure(void)
224{
225 struct aper_size_info_fixed *current_size;
226 u32 temp;
227 int i;
228
229 current_size = A_SIZE_FIX(agp_bridge->current_size);
230
231 if (!intel_private.registers) {
232 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
233 temp &= 0xfff80000;
234
235 intel_private.registers = ioremap(temp, 128 * 4096);
236 if (!intel_private.registers) {
237 dev_err(&intel_private.pcidev->dev,
238 "can't remap memory\n");
239 return -ENOMEM;
240 }
241 }
242
243 if ((readl(intel_private.registers+I810_DRAM_CTL)
244 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
245 /* This will need to be dynamically assigned */
246 dev_info(&intel_private.pcidev->dev,
247 "detected 4MB dedicated video ram\n");
248 intel_private.num_dcache_entries = 1024;
249 }
250 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
251 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
252 writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
253 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
254
255 if (agp_bridge->driver->needs_scratch_page) {
256 for (i = 0; i < current_size->num_entries; i++) {
257 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
258 }
259 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
260 }
261 global_cache_flush();
262 return 0;
263}
264
265static void intel_i810_cleanup(void)
266{
267 writel(0, intel_private.registers+I810_PGETBL_CTL);
268 readl(intel_private.registers); /* PCI Posting. */
269 iounmap(intel_private.registers);
270}
271
272static void intel_i810_tlbflush(struct agp_memory *mem)
273{
274 return;
275}
276
277static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
278{
279 return;
280}
281
282/* Exists to support ARGB cursors */
283static struct page *i8xx_alloc_pages(void)
284{
285 struct page *page;
286
287 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
288 if (page == NULL)
289 return NULL;
290
291 if (set_pages_uc(page, 4) < 0) {
292 set_pages_wb(page, 4);
293 __free_pages(page, 2);
294 return NULL;
295 }
296 get_page(page);
297 atomic_inc(&agp_bridge->current_memory_agp);
298 return page;
299}
300
301static void i8xx_destroy_pages(struct page *page)
302{
303 if (page == NULL)
304 return;
305
306 set_pages_wb(page, 4);
307 put_page(page);
308 __free_pages(page, 2);
309 atomic_dec(&agp_bridge->current_memory_agp);
310}
311
312static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
313 int type)
314{
315 if (type < AGP_USER_TYPES)
316 return type;
317 else if (type == AGP_USER_CACHED_MEMORY)
318 return INTEL_AGP_CACHED_MEMORY;
319 else
320 return 0;
321}
322
323static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
324 int type)
325{
326 int i, j, num_entries;
327 void *temp;
328 int ret = -EINVAL;
329 int mask_type;
330
331 if (mem->page_count == 0)
332 goto out;
333
334 temp = agp_bridge->current_size;
335 num_entries = A_SIZE_FIX(temp)->num_entries;
336
337 if ((pg_start + mem->page_count) > num_entries)
338 goto out_err;
339
340
341 for (j = pg_start; j < (pg_start + mem->page_count); j++) {
342 if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
343 ret = -EBUSY;
344 goto out_err;
345 }
346 }
347
348 if (type != mem->type)
349 goto out_err;
350
351 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
352
353 switch (mask_type) {
354 case AGP_DCACHE_MEMORY:
355 if (!mem->is_flushed)
356 global_cache_flush();
357 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
358 writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
359 intel_private.registers+I810_PTE_BASE+(i*4));
360 }
361 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
362 break;
363 case AGP_PHYS_MEMORY:
364 case AGP_NORMAL_MEMORY:
365 if (!mem->is_flushed)
366 global_cache_flush();
367 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
368 writel(agp_bridge->driver->mask_memory(agp_bridge,
369 page_to_phys(mem->pages[i]), mask_type),
370 intel_private.registers+I810_PTE_BASE+(j*4));
371 }
372 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
373 break;
374 default:
375 goto out_err;
376 }
377
378 agp_bridge->driver->tlb_flush(mem);
379out:
380 ret = 0;
381out_err:
382 mem->is_flushed = true;
383 return ret;
384}
385
386static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
387 int type)
388{
389 int i;
390
391 if (mem->page_count == 0)
392 return 0;
393
394 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
395 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
396 }
397 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
398
399 agp_bridge->driver->tlb_flush(mem);
400 return 0;
401}
402
403/*
404 * The i810/i830 requires a physical address to program its mouse
405 * pointer into hardware.
406 * However the Xserver still writes to it through the agp aperture.
407 */
408static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
409{
410 struct agp_memory *new;
411 struct page *page;
412
413 switch (pg_count) {
414 case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
415 break;
416 case 4:
417 /* kludge to get 4 physical pages for ARGB cursor */
418 page = i8xx_alloc_pages();
419 break;
420 default:
421 return NULL;
422 }
423
424 if (page == NULL)
425 return NULL;
426
427 new = agp_create_memory(pg_count);
428 if (new == NULL)
429 return NULL;
430
431 new->pages[0] = page;
432 if (pg_count == 4) {
433 /* kludge to get 4 physical pages for ARGB cursor */
434 new->pages[1] = new->pages[0] + 1;
435 new->pages[2] = new->pages[1] + 1;
436 new->pages[3] = new->pages[2] + 1;
437 }
438 new->page_count = pg_count;
439 new->num_scratch_pages = pg_count;
440 new->type = AGP_PHYS_MEMORY;
441 new->physical = page_to_phys(new->pages[0]);
442 return new;
443}
444
445static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
446{
447 struct agp_memory *new;
448
449 if (type == AGP_DCACHE_MEMORY) {
450 if (pg_count != intel_private.num_dcache_entries)
451 return NULL;
452
453 new = agp_create_memory(1);
454 if (new == NULL)
455 return NULL;
456
457 new->type = AGP_DCACHE_MEMORY;
458 new->page_count = pg_count;
459 new->num_scratch_pages = 0;
460 agp_free_page_array(new);
461 return new;
462 }
463 if (type == AGP_PHYS_MEMORY)
464 return alloc_agpphysmem_i8xx(pg_count, type);
465 return NULL;
466}
467
468static void intel_i810_free_by_type(struct agp_memory *curr)
469{
470 agp_free_key(curr->key);
471 if (curr->type == AGP_PHYS_MEMORY) {
472 if (curr->page_count == 4)
473 i8xx_destroy_pages(curr->pages[0]);
474 else {
475 agp_bridge->driver->agp_destroy_page(curr->pages[0],
476 AGP_PAGE_DESTROY_UNMAP);
477 agp_bridge->driver->agp_destroy_page(curr->pages[0],
478 AGP_PAGE_DESTROY_FREE);
479 }
480 agp_free_page_array(curr);
481 }
482 kfree(curr);
483}
484
485static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
486 dma_addr_t addr, int type)
487{
488 /* Type checking must be done elsewhere */
489 return addr | bridge->driver->masks[type].mask;
490}
491
492static struct aper_size_info_fixed intel_i830_sizes[] =
493{
494 {128, 32768, 5},
495 /* The 64M mode still requires a 128k gatt */
496 {64, 16384, 5},
497 {256, 65536, 6},
498 {512, 131072, 7},
499};
500
501static void intel_i830_init_gtt_entries(void)
502{
503 u16 gmch_ctrl;
504 int gtt_entries = 0;
505 u8 rdct;
506 int local = 0;
507 static const int ddt[4] = { 0, 16, 32, 64 };
508 int size; /* reserved space (in kb) at the top of stolen memory */
509
510 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
511
512 if (IS_I965) {
513 u32 pgetbl_ctl;
514 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
515
516 /* The 965 has a field telling us the size of the GTT,
517 * which may be larger than what is necessary to map the
518 * aperture.
519 */
520 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
521 case I965_PGETBL_SIZE_128KB:
522 size = 128;
523 break;
524 case I965_PGETBL_SIZE_256KB:
525 size = 256;
526 break;
527 case I965_PGETBL_SIZE_512KB:
528 size = 512;
529 break;
530 case I965_PGETBL_SIZE_1MB:
531 size = 1024;
532 break;
533 case I965_PGETBL_SIZE_2MB:
534 size = 2048;
535 break;
536 case I965_PGETBL_SIZE_1_5MB:
537 size = 1024 + 512;
538 break;
539 default:
540 dev_info(&intel_private.pcidev->dev,
541 "unknown page table size, assuming 512KB\n");
542 size = 512;
543 }
544 size += 4; /* add in BIOS popup space */
545 } else if (IS_G33 && !IS_PINEVIEW) {
546 /* G33's GTT size defined in gmch_ctrl */
547 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
548 case G33_PGETBL_SIZE_1M:
549 size = 1024;
550 break;
551 case G33_PGETBL_SIZE_2M:
552 size = 2048;
553 break;
554 default:
555 dev_info(&agp_bridge->dev->dev,
556 "unknown page table size 0x%x, assuming 512KB\n",
557 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
558 size = 512;
559 }
560 size += 4;
561 } else if (IS_G4X || IS_PINEVIEW) {
562 /* On 4 series hardware, GTT stolen is separate from graphics
563 * stolen, ignore it in stolen gtt entries counting. However,
564 * 4KB of the stolen memory doesn't get mapped to the GTT.
565 */
566 size = 4;
567 } else {
568 /* On previous hardware, the GTT size was just what was
569 * required to map the aperture.
570 */
571 size = agp_bridge->driver->fetch_size() + 4;
572 }
573
574 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
575 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
576 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
577 case I830_GMCH_GMS_STOLEN_512:
578 gtt_entries = KB(512) - KB(size);
579 break;
580 case I830_GMCH_GMS_STOLEN_1024:
581 gtt_entries = MB(1) - KB(size);
582 break;
583 case I830_GMCH_GMS_STOLEN_8192:
584 gtt_entries = MB(8) - KB(size);
585 break;
586 case I830_GMCH_GMS_LOCAL:
587 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
588 gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
589 MB(ddt[I830_RDRAM_DDT(rdct)]);
590 local = 1;
591 break;
592 default:
593 gtt_entries = 0;
594 break;
595 }
596 } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
597 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
598 /*
599 * SandyBridge has new memory control reg at 0x50.w
600 */
601 u16 snb_gmch_ctl;
602 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
603 switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
604 case SNB_GMCH_GMS_STOLEN_32M:
605 gtt_entries = MB(32) - KB(size);
606 break;
607 case SNB_GMCH_GMS_STOLEN_64M:
608 gtt_entries = MB(64) - KB(size);
609 break;
610 case SNB_GMCH_GMS_STOLEN_96M:
611 gtt_entries = MB(96) - KB(size);
612 break;
613 case SNB_GMCH_GMS_STOLEN_128M:
614 gtt_entries = MB(128) - KB(size);
615 break;
616 case SNB_GMCH_GMS_STOLEN_160M:
617 gtt_entries = MB(160) - KB(size);
618 break;
619 case SNB_GMCH_GMS_STOLEN_192M:
620 gtt_entries = MB(192) - KB(size);
621 break;
622 case SNB_GMCH_GMS_STOLEN_224M:
623 gtt_entries = MB(224) - KB(size);
624 break;
625 case SNB_GMCH_GMS_STOLEN_256M:
626 gtt_entries = MB(256) - KB(size);
627 break;
628 case SNB_GMCH_GMS_STOLEN_288M:
629 gtt_entries = MB(288) - KB(size);
630 break;
631 case SNB_GMCH_GMS_STOLEN_320M:
632 gtt_entries = MB(320) - KB(size);
633 break;
634 case SNB_GMCH_GMS_STOLEN_352M:
635 gtt_entries = MB(352) - KB(size);
636 break;
637 case SNB_GMCH_GMS_STOLEN_384M:
638 gtt_entries = MB(384) - KB(size);
639 break;
640 case SNB_GMCH_GMS_STOLEN_416M:
641 gtt_entries = MB(416) - KB(size);
642 break;
643 case SNB_GMCH_GMS_STOLEN_448M:
644 gtt_entries = MB(448) - KB(size);
645 break;
646 case SNB_GMCH_GMS_STOLEN_480M:
647 gtt_entries = MB(480) - KB(size);
648 break;
649 case SNB_GMCH_GMS_STOLEN_512M:
650 gtt_entries = MB(512) - KB(size);
651 break;
652 }
653 } else {
654 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
655 case I855_GMCH_GMS_STOLEN_1M:
656 gtt_entries = MB(1) - KB(size);
657 break;
658 case I855_GMCH_GMS_STOLEN_4M:
659 gtt_entries = MB(4) - KB(size);
660 break;
661 case I855_GMCH_GMS_STOLEN_8M:
662 gtt_entries = MB(8) - KB(size);
663 break;
664 case I855_GMCH_GMS_STOLEN_16M:
665 gtt_entries = MB(16) - KB(size);
666 break;
667 case I855_GMCH_GMS_STOLEN_32M:
668 gtt_entries = MB(32) - KB(size);
669 break;
670 case I915_GMCH_GMS_STOLEN_48M:
671 /* Check it's really I915G */
672 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
673 gtt_entries = MB(48) - KB(size);
674 else
675 gtt_entries = 0;
676 break;
677 case I915_GMCH_GMS_STOLEN_64M:
678 /* Check it's really I915G */
679 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
680 gtt_entries = MB(64) - KB(size);
681 else
682 gtt_entries = 0;
683 break;
684 case G33_GMCH_GMS_STOLEN_128M:
685 if (IS_G33 || IS_I965 || IS_G4X)
686 gtt_entries = MB(128) - KB(size);
687 else
688 gtt_entries = 0;
689 break;
690 case G33_GMCH_GMS_STOLEN_256M:
691 if (IS_G33 || IS_I965 || IS_G4X)
692 gtt_entries = MB(256) - KB(size);
693 else
694 gtt_entries = 0;
695 break;
696 case INTEL_GMCH_GMS_STOLEN_96M:
697 if (IS_I965 || IS_G4X)
698 gtt_entries = MB(96) - KB(size);
699 else
700 gtt_entries = 0;
701 break;
702 case INTEL_GMCH_GMS_STOLEN_160M:
703 if (IS_I965 || IS_G4X)
704 gtt_entries = MB(160) - KB(size);
705 else
706 gtt_entries = 0;
707 break;
708 case INTEL_GMCH_GMS_STOLEN_224M:
709 if (IS_I965 || IS_G4X)
710 gtt_entries = MB(224) - KB(size);
711 else
712 gtt_entries = 0;
713 break;
714 case INTEL_GMCH_GMS_STOLEN_352M:
715 if (IS_I965 || IS_G4X)
716 gtt_entries = MB(352) - KB(size);
717 else
718 gtt_entries = 0;
719 break;
720 default:
721 gtt_entries = 0;
722 break;
723 }
724 }
725 if (gtt_entries > 0) {
726 dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
727 gtt_entries / KB(1), local ? "local" : "stolen");
728 gtt_entries /= KB(4);
729 } else {
730 dev_info(&agp_bridge->dev->dev,
731 "no pre-allocated video memory detected\n");
732 gtt_entries = 0;
733 }
734
735 intel_private.gtt_entries = gtt_entries;
736}
737
738static void intel_i830_fini_flush(void)
739{
740 kunmap(intel_private.i8xx_page);
741 intel_private.i8xx_flush_page = NULL;
742 unmap_page_from_agp(intel_private.i8xx_page);
743
744 __free_page(intel_private.i8xx_page);
745 intel_private.i8xx_page = NULL;
746}
747
748static void intel_i830_setup_flush(void)
749{
750 /* return if we've already set the flush mechanism up */
751 if (intel_private.i8xx_page)
752 return;
753
754 intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
755 if (!intel_private.i8xx_page)
756 return;
757
758 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
759 if (!intel_private.i8xx_flush_page)
760 intel_i830_fini_flush();
761}
762
763/* The chipset_flush interface needs to get data that has already been
764 * flushed out of the CPU all the way out to main memory, because the GPU
765 * doesn't snoop those buffers.
766 *
767 * The 8xx series doesn't have the same lovely interface for flushing the
768 * chipset write buffers that the later chips do. According to the 865
769 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
770 * that buffer out, we just fill 1KB and clflush it out, on the assumption
771 * that it'll push whatever was in there out. It appears to work.
772 */
773static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
774{
775 unsigned int *pg = intel_private.i8xx_flush_page;
776
777 memset(pg, 0, 1024);
778
779 if (cpu_has_clflush)
780 clflush_cache_range(pg, 1024);
781 else if (wbinvd_on_all_cpus() != 0)
782 printk(KERN_ERR "Timed out waiting for cache flush.\n");
783}
784
785/* The intel i830 automatically initializes the agp aperture during POST.
786 * Use the memory already set aside for in the GTT.
787 */
788static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
789{
790 int page_order;
791 struct aper_size_info_fixed *size;
792 int num_entries;
793 u32 temp;
794
795 size = agp_bridge->current_size;
796 page_order = size->page_order;
797 num_entries = size->num_entries;
798 agp_bridge->gatt_table_real = NULL;
799
800 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
801 temp &= 0xfff80000;
802
803 intel_private.registers = ioremap(temp, 128 * 4096);
804 if (!intel_private.registers)
805 return -ENOMEM;
806
807 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
808 global_cache_flush(); /* FIXME: ?? */
809
810 /* we have to call this as early as possible after the MMIO base address is known */
811 intel_i830_init_gtt_entries();
812
813 agp_bridge->gatt_table = NULL;
814
815 agp_bridge->gatt_bus_addr = temp;
816
817 return 0;
818}
819
820/* Return the gatt table to a sane state. Use the top of stolen
821 * memory for the GTT.
822 */
823static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
824{
825 return 0;
826}
827
828static int intel_i830_fetch_size(void)
829{
830 u16 gmch_ctrl;
831 struct aper_size_info_fixed *values;
832
833 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
834
835 if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
836 agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
837 /* 855GM/852GM/865G has 128MB aperture size */
838 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
839 agp_bridge->aperture_size_idx = 0;
840 return values[0].size;
841 }
842
843 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
844
845 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
846 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
847 agp_bridge->aperture_size_idx = 0;
848 return values[0].size;
849 } else {
850 agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
851 agp_bridge->aperture_size_idx = 1;
852 return values[1].size;
853 }
854
855 return 0;
856}
857
858static int intel_i830_configure(void)
859{
860 struct aper_size_info_fixed *current_size;
861 u32 temp;
862 u16 gmch_ctrl;
863 int i;
864
865 current_size = A_SIZE_FIX(agp_bridge->current_size);
866
867 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
868 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
869
870 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
871 gmch_ctrl |= I830_GMCH_ENABLED;
872 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
873
874 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
875 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
876
877 if (agp_bridge->driver->needs_scratch_page) {
878 for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
879 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
880 }
881 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
882 }
883
884 global_cache_flush();
885
886 intel_i830_setup_flush();
887 return 0;
888}
889
890static void intel_i830_cleanup(void)
891{
892 iounmap(intel_private.registers);
893}
894
895static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
896 int type)
897{
898 int i, j, num_entries;
899 void *temp;
900 int ret = -EINVAL;
901 int mask_type;
902
903 if (mem->page_count == 0)
904 goto out;
905
906 temp = agp_bridge->current_size;
907 num_entries = A_SIZE_FIX(temp)->num_entries;
908
909 if (pg_start < intel_private.gtt_entries) {
910 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
911 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
912 pg_start, intel_private.gtt_entries);
913
914 dev_info(&intel_private.pcidev->dev,
915 "trying to insert into local/stolen memory\n");
916 goto out_err;
917 }
918
919 if ((pg_start + mem->page_count) > num_entries)
920 goto out_err;
921
922 /* The i830 can't check the GTT for entries since its read only,
923 * depend on the caller to make the correct offset decisions.
924 */
925
926 if (type != mem->type)
927 goto out_err;
928
929 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
930
931 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
932 mask_type != INTEL_AGP_CACHED_MEMORY)
933 goto out_err;
934
935 if (!mem->is_flushed)
936 global_cache_flush();
937
938 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
939 writel(agp_bridge->driver->mask_memory(agp_bridge,
940 page_to_phys(mem->pages[i]), mask_type),
941 intel_private.registers+I810_PTE_BASE+(j*4));
942 }
943 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
944 agp_bridge->driver->tlb_flush(mem);
945
946out:
947 ret = 0;
948out_err:
949 mem->is_flushed = true;
950 return ret;
951}
952
953static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
954 int type)
955{
956 int i;
957
958 if (mem->page_count == 0)
959 return 0;
960
961 if (pg_start < intel_private.gtt_entries) {
962 dev_info(&intel_private.pcidev->dev,
963 "trying to disable local/stolen memory\n");
964 return -EINVAL;
965 }
966
967 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
968 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
969 }
970 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
971
972 agp_bridge->driver->tlb_flush(mem);
973 return 0;
974}
975
976static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
977{
978 if (type == AGP_PHYS_MEMORY)
979 return alloc_agpphysmem_i8xx(pg_count, type);
980 /* always return NULL for other allocation types for now */
981 return NULL;
982}
983
984static int intel_alloc_chipset_flush_resource(void)
985{
986 int ret;
987 ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
988 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
989 pcibios_align_resource, agp_bridge->dev);
990
991 return ret;
992}
993
994static void intel_i915_setup_chipset_flush(void)
995{
996 int ret;
997 u32 temp;
998
999 pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
1000 if (!(temp & 0x1)) {
1001 intel_alloc_chipset_flush_resource();
1002 intel_private.resource_valid = 1;
1003 pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1004 } else {
1005 temp &= ~1;
1006
1007 intel_private.resource_valid = 1;
1008 intel_private.ifp_resource.start = temp;
1009 intel_private.ifp_resource.end = temp + PAGE_SIZE;
1010 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1011 /* some BIOSes reserve this area in a pnp some don't */
1012 if (ret)
1013 intel_private.resource_valid = 0;
1014 }
1015}
1016
1017static void intel_i965_g33_setup_chipset_flush(void)
1018{
1019 u32 temp_hi, temp_lo;
1020 int ret;
1021
1022 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
1023 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
1024
1025 if (!(temp_lo & 0x1)) {
1026
1027 intel_alloc_chipset_flush_resource();
1028
1029 intel_private.resource_valid = 1;
1030 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
1031 upper_32_bits(intel_private.ifp_resource.start));
1032 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1033 } else {
1034 u64 l64;
1035
1036 temp_lo &= ~0x1;
1037 l64 = ((u64)temp_hi << 32) | temp_lo;
1038
1039 intel_private.resource_valid = 1;
1040 intel_private.ifp_resource.start = l64;
1041 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1042 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1043 /* some BIOSes reserve this area in a pnp some don't */
1044 if (ret)
1045 intel_private.resource_valid = 0;
1046 }
1047}
1048
1049static void intel_i9xx_setup_flush(void)
1050{
1051 /* return if already configured */
1052 if (intel_private.ifp_resource.start)
1053 return;
1054
1055 if (IS_SNB)
1056 return;
1057
1058 /* setup a resource for this object */
1059 intel_private.ifp_resource.name = "Intel Flush Page";
1060 intel_private.ifp_resource.flags = IORESOURCE_MEM;
1061
1062 /* Setup chipset flush for 915 */
1063 if (IS_I965 || IS_G33 || IS_G4X) {
1064 intel_i965_g33_setup_chipset_flush();
1065 } else {
1066 intel_i915_setup_chipset_flush();
1067 }
1068
1069 if (intel_private.ifp_resource.start) {
1070 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1071 if (!intel_private.i9xx_flush_page)
1072 dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
1073 }
1074}
1075
1076static int intel_i915_configure(void)
1077{
1078 struct aper_size_info_fixed *current_size;
1079 u32 temp;
1080 u16 gmch_ctrl;
1081 int i;
1082
1083 current_size = A_SIZE_FIX(agp_bridge->current_size);
1084
1085 pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
1086
1087 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1088
1089 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1090 gmch_ctrl |= I830_GMCH_ENABLED;
1091 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
1092
1093 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
1094 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
1095
1096 if (agp_bridge->driver->needs_scratch_page) {
1097 for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
1098 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1099 }
1100 readl(intel_private.gtt+i-1); /* PCI Posting. */
1101 }
1102
1103 global_cache_flush();
1104
1105 intel_i9xx_setup_flush();
1106
1107 return 0;
1108}
1109
1110static void intel_i915_cleanup(void)
1111{
1112 if (intel_private.i9xx_flush_page)
1113 iounmap(intel_private.i9xx_flush_page);
1114 if (intel_private.resource_valid)
1115 release_resource(&intel_private.ifp_resource);
1116 intel_private.ifp_resource.start = 0;
1117 intel_private.resource_valid = 0;
1118 iounmap(intel_private.gtt);
1119 iounmap(intel_private.registers);
1120}
1121
1122static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
1123{
1124 if (intel_private.i9xx_flush_page)
1125 writel(1, intel_private.i9xx_flush_page);
1126}
1127
1128static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
1129 int type)
1130{
1131 int num_entries;
1132 void *temp;
1133 int ret = -EINVAL;
1134 int mask_type;
1135
1136 if (mem->page_count == 0)
1137 goto out;
1138
1139 temp = agp_bridge->current_size;
1140 num_entries = A_SIZE_FIX(temp)->num_entries;
1141
1142 if (pg_start < intel_private.gtt_entries) {
1143 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1144 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1145 pg_start, intel_private.gtt_entries);
1146
1147 dev_info(&intel_private.pcidev->dev,
1148 "trying to insert into local/stolen memory\n");
1149 goto out_err;
1150 }
1151
1152 if ((pg_start + mem->page_count) > num_entries)
1153 goto out_err;
1154
1155 /* The i915 can't check the GTT for entries since it's read only;
1156 * depend on the caller to make the correct offset decisions.
1157 */
1158
1159 if (type != mem->type)
1160 goto out_err;
1161
1162 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1163
1164 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1165 mask_type != INTEL_AGP_CACHED_MEMORY)
1166 goto out_err;
1167
1168 if (!mem->is_flushed)
1169 global_cache_flush();
1170
1171 intel_agp_insert_sg_entries(mem, pg_start, mask_type);
1172 agp_bridge->driver->tlb_flush(mem);
1173
1174 out:
1175 ret = 0;
1176 out_err:
1177 mem->is_flushed = true;
1178 return ret;
1179}
1180
1181static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
1182 int type)
1183{
1184 int i;
1185
1186 if (mem->page_count == 0)
1187 return 0;
1188
1189 if (pg_start < intel_private.gtt_entries) {
1190 dev_info(&intel_private.pcidev->dev,
1191 "trying to disable local/stolen memory\n");
1192 return -EINVAL;
1193 }
1194
1195 for (i = pg_start; i < (mem->page_count + pg_start); i++)
1196 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1197
1198 readl(intel_private.gtt+i-1);
1199
1200 agp_bridge->driver->tlb_flush(mem);
1201 return 0;
1202}
1203
1204/* Return the aperture size by just checking the resource length. The effect
1205 * described in the spec of the MSAC registers is just changing of the
1206 * resource size.
1207 */
1208static int intel_i9xx_fetch_size(void)
1209{
1210 int num_sizes = ARRAY_SIZE(intel_i830_sizes);
1211 int aper_size; /* size in megabytes */
1212 int i;
1213
1214 aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
1215
1216 for (i = 0; i < num_sizes; i++) {
1217 if (aper_size == intel_i830_sizes[i].size) {
1218 agp_bridge->current_size = intel_i830_sizes + i;
1219 agp_bridge->previous_size = agp_bridge->current_size;
1220 return aper_size;
1221 }
1222 }
1223
1224 return 0;
1225}
1226
1227/* The intel i915 automatically initializes the agp aperture during POST.
1228 * Use the memory already set aside for in the GTT.
1229 */
1230static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
1231{
1232 int page_order;
1233 struct aper_size_info_fixed *size;
1234 int num_entries;
1235 u32 temp, temp2;
1236 int gtt_map_size = 256 * 1024;
1237
1238 size = agp_bridge->current_size;
1239 page_order = size->page_order;
1240 num_entries = size->num_entries;
1241 agp_bridge->gatt_table_real = NULL;
1242
1243 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1244 pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
1245
1246 if (IS_G33)
1247 gtt_map_size = 1024 * 1024; /* 1M on G33 */
1248 intel_private.gtt = ioremap(temp2, gtt_map_size);
1249 if (!intel_private.gtt)
1250 return -ENOMEM;
1251
1252 intel_private.gtt_total_size = gtt_map_size / 4;
1253
1254 temp &= 0xfff80000;
1255
1256 intel_private.registers = ioremap(temp, 128 * 4096);
1257 if (!intel_private.registers) {
1258 iounmap(intel_private.gtt);
1259 return -ENOMEM;
1260 }
1261
1262 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1263 global_cache_flush(); /* FIXME: ? */
1264
1265 /* we have to call this as early as possible after the MMIO base address is known */
1266 intel_i830_init_gtt_entries();
1267
1268 agp_bridge->gatt_table = NULL;
1269
1270 agp_bridge->gatt_bus_addr = temp;
1271
1272 return 0;
1273}
1274
1275/*
1276 * The i965 supports 36-bit physical addresses, but to keep
1277 * the format of the GTT the same, the bits that don't fit
1278 * in a 32-bit word are shifted down to bits 4..7.
1279 *
1280 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1281 * is always zero on 32-bit architectures, so no need to make
1282 * this conditional.
1283 */
1284static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1285 dma_addr_t addr, int type)
1286{
1287 /* Shift high bits down */
1288 addr |= (addr >> 28) & 0xf0;
1289
1290 /* Type checking must be done elsewhere */
1291 return addr | bridge->driver->masks[type].mask;
1292}
1293
1294static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1295{
1296 u16 snb_gmch_ctl;
1297
1298 switch (agp_bridge->dev->device) {
1299 case PCI_DEVICE_ID_INTEL_GM45_HB:
1300 case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
1301 case PCI_DEVICE_ID_INTEL_Q45_HB:
1302 case PCI_DEVICE_ID_INTEL_G45_HB:
1303 case PCI_DEVICE_ID_INTEL_G41_HB:
1304 case PCI_DEVICE_ID_INTEL_B43_HB:
1305 case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
1306 case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
1307 case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
1308 case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
1309 *gtt_offset = *gtt_size = MB(2);
1310 break;
1311 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
1312 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
1313 *gtt_offset = MB(2);
1314
1315 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1316 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
1317 default:
1318 case SNB_GTT_SIZE_0M:
1319 printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
1320 *gtt_size = MB(0);
1321 break;
1322 case SNB_GTT_SIZE_1M:
1323 *gtt_size = MB(1);
1324 break;
1325 case SNB_GTT_SIZE_2M:
1326 *gtt_size = MB(2);
1327 break;
1328 }
1329 break;
1330 default:
1331 *gtt_offset = *gtt_size = KB(512);
1332 }
1333}
1334
1335/* The intel i965 automatically initializes the agp aperture during POST.
1336 * Use the memory already set aside for in the GTT.
1337 */
1338static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
1339{
1340 int page_order;
1341 struct aper_size_info_fixed *size;
1342 int num_entries;
1343 u32 temp;
1344 int gtt_offset, gtt_size;
1345
1346 size = agp_bridge->current_size;
1347 page_order = size->page_order;
1348 num_entries = size->num_entries;
1349 agp_bridge->gatt_table_real = NULL;
1350
1351 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1352
1353 temp &= 0xfff00000;
1354
1355 intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
1356
1357 intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
1358
1359 if (!intel_private.gtt)
1360 return -ENOMEM;
1361
1362 intel_private.gtt_total_size = gtt_size / 4;
1363
1364 intel_private.registers = ioremap(temp, 128 * 4096);
1365 if (!intel_private.registers) {
1366 iounmap(intel_private.gtt);
1367 return -ENOMEM;
1368 }
1369
1370 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1371 global_cache_flush(); /* FIXME: ? */
1372
1373 /* we have to call this as early as possible after the MMIO base address is known */
1374 intel_i830_init_gtt_entries();
1375
1376 agp_bridge->gatt_table = NULL;
1377
1378 agp_bridge->gatt_bus_addr = temp;
1379
1380 return 0;
1381}
1382
1383
1384static int intel_fetch_size(void) 20static int intel_fetch_size(void)
1385{ 21{
1386 int i; 22 int i;
@@ -1848,33 +484,6 @@ static const struct agp_bridge_driver intel_generic_driver = {
 	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
 };
 
-static const struct agp_bridge_driver intel_810_driver = {
-	.owner = THIS_MODULE,
-	.aperture_sizes = intel_i810_sizes,
-	.size_type = FIXED_APER_SIZE,
-	.num_aperture_sizes = 2,
-	.needs_scratch_page = true,
-	.configure = intel_i810_configure,
-	.fetch_size = intel_i810_fetch_size,
-	.cleanup = intel_i810_cleanup,
-	.tlb_flush = intel_i810_tlbflush,
-	.mask_memory = intel_i810_mask_memory,
-	.masks = intel_i810_masks,
-	.agp_enable = intel_i810_agp_enable,
-	.cache_flush = global_cache_flush,
-	.create_gatt_table = agp_generic_create_gatt_table,
-	.free_gatt_table = agp_generic_free_gatt_table,
-	.insert_memory = intel_i810_insert_entries,
-	.remove_memory = intel_i810_remove_entries,
-	.alloc_by_type = intel_i810_alloc_by_type,
-	.free_by_type = intel_i810_free_by_type,
-	.agp_alloc_page = agp_generic_alloc_page,
-	.agp_alloc_pages = agp_generic_alloc_pages,
-	.agp_destroy_page = agp_generic_destroy_page,
-	.agp_destroy_pages = agp_generic_destroy_pages,
-	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
-};
-
 static const struct agp_bridge_driver intel_815_driver = {
 	.owner = THIS_MODULE,
 	.aperture_sizes = intel_815_sizes,
@@ -1901,34 +510,6 @@ static const struct agp_bridge_driver intel_815_driver = {
 	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
 };
 
-static const struct agp_bridge_driver intel_830_driver = {
-	.owner = THIS_MODULE,
-	.aperture_sizes = intel_i830_sizes,
-	.size_type = FIXED_APER_SIZE,
-	.num_aperture_sizes = 4,
-	.needs_scratch_page = true,
-	.configure = intel_i830_configure,
-	.fetch_size = intel_i830_fetch_size,
-	.cleanup = intel_i830_cleanup,
-	.tlb_flush = intel_i810_tlbflush,
-	.mask_memory = intel_i810_mask_memory,
-	.masks = intel_i810_masks,
-	.agp_enable = intel_i810_agp_enable,
-	.cache_flush = global_cache_flush,
-	.create_gatt_table = intel_i830_create_gatt_table,
-	.free_gatt_table = intel_i830_free_gatt_table,
-	.insert_memory = intel_i830_insert_entries,
-	.remove_memory = intel_i830_remove_entries,
-	.alloc_by_type = intel_i830_alloc_by_type,
-	.free_by_type = intel_i810_free_by_type,
-	.agp_alloc_page = agp_generic_alloc_page,
-	.agp_alloc_pages = agp_generic_alloc_pages,
-	.agp_destroy_page = agp_generic_destroy_page,
-	.agp_destroy_pages = agp_generic_destroy_pages,
-	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
-	.chipset_flush = intel_i830_chipset_flush,
-};
-
 static const struct agp_bridge_driver intel_820_driver = {
 	.owner = THIS_MODULE,
 	.aperture_sizes = intel_8xx_sizes,
@@ -2085,74 +666,6 @@ static const struct agp_bridge_driver intel_860_driver = {
 	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
 };
 
-static const struct agp_bridge_driver intel_915_driver = {
-	.owner = THIS_MODULE,
-	.aperture_sizes = intel_i830_sizes,
-	.size_type = FIXED_APER_SIZE,
-	.num_aperture_sizes = 4,
-	.needs_scratch_page = true,
-	.configure = intel_i915_configure,
-	.fetch_size = intel_i9xx_fetch_size,
-	.cleanup = intel_i915_cleanup,
-	.tlb_flush = intel_i810_tlbflush,
-	.mask_memory = intel_i810_mask_memory,
-	.masks = intel_i810_masks,
-	.agp_enable = intel_i810_agp_enable,
-	.cache_flush = global_cache_flush,
-	.create_gatt_table = intel_i915_create_gatt_table,
-	.free_gatt_table = intel_i830_free_gatt_table,
-	.insert_memory = intel_i915_insert_entries,
-	.remove_memory = intel_i915_remove_entries,
-	.alloc_by_type = intel_i830_alloc_by_type,
-	.free_by_type = intel_i810_free_by_type,
-	.agp_alloc_page = agp_generic_alloc_page,
-	.agp_alloc_pages = agp_generic_alloc_pages,
-	.agp_destroy_page = agp_generic_destroy_page,
-	.agp_destroy_pages = agp_generic_destroy_pages,
-	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
-	.chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
-	.agp_map_page = intel_agp_map_page,
-	.agp_unmap_page = intel_agp_unmap_page,
-	.agp_map_memory = intel_agp_map_memory,
-	.agp_unmap_memory = intel_agp_unmap_memory,
-#endif
-};
-
-static const struct agp_bridge_driver intel_i965_driver = {
-	.owner = THIS_MODULE,
-	.aperture_sizes = intel_i830_sizes,
-	.size_type = FIXED_APER_SIZE,
-	.num_aperture_sizes = 4,
-	.needs_scratch_page = true,
-	.configure = intel_i915_configure,
-	.fetch_size = intel_i9xx_fetch_size,
-	.cleanup = intel_i915_cleanup,
-	.tlb_flush = intel_i810_tlbflush,
-	.mask_memory = intel_i965_mask_memory,
-	.masks = intel_i810_masks,
-	.agp_enable = intel_i810_agp_enable,
-	.cache_flush = global_cache_flush,
-	.create_gatt_table = intel_i965_create_gatt_table,
-	.free_gatt_table = intel_i830_free_gatt_table,
-	.insert_memory = intel_i915_insert_entries,
-	.remove_memory = intel_i915_remove_entries,
-	.alloc_by_type = intel_i830_alloc_by_type,
-	.free_by_type = intel_i810_free_by_type,
-	.agp_alloc_page = agp_generic_alloc_page,
-	.agp_alloc_pages = agp_generic_alloc_pages,
-	.agp_destroy_page = agp_generic_destroy_page,
-	.agp_destroy_pages = agp_generic_destroy_pages,
-	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
-	.chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
-	.agp_map_page = intel_agp_map_page,
-	.agp_unmap_page = intel_agp_unmap_page,
-	.agp_map_memory = intel_agp_map_memory,
-	.agp_unmap_memory = intel_agp_unmap_memory,
-#endif
-};
-
 static const struct agp_bridge_driver intel_7505_driver = {
 	.owner = THIS_MODULE,
 	.aperture_sizes = intel_8xx_sizes,
@@ -2179,40 +692,6 @@ static const struct agp_bridge_driver intel_7505_driver = {
 	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
 };
 
-static const struct agp_bridge_driver intel_g33_driver = {
-	.owner = THIS_MODULE,
-	.aperture_sizes = intel_i830_sizes,
-	.size_type = FIXED_APER_SIZE,
-	.num_aperture_sizes = 4,
-	.needs_scratch_page = true,
-	.configure = intel_i915_configure,
-	.fetch_size = intel_i9xx_fetch_size,
-	.cleanup = intel_i915_cleanup,
-	.tlb_flush = intel_i810_tlbflush,
-	.mask_memory = intel_i965_mask_memory,
-	.masks = intel_i810_masks,
-	.agp_enable = intel_i810_agp_enable,
-	.cache_flush = global_cache_flush,
-	.create_gatt_table = intel_i915_create_gatt_table,
-	.free_gatt_table = intel_i830_free_gatt_table,
-	.insert_memory = intel_i915_insert_entries,
-	.remove_memory = intel_i915_remove_entries,
-	.alloc_by_type = intel_i830_alloc_by_type,
-	.free_by_type = intel_i810_free_by_type,
-	.agp_alloc_page = agp_generic_alloc_page,
-	.agp_alloc_pages = agp_generic_alloc_pages,
-	.agp_destroy_page = agp_generic_destroy_page,
-	.agp_destroy_pages = agp_generic_destroy_pages,
-	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
-	.chipset_flush = intel_i915_chipset_flush,
-#ifdef USE_PCI_DMA_API
-	.agp_map_page = intel_agp_map_page,
-	.agp_unmap_page = intel_agp_unmap_page,
-	.agp_map_memory = intel_agp_map_memory,
-	.agp_unmap_memory = intel_agp_unmap_memory,
-#endif
-};
-
 static int find_gmch(u16 device)
 {
 	struct pci_dev *gmch_device;
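
Note on the removed intel_i965_mask_memory(): the packing it performs can be
checked with a small standalone program (illustrative, not part of the patch).
Bits 32..35 of a 36-bit, 4K-aligned physical address are folded into PTE bits
4..7, which a page-aligned address never uses:

	#include <stdint.h>
	#include <stdio.h>

	/* Same folding as the removed helper, minus the per-type mask bits:
	 * physical bits 32..35 land in PTE bits 4..7 via (addr >> 28) & 0xf0. */
	static uint32_t i965_pack_addr(uint64_t addr)
	{
		return (uint32_t)(addr | ((addr >> 28) & 0xf0));
	}

	int main(void)
	{
		uint64_t phys = 0x3c0001000ULL;	/* a 36-bit page-aligned address */
		printf("pte = 0x%08x\n", i965_pack_addr(phys));
		/* prints pte = 0xc0001030: low 32 bits, plus 0x3 folded into bits 4..7 */
		return 0;
	}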