path: root/drivers/char/agp/intel-gtt.c
author    Daniel Vetter <daniel.vetter@ffwll.ch>    2010-04-13 18:29:52 -0400
committer Eric Anholt <eric@anholt.net>    2010-04-18 20:35:47 -0400
commit    f51b76621137c18501f6d21a995d36a8bcb49999 (patch)
tree      ea146954362dd400b4582e4fb89242b075aaf556 /drivers/char/agp/intel-gtt.c
parent    ff7cdd691a0c4925c1803bf89a4c08ccda2d7658 (diff)
agp/intel: split out the GTT support
intel-agp.c actually contains two different drivers: an agp driver for _physical_ agp slots and the gtt driver that is used by the intel drm modules. Split them to prevent any further confusion. This patch just moves the code and includes intel-gtt.c in intel-agp.c. Later patches will untangle these two drivers further.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Eric Anholt <eric@anholt.net>
Diffstat (limited to 'drivers/char/agp/intel-gtt.c')
-rw-r--r-- drivers/char/agp/intel-gtt.c | 1535
1 file changed, 1535 insertions(+), 0 deletions(-)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
new file mode 100644
index 000000000000..131c5d5e427e
--- /dev/null
+++ b/drivers/char/agp/intel-gtt.c
@@ -0,0 +1,1535 @@
1/*
2 * Intel GTT (Graphics Translation Table) routines
3 *
4 * Caveat: This driver implements the linux agp interface, but this is far from
5 * an agp driver! GTT support ended up here for purely historical reasons: The
6 * old userspace intel graphics drivers needed an interface to map memory into
7 * the GTT. And the drm provides a default interface for graphics devices sitting
8 * on an agp port. So it made sense to fake the GTT support as an agp port to
9 * avoid having to create a new api.
10 *
11 * With gem this does not make much sense anymore; it just needlessly complicates
12 * the code. But as long as the old graphics stack is still supported, it's stuck
13 * here.
14 *
15 * /fairy-tale-mode off
16 */
17
18/*
19 * If we have Intel graphics, we're not going to have anything other than
20 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
21 * on the Intel IOMMU support (CONFIG_DMAR).
22 * Only newer chipsets need to bother with this, of course.
23 */
24#ifdef CONFIG_DMAR
25#define USE_PCI_DMA_API 1
26#endif
27
28static const struct aper_size_info_fixed intel_i810_sizes[] =
29{
30 {64, 16384, 4},
31 /* The 32M mode still requires a 64k gatt */
32 {32, 8192, 4}
33};
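
As a quick sanity check on the table above (a standalone sketch, not part of the driver): each 4KB aperture page needs one 4-byte PTE, so the entry counts follow directly from the aperture sizes.

        #include <assert.h>
        /* {size_mb, num_entries, page_order} from intel_i810_sizes */
        int main(void)
        {
                assert(64 * 1024 * 1024 / 4096 == 16384); /* 64M aperture -> 16384 PTEs */
                assert(32 * 1024 * 1024 / 4096 ==  8192); /* 32M aperture ->  8192 PTEs */
                /* 16384 PTEs * 4 bytes = 64KB gatt; per the comment above,
                 * the hardware wants the full 64KB even in 32M mode. */
                return 0;
        }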
34
35#define AGP_DCACHE_MEMORY 1
36#define AGP_PHYS_MEMORY 2
37#define INTEL_AGP_CACHED_MEMORY 3
38
39static struct gatt_mask intel_i810_masks[] =
40{
41 {.mask = I810_PTE_VALID, .type = 0},
42 {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
43 {.mask = I810_PTE_VALID, .type = 0},
44 {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
45 .type = INTEL_AGP_CACHED_MEMORY}
46};
47
48static struct _intel_private {
49 struct pci_dev *pcidev; /* device one */
50 u8 __iomem *registers;
51 u32 __iomem *gtt; /* I915G */
52 int num_dcache_entries;
53 /* gtt_entries is the number of gtt entries that are already mapped
54 * to stolen memory. Stolen memory is larger than the memory mapped
55 * through gtt_entries, as it includes some reserved space for the BIOS
56 * popup and for the GTT.
57 */
58 int gtt_entries; /* i830+ */
59 int gtt_total_size;
60 union {
61 void __iomem *i9xx_flush_page;
62 void *i8xx_flush_page;
63 };
64 struct page *i8xx_page;
65 struct resource ifp_resource;
66 int resource_valid;
67} intel_private;
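
To make the gtt_entries bookkeeping concrete, a worked example with hypothetical numbers, mirroring what intel_i830_init_gtt_entries() computes further down: 8MB of stolen memory with 132KB reserved at its top (128KB GTT plus the 4KB BIOS popup).

        int size = 128 + 4;                 /* reserved KB at top of stolen */
        int stolen_bytes = 8 * 1024 * 1024; /* hypothetical 8MB stolen      */
        int gtt_entries = (stolen_bytes - size * 1024) / 4096;
        /* = (8388608 - 135168) / 4096 = 2015 of the 2048 entries covering
         * stolen memory actually map usable graphics pages */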
68
69#ifdef USE_PCI_DMA_API
70static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
71{
72 *ret = pci_map_page(intel_private.pcidev, page, 0,
73 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
74 if (pci_dma_mapping_error(intel_private.pcidev, *ret))
75 return -EINVAL;
76 return 0;
77}
78
79static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
80{
81 pci_unmap_page(intel_private.pcidev, dma,
82 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
83}
84
85static void intel_agp_free_sglist(struct agp_memory *mem)
86{
87 struct sg_table st;
88
89 st.sgl = mem->sg_list;
90 st.orig_nents = st.nents = mem->page_count;
91
92 sg_free_table(&st);
93
94 mem->sg_list = NULL;
95 mem->num_sg = 0;
96}
97
98static int intel_agp_map_memory(struct agp_memory *mem)
99{
100 struct sg_table st;
101 struct scatterlist *sg;
102 int i;
103
104 DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
105
106 if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
107 return -ENOMEM;
108
109 mem->sg_list = sg = st.sgl;
110
111 for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
112 sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
113
114 mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
115 mem->page_count, PCI_DMA_BIDIRECTIONAL);
116 if (unlikely(!mem->num_sg)) {
117 intel_agp_free_sglist(mem);
118 return -ENOMEM;
119 }
120 return 0;
121}
122
123static void intel_agp_unmap_memory(struct agp_memory *mem)
124{
125 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
126
127 pci_unmap_sg(intel_private.pcidev, mem->sg_list,
128 mem->page_count, PCI_DMA_BIDIRECTIONAL);
129 intel_agp_free_sglist(mem);
130}
131
132static void intel_agp_insert_sg_entries(struct agp_memory *mem,
133 off_t pg_start, int mask_type)
134{
135 struct scatterlist *sg;
136 int i, j;
137
138 j = pg_start;
139
140 WARN_ON(!mem->num_sg);
141
142 if (mem->num_sg == mem->page_count) {
143 for_each_sg(mem->sg_list, sg, mem->page_count, i) {
144 writel(agp_bridge->driver->mask_memory(agp_bridge,
145 sg_dma_address(sg), mask_type),
146 intel_private.gtt+j);
147 j++;
148 }
149 } else {
150 /* sg may merge pages, but we still have to write a
151 * separate per-page address into the GTT */
152 unsigned int len, m;
153
154 for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
155 len = sg_dma_len(sg) / PAGE_SIZE;
156 for (m = 0; m < len; m++) {
157 writel(agp_bridge->driver->mask_memory(agp_bridge,
158 sg_dma_address(sg) + m * PAGE_SIZE,
159 mask_type),
160 intel_private.gtt+j);
161 j++;
162 }
163 }
164 }
165 readl(intel_private.gtt+j-1);
166}
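
A minimal standalone sketch (hypothetical addresses) of the second branch above: when pci_map_sg() coalesces pages, one sg segment still has to yield one GTT entry per 4KB page.

        #include <stdio.h>
        int main(void)
        {
                unsigned long dma_addr = 0x10000; /* one coalesced 8KB segment */
                unsigned int dma_len = 8192, m;
                for (m = 0; m < dma_len / 4096; m++) /* len = 2 -> two PTEs */
                        printf("gtt[j++] <- pte(%#lx)\n", dma_addr + m * 4096);
                return 0;
        }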
167
168#else
169
170static void intel_agp_insert_sg_entries(struct agp_memory *mem,
171 off_t pg_start, int mask_type)
172{
173 int i, j;
174 u32 cache_bits = 0;
175
176 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
177 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
178 {
179 cache_bits = I830_PTE_SYSTEM_CACHED;
180 }
181
182 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
183 writel(agp_bridge->driver->mask_memory(agp_bridge,
184 page_to_phys(mem->pages[i]), mask_type),
185 intel_private.gtt+j);
186 }
187
188 readl(intel_private.gtt+j-1);
189}
190
191#endif
192
193static int intel_i810_fetch_size(void)
194{
195 u32 smram_miscc;
196 struct aper_size_info_fixed *values;
197
198 pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
199 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
200
201 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
202 dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
203 return 0;
204 }
205 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
206 agp_bridge->previous_size =
207 agp_bridge->current_size = (void *) (values + 1);
208 agp_bridge->aperture_size_idx = 1;
209 return values[1].size;
210 } else {
211 agp_bridge->previous_size =
212 agp_bridge->current_size = (void *) (values);
213 agp_bridge->aperture_size_idx = 0;
214 return values[0].size;
215 }
216
217 return 0;
218}
219
220static int intel_i810_configure(void)
221{
222 struct aper_size_info_fixed *current_size;
223 u32 temp;
224 int i;
225
226 current_size = A_SIZE_FIX(agp_bridge->current_size);
227
228 if (!intel_private.registers) {
229 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
230 temp &= 0xfff80000;
231
232 intel_private.registers = ioremap(temp, 128 * 4096);
233 if (!intel_private.registers) {
234 dev_err(&intel_private.pcidev->dev,
235 "can't remap memory\n");
236 return -ENOMEM;
237 }
238 }
239
240 if ((readl(intel_private.registers+I810_DRAM_CTL)
241 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
242 /* This will need to be dynamically assigned */
243 dev_info(&intel_private.pcidev->dev,
244 "detected 4MB dedicated video ram\n");
245 intel_private.num_dcache_entries = 1024;
246 }
247 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
248 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
249 writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
250 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
251
252 if (agp_bridge->driver->needs_scratch_page) {
253 for (i = 0; i < current_size->num_entries; i++) {
254 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
255 }
256 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
257 }
258 global_cache_flush();
259 return 0;
260}
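
The dcache detection above checks out arithmetically: 1024 entries of 4KB each cover exactly the 4MB of dedicated video ram (a standalone sanity check, not driver code).

        #include <assert.h>
        int main(void)
        {
                assert(1024 * 4096 == 4 * 1024 * 1024); /* num_dcache_entries * page size */
                return 0;
        }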
261
262static void intel_i810_cleanup(void)
263{
264 writel(0, intel_private.registers+I810_PGETBL_CTL);
265 readl(intel_private.registers); /* PCI Posting. */
266 iounmap(intel_private.registers);
267}
268
269static void intel_i810_tlbflush(struct agp_memory *mem)
270{
271 return;
272}
273
274static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
275{
276 return;
277}
278
279/* Exists to support ARGB cursors */
280static struct page *i8xx_alloc_pages(void)
281{
282 struct page *page;
283
284 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
285 if (page == NULL)
286 return NULL;
287
288 if (set_pages_uc(page, 4) < 0) {
289 set_pages_wb(page, 4);
290 __free_pages(page, 2);
291 return NULL;
292 }
293 get_page(page);
294 atomic_inc(&agp_bridge->current_memory_agp);
295 return page;
296}
297
298static void i8xx_destroy_pages(struct page *page)
299{
300 if (page == NULL)
301 return;
302
303 set_pages_wb(page, 4);
304 put_page(page);
305 __free_pages(page, 2);
306 atomic_dec(&agp_bridge->current_memory_agp);
307}
308
309static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
310 int type)
311{
312 if (type < AGP_USER_TYPES)
313 return type;
314 else if (type == AGP_USER_CACHED_MEMORY)
315 return INTEL_AGP_CACHED_MEMORY;
316 else
317 return 0;
318}
319
320static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
321 int type)
322{
323 int i, j, num_entries;
324 void *temp;
325 int ret = -EINVAL;
326 int mask_type;
327
328 if (mem->page_count == 0)
329 goto out;
330
331 temp = agp_bridge->current_size;
332 num_entries = A_SIZE_FIX(temp)->num_entries;
333
334 if ((pg_start + mem->page_count) > num_entries)
335 goto out_err;
336
337
338 for (j = pg_start; j < (pg_start + mem->page_count); j++) {
339 if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
340 ret = -EBUSY;
341 goto out_err;
342 }
343 }
344
345 if (type != mem->type)
346 goto out_err;
347
348 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
349
350 switch (mask_type) {
351 case AGP_DCACHE_MEMORY:
352 if (!mem->is_flushed)
353 global_cache_flush();
354 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
355 writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
356 intel_private.registers+I810_PTE_BASE+(i*4));
357 }
358 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
359 break;
360 case AGP_PHYS_MEMORY:
361 case AGP_NORMAL_MEMORY:
362 if (!mem->is_flushed)
363 global_cache_flush();
364 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
365 writel(agp_bridge->driver->mask_memory(agp_bridge,
366 page_to_phys(mem->pages[i]), mask_type),
367 intel_private.registers+I810_PTE_BASE+(j*4));
368 }
369 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
370 break;
371 default:
372 goto out_err;
373 }
374
375 agp_bridge->driver->tlb_flush(mem);
376out:
377 ret = 0;
378out_err:
379 mem->is_flushed = true;
380 return ret;
381}
382
383static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
384 int type)
385{
386 int i;
387
388 if (mem->page_count == 0)
389 return 0;
390
391 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
392 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
393 }
394 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
395
396 agp_bridge->driver->tlb_flush(mem);
397 return 0;
398}
399
400/*
401 * The i810/i830 requires a physical address to program its mouse
402 * pointer into hardware.
403 * However, the Xserver still writes to it through the agp aperture.
404 */
405static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
406{
407 struct agp_memory *new;
408 struct page *page;
409
410 switch (pg_count) {
411 case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
412 break;
413 case 4:
414 /* kludge to get 4 physical pages for ARGB cursor */
415 page = i8xx_alloc_pages();
416 break;
417 default:
418 return NULL;
419 }
420
421 if (page == NULL)
422 return NULL;
423
424 new = agp_create_memory(pg_count);
425 if (new == NULL)
426 return NULL;
427
428 new->pages[0] = page;
429 if (pg_count == 4) {
430 /* kludge to get 4 physical pages for ARGB cursor */
431 new->pages[1] = new->pages[0] + 1;
432 new->pages[2] = new->pages[1] + 1;
433 new->pages[3] = new->pages[2] + 1;
434 }
435 new->page_count = pg_count;
436 new->num_scratch_pages = pg_count;
437 new->type = AGP_PHYS_MEMORY;
438 new->physical = page_to_phys(new->pages[0]);
439 return new;
440}
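
A hedged usage sketch for the comment above (the real consumer is the X server going through agpgart; program_cursor_base() is a hypothetical helper): the cursor hardware gets the physical base, while pixel data is written through the aperture.

        struct agp_memory *cursor = alloc_agpphysmem_i8xx(4, AGP_PHYS_MEMORY);
        if (cursor) {
                /* 4 physically contiguous, uncached pages for an ARGB cursor */
                program_cursor_base(cursor->physical); /* hypothetical helper */
        }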
441
442static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
443{
444 struct agp_memory *new;
445
446 if (type == AGP_DCACHE_MEMORY) {
447 if (pg_count != intel_private.num_dcache_entries)
448 return NULL;
449
450 new = agp_create_memory(1);
451 if (new == NULL)
452 return NULL;
453
454 new->type = AGP_DCACHE_MEMORY;
455 new->page_count = pg_count;
456 new->num_scratch_pages = 0;
457 agp_free_page_array(new);
458 return new;
459 }
460 if (type == AGP_PHYS_MEMORY)
461 return alloc_agpphysmem_i8xx(pg_count, type);
462 return NULL;
463}
464
465static void intel_i810_free_by_type(struct agp_memory *curr)
466{
467 agp_free_key(curr->key);
468 if (curr->type == AGP_PHYS_MEMORY) {
469 if (curr->page_count == 4)
470 i8xx_destroy_pages(curr->pages[0]);
471 else {
472 agp_bridge->driver->agp_destroy_page(curr->pages[0],
473 AGP_PAGE_DESTROY_UNMAP);
474 agp_bridge->driver->agp_destroy_page(curr->pages[0],
475 AGP_PAGE_DESTROY_FREE);
476 }
477 agp_free_page_array(curr);
478 }
479 kfree(curr);
480}
481
482static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
483 dma_addr_t addr, int type)
484{
485 /* Type checking must be done elsewhere */
486 return addr | bridge->driver->masks[type].mask;
487}
488
489static struct aper_size_info_fixed intel_i830_sizes[] =
490{
491 {128, 32768, 5},
492 /* The 64M mode still requires a 128k gatt */
493 {64, 16384, 5},
494 {256, 65536, 6},
495 {512, 131072, 7},
496};
497
498static void intel_i830_init_gtt_entries(void)
499{
500 u16 gmch_ctrl;
501 int gtt_entries = 0;
502 u8 rdct;
503 int local = 0;
504 static const int ddt[4] = { 0, 16, 32, 64 };
505 int size; /* reserved space (in kb) at the top of stolen memory */
506
507 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
508
509 if (IS_I965) {
510 u32 pgetbl_ctl;
511 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
512
513 /* The 965 has a field telling us the size of the GTT,
514 * which may be larger than what is necessary to map the
515 * aperture.
516 */
517 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
518 case I965_PGETBL_SIZE_128KB:
519 size = 128;
520 break;
521 case I965_PGETBL_SIZE_256KB:
522 size = 256;
523 break;
524 case I965_PGETBL_SIZE_512KB:
525 size = 512;
526 break;
527 case I965_PGETBL_SIZE_1MB:
528 size = 1024;
529 break;
530 case I965_PGETBL_SIZE_2MB:
531 size = 2048;
532 break;
533 case I965_PGETBL_SIZE_1_5MB:
534 size = 1024 + 512;
535 break;
536 default:
537 dev_info(&intel_private.pcidev->dev,
538 "unknown page table size, assuming 512KB\n");
539 size = 512;
540 }
541 size += 4; /* add in BIOS popup space */
542 } else if (IS_G33 && !IS_PINEVIEW) {
543 /* G33's GTT size defined in gmch_ctrl */
544 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
545 case G33_PGETBL_SIZE_1M:
546 size = 1024;
547 break;
548 case G33_PGETBL_SIZE_2M:
549 size = 2048;
550 break;
551 default:
552 dev_info(&agp_bridge->dev->dev,
553 "unknown page table size 0x%x, assuming 512KB\n",
554 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
555 size = 512;
556 }
557 size += 4;
558 } else if (IS_G4X || IS_PINEVIEW) {
559 /* On 4 series hardware, GTT stolen is separate from graphics
560 * stolen, so ignore it when counting stolen gtt entries. However,
561 * 4KB of the stolen memory doesn't get mapped to the GTT.
562 */
563 size = 4;
564 } else {
565 /* On previous hardware, the GTT size was just what was
566 * required to map the aperture.
567 */
568 size = agp_bridge->driver->fetch_size() + 4;
569 }
570
571 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
572 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
573 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
574 case I830_GMCH_GMS_STOLEN_512:
575 gtt_entries = KB(512) - KB(size);
576 break;
577 case I830_GMCH_GMS_STOLEN_1024:
578 gtt_entries = MB(1) - KB(size);
579 break;
580 case I830_GMCH_GMS_STOLEN_8192:
581 gtt_entries = MB(8) - KB(size);
582 break;
583 case I830_GMCH_GMS_LOCAL:
584 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
585 gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
586 MB(ddt[I830_RDRAM_DDT(rdct)]);
587 local = 1;
588 break;
589 default:
590 gtt_entries = 0;
591 break;
592 }
593 } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
594 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
595 /*
596 * SandyBridge has a new memory control reg at 0x50.w
597 */
598 u16 snb_gmch_ctl;
599 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
600 switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
601 case SNB_GMCH_GMS_STOLEN_32M:
602 gtt_entries = MB(32) - KB(size);
603 break;
604 case SNB_GMCH_GMS_STOLEN_64M:
605 gtt_entries = MB(64) - KB(size);
606 break;
607 case SNB_GMCH_GMS_STOLEN_96M:
608 gtt_entries = MB(96) - KB(size);
609 break;
610 case SNB_GMCH_GMS_STOLEN_128M:
611 gtt_entries = MB(128) - KB(size);
612 break;
613 case SNB_GMCH_GMS_STOLEN_160M:
614 gtt_entries = MB(160) - KB(size);
615 break;
616 case SNB_GMCH_GMS_STOLEN_192M:
617 gtt_entries = MB(192) - KB(size);
618 break;
619 case SNB_GMCH_GMS_STOLEN_224M:
620 gtt_entries = MB(224) - KB(size);
621 break;
622 case SNB_GMCH_GMS_STOLEN_256M:
623 gtt_entries = MB(256) - KB(size);
624 break;
625 case SNB_GMCH_GMS_STOLEN_288M:
626 gtt_entries = MB(288) - KB(size);
627 break;
628 case SNB_GMCH_GMS_STOLEN_320M:
629 gtt_entries = MB(320) - KB(size);
630 break;
631 case SNB_GMCH_GMS_STOLEN_352M:
632 gtt_entries = MB(352) - KB(size);
633 break;
634 case SNB_GMCH_GMS_STOLEN_384M:
635 gtt_entries = MB(384) - KB(size);
636 break;
637 case SNB_GMCH_GMS_STOLEN_416M:
638 gtt_entries = MB(416) - KB(size);
639 break;
640 case SNB_GMCH_GMS_STOLEN_448M:
641 gtt_entries = MB(448) - KB(size);
642 break;
643 case SNB_GMCH_GMS_STOLEN_480M:
644 gtt_entries = MB(480) - KB(size);
645 break;
646 case SNB_GMCH_GMS_STOLEN_512M:
647 gtt_entries = MB(512) - KB(size);
648 break;
649 }
650 } else {
651 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
652 case I855_GMCH_GMS_STOLEN_1M:
653 gtt_entries = MB(1) - KB(size);
654 break;
655 case I855_GMCH_GMS_STOLEN_4M:
656 gtt_entries = MB(4) - KB(size);
657 break;
658 case I855_GMCH_GMS_STOLEN_8M:
659 gtt_entries = MB(8) - KB(size);
660 break;
661 case I855_GMCH_GMS_STOLEN_16M:
662 gtt_entries = MB(16) - KB(size);
663 break;
664 case I855_GMCH_GMS_STOLEN_32M:
665 gtt_entries = MB(32) - KB(size);
666 break;
667 case I915_GMCH_GMS_STOLEN_48M:
668 /* Check it's really I915G */
669 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
670 gtt_entries = MB(48) - KB(size);
671 else
672 gtt_entries = 0;
673 break;
674 case I915_GMCH_GMS_STOLEN_64M:
675 /* Check it's really I915G */
676 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
677 gtt_entries = MB(64) - KB(size);
678 else
679 gtt_entries = 0;
680 break;
681 case G33_GMCH_GMS_STOLEN_128M:
682 if (IS_G33 || IS_I965 || IS_G4X)
683 gtt_entries = MB(128) - KB(size);
684 else
685 gtt_entries = 0;
686 break;
687 case G33_GMCH_GMS_STOLEN_256M:
688 if (IS_G33 || IS_I965 || IS_G4X)
689 gtt_entries = MB(256) - KB(size);
690 else
691 gtt_entries = 0;
692 break;
693 case INTEL_GMCH_GMS_STOLEN_96M:
694 if (IS_I965 || IS_G4X)
695 gtt_entries = MB(96) - KB(size);
696 else
697 gtt_entries = 0;
698 break;
699 case INTEL_GMCH_GMS_STOLEN_160M:
700 if (IS_I965 || IS_G4X)
701 gtt_entries = MB(160) - KB(size);
702 else
703 gtt_entries = 0;
704 break;
705 case INTEL_GMCH_GMS_STOLEN_224M:
706 if (IS_I965 || IS_G4X)
707 gtt_entries = MB(224) - KB(size);
708 else
709 gtt_entries = 0;
710 break;
711 case INTEL_GMCH_GMS_STOLEN_352M:
712 if (IS_I965 || IS_G4X)
713 gtt_entries = MB(352) - KB(size);
714 else
715 gtt_entries = 0;
716 break;
717 default:
718 gtt_entries = 0;
719 break;
720 }
721 }
722 if (gtt_entries > 0) {
723 dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
724 gtt_entries / KB(1), local ? "local" : "stolen");
725 gtt_entries /= KB(4);
726 } else {
727 dev_info(&agp_bridge->dev->dev,
728 "no pre-allocated video memory detected\n");
729 gtt_entries = 0;
730 }
731
732 intel_private.gtt_entries = gtt_entries;
733}
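
A worked example of the computation above, assuming a hypothetical G33 reporting a 1MB GTT and 64MB of stolen memory:

        size = 1024 + 4;                 /* 1MB GTT + 4KB BIOS popup, in KB */
        gtt_entries = MB(64) - KB(size); /* 67108864 - 1052672 = 66056192   */
        /* dev_info prints "detected 64508K stolen memory"                  */
        gtt_entries /= KB(4);            /* 16127 usable 4KB entries        */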
734
735static void intel_i830_fini_flush(void)
736{
737 kunmap(intel_private.i8xx_page);
738 intel_private.i8xx_flush_page = NULL;
739 unmap_page_from_agp(intel_private.i8xx_page);
740
741 __free_page(intel_private.i8xx_page);
742 intel_private.i8xx_page = NULL;
743}
744
745static void intel_i830_setup_flush(void)
746{
747 /* return if we've already set the flush mechanism up */
748 if (intel_private.i8xx_page)
749 return;
750
751 intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
752 if (!intel_private.i8xx_page)
753 return;
754
755 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
756 if (!intel_private.i8xx_flush_page)
757 intel_i830_fini_flush();
758}
759
760/* The chipset_flush interface needs to get data that has already been
761 * flushed out of the CPU all the way out to main memory, because the GPU
762 * doesn't snoop those buffers.
763 *
764 * The 8xx series doesn't have the same lovely interface for flushing the
765 * chipset write buffers that the later chips do. According to the 865
766 * specs, that buffer is 64 octwords, or 1KB. So, to force out whatever
767 * is sitting in it, we just fill 1KB and clflush it, on the assumption
768 * that this pushes the previous contents out. It appears to work.
769 */
770static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
771{
772 unsigned int *pg = intel_private.i8xx_flush_page;
773
774 memset(pg, 0, 1024);
775
776 if (cpu_has_clflush)
777 clflush_cache_range(pg, 1024);
778 else if (wbinvd_on_all_cpus() != 0)
779 printk(KERN_ERR "Timed out waiting for cache flush.\n");
780}
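
For context, a hedged sketch of the ordering a caller would follow (hypothetical snippet; the real callers live in the drm code): CPU caches are flushed first, then the chipset write buffer, and only then may the non-snooping GPU read the data.

        memcpy(vaddr, data, len);              /* CPU writes, possibly cached  */
        global_cache_flush();                  /* CPU caches -> chipset        */
        bridge->driver->chipset_flush(bridge); /* chipset write buffer -> DRAM */
        /* now safe to let the GPU read the buffer */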
781
782/* The intel i830 automatically initializes the agp aperture during POST.
783 * Use the memory already set aside for the GTT.
784 */
785static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
786{
787 int page_order;
788 struct aper_size_info_fixed *size;
789 int num_entries;
790 u32 temp;
791
792 size = agp_bridge->current_size;
793 page_order = size->page_order;
794 num_entries = size->num_entries;
795 agp_bridge->gatt_table_real = NULL;
796
797 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
798 temp &= 0xfff80000;
799
800 intel_private.registers = ioremap(temp, 128 * 4096);
801 if (!intel_private.registers)
802 return -ENOMEM;
803
804 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
805 global_cache_flush(); /* FIXME: ?? */
806
807 /* we have to call this as early as possible after the MMIO base address is known */
808 intel_i830_init_gtt_entries();
809
810 agp_bridge->gatt_table = NULL;
811
812 agp_bridge->gatt_bus_addr = temp;
813
814 return 0;
815}
816
817/* Return the gatt table to a sane state. Use the top of stolen
818 * memory for the GTT.
819 */
820static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
821{
822 return 0;
823}
824
825static int intel_i830_fetch_size(void)
826{
827 u16 gmch_ctrl;
828 struct aper_size_info_fixed *values;
829
830 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
831
832 if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
833 agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
834 /* 855GM/852GM/865G has 128MB aperture size */
835 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
836 agp_bridge->aperture_size_idx = 0;
837 return values[0].size;
838 }
839
840 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
841
842 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
843 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
844 agp_bridge->aperture_size_idx = 0;
845 return values[0].size;
846 } else {
847 agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
848 agp_bridge->aperture_size_idx = 1;
849 return values[1].size;
850 }
851
852 return 0;
853}
854
855static int intel_i830_configure(void)
856{
857 struct aper_size_info_fixed *current_size;
858 u32 temp;
859 u16 gmch_ctrl;
860 int i;
861
862 current_size = A_SIZE_FIX(agp_bridge->current_size);
863
864 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
865 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
866
867 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
868 gmch_ctrl |= I830_GMCH_ENABLED;
869 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
870
871 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
872 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
873
874 if (agp_bridge->driver->needs_scratch_page) {
875 for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
876 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
877 }
878 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
879 }
880
881 global_cache_flush();
882
883 intel_i830_setup_flush();
884 return 0;
885}
886
887static void intel_i830_cleanup(void)
888{
889 iounmap(intel_private.registers);
890}
891
892static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
893 int type)
894{
895 int i, j, num_entries;
896 void *temp;
897 int ret = -EINVAL;
898 int mask_type;
899
900 if (mem->page_count == 0)
901 goto out;
902
903 temp = agp_bridge->current_size;
904 num_entries = A_SIZE_FIX(temp)->num_entries;
905
906 if (pg_start < intel_private.gtt_entries) {
907 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
908 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
909 pg_start, intel_private.gtt_entries);
910
911 dev_info(&intel_private.pcidev->dev,
912 "trying to insert into local/stolen memory\n");
913 goto out_err;
914 }
915
916 if ((pg_start + mem->page_count) > num_entries)
917 goto out_err;
918
919 /* The i830 can't check the GTT for entries since it's read-only;
920 * depend on the caller to make the correct offset decisions.
921 */
922
923 if (type != mem->type)
924 goto out_err;
925
926 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
927
928 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
929 mask_type != INTEL_AGP_CACHED_MEMORY)
930 goto out_err;
931
932 if (!mem->is_flushed)
933 global_cache_flush();
934
935 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
936 writel(agp_bridge->driver->mask_memory(agp_bridge,
937 page_to_phys(mem->pages[i]), mask_type),
938 intel_private.registers+I810_PTE_BASE+(j*4));
939 }
940 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
941 agp_bridge->driver->tlb_flush(mem);
942
943out:
944 ret = 0;
945out_err:
946 mem->is_flushed = true;
947 return ret;
948}
949
950static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
951 int type)
952{
953 int i;
954
955 if (mem->page_count == 0)
956 return 0;
957
958 if (pg_start < intel_private.gtt_entries) {
959 dev_info(&intel_private.pcidev->dev,
960 "trying to disable local/stolen memory\n");
961 return -EINVAL;
962 }
963
964 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
965 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
966 }
967 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
968
969 agp_bridge->driver->tlb_flush(mem);
970 return 0;
971}
972
973static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
974{
975 if (type == AGP_PHYS_MEMORY)
976 return alloc_agpphysmem_i8xx(pg_count, type);
977 /* always return NULL for other allocation types for now */
978 return NULL;
979}
980
981static int intel_alloc_chipset_flush_resource(void)
982{
983 int ret;
984 ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
985 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
986 pcibios_align_resource, agp_bridge->dev);
987
988 return ret;
989}
990
991static void intel_i915_setup_chipset_flush(void)
992{
993 int ret;
994 u32 temp;
995
996 pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
997 if (!(temp & 0x1)) {
998 intel_alloc_chipset_flush_resource();
999 intel_private.resource_valid = 1;
1000 pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1001 } else {
1002 temp &= ~1;
1003
1004 intel_private.resource_valid = 1;
1005 intel_private.ifp_resource.start = temp;
1006 intel_private.ifp_resource.end = temp + PAGE_SIZE;
1007 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1008 /* some BIOSes reserve this area in a pnp resource, some don't */
1009 if (ret)
1010 intel_private.resource_valid = 0;
1011 }
1012}
1013
1014static void intel_i965_g33_setup_chipset_flush(void)
1015{
1016 u32 temp_hi, temp_lo;
1017 int ret;
1018
1019 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
1020 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
1021
1022 if (!(temp_lo & 0x1)) {
1023
1024 intel_alloc_chipset_flush_resource();
1025
1026 intel_private.resource_valid = 1;
1027 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
1028 upper_32_bits(intel_private.ifp_resource.start));
1029 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1030 } else {
1031 u64 l64;
1032
1033 temp_lo &= ~0x1;
1034 l64 = ((u64)temp_hi << 32) | temp_lo;
1035
1036 intel_private.resource_valid = 1;
1037 intel_private.ifp_resource.start = l64;
1038 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1039 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1040 /* some BIOSes reserve this area in a pnp resource, some don't */
1041 if (ret)
1042 intel_private.resource_valid = 0;
1043 }
1044}
1045
1046static void intel_i9xx_setup_flush(void)
1047{
1048 /* return if already configured */
1049 if (intel_private.ifp_resource.start)
1050 return;
1051
1052 if (IS_SNB)
1053 return;
1054
1055 /* setup a resource for this object */
1056 intel_private.ifp_resource.name = "Intel Flush Page";
1057 intel_private.ifp_resource.flags = IORESOURCE_MEM;
1058
1059 /* Set up the chipset flush page (i965/g33 vs i915 variants) */
1060 if (IS_I965 || IS_G33 || IS_G4X) {
1061 intel_i965_g33_setup_chipset_flush();
1062 } else {
1063 intel_i915_setup_chipset_flush();
1064 }
1065
1066 if (intel_private.ifp_resource.start) {
1067 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1068 if (!intel_private.i9xx_flush_page)
1069 dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing\n");
1070 }
1071}
1072
1073static int intel_i915_configure(void)
1074{
1075 struct aper_size_info_fixed *current_size;
1076 u32 temp;
1077 u16 gmch_ctrl;
1078 int i;
1079
1080 current_size = A_SIZE_FIX(agp_bridge->current_size);
1081
1082 pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
1083
1084 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1085
1086 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1087 gmch_ctrl |= I830_GMCH_ENABLED;
1088 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
1089
1090 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
1091 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
1092
1093 if (agp_bridge->driver->needs_scratch_page) {
1094 for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
1095 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1096 }
1097 readl(intel_private.gtt+i-1); /* PCI Posting. */
1098 }
1099
1100 global_cache_flush();
1101
1102 intel_i9xx_setup_flush();
1103
1104 return 0;
1105}
1106
1107static void intel_i915_cleanup(void)
1108{
1109 if (intel_private.i9xx_flush_page)
1110 iounmap(intel_private.i9xx_flush_page);
1111 if (intel_private.resource_valid)
1112 release_resource(&intel_private.ifp_resource);
1113 intel_private.ifp_resource.start = 0;
1114 intel_private.resource_valid = 0;
1115 iounmap(intel_private.gtt);
1116 iounmap(intel_private.registers);
1117}
1118
1119static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
1120{
1121 if (intel_private.i9xx_flush_page)
1122 writel(1, intel_private.i9xx_flush_page);
1123}
1124
1125static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
1126 int type)
1127{
1128 int num_entries;
1129 void *temp;
1130 int ret = -EINVAL;
1131 int mask_type;
1132
1133 if (mem->page_count == 0)
1134 goto out;
1135
1136 temp = agp_bridge->current_size;
1137 num_entries = A_SIZE_FIX(temp)->num_entries;
1138
1139 if (pg_start < intel_private.gtt_entries) {
1140 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1141 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1142 pg_start, intel_private.gtt_entries);
1143
1144 dev_info(&intel_private.pcidev->dev,
1145 "trying to insert into local/stolen memory\n");
1146 goto out_err;
1147 }
1148
1149 if ((pg_start + mem->page_count) > num_entries)
1150 goto out_err;
1151
1152 /* The i915 can't check the GTT for entries since it's read-only;
1153 * depend on the caller to make the correct offset decisions.
1154 */
1155
1156 if (type != mem->type)
1157 goto out_err;
1158
1159 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1160
1161 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1162 mask_type != INTEL_AGP_CACHED_MEMORY)
1163 goto out_err;
1164
1165 if (!mem->is_flushed)
1166 global_cache_flush();
1167
1168 intel_agp_insert_sg_entries(mem, pg_start, mask_type);
1169 agp_bridge->driver->tlb_flush(mem);
1170
1171 out:
1172 ret = 0;
1173 out_err:
1174 mem->is_flushed = true;
1175 return ret;
1176}
1177
1178static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
1179 int type)
1180{
1181 int i;
1182
1183 if (mem->page_count == 0)
1184 return 0;
1185
1186 if (pg_start < intel_private.gtt_entries) {
1187 dev_info(&intel_private.pcidev->dev,
1188 "trying to disable local/stolen memory\n");
1189 return -EINVAL;
1190 }
1191
1192 for (i = pg_start; i < (mem->page_count + pg_start); i++)
1193 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1194
1195 readl(intel_private.gtt+i-1);
1196
1197 agp_bridge->driver->tlb_flush(mem);
1198 return 0;
1199}
1200
1201/* Return the aperture size by just checking the resource length. The effect
1202 * described in the spec of the MSAC registers is just a change of the
1203 * resource size.
1204 */
1205static int intel_i9xx_fetch_size(void)
1206{
1207 int num_sizes = ARRAY_SIZE(intel_i830_sizes);
1208 int aper_size; /* size in megabytes */
1209 int i;
1210
1211 aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
1212
1213 for (i = 0; i < num_sizes; i++) {
1214 if (aper_size == intel_i830_sizes[i].size) {
1215 agp_bridge->current_size = intel_i830_sizes + i;
1216 agp_bridge->previous_size = agp_bridge->current_size;
1217 return aper_size;
1218 }
1219 }
1220
1221 return 0;
1222}
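
Worked example: on a hypothetical part whose BAR 2 is 256MB long, aper_size is 256 and the loop matches intel_i830_sizes[2] = {256, 65536, 6} (a standalone check):

        #include <assert.h>
        int main(void)
        {
                long bar_len = 256L * 1024 * 1024;  /* hypothetical BAR 2 length */
                assert(bar_len / (1024 * 1024) == 256);
                assert(65536 * 4096L == bar_len);   /* 65536 PTEs map the 256MB  */
                return 0;
        }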
1223
1224/* The intel i915 automatically initializes the agp aperture during POST.
1225 * Use the memory already set aside for the GTT.
1226 */
1227static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
1228{
1229 int page_order;
1230 struct aper_size_info_fixed *size;
1231 int num_entries;
1232 u32 temp, temp2;
1233 int gtt_map_size = 256 * 1024;
1234
1235 size = agp_bridge->current_size;
1236 page_order = size->page_order;
1237 num_entries = size->num_entries;
1238 agp_bridge->gatt_table_real = NULL;
1239
1240 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1241 pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
1242
1243 if (IS_G33)
1244 gtt_map_size = 1024 * 1024; /* 1M on G33 */
1245 intel_private.gtt = ioremap(temp2, gtt_map_size);
1246 if (!intel_private.gtt)
1247 return -ENOMEM;
1248
1249 intel_private.gtt_total_size = gtt_map_size / 4;
1250
1251 temp &= 0xfff80000;
1252
1253 intel_private.registers = ioremap(temp, 128 * 4096);
1254 if (!intel_private.registers) {
1255 iounmap(intel_private.gtt);
1256 return -ENOMEM;
1257 }
1258
1259 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1260 global_cache_flush(); /* FIXME: ? */
1261
1262 /* we have to call this as early as possible after the MMIO base address is known */
1263 intel_i830_init_gtt_entries();
1264
1265 agp_bridge->gatt_table = NULL;
1266
1267 agp_bridge->gatt_bus_addr = temp;
1268
1269 return 0;
1270}
1271
1272/*
1273 * The i965 supports 36-bit physical addresses, but to keep
1274 * the format of the GTT the same, the bits that don't fit
1275 * in a 32-bit word are shifted down to bits 4..7.
1276 *
1277 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1278 * is always zero on 32-bit architectures, so no need to make
1279 * this conditional.
1280 */
1281static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1282 dma_addr_t addr, int type)
1283{
1284 /* Shift high bits down */
1285 addr |= (addr >> 28) & 0xf0;
1286
1287 /* Type checking must be done elsewhere */
1288 return addr | bridge->driver->masks[type].mask;
1289}
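
A worked example of the bit-shuffling above (standalone, hypothetical address): for the 36-bit page address 0x3_0000_1000, bits 32..35 end up in PTE bits 4..7, and only the low 32 bits are written into the GTT.

        #include <assert.h>
        #include <stdint.h>
        int main(void)
        {
                uint64_t addr = 0x300001000ULL;       /* hypothetical 36-bit address */
                addr |= (addr >> 28) & 0xf0;          /* bits 32..35 -> bits 4..7    */
                assert((uint32_t)addr == 0x00001030); /* low word stored in the GTT,
                                                       * before OR-ing the valid bit */
                return 0;
        }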
1290
1291static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1292{
1293 u16 snb_gmch_ctl;
1294
1295 switch (agp_bridge->dev->device) {
1296 case PCI_DEVICE_ID_INTEL_GM45_HB:
1297 case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
1298 case PCI_DEVICE_ID_INTEL_Q45_HB:
1299 case PCI_DEVICE_ID_INTEL_G45_HB:
1300 case PCI_DEVICE_ID_INTEL_G41_HB:
1301 case PCI_DEVICE_ID_INTEL_B43_HB:
1302 case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
1303 case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
1304 case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
1305 case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
1306 *gtt_offset = *gtt_size = MB(2);
1307 break;
1308 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
1309 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
1310 *gtt_offset = MB(2);
1311
1312 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1313 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
1314 default:
1315 case SNB_GTT_SIZE_0M:
1316 printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
1317 *gtt_size = MB(0);
1318 break;
1319 case SNB_GTT_SIZE_1M:
1320 *gtt_size = MB(1);
1321 break;
1322 case SNB_GTT_SIZE_2M:
1323 *gtt_size = MB(2);
1324 break;
1325 }
1326 break;
1327 default:
1328 *gtt_offset = *gtt_size = KB(512);
1329 }
1330}
1331
1332/* The intel i965 automatically initializes the agp aperture during POST.
1333 * Use the memory already set aside for the GTT.
1334 */
1335static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
1336{
1337 int page_order;
1338 struct aper_size_info_fixed *size;
1339 int num_entries;
1340 u32 temp;
1341 int gtt_offset, gtt_size;
1342
1343 size = agp_bridge->current_size;
1344 page_order = size->page_order;
1345 num_entries = size->num_entries;
1346 agp_bridge->gatt_table_real = NULL;
1347
1348 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1349
1350 temp &= 0xfff00000;
1351
1352 intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
1353
1354 intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
1355
1356 if (!intel_private.gtt)
1357 return -ENOMEM;
1358
1359 intel_private.gtt_total_size = gtt_size / 4;
1360
1361 intel_private.registers = ioremap(temp, 128 * 4096);
1362 if (!intel_private.registers) {
1363 iounmap(intel_private.gtt);
1364 return -ENOMEM;
1365 }
1366
1367 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1368 global_cache_flush(); /* FIXME: ? */
1369
1370 /* we have to call this as early as possible after the MMIO base address is known */
1371 intel_i830_init_gtt_entries();
1372
1373 agp_bridge->gatt_table = NULL;
1374
1375 agp_bridge->gatt_bus_addr = temp;
1376
1377 return 0;
1378}
1379
1380static const struct agp_bridge_driver intel_810_driver = {
1381 .owner = THIS_MODULE,
1382 .aperture_sizes = intel_i810_sizes,
1383 .size_type = FIXED_APER_SIZE,
1384 .num_aperture_sizes = 2,
1385 .needs_scratch_page = true,
1386 .configure = intel_i810_configure,
1387 .fetch_size = intel_i810_fetch_size,
1388 .cleanup = intel_i810_cleanup,
1389 .tlb_flush = intel_i810_tlbflush,
1390 .mask_memory = intel_i810_mask_memory,
1391 .masks = intel_i810_masks,
1392 .agp_enable = intel_i810_agp_enable,
1393 .cache_flush = global_cache_flush,
1394 .create_gatt_table = agp_generic_create_gatt_table,
1395 .free_gatt_table = agp_generic_free_gatt_table,
1396 .insert_memory = intel_i810_insert_entries,
1397 .remove_memory = intel_i810_remove_entries,
1398 .alloc_by_type = intel_i810_alloc_by_type,
1399 .free_by_type = intel_i810_free_by_type,
1400 .agp_alloc_page = agp_generic_alloc_page,
1401 .agp_alloc_pages = agp_generic_alloc_pages,
1402 .agp_destroy_page = agp_generic_destroy_page,
1403 .agp_destroy_pages = agp_generic_destroy_pages,
1404 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1405};
1406
1407static const struct agp_bridge_driver intel_830_driver = {
1408 .owner = THIS_MODULE,
1409 .aperture_sizes = intel_i830_sizes,
1410 .size_type = FIXED_APER_SIZE,
1411 .num_aperture_sizes = 4,
1412 .needs_scratch_page = true,
1413 .configure = intel_i830_configure,
1414 .fetch_size = intel_i830_fetch_size,
1415 .cleanup = intel_i830_cleanup,
1416 .tlb_flush = intel_i810_tlbflush,
1417 .mask_memory = intel_i810_mask_memory,
1418 .masks = intel_i810_masks,
1419 .agp_enable = intel_i810_agp_enable,
1420 .cache_flush = global_cache_flush,
1421 .create_gatt_table = intel_i830_create_gatt_table,
1422 .free_gatt_table = intel_i830_free_gatt_table,
1423 .insert_memory = intel_i830_insert_entries,
1424 .remove_memory = intel_i830_remove_entries,
1425 .alloc_by_type = intel_i830_alloc_by_type,
1426 .free_by_type = intel_i810_free_by_type,
1427 .agp_alloc_page = agp_generic_alloc_page,
1428 .agp_alloc_pages = agp_generic_alloc_pages,
1429 .agp_destroy_page = agp_generic_destroy_page,
1430 .agp_destroy_pages = agp_generic_destroy_pages,
1431 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1432 .chipset_flush = intel_i830_chipset_flush,
1433};
1434
1435static const struct agp_bridge_driver intel_915_driver = {
1436 .owner = THIS_MODULE,
1437 .aperture_sizes = intel_i830_sizes,
1438 .size_type = FIXED_APER_SIZE,
1439 .num_aperture_sizes = 4,
1440 .needs_scratch_page = true,
1441 .configure = intel_i915_configure,
1442 .fetch_size = intel_i9xx_fetch_size,
1443 .cleanup = intel_i915_cleanup,
1444 .tlb_flush = intel_i810_tlbflush,
1445 .mask_memory = intel_i810_mask_memory,
1446 .masks = intel_i810_masks,
1447 .agp_enable = intel_i810_agp_enable,
1448 .cache_flush = global_cache_flush,
1449 .create_gatt_table = intel_i915_create_gatt_table,
1450 .free_gatt_table = intel_i830_free_gatt_table,
1451 .insert_memory = intel_i915_insert_entries,
1452 .remove_memory = intel_i915_remove_entries,
1453 .alloc_by_type = intel_i830_alloc_by_type,
1454 .free_by_type = intel_i810_free_by_type,
1455 .agp_alloc_page = agp_generic_alloc_page,
1456 .agp_alloc_pages = agp_generic_alloc_pages,
1457 .agp_destroy_page = agp_generic_destroy_page,
1458 .agp_destroy_pages = agp_generic_destroy_pages,
1459 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1460 .chipset_flush = intel_i915_chipset_flush,
1461#ifdef USE_PCI_DMA_API
1462 .agp_map_page = intel_agp_map_page,
1463 .agp_unmap_page = intel_agp_unmap_page,
1464 .agp_map_memory = intel_agp_map_memory,
1465 .agp_unmap_memory = intel_agp_unmap_memory,
1466#endif
1467};
1468
1469static const struct agp_bridge_driver intel_i965_driver = {
1470 .owner = THIS_MODULE,
1471 .aperture_sizes = intel_i830_sizes,
1472 .size_type = FIXED_APER_SIZE,
1473 .num_aperture_sizes = 4,
1474 .needs_scratch_page = true,
1475 .configure = intel_i915_configure,
1476 .fetch_size = intel_i9xx_fetch_size,
1477 .cleanup = intel_i915_cleanup,
1478 .tlb_flush = intel_i810_tlbflush,
1479 .mask_memory = intel_i965_mask_memory,
1480 .masks = intel_i810_masks,
1481 .agp_enable = intel_i810_agp_enable,
1482 .cache_flush = global_cache_flush,
1483 .create_gatt_table = intel_i965_create_gatt_table,
1484 .free_gatt_table = intel_i830_free_gatt_table,
1485 .insert_memory = intel_i915_insert_entries,
1486 .remove_memory = intel_i915_remove_entries,
1487 .alloc_by_type = intel_i830_alloc_by_type,
1488 .free_by_type = intel_i810_free_by_type,
1489 .agp_alloc_page = agp_generic_alloc_page,
1490 .agp_alloc_pages = agp_generic_alloc_pages,
1491 .agp_destroy_page = agp_generic_destroy_page,
1492 .agp_destroy_pages = agp_generic_destroy_pages,
1493 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1494 .chipset_flush = intel_i915_chipset_flush,
1495#ifdef USE_PCI_DMA_API
1496 .agp_map_page = intel_agp_map_page,
1497 .agp_unmap_page = intel_agp_unmap_page,
1498 .agp_map_memory = intel_agp_map_memory,
1499 .agp_unmap_memory = intel_agp_unmap_memory,
1500#endif
1501};
1502
1503static const struct agp_bridge_driver intel_g33_driver = {
1504 .owner = THIS_MODULE,
1505 .aperture_sizes = intel_i830_sizes,
1506 .size_type = FIXED_APER_SIZE,
1507 .num_aperture_sizes = 4,
1508 .needs_scratch_page = true,
1509 .configure = intel_i915_configure,
1510 .fetch_size = intel_i9xx_fetch_size,
1511 .cleanup = intel_i915_cleanup,
1512 .tlb_flush = intel_i810_tlbflush,
1513 .mask_memory = intel_i965_mask_memory,
1514 .masks = intel_i810_masks,
1515 .agp_enable = intel_i810_agp_enable,
1516 .cache_flush = global_cache_flush,
1517 .create_gatt_table = intel_i915_create_gatt_table,
1518 .free_gatt_table = intel_i830_free_gatt_table,
1519 .insert_memory = intel_i915_insert_entries,
1520 .remove_memory = intel_i915_remove_entries,
1521 .alloc_by_type = intel_i830_alloc_by_type,
1522 .free_by_type = intel_i810_free_by_type,
1523 .agp_alloc_page = agp_generic_alloc_page,
1524 .agp_alloc_pages = agp_generic_alloc_pages,
1525 .agp_destroy_page = agp_generic_destroy_page,
1526 .agp_destroy_pages = agp_generic_destroy_pages,
1527 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1528 .chipset_flush = intel_i915_chipset_flush,
1529#ifdef USE_PCI_DMA_API
1530 .agp_map_page = intel_agp_map_page,
1531 .agp_unmap_page = intel_agp_unmap_page,
1532 .agp_map_memory = intel_agp_map_memory,
1533 .agp_unmap_memory = intel_agp_unmap_memory,
1534#endif
1535};