path: root/drivers/char/agp
author    Daniel Vetter <daniel.vetter@ffwll.ch>  2010-04-13 18:29:52 -0400
committer Eric Anholt <eric@anholt.net>           2010-04-18 20:35:47 -0400
commit    f51b76621137c18501f6d21a995d36a8bcb49999 (patch)
tree      ea146954362dd400b4582e4fb89242b075aaf556 /drivers/char/agp
parent    ff7cdd691a0c4925c1803bf89a4c08ccda2d7658 (diff)
agp/intel: split out the GTT support
intel-agp.c actually contains two different drivers: an agp driver for _physical_ agp slots, and the GTT driver that is used by the intel drm modules. Split them to prevent any further confusion. This patch just moves the code and #includes intel-gtt.c in intel-agp.c. Later patches will untangle these two drivers further.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Eric Anholt <eric@anholt.net>
Diffstat (limited to 'drivers/char/agp')
-rw-r--r--  drivers/char/agp/intel-agp.c  1525
-rw-r--r--  drivers/char/agp/intel-gtt.c  1535
2 files changed, 1537 insertions(+), 1523 deletions(-)
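The mechanics of the split are worth noting: intel-gtt.c is not yet compiled on its own; it is #included into intel-agp.c, so all the moved code still lands in one translation unit and every static symbol stays visible across the file boundary. A minimal userspace sketch of the same pattern (file names here are hypothetical):

    /* main.c -- the original file, after the split */
    #include <stdio.h>
    #include "moved.c"            /* textual inclusion: one translation unit */

    int main(void)
    {
            printf("%d\n", moved_helper());  /* static symbol still reachable */
            return 0;
    }

    /* moved.c -- the split-out code; needs no header of its own */
    static int moved_helper(void)
    {
            return 42;
    }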
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 154bb9256961..6a22aa9783b8 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,1375 +12,11 @@
12 12 #include "agp.h"
13 13 #include "intel-agp.h"
14 14
15#include "intel-gtt.c"
16
15 17 int intel_agp_enabled;
16 18 EXPORT_SYMBOL(intel_agp_enabled);
17 19
18/*
19 * If we have Intel graphics, we're not going to have anything other than
20 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
21 * on the Intel IOMMU support (CONFIG_DMAR).
22 * Only newer chipsets need to bother with this, of course.
23 */
24#ifdef CONFIG_DMAR
25#define USE_PCI_DMA_API 1
26#endif
27
28extern int agp_memory_reserved;
29
30
31static const struct aper_size_info_fixed intel_i810_sizes[] =
32{
33 {64, 16384, 4},
34 /* The 32M mode still requires a 64k gatt */
35 {32, 8192, 4}
36};
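Each aper_size_info_fixed entry here reads as {aperture size in MB, number of GTT entries, page order}, and the numbers are mutually consistent: with one 4-byte PTE per 4 KiB page, a 64 MB aperture needs a 64 KiB table, while the 32 MB mode would fill only half of one, hence the comment about still requiring a 64k gatt. A standalone check of that arithmetic:

    #include <assert.h>

    int main(void)
    {
            unsigned long aperture = 64UL << 20;      /* 64 MB aperture */
            unsigned long entries  = aperture / 4096; /* one PTE per 4 KiB page */

            assert(entries == 16384);                 /* matches {64, 16384, 4} */
            assert(entries * 4 == 64 * 1024);         /* 4-byte PTEs -> 64 KiB gatt */
            return 0;
    }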
37
38#define AGP_DCACHE_MEMORY 1
39#define AGP_PHYS_MEMORY 2
40#define INTEL_AGP_CACHED_MEMORY 3
41
42static struct gatt_mask intel_i810_masks[] =
43{
44 {.mask = I810_PTE_VALID, .type = 0},
45 {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
46 {.mask = I810_PTE_VALID, .type = 0},
47 {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
48 .type = INTEL_AGP_CACHED_MEMORY}
49};
50
51static struct _intel_private {
52 struct pci_dev *pcidev; /* device one */
53 u8 __iomem *registers;
54 u32 __iomem *gtt; /* I915G */
55 int num_dcache_entries;
56 /* gtt_entries is the number of gtt entries that are already mapped
57 * to stolen memory. Stolen memory is larger than the memory mapped
58 * through gtt_entries, as it includes some reserved space for the BIOS
59 * popup and for the GTT.
60 */
61 int gtt_entries; /* i830+ */
62 int gtt_total_size;
63 union {
64 void __iomem *i9xx_flush_page;
65 void *i8xx_flush_page;
66 };
67 struct page *i8xx_page;
68 struct resource ifp_resource;
69 int resource_valid;
70} intel_private;
71
72#ifdef USE_PCI_DMA_API
73static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
74{
75 *ret = pci_map_page(intel_private.pcidev, page, 0,
76 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
77 if (pci_dma_mapping_error(intel_private.pcidev, *ret))
78 return -EINVAL;
79 return 0;
80}
81
82static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
83{
84 pci_unmap_page(intel_private.pcidev, dma,
85 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
86}
87
88static void intel_agp_free_sglist(struct agp_memory *mem)
89{
90 struct sg_table st;
91
92 st.sgl = mem->sg_list;
93 st.orig_nents = st.nents = mem->page_count;
94
95 sg_free_table(&st);
96
97 mem->sg_list = NULL;
98 mem->num_sg = 0;
99}
100
101static int intel_agp_map_memory(struct agp_memory *mem)
102{
103 struct sg_table st;
104 struct scatterlist *sg;
105 int i;
106
107 DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
108
109 if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
110 return -ENOMEM;
111
112 mem->sg_list = sg = st.sgl;
113
114 for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
115 sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
116
117 mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
118 mem->page_count, PCI_DMA_BIDIRECTIONAL);
119 if (unlikely(!mem->num_sg)) {
120 intel_agp_free_sglist(mem);
121 return -ENOMEM;
122 }
123 return 0;
124}
125
126static void intel_agp_unmap_memory(struct agp_memory *mem)
127{
128 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
129
130 pci_unmap_sg(intel_private.pcidev, mem->sg_list,
131 mem->page_count, PCI_DMA_BIDIRECTIONAL);
132 intel_agp_free_sglist(mem);
133}
134
135static void intel_agp_insert_sg_entries(struct agp_memory *mem,
136 off_t pg_start, int mask_type)
137{
138 struct scatterlist *sg;
139 int i, j;
140
141 j = pg_start;
142
143 WARN_ON(!mem->num_sg);
144
145 if (mem->num_sg == mem->page_count) {
146 for_each_sg(mem->sg_list, sg, mem->page_count, i) {
147 writel(agp_bridge->driver->mask_memory(agp_bridge,
148 sg_dma_address(sg), mask_type),
149 intel_private.gtt+j);
150 j++;
151 }
152 } else {
153 /* sg may merge pages, but we have to separate
154 * per-page addr for GTT */
155 unsigned int len, m;
156
157 for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
158 len = sg_dma_len(sg) / PAGE_SIZE;
159 for (m = 0; m < len; m++) {
160 writel(agp_bridge->driver->mask_memory(agp_bridge,
161 sg_dma_address(sg) + m * PAGE_SIZE,
162 mask_type),
163 intel_private.gtt+j);
164 j++;
165 }
166 }
167 }
168 readl(intel_private.gtt+j-1);
169}
170
171#else
172
173static void intel_agp_insert_sg_entries(struct agp_memory *mem,
174 off_t pg_start, int mask_type)
175{
176 int i, j;
177 u32 cache_bits = 0;
178
179 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
180 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
181 {
182 cache_bits = I830_PTE_SYSTEM_CACHED;
183 }
184
185 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
186 writel(agp_bridge->driver->mask_memory(agp_bridge,
187 page_to_phys(mem->pages[i]), mask_type),
188 intel_private.gtt+j);
189 }
190
191 readl(intel_private.gtt+j-1);
192}
193
194#endif
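In the DMA-API variant above, pci_map_sg() may coalesce physically contiguous pages into a single scatterlist entry (num_sg < page_count), while the GTT still wants one PTE per 4 KiB page; the inner loop therefore re-derives a per-page address from each merged entry. The address math in isolation:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t dma_addr = 0x10000;  /* start of one merged sg entry */
            uint64_t pte[3];
            unsigned int m;

            for (m = 0; m < 3; m++)       /* entry spans 3 pages */
                    pte[m] = dma_addr + (uint64_t)m * 4096;

            assert(pte[2] == 0x12000);    /* pages at 0x10000/0x11000/0x12000 */
            return 0;
    }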
195
196static int intel_i810_fetch_size(void)
197{
198 u32 smram_miscc;
199 struct aper_size_info_fixed *values;
200
201 pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
202 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
203
204 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
205 dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
206 return 0;
207 }
208 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
209 agp_bridge->previous_size =
210 agp_bridge->current_size = (void *) (values + 1);
211 agp_bridge->aperture_size_idx = 1;
212 return values[1].size;
213 } else {
214 agp_bridge->previous_size =
215 agp_bridge->current_size = (void *) (values);
216 agp_bridge->aperture_size_idx = 0;
217 return values[0].size;
218 }
219
220 return 0;
221}
222
223static int intel_i810_configure(void)
224{
225 struct aper_size_info_fixed *current_size;
226 u32 temp;
227 int i;
228
229 current_size = A_SIZE_FIX(agp_bridge->current_size);
230
231 if (!intel_private.registers) {
232 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
233 temp &= 0xfff80000;
234
235 intel_private.registers = ioremap(temp, 128 * 4096);
236 if (!intel_private.registers) {
237 dev_err(&intel_private.pcidev->dev,
238 "can't remap memory\n");
239 return -ENOMEM;
240 }
241 }
242
243 if ((readl(intel_private.registers+I810_DRAM_CTL)
244 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
245 /* This will need to be dynamically assigned */
246 dev_info(&intel_private.pcidev->dev,
247 "detected 4MB dedicated video ram\n");
248 intel_private.num_dcache_entries = 1024;
249 }
250 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
251 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
252 writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
253 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
254
255 if (agp_bridge->driver->needs_scratch_page) {
256 for (i = 0; i < current_size->num_entries; i++) {
257 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
258 }
259 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
260 }
261 global_cache_flush();
262 return 0;
263}
264
265static void intel_i810_cleanup(void)
266{
267 writel(0, intel_private.registers+I810_PGETBL_CTL);
268 readl(intel_private.registers); /* PCI Posting. */
269 iounmap(intel_private.registers);
270}
271
272static void intel_i810_tlbflush(struct agp_memory *mem)
273{
274 return;
275}
276
277static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
278{
279 return;
280}
281
282/* Exists to support ARGB cursors */
283static struct page *i8xx_alloc_pages(void)
284{
285 struct page *page;
286
287 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
288 if (page == NULL)
289 return NULL;
290
291 if (set_pages_uc(page, 4) < 0) {
292 set_pages_wb(page, 4);
293 __free_pages(page, 2);
294 return NULL;
295 }
296 get_page(page);
297 atomic_inc(&agp_bridge->current_memory_agp);
298 return page;
299}
300
301static void i8xx_destroy_pages(struct page *page)
302{
303 if (page == NULL)
304 return;
305
306 set_pages_wb(page, 4);
307 put_page(page);
308 __free_pages(page, 2);
309 atomic_dec(&agp_bridge->current_memory_agp);
310}
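The order-2 alloc_pages() call hands back 2^2 = 4 contiguous pages, i.e. 16 KiB, which is exactly the footprint of a 64x64 ARGB cursor (64 * 64 * 4 bytes); the matching set_pages_uc()/set_pages_wb() calls must therefore cover 4 pages. The sizes, checked:

    #include <assert.h>

    int main(void)
    {
            int order = 2;
            int pages = 1 << order;              /* order-2 block = 4 pages */

            assert(pages * 4096 == 16 * 1024);   /* 16 KiB allocation */
            assert(64 * 64 * 4 == 16 * 1024);    /* 64x64 ARGB cursor fits */
            return 0;
    }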
311
312static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
313 int type)
314{
315 if (type < AGP_USER_TYPES)
316 return type;
317 else if (type == AGP_USER_CACHED_MEMORY)
318 return INTEL_AGP_CACHED_MEMORY;
319 else
320 return 0;
321}
322
323static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
324 int type)
325{
326 int i, j, num_entries;
327 void *temp;
328 int ret = -EINVAL;
329 int mask_type;
330
331 if (mem->page_count == 0)
332 goto out;
333
334 temp = agp_bridge->current_size;
335 num_entries = A_SIZE_FIX(temp)->num_entries;
336
337 if ((pg_start + mem->page_count) > num_entries)
338 goto out_err;
339
340
341 for (j = pg_start; j < (pg_start + mem->page_count); j++) {
342 if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
343 ret = -EBUSY;
344 goto out_err;
345 }
346 }
347
348 if (type != mem->type)
349 goto out_err;
350
351 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
352
353 switch (mask_type) {
354 case AGP_DCACHE_MEMORY:
355 if (!mem->is_flushed)
356 global_cache_flush();
357 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
358 writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
359 intel_private.registers+I810_PTE_BASE+(i*4));
360 }
361 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
362 break;
363 case AGP_PHYS_MEMORY:
364 case AGP_NORMAL_MEMORY:
365 if (!mem->is_flushed)
366 global_cache_flush();
367 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
368 writel(agp_bridge->driver->mask_memory(agp_bridge,
369 page_to_phys(mem->pages[i]), mask_type),
370 intel_private.registers+I810_PTE_BASE+(j*4));
371 }
372 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
373 break;
374 default:
375 goto out_err;
376 }
377
378 agp_bridge->driver->tlb_flush(mem);
379out:
380 ret = 0;
381out_err:
382 mem->is_flushed = true;
383 return ret;
384}
385
386static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
387 int type)
388{
389 int i;
390
391 if (mem->page_count == 0)
392 return 0;
393
394 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
395 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
396 }
397 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
398
399 agp_bridge->driver->tlb_flush(mem);
400 return 0;
401}
402
403/*
404 * The i810/i830 requires a physical address to program its mouse
405 * pointer into hardware.
406 * However the Xserver still writes to it through the agp aperture.
407 */
408static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
409{
410 struct agp_memory *new;
411 struct page *page;
412
413 switch (pg_count) {
414 case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
415 break;
416 case 4:
417 /* kludge to get 4 physical pages for ARGB cursor */
418 page = i8xx_alloc_pages();
419 break;
420 default:
421 return NULL;
422 }
423
424 if (page == NULL)
425 return NULL;
426
427 new = agp_create_memory(pg_count);
428 if (new == NULL)
429 return NULL;
430
431 new->pages[0] = page;
432 if (pg_count == 4) {
433 /* kludge to get 4 physical pages for ARGB cursor */
434 new->pages[1] = new->pages[0] + 1;
435 new->pages[2] = new->pages[1] + 1;
436 new->pages[3] = new->pages[2] + 1;
437 }
438 new->page_count = pg_count;
439 new->num_scratch_pages = pg_count;
440 new->type = AGP_PHYS_MEMORY;
441 new->physical = page_to_phys(new->pages[0]);
442 return new;
443}
444
445static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
446{
447 struct agp_memory *new;
448
449 if (type == AGP_DCACHE_MEMORY) {
450 if (pg_count != intel_private.num_dcache_entries)
451 return NULL;
452
453 new = agp_create_memory(1);
454 if (new == NULL)
455 return NULL;
456
457 new->type = AGP_DCACHE_MEMORY;
458 new->page_count = pg_count;
459 new->num_scratch_pages = 0;
460 agp_free_page_array(new);
461 return new;
462 }
463 if (type == AGP_PHYS_MEMORY)
464 return alloc_agpphysmem_i8xx(pg_count, type);
465 return NULL;
466}
467
468static void intel_i810_free_by_type(struct agp_memory *curr)
469{
470 agp_free_key(curr->key);
471 if (curr->type == AGP_PHYS_MEMORY) {
472 if (curr->page_count == 4)
473 i8xx_destroy_pages(curr->pages[0]);
474 else {
475 agp_bridge->driver->agp_destroy_page(curr->pages[0],
476 AGP_PAGE_DESTROY_UNMAP);
477 agp_bridge->driver->agp_destroy_page(curr->pages[0],
478 AGP_PAGE_DESTROY_FREE);
479 }
480 agp_free_page_array(curr);
481 }
482 kfree(curr);
483}
484
485static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
486 dma_addr_t addr, int type)
487{
488 /* Type checking must be done elsewhere */
489 return addr | bridge->driver->masks[type].mask;
490}
491
492static struct aper_size_info_fixed intel_i830_sizes[] =
493{
494 {128, 32768, 5},
495 /* The 64M mode still requires a 128k gatt */
496 {64, 16384, 5},
497 {256, 65536, 6},
498 {512, 131072, 7},
499};
500
501static void intel_i830_init_gtt_entries(void)
502{
503 u16 gmch_ctrl;
504 int gtt_entries = 0;
505 u8 rdct;
506 int local = 0;
507 static const int ddt[4] = { 0, 16, 32, 64 };
508 int size; /* reserved space (in kb) at the top of stolen memory */
509
510 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
511
512 if (IS_I965) {
513 u32 pgetbl_ctl;
514 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
515
516 /* The 965 has a field telling us the size of the GTT,
517 * which may be larger than what is necessary to map the
518 * aperture.
519 */
520 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
521 case I965_PGETBL_SIZE_128KB:
522 size = 128;
523 break;
524 case I965_PGETBL_SIZE_256KB:
525 size = 256;
526 break;
527 case I965_PGETBL_SIZE_512KB:
528 size = 512;
529 break;
530 case I965_PGETBL_SIZE_1MB:
531 size = 1024;
532 break;
533 case I965_PGETBL_SIZE_2MB:
534 size = 2048;
535 break;
536 case I965_PGETBL_SIZE_1_5MB:
537 size = 1024 + 512;
538 break;
539 default:
540 dev_info(&intel_private.pcidev->dev,
541 "unknown page table size, assuming 512KB\n");
542 size = 512;
543 }
544 size += 4; /* add in BIOS popup space */
545 } else if (IS_G33 && !IS_PINEVIEW) {
546 /* G33's GTT size defined in gmch_ctrl */
547 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
548 case G33_PGETBL_SIZE_1M:
549 size = 1024;
550 break;
551 case G33_PGETBL_SIZE_2M:
552 size = 2048;
553 break;
554 default:
555 dev_info(&agp_bridge->dev->dev,
556 "unknown page table size 0x%x, assuming 512KB\n",
557 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
558 size = 512;
559 }
560 size += 4;
561 } else if (IS_G4X || IS_PINEVIEW) {
562 /* On 4 series hardware, GTT stolen is separate from graphics
563 * stolen, ignore it in stolen gtt entries counting. However,
564 * 4KB of the stolen memory doesn't get mapped to the GTT.
565 */
566 size = 4;
567 } else {
568 /* On previous hardware, the GTT size was just what was
569 * required to map the aperture.
570 */
571 size = agp_bridge->driver->fetch_size() + 4;
572 }
573
574 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
575 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
576 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
577 case I830_GMCH_GMS_STOLEN_512:
578 gtt_entries = KB(512) - KB(size);
579 break;
580 case I830_GMCH_GMS_STOLEN_1024:
581 gtt_entries = MB(1) - KB(size);
582 break;
583 case I830_GMCH_GMS_STOLEN_8192:
584 gtt_entries = MB(8) - KB(size);
585 break;
586 case I830_GMCH_GMS_LOCAL:
587 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
588 gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
589 MB(ddt[I830_RDRAM_DDT(rdct)]);
590 local = 1;
591 break;
592 default:
593 gtt_entries = 0;
594 break;
595 }
596 } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
597 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
598 /*
599 * SandyBridge has new memory control reg at 0x50.w
600 */
601 u16 snb_gmch_ctl;
602 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
603 switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
604 case SNB_GMCH_GMS_STOLEN_32M:
605 gtt_entries = MB(32) - KB(size);
606 break;
607 case SNB_GMCH_GMS_STOLEN_64M:
608 gtt_entries = MB(64) - KB(size);
609 break;
610 case SNB_GMCH_GMS_STOLEN_96M:
611 gtt_entries = MB(96) - KB(size);
612 break;
613 case SNB_GMCH_GMS_STOLEN_128M:
614 gtt_entries = MB(128) - KB(size);
615 break;
616 case SNB_GMCH_GMS_STOLEN_160M:
617 gtt_entries = MB(160) - KB(size);
618 break;
619 case SNB_GMCH_GMS_STOLEN_192M:
620 gtt_entries = MB(192) - KB(size);
621 break;
622 case SNB_GMCH_GMS_STOLEN_224M:
623 gtt_entries = MB(224) - KB(size);
624 break;
625 case SNB_GMCH_GMS_STOLEN_256M:
626 gtt_entries = MB(256) - KB(size);
627 break;
628 case SNB_GMCH_GMS_STOLEN_288M:
629 gtt_entries = MB(288) - KB(size);
630 break;
631 case SNB_GMCH_GMS_STOLEN_320M:
632 gtt_entries = MB(320) - KB(size);
633 break;
634 case SNB_GMCH_GMS_STOLEN_352M:
635 gtt_entries = MB(352) - KB(size);
636 break;
637 case SNB_GMCH_GMS_STOLEN_384M:
638 gtt_entries = MB(384) - KB(size);
639 break;
640 case SNB_GMCH_GMS_STOLEN_416M:
641 gtt_entries = MB(416) - KB(size);
642 break;
643 case SNB_GMCH_GMS_STOLEN_448M:
644 gtt_entries = MB(448) - KB(size);
645 break;
646 case SNB_GMCH_GMS_STOLEN_480M:
647 gtt_entries = MB(480) - KB(size);
648 break;
649 case SNB_GMCH_GMS_STOLEN_512M:
650 gtt_entries = MB(512) - KB(size);
651 break;
652 }
653 } else {
654 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
655 case I855_GMCH_GMS_STOLEN_1M:
656 gtt_entries = MB(1) - KB(size);
657 break;
658 case I855_GMCH_GMS_STOLEN_4M:
659 gtt_entries = MB(4) - KB(size);
660 break;
661 case I855_GMCH_GMS_STOLEN_8M:
662 gtt_entries = MB(8) - KB(size);
663 break;
664 case I855_GMCH_GMS_STOLEN_16M:
665 gtt_entries = MB(16) - KB(size);
666 break;
667 case I855_GMCH_GMS_STOLEN_32M:
668 gtt_entries = MB(32) - KB(size);
669 break;
670 case I915_GMCH_GMS_STOLEN_48M:
671 /* Check it's really I915G */
672 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
673 gtt_entries = MB(48) - KB(size);
674 else
675 gtt_entries = 0;
676 break;
677 case I915_GMCH_GMS_STOLEN_64M:
678 /* Check it's really I915G */
679 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
680 gtt_entries = MB(64) - KB(size);
681 else
682 gtt_entries = 0;
683 break;
684 case G33_GMCH_GMS_STOLEN_128M:
685 if (IS_G33 || IS_I965 || IS_G4X)
686 gtt_entries = MB(128) - KB(size);
687 else
688 gtt_entries = 0;
689 break;
690 case G33_GMCH_GMS_STOLEN_256M:
691 if (IS_G33 || IS_I965 || IS_G4X)
692 gtt_entries = MB(256) - KB(size);
693 else
694 gtt_entries = 0;
695 break;
696 case INTEL_GMCH_GMS_STOLEN_96M:
697 if (IS_I965 || IS_G4X)
698 gtt_entries = MB(96) - KB(size);
699 else
700 gtt_entries = 0;
701 break;
702 case INTEL_GMCH_GMS_STOLEN_160M:
703 if (IS_I965 || IS_G4X)
704 gtt_entries = MB(160) - KB(size);
705 else
706 gtt_entries = 0;
707 break;
708 case INTEL_GMCH_GMS_STOLEN_224M:
709 if (IS_I965 || IS_G4X)
710 gtt_entries = MB(224) - KB(size);
711 else
712 gtt_entries = 0;
713 break;
714 case INTEL_GMCH_GMS_STOLEN_352M:
715 if (IS_I965 || IS_G4X)
716 gtt_entries = MB(352) - KB(size);
717 else
718 gtt_entries = 0;
719 break;
720 default:
721 gtt_entries = 0;
722 break;
723 }
724 }
725 if (gtt_entries > 0) {
726 dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
727 gtt_entries / KB(1), local ? "local" : "stolen");
728 gtt_entries /= KB(4);
729 } else {
730 dev_info(&agp_bridge->dev->dev,
731 "no pre-allocated video memory detected\n");
732 gtt_entries = 0;
733 }
734
735 intel_private.gtt_entries = gtt_entries;
736}
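Every branch above follows the same pattern: take the stolen-memory size, subtract the reserved space at its top (the GTT itself plus the 4 KiB BIOS popup, in KiB), then convert the remaining bytes into 4 KiB entries. A worked example for 32 MB of stolen memory with a 128 KiB GTT (so size = 132), using KB()/MB() helpers assumed to match the driver's:

    #include <assert.h>

    #define KB(x) ((x) * 1024)
    #define MB(x) (KB(KB(x)))

    int main(void)
    {
            int size = 132;                       /* reserved KiB at the top */
            int gtt_entries = MB(32) - KB(size);  /* usable stolen bytes */

            gtt_entries /= KB(4);                 /* bytes -> 4 KiB entries */
            assert(gtt_entries == 8159);
            return 0;
    }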
737
738static void intel_i830_fini_flush(void)
739{
740 kunmap(intel_private.i8xx_page);
741 intel_private.i8xx_flush_page = NULL;
742 unmap_page_from_agp(intel_private.i8xx_page);
743
744 __free_page(intel_private.i8xx_page);
745 intel_private.i8xx_page = NULL;
746}
747
748static void intel_i830_setup_flush(void)
749{
750 /* return if we've already set the flush mechanism up */
751 if (intel_private.i8xx_page)
752 return;
753
754 intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
755 if (!intel_private.i8xx_page)
756 return;
757
758 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
759 if (!intel_private.i8xx_flush_page)
760 intel_i830_fini_flush();
761}
762
763/* The chipset_flush interface needs to get data that has already been
764 * flushed out of the CPU all the way out to main memory, because the GPU
765 * doesn't snoop those buffers.
766 *
767 * The 8xx series doesn't have the same lovely interface for flushing the
768 * chipset write buffers that the later chips do. According to the 865
769 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
770 * that buffer out, we just fill 1KB and clflush it out, on the assumption
771 * that it'll push whatever was in there out. It appears to work.
772 */
773static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
774{
775 unsigned int *pg = intel_private.i8xx_flush_page;
776
777 memset(pg, 0, 1024);
778
779 if (cpu_has_clflush)
780 clflush_cache_range(pg, 1024);
781 else if (wbinvd_on_all_cpus() != 0)
782 printk(KERN_ERR "Timed out waiting for cache flush.\n");
783}
784
785/* The intel i830 automatically initializes the agp aperture during POST.
786 * Use the memory already set aside for it in the GTT.
787 */
788static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
789{
790 int page_order;
791 struct aper_size_info_fixed *size;
792 int num_entries;
793 u32 temp;
794
795 size = agp_bridge->current_size;
796 page_order = size->page_order;
797 num_entries = size->num_entries;
798 agp_bridge->gatt_table_real = NULL;
799
800 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
801 temp &= 0xfff80000;
802
803 intel_private.registers = ioremap(temp, 128 * 4096);
804 if (!intel_private.registers)
805 return -ENOMEM;
806
807 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
808 global_cache_flush(); /* FIXME: ?? */
809
810 /* we have to call this as early as possible after the MMIO base address is known */
811 intel_i830_init_gtt_entries();
812
813 agp_bridge->gatt_table = NULL;
814
815 agp_bridge->gatt_bus_addr = temp;
816
817 return 0;
818}
819
820/* Return the gatt table to a sane state. Use the top of stolen
821 * memory for the GTT.
822 */
823static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
824{
825 return 0;
826}
827
828static int intel_i830_fetch_size(void)
829{
830 u16 gmch_ctrl;
831 struct aper_size_info_fixed *values;
832
833 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
834
835 if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
836 agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
837 /* 855GM/852GM/865G has 128MB aperture size */
838 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
839 agp_bridge->aperture_size_idx = 0;
840 return values[0].size;
841 }
842
843 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
844
845 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
846 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
847 agp_bridge->aperture_size_idx = 0;
848 return values[0].size;
849 } else {
850 agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
851 agp_bridge->aperture_size_idx = 1;
852 return values[1].size;
853 }
854
855 return 0;
856}
857
858static int intel_i830_configure(void)
859{
860 struct aper_size_info_fixed *current_size;
861 u32 temp;
862 u16 gmch_ctrl;
863 int i;
864
865 current_size = A_SIZE_FIX(agp_bridge->current_size);
866
867 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
868 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
869
870 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
871 gmch_ctrl |= I830_GMCH_ENABLED;
872 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
873
874 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
875 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
876
877 if (agp_bridge->driver->needs_scratch_page) {
878 for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
879 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
880 }
881 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
882 }
883
884 global_cache_flush();
885
886 intel_i830_setup_flush();
887 return 0;
888}
889
890static void intel_i830_cleanup(void)
891{
892 iounmap(intel_private.registers);
893}
894
895static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
896 int type)
897{
898 int i, j, num_entries;
899 void *temp;
900 int ret = -EINVAL;
901 int mask_type;
902
903 if (mem->page_count == 0)
904 goto out;
905
906 temp = agp_bridge->current_size;
907 num_entries = A_SIZE_FIX(temp)->num_entries;
908
909 if (pg_start < intel_private.gtt_entries) {
910 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
911 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
912 pg_start, intel_private.gtt_entries);
913
914 dev_info(&intel_private.pcidev->dev,
915 "trying to insert into local/stolen memory\n");
916 goto out_err;
917 }
918
919 if ((pg_start + mem->page_count) > num_entries)
920 goto out_err;
921
922 /* The i830 can't check the GTT for entries since it's read only;
923 * depend on the caller to make the correct offset decisions.
924 */
925
926 if (type != mem->type)
927 goto out_err;
928
929 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
930
931 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
932 mask_type != INTEL_AGP_CACHED_MEMORY)
933 goto out_err;
934
935 if (!mem->is_flushed)
936 global_cache_flush();
937
938 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
939 writel(agp_bridge->driver->mask_memory(agp_bridge,
940 page_to_phys(mem->pages[i]), mask_type),
941 intel_private.registers+I810_PTE_BASE+(j*4));
942 }
943 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
944 agp_bridge->driver->tlb_flush(mem);
945
946out:
947 ret = 0;
948out_err:
949 mem->is_flushed = true;
950 return ret;
951}
952
953static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
954 int type)
955{
956 int i;
957
958 if (mem->page_count == 0)
959 return 0;
960
961 if (pg_start < intel_private.gtt_entries) {
962 dev_info(&intel_private.pcidev->dev,
963 "trying to disable local/stolen memory\n");
964 return -EINVAL;
965 }
966
967 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
968 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
969 }
970 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
971
972 agp_bridge->driver->tlb_flush(mem);
973 return 0;
974}
975
976static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
977{
978 if (type == AGP_PHYS_MEMORY)
979 return alloc_agpphysmem_i8xx(pg_count, type);
980 /* always return NULL for other allocation types for now */
981 return NULL;
982}
983
984static int intel_alloc_chipset_flush_resource(void)
985{
986 int ret;
987 ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
988 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
989 pcibios_align_resource, agp_bridge->dev);
990
991 return ret;
992}
993
994static void intel_i915_setup_chipset_flush(void)
995{
996 int ret;
997 u32 temp;
998
999 pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
1000 if (!(temp & 0x1)) {
1001 intel_alloc_chipset_flush_resource();
1002 intel_private.resource_valid = 1;
1003 pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1004 } else {
1005 temp &= ~1;
1006
1007 intel_private.resource_valid = 1;
1008 intel_private.ifp_resource.start = temp;
1009 intel_private.ifp_resource.end = temp + PAGE_SIZE;
1010 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1011 /* some BIOSes reserve this area in a pnp resource, some don't */
1012 if (ret)
1013 intel_private.resource_valid = 0;
1014 }
1015}
1016
1017static void intel_i965_g33_setup_chipset_flush(void)
1018{
1019 u32 temp_hi, temp_lo;
1020 int ret;
1021
1022 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
1023 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
1024
1025 if (!(temp_lo & 0x1)) {
1026
1027 intel_alloc_chipset_flush_resource();
1028
1029 intel_private.resource_valid = 1;
1030 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
1031 upper_32_bits(intel_private.ifp_resource.start));
1032 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1033 } else {
1034 u64 l64;
1035
1036 temp_lo &= ~0x1;
1037 l64 = ((u64)temp_hi << 32) | temp_lo;
1038
1039 intel_private.resource_valid = 1;
1040 intel_private.ifp_resource.start = l64;
1041 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1042 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1043 /* some BIOSes reserve this area in a pnp resource, some don't */
1044 if (ret)
1045 intel_private.resource_valid = 0;
1046 }
1047}
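Bit 0 of the low dword does double duty as the "flush page already configured" flag, so it has to be masked off before the two halves are recombined into the 64-bit resource address. The bit manipulation on its own:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t temp_lo = 0xabcd1001;  /* bit 0 set: BIOS configured it */
            uint32_t temp_hi = 0x1;
            uint64_t l64;

            assert(temp_lo & 0x1);
            temp_lo &= ~0x1;                /* strip the flag bit */
            l64 = ((uint64_t)temp_hi << 32) | temp_lo;
            assert(l64 == 0x1abcd1000ULL);
            return 0;
    }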
1048
1049static void intel_i9xx_setup_flush(void)
1050{
1051 /* return if already configured */
1052 if (intel_private.ifp_resource.start)
1053 return;
1054
1055 if (IS_SNB)
1056 return;
1057
1058 /* setup a resource for this object */
1059 intel_private.ifp_resource.name = "Intel Flush Page";
1060 intel_private.ifp_resource.flags = IORESOURCE_MEM;
1061
1062 /* Setup chipset flush for 915 */
1063 if (IS_I965 || IS_G33 || IS_G4X) {
1064 intel_i965_g33_setup_chipset_flush();
1065 } else {
1066 intel_i915_setup_chipset_flush();
1067 }
1068
1069 if (intel_private.ifp_resource.start) {
1070 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1071 if (!intel_private.i9xx_flush_page)
1072 dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
1073 }
1074}
1075
1076static int intel_i915_configure(void)
1077{
1078 struct aper_size_info_fixed *current_size;
1079 u32 temp;
1080 u16 gmch_ctrl;
1081 int i;
1082
1083 current_size = A_SIZE_FIX(agp_bridge->current_size);
1084
1085 pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
1086
1087 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1088
1089 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1090 gmch_ctrl |= I830_GMCH_ENABLED;
1091 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
1092
1093 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
1094 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
1095
1096 if (agp_bridge->driver->needs_scratch_page) {
1097 for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
1098 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1099 }
1100 readl(intel_private.gtt+i-1); /* PCI Posting. */
1101 }
1102
1103 global_cache_flush();
1104
1105 intel_i9xx_setup_flush();
1106
1107 return 0;
1108}
1109
1110static void intel_i915_cleanup(void)
1111{
1112 if (intel_private.i9xx_flush_page)
1113 iounmap(intel_private.i9xx_flush_page);
1114 if (intel_private.resource_valid)
1115 release_resource(&intel_private.ifp_resource);
1116 intel_private.ifp_resource.start = 0;
1117 intel_private.resource_valid = 0;
1118 iounmap(intel_private.gtt);
1119 iounmap(intel_private.registers);
1120}
1121
1122static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
1123{
1124 if (intel_private.i9xx_flush_page)
1125 writel(1, intel_private.i9xx_flush_page);
1126}
1127
1128static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
1129 int type)
1130{
1131 int num_entries;
1132 void *temp;
1133 int ret = -EINVAL;
1134 int mask_type;
1135
1136 if (mem->page_count == 0)
1137 goto out;
1138
1139 temp = agp_bridge->current_size;
1140 num_entries = A_SIZE_FIX(temp)->num_entries;
1141
1142 if (pg_start < intel_private.gtt_entries) {
1143 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1144 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1145 pg_start, intel_private.gtt_entries);
1146
1147 dev_info(&intel_private.pcidev->dev,
1148 "trying to insert into local/stolen memory\n");
1149 goto out_err;
1150 }
1151
1152 if ((pg_start + mem->page_count) > num_entries)
1153 goto out_err;
1154
1155 /* The i915 can't check the GTT for entries since it's read only;
1156 * depend on the caller to make the correct offset decisions.
1157 */
1158
1159 if (type != mem->type)
1160 goto out_err;
1161
1162 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1163
1164 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1165 mask_type != INTEL_AGP_CACHED_MEMORY)
1166 goto out_err;
1167
1168 if (!mem->is_flushed)
1169 global_cache_flush();
1170
1171 intel_agp_insert_sg_entries(mem, pg_start, mask_type);
1172 agp_bridge->driver->tlb_flush(mem);
1173
1174 out:
1175 ret = 0;
1176 out_err:
1177 mem->is_flushed = true;
1178 return ret;
1179}
1180
1181static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
1182 int type)
1183{
1184 int i;
1185
1186 if (mem->page_count == 0)
1187 return 0;
1188
1189 if (pg_start < intel_private.gtt_entries) {
1190 dev_info(&intel_private.pcidev->dev,
1191 "trying to disable local/stolen memory\n");
1192 return -EINVAL;
1193 }
1194
1195 for (i = pg_start; i < (mem->page_count + pg_start); i++)
1196 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1197
1198 readl(intel_private.gtt+i-1);
1199
1200 agp_bridge->driver->tlb_flush(mem);
1201 return 0;
1202}
1203
1204/* Return the aperture size by just checking the resource length. The effect
1205 * described in the spec of the MSAC registers is just changing of the
1206 * resource size.
1207 */
1208static int intel_i9xx_fetch_size(void)
1209{
1210 int num_sizes = ARRAY_SIZE(intel_i830_sizes);
1211 int aper_size; /* size in megabytes */
1212 int i;
1213
1214 aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
1215
1216 for (i = 0; i < num_sizes; i++) {
1217 if (aper_size == intel_i830_sizes[i].size) {
1218 agp_bridge->current_size = intel_i830_sizes + i;
1219 agp_bridge->previous_size = agp_bridge->current_size;
1220 return aper_size;
1221 }
1222 }
1223
1224 return 0;
1225}
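Since MSAC only changes the reported BAR size, reading the resource length of BAR 2 and dividing by MB(1) yields the aperture size directly, which is then matched against the fixed table above. The lookup, self-contained:

    #include <assert.h>

    int main(void)
    {
            long long bar_len = 256LL << 20;       /* BAR 2 length from PCI */
            int aper_size = (int)(bar_len >> 20);  /* bytes -> megabytes */
            int sizes[] = {128, 64, 256, 512};     /* intel_i830_sizes[].size */
            int i, found = -1;

            for (i = 0; i < 4; i++)
                    if (aper_size == sizes[i])
                            found = i;

            assert(found == 2);                    /* the {256, 65536, 6} entry */
            return 0;
    }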
1226
1227/* The intel i915 automatically initializes the agp aperture during POST.
1228 * Use the memory already set aside for it in the GTT.
1229 */
1230static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
1231{
1232 int page_order;
1233 struct aper_size_info_fixed *size;
1234 int num_entries;
1235 u32 temp, temp2;
1236 int gtt_map_size = 256 * 1024;
1237
1238 size = agp_bridge->current_size;
1239 page_order = size->page_order;
1240 num_entries = size->num_entries;
1241 agp_bridge->gatt_table_real = NULL;
1242
1243 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1244 pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
1245
1246 if (IS_G33)
1247 gtt_map_size = 1024 * 1024; /* 1M on G33 */
1248 intel_private.gtt = ioremap(temp2, gtt_map_size);
1249 if (!intel_private.gtt)
1250 return -ENOMEM;
1251
1252 intel_private.gtt_total_size = gtt_map_size / 4;
1253
1254 temp &= 0xfff80000;
1255
1256 intel_private.registers = ioremap(temp, 128 * 4096);
1257 if (!intel_private.registers) {
1258 iounmap(intel_private.gtt);
1259 return -ENOMEM;
1260 }
1261
1262 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1263 global_cache_flush(); /* FIXME: ? */
1264
1265 /* we have to call this as early as possible after the MMIO base address is known */
1266 intel_i830_init_gtt_entries();
1267
1268 agp_bridge->gatt_table = NULL;
1269
1270 agp_bridge->gatt_bus_addr = temp;
1271
1272 return 0;
1273}
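Note that gtt_total_size counts PTEs, not bytes: the mapped GTT region divided by the 4-byte entry size. On G33 the 1 MiB mapping therefore describes 256K entries, enough to cover a 1 GiB aperture:

    #include <assert.h>

    int main(void)
    {
            int gtt_map_size = 1024 * 1024;  /* 1 MiB GTT mapping on G33 */
            int entries = gtt_map_size / 4;  /* 4 bytes per PTE */

            assert(entries == 256 * 1024);
            assert((long long)entries * 4096 == 1LL << 30);  /* 1 GiB mapped */
            return 0;
    }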
1274
1275/*
1276 * The i965 supports 36-bit physical addresses, but to keep
1277 * the format of the GTT the same, the bits that don't fit
1278 * in a 32-bit word are shifted down to bits 4..7.
1279 *
1280 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1281 * is always zero on 32-bit architectures, so no need to make
1282 * this conditional.
1283 */
1284static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1285 dma_addr_t addr, int type)
1286{
1287 /* Shift high bits down */
1288 addr |= (addr >> 28) & 0xf0;
1289
1290 /* Type checking must be done elsewhere */
1291 return addr | bridge->driver->masks[type].mask;
1292}
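A concrete trace of that packing, as a userspace check (the driver returns an unsigned long and the truncation to 32 bits effectively happens at the writel(); the valid-bit mask below is illustrative):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t i965_pte(uint64_t addr, uint32_t mask)
    {
            addr |= (addr >> 28) & 0xf0;  /* bits 35:32 -> bits 7:4 */
            return (uint32_t)addr | mask; /* low 32 bits reach the GTT */
    }

    int main(void)
    {
            /* 36-bit address 0x3_4000_0000: high nibble 0x3 lands in bits 7:4 */
            assert(i965_pte(0x340000000ULL, 0x1) == 0x40000031);
            return 0;
    }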
1293
1294static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1295{
1296 u16 snb_gmch_ctl;
1297
1298 switch (agp_bridge->dev->device) {
1299 case PCI_DEVICE_ID_INTEL_GM45_HB:
1300 case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
1301 case PCI_DEVICE_ID_INTEL_Q45_HB:
1302 case PCI_DEVICE_ID_INTEL_G45_HB:
1303 case PCI_DEVICE_ID_INTEL_G41_HB:
1304 case PCI_DEVICE_ID_INTEL_B43_HB:
1305 case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
1306 case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
1307 case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
1308 case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
1309 *gtt_offset = *gtt_size = MB(2);
1310 break;
1311 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
1312 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
1313 *gtt_offset = MB(2);
1314
1315 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1316 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
1317 default:
1318 case SNB_GTT_SIZE_0M:
1319 printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
1320 *gtt_size = MB(0);
1321 break;
1322 case SNB_GTT_SIZE_1M:
1323 *gtt_size = MB(1);
1324 break;
1325 case SNB_GTT_SIZE_2M:
1326 *gtt_size = MB(2);
1327 break;
1328 }
1329 break;
1330 default:
1331 *gtt_offset = *gtt_size = KB(512);
1332 }
1333}
1334
1335/* The intel i965 automatically initializes the agp aperture during POST.
1336 * Use the memory already set aside for it in the GTT.
1337 */
1338static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
1339{
1340 int page_order;
1341 struct aper_size_info_fixed *size;
1342 int num_entries;
1343 u32 temp;
1344 int gtt_offset, gtt_size;
1345
1346 size = agp_bridge->current_size;
1347 page_order = size->page_order;
1348 num_entries = size->num_entries;
1349 agp_bridge->gatt_table_real = NULL;
1350
1351 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1352
1353 temp &= 0xfff00000;
1354
1355 intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
1356
1357 intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
1358
1359 if (!intel_private.gtt)
1360 return -ENOMEM;
1361
1362 intel_private.gtt_total_size = gtt_size / 4;
1363
1364 intel_private.registers = ioremap(temp, 128 * 4096);
1365 if (!intel_private.registers) {
1366 iounmap(intel_private.gtt);
1367 return -ENOMEM;
1368 }
1369
1370 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1371 global_cache_flush(); /* FIXME: ? */
1372
1373 /* we have to call this as early as possible after the MMIO base address is known */
1374 intel_i830_init_gtt_entries();
1375
1376 agp_bridge->gatt_table = NULL;
1377
1378 agp_bridge->gatt_bus_addr = temp;
1379
1380 return 0;
1381}
1382
1383
1384 20 static int intel_fetch_size(void)
1385 21 {
1386 22  int i;
@@ -1848,33 +484,6 @@ static const struct agp_bridge_driver intel_generic_driver = {
1848 484  .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1849 485 };
1850 486
1851static const struct agp_bridge_driver intel_810_driver = {
1852 .owner = THIS_MODULE,
1853 .aperture_sizes = intel_i810_sizes,
1854 .size_type = FIXED_APER_SIZE,
1855 .num_aperture_sizes = 2,
1856 .needs_scratch_page = true,
1857 .configure = intel_i810_configure,
1858 .fetch_size = intel_i810_fetch_size,
1859 .cleanup = intel_i810_cleanup,
1860 .tlb_flush = intel_i810_tlbflush,
1861 .mask_memory = intel_i810_mask_memory,
1862 .masks = intel_i810_masks,
1863 .agp_enable = intel_i810_agp_enable,
1864 .cache_flush = global_cache_flush,
1865 .create_gatt_table = agp_generic_create_gatt_table,
1866 .free_gatt_table = agp_generic_free_gatt_table,
1867 .insert_memory = intel_i810_insert_entries,
1868 .remove_memory = intel_i810_remove_entries,
1869 .alloc_by_type = intel_i810_alloc_by_type,
1870 .free_by_type = intel_i810_free_by_type,
1871 .agp_alloc_page = agp_generic_alloc_page,
1872 .agp_alloc_pages = agp_generic_alloc_pages,
1873 .agp_destroy_page = agp_generic_destroy_page,
1874 .agp_destroy_pages = agp_generic_destroy_pages,
1875 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1876};
1877
1878 487 static const struct agp_bridge_driver intel_815_driver = {
1879 488  .owner = THIS_MODULE,
1880 489  .aperture_sizes = intel_815_sizes,
@@ -1901,34 +510,6 @@ static const struct agp_bridge_driver intel_815_driver = {
1901 510  .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1902 511 };
1903 512
1904static const struct agp_bridge_driver intel_830_driver = {
1905 .owner = THIS_MODULE,
1906 .aperture_sizes = intel_i830_sizes,
1907 .size_type = FIXED_APER_SIZE,
1908 .num_aperture_sizes = 4,
1909 .needs_scratch_page = true,
1910 .configure = intel_i830_configure,
1911 .fetch_size = intel_i830_fetch_size,
1912 .cleanup = intel_i830_cleanup,
1913 .tlb_flush = intel_i810_tlbflush,
1914 .mask_memory = intel_i810_mask_memory,
1915 .masks = intel_i810_masks,
1916 .agp_enable = intel_i810_agp_enable,
1917 .cache_flush = global_cache_flush,
1918 .create_gatt_table = intel_i830_create_gatt_table,
1919 .free_gatt_table = intel_i830_free_gatt_table,
1920 .insert_memory = intel_i830_insert_entries,
1921 .remove_memory = intel_i830_remove_entries,
1922 .alloc_by_type = intel_i830_alloc_by_type,
1923 .free_by_type = intel_i810_free_by_type,
1924 .agp_alloc_page = agp_generic_alloc_page,
1925 .agp_alloc_pages = agp_generic_alloc_pages,
1926 .agp_destroy_page = agp_generic_destroy_page,
1927 .agp_destroy_pages = agp_generic_destroy_pages,
1928 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1929 .chipset_flush = intel_i830_chipset_flush,
1930};
1931
1932 513 static const struct agp_bridge_driver intel_820_driver = {
1933 514  .owner = THIS_MODULE,
1934 515  .aperture_sizes = intel_8xx_sizes,
@@ -2085,74 +666,6 @@ static const struct agp_bridge_driver intel_860_driver = {
2085 666  .agp_type_to_mask_type = agp_generic_type_to_mask_type,
2086 667 };
2087 668
2088static const struct agp_bridge_driver intel_915_driver = {
2089 .owner = THIS_MODULE,
2090 .aperture_sizes = intel_i830_sizes,
2091 .size_type = FIXED_APER_SIZE,
2092 .num_aperture_sizes = 4,
2093 .needs_scratch_page = true,
2094 .configure = intel_i915_configure,
2095 .fetch_size = intel_i9xx_fetch_size,
2096 .cleanup = intel_i915_cleanup,
2097 .tlb_flush = intel_i810_tlbflush,
2098 .mask_memory = intel_i810_mask_memory,
2099 .masks = intel_i810_masks,
2100 .agp_enable = intel_i810_agp_enable,
2101 .cache_flush = global_cache_flush,
2102 .create_gatt_table = intel_i915_create_gatt_table,
2103 .free_gatt_table = intel_i830_free_gatt_table,
2104 .insert_memory = intel_i915_insert_entries,
2105 .remove_memory = intel_i915_remove_entries,
2106 .alloc_by_type = intel_i830_alloc_by_type,
2107 .free_by_type = intel_i810_free_by_type,
2108 .agp_alloc_page = agp_generic_alloc_page,
2109 .agp_alloc_pages = agp_generic_alloc_pages,
2110 .agp_destroy_page = agp_generic_destroy_page,
2111 .agp_destroy_pages = agp_generic_destroy_pages,
2112 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
2113 .chipset_flush = intel_i915_chipset_flush,
2114#ifdef USE_PCI_DMA_API
2115 .agp_map_page = intel_agp_map_page,
2116 .agp_unmap_page = intel_agp_unmap_page,
2117 .agp_map_memory = intel_agp_map_memory,
2118 .agp_unmap_memory = intel_agp_unmap_memory,
2119#endif
2120};
2121
2122static const struct agp_bridge_driver intel_i965_driver = {
2123 .owner = THIS_MODULE,
2124 .aperture_sizes = intel_i830_sizes,
2125 .size_type = FIXED_APER_SIZE,
2126 .num_aperture_sizes = 4,
2127 .needs_scratch_page = true,
2128 .configure = intel_i915_configure,
2129 .fetch_size = intel_i9xx_fetch_size,
2130 .cleanup = intel_i915_cleanup,
2131 .tlb_flush = intel_i810_tlbflush,
2132 .mask_memory = intel_i965_mask_memory,
2133 .masks = intel_i810_masks,
2134 .agp_enable = intel_i810_agp_enable,
2135 .cache_flush = global_cache_flush,
2136 .create_gatt_table = intel_i965_create_gatt_table,
2137 .free_gatt_table = intel_i830_free_gatt_table,
2138 .insert_memory = intel_i915_insert_entries,
2139 .remove_memory = intel_i915_remove_entries,
2140 .alloc_by_type = intel_i830_alloc_by_type,
2141 .free_by_type = intel_i810_free_by_type,
2142 .agp_alloc_page = agp_generic_alloc_page,
2143 .agp_alloc_pages = agp_generic_alloc_pages,
2144 .agp_destroy_page = agp_generic_destroy_page,
2145 .agp_destroy_pages = agp_generic_destroy_pages,
2146 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
2147 .chipset_flush = intel_i915_chipset_flush,
2148#ifdef USE_PCI_DMA_API
2149 .agp_map_page = intel_agp_map_page,
2150 .agp_unmap_page = intel_agp_unmap_page,
2151 .agp_map_memory = intel_agp_map_memory,
2152 .agp_unmap_memory = intel_agp_unmap_memory,
2153#endif
2154};
2155
2156 669 static const struct agp_bridge_driver intel_7505_driver = {
2157 670  .owner = THIS_MODULE,
2158 671  .aperture_sizes = intel_8xx_sizes,
@@ -2179,40 +692,6 @@ static const struct agp_bridge_driver intel_7505_driver = {
2179 692  .agp_type_to_mask_type = agp_generic_type_to_mask_type,
2180 693 };
2181 694
2182static const struct agp_bridge_driver intel_g33_driver = {
2183 .owner = THIS_MODULE,
2184 .aperture_sizes = intel_i830_sizes,
2185 .size_type = FIXED_APER_SIZE,
2186 .num_aperture_sizes = 4,
2187 .needs_scratch_page = true,
2188 .configure = intel_i915_configure,
2189 .fetch_size = intel_i9xx_fetch_size,
2190 .cleanup = intel_i915_cleanup,
2191 .tlb_flush = intel_i810_tlbflush,
2192 .mask_memory = intel_i965_mask_memory,
2193 .masks = intel_i810_masks,
2194 .agp_enable = intel_i810_agp_enable,
2195 .cache_flush = global_cache_flush,
2196 .create_gatt_table = intel_i915_create_gatt_table,
2197 .free_gatt_table = intel_i830_free_gatt_table,
2198 .insert_memory = intel_i915_insert_entries,
2199 .remove_memory = intel_i915_remove_entries,
2200 .alloc_by_type = intel_i830_alloc_by_type,
2201 .free_by_type = intel_i810_free_by_type,
2202 .agp_alloc_page = agp_generic_alloc_page,
2203 .agp_alloc_pages = agp_generic_alloc_pages,
2204 .agp_destroy_page = agp_generic_destroy_page,
2205 .agp_destroy_pages = agp_generic_destroy_pages,
2206 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
2207 .chipset_flush = intel_i915_chipset_flush,
2208#ifdef USE_PCI_DMA_API
2209 .agp_map_page = intel_agp_map_page,
2210 .agp_unmap_page = intel_agp_unmap_page,
2211 .agp_map_memory = intel_agp_map_memory,
2212 .agp_unmap_memory = intel_agp_unmap_memory,
2213#endif
2214};
2215
2216 695 static int find_gmch(u16 device)
2217 696 {
2218 697  struct pci_dev *gmch_device;
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
new file mode 100644
index 000000000000..131c5d5e427e
--- /dev/null
+++ b/drivers/char/agp/intel-gtt.c
@@ -0,0 +1,1535 @@
1/*
2 * Intel GTT (Graphics Translation Table) routines
3 *
4 * Caveat: This driver implements the linux agp interface, but this is far from
5 * an agp driver! GTT support ended up here for purely historical reasons: The
6 * old userspace intel graphics drivers needed an interface to map memory into
7 * the GTT. And the drm provides a default interface for graphics devices sitting
8 * on an agp port. So it made sense to fake the GTT support as an agp port to
9 * avoid having to create a new api.
10 *
11 * With gem this does not make much sense anymore, just needlessly complicates
12 * the code. But as long as the old graphics stack is still supported, it's stuck
13 * here.
14 *
15 * /fairy-tale-mode off
16 */
17
18/*
19 * If we have Intel graphics, we're not going to have anything other than
20 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
21 * on the Intel IOMMU support (CONFIG_DMAR).
22 * Only newer chipsets need to bother with this, of course.
23 */
24#ifdef CONFIG_DMAR
25#define USE_PCI_DMA_API 1
26#endif
27
28static const struct aper_size_info_fixed intel_i810_sizes[] =
29{
30 {64, 16384, 4},
31 /* The 32M mode still requires a 64k gatt */
32 {32, 8192, 4}
33};
34
35#define AGP_DCACHE_MEMORY 1
36#define AGP_PHYS_MEMORY 2
37#define INTEL_AGP_CACHED_MEMORY 3
38
39static struct gatt_mask intel_i810_masks[] =
40{
41 {.mask = I810_PTE_VALID, .type = 0},
42 {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
43 {.mask = I810_PTE_VALID, .type = 0},
44 {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
45 .type = INTEL_AGP_CACHED_MEMORY}
46};
47
48static struct _intel_private {
49 struct pci_dev *pcidev; /* device one */
50 u8 __iomem *registers;
51 u32 __iomem *gtt; /* I915G */
52 int num_dcache_entries;
53 /* gtt_entries is the number of gtt entries that are already mapped
54 * to stolen memory. Stolen memory is larger than the memory mapped
55 * through gtt_entries, as it includes some reserved space for the BIOS
56 * popup and for the GTT.
57 */
58 int gtt_entries; /* i830+ */
59 int gtt_total_size;
60 union {
61 void __iomem *i9xx_flush_page;
62 void *i8xx_flush_page;
63 };
64 struct page *i8xx_page;
65 struct resource ifp_resource;
66 int resource_valid;
67} intel_private;
68
69#ifdef USE_PCI_DMA_API
70static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
71{
72 *ret = pci_map_page(intel_private.pcidev, page, 0,
73 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
74 if (pci_dma_mapping_error(intel_private.pcidev, *ret))
75 return -EINVAL;
76 return 0;
77}
78
79static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
80{
81 pci_unmap_page(intel_private.pcidev, dma,
82 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
83}
84
85static void intel_agp_free_sglist(struct agp_memory *mem)
86{
87 struct sg_table st;
88
89 st.sgl = mem->sg_list;
90 st.orig_nents = st.nents = mem->page_count;
91
92 sg_free_table(&st);
93
94 mem->sg_list = NULL;
95 mem->num_sg = 0;
96}
97
98static int intel_agp_map_memory(struct agp_memory *mem)
99{
100 struct sg_table st;
101 struct scatterlist *sg;
102 int i;
103
104 DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
105
106 if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
107 return -ENOMEM;
108
109 mem->sg_list = sg = st.sgl;
110
111 for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
112 sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
113
114 mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
115 mem->page_count, PCI_DMA_BIDIRECTIONAL);
116 if (unlikely(!mem->num_sg)) {
117 intel_agp_free_sglist(mem);
118 return -ENOMEM;
119 }
120 return 0;
121}
122
123static void intel_agp_unmap_memory(struct agp_memory *mem)
124{
125 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
126
127 pci_unmap_sg(intel_private.pcidev, mem->sg_list,
128 mem->page_count, PCI_DMA_BIDIRECTIONAL);
129 intel_agp_free_sglist(mem);
130}
131
132static void intel_agp_insert_sg_entries(struct agp_memory *mem,
133 off_t pg_start, int mask_type)
134{
135 struct scatterlist *sg;
136 int i, j;
137
138 j = pg_start;
139
140 WARN_ON(!mem->num_sg);
141
142 if (mem->num_sg == mem->page_count) {
143 for_each_sg(mem->sg_list, sg, mem->page_count, i) {
144 writel(agp_bridge->driver->mask_memory(agp_bridge,
145 sg_dma_address(sg), mask_type),
146 intel_private.gtt+j);
147 j++;
148 }
149 } else {
150 /* sg may merge pages, but we have to separate
151 * per-page addr for GTT */
152 unsigned int len, m;
153
154 for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
155 len = sg_dma_len(sg) / PAGE_SIZE;
156 for (m = 0; m < len; m++) {
157 writel(agp_bridge->driver->mask_memory(agp_bridge,
158 sg_dma_address(sg) + m * PAGE_SIZE,
159 mask_type),
160 intel_private.gtt+j);
161 j++;
162 }
163 }
164 }
165 readl(intel_private.gtt+j-1);
166}
167
168#else
169
170static void intel_agp_insert_sg_entries(struct agp_memory *mem,
171 off_t pg_start, int mask_type)
172{
173 int i, j;
174 u32 cache_bits = 0;
175
176 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
177 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
178 {
179 cache_bits = I830_PTE_SYSTEM_CACHED;
180 }
181
182 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
183 writel(agp_bridge->driver->mask_memory(agp_bridge,
184 page_to_phys(mem->pages[i]), mask_type),
185 intel_private.gtt+j);
186 }
187
188 readl(intel_private.gtt+j-1);
189}
190
191#endif
192
193static int intel_i810_fetch_size(void)
194{
195 u32 smram_miscc;
196 struct aper_size_info_fixed *values;
197
198 pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
199 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
200
201 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
202 dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
203 return 0;
204 }
205 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
206 agp_bridge->previous_size =
207 agp_bridge->current_size = (void *) (values + 1);
208 agp_bridge->aperture_size_idx = 1;
209 return values[1].size;
210 } else {
211 agp_bridge->previous_size =
212 agp_bridge->current_size = (void *) (values);
213 agp_bridge->aperture_size_idx = 0;
214 return values[0].size;
215 }
216
217 return 0;
218}
219
220static int intel_i810_configure(void)
221{
222 struct aper_size_info_fixed *current_size;
223 u32 temp;
224 int i;
225
226 current_size = A_SIZE_FIX(agp_bridge->current_size);
227
228 if (!intel_private.registers) {
229 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
230 temp &= 0xfff80000;
231
232 intel_private.registers = ioremap(temp, 128 * 4096);
233 if (!intel_private.registers) {
234 dev_err(&intel_private.pcidev->dev,
235 "can't remap memory\n");
236 return -ENOMEM;
237 }
238 }
239
240 if ((readl(intel_private.registers+I810_DRAM_CTL)
241 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
242 /* This will need to be dynamically assigned */
243 dev_info(&intel_private.pcidev->dev,
244 "detected 4MB dedicated video ram\n");
245 intel_private.num_dcache_entries = 1024;
246 }
247 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
248 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
249 writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
250 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
251
252 if (agp_bridge->driver->needs_scratch_page) {
253 for (i = 0; i < current_size->num_entries; i++) {
254 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
255 }
256 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
257 }
258 global_cache_flush();
259 return 0;
260}
261
262static void intel_i810_cleanup(void)
263{
264 writel(0, intel_private.registers+I810_PGETBL_CTL);
265 readl(intel_private.registers); /* PCI Posting. */
266 iounmap(intel_private.registers);
267}
268
269static void intel_i810_tlbflush(struct agp_memory *mem)
270{
271 return;
272}
273
274static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
275{
276 return;
277}
278
279/* Exists to support ARGB cursors */
280static struct page *i8xx_alloc_pages(void)
281{
282 struct page *page;
283
284 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
285 if (page == NULL)
286 return NULL;
287
288 if (set_pages_uc(page, 4) < 0) {
289 set_pages_wb(page, 4);
290 __free_pages(page, 2);
291 return NULL;
292 }
293 get_page(page);
294 atomic_inc(&agp_bridge->current_memory_agp);
295 return page;
296}
297
298static void i8xx_destroy_pages(struct page *page)
299{
300 if (page == NULL)
301 return;
302
303 set_pages_wb(page, 4);
304 put_page(page);
305 __free_pages(page, 2);
306 atomic_dec(&agp_bridge->current_memory_agp);
307}
308
309static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
310 int type)
311{
312 if (type < AGP_USER_TYPES)
313 return type;
314 else if (type == AGP_USER_CACHED_MEMORY)
315 return INTEL_AGP_CACHED_MEMORY;
316 else
317 return 0;
318}
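Tying this to the intel_i810_masks table: a user allocation of type AGP_USER_CACHED_MEMORY is remapped to INTEL_AGP_CACHED_MEMORY, so each of its table entries ends up of the form sketched here (following the mask definitions in this file):

	/* mask_memory() ORs in masks[INTEL_AGP_CACHED_MEMORY].mask */
	pte = page_to_phys(page) | I810_PTE_VALID | I830_PTE_SYSTEM_CACHED;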
319
320static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
321 int type)
322{
323 int i, j, num_entries;
324 void *temp;
325 int ret = -EINVAL;
326 int mask_type;
327
328 if (mem->page_count == 0)
329 goto out;
330
331 temp = agp_bridge->current_size;
332 num_entries = A_SIZE_FIX(temp)->num_entries;
333
334 if ((pg_start + mem->page_count) > num_entries)
335 goto out_err;
336
337
338 for (j = pg_start; j < (pg_start + mem->page_count); j++) {
339 if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
340 ret = -EBUSY;
341 goto out_err;
342 }
343 }
344
345 if (type != mem->type)
346 goto out_err;
347
348 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
349
350 switch (mask_type) {
351 case AGP_DCACHE_MEMORY:
352 if (!mem->is_flushed)
353 global_cache_flush();
354 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
355 writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
356 intel_private.registers+I810_PTE_BASE+(i*4));
357 }
358 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
359 break;
360 case AGP_PHYS_MEMORY:
361 case AGP_NORMAL_MEMORY:
362 if (!mem->is_flushed)
363 global_cache_flush();
364 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
365 writel(agp_bridge->driver->mask_memory(agp_bridge,
366 page_to_phys(mem->pages[i]), mask_type),
367 intel_private.registers+I810_PTE_BASE+(j*4));
368 }
369 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
370 break;
371 default:
372 goto out_err;
373 }
374
375 agp_bridge->driver->tlb_flush(mem);
376out:
377 ret = 0;
378out_err:
379 mem->is_flushed = true;
380 return ret;
381}
382
383static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
384 int type)
385{
386 int i;
387
388 if (mem->page_count == 0)
389 return 0;
390
391 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
392 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
393 }
394 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
395
396 agp_bridge->driver->tlb_flush(mem);
397 return 0;
398}
399
400/*
401 * The i810/i830 requires a physical address to program its hardware
402 * cursor ("mouse pointer") into the chip.
403 * However, the X server still writes to it through the agp aperture.
404 */
405static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
406{
407 struct agp_memory *new;
408 struct page *page;
409
410 switch (pg_count) {
411 case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
412 break;
413 case 4:
414 /* kludge to get 4 physical pages for ARGB cursor */
415 page = i8xx_alloc_pages();
416 break;
417 default:
418 return NULL;
419 }
420
421 if (page == NULL)
422 return NULL;
423
424 new = agp_create_memory(pg_count);
425 if (new == NULL)
426 return NULL;
427
428 new->pages[0] = page;
429 if (pg_count == 4) {
430 /* kludge to get 4 physical pages for ARGB cursor */
431 new->pages[1] = new->pages[0] + 1;
432 new->pages[2] = new->pages[1] + 1;
433 new->pages[3] = new->pages[2] + 1;
434 }
435 new->page_count = pg_count;
436 new->num_scratch_pages = pg_count;
437 new->type = AGP_PHYS_MEMORY;
438 new->physical = page_to_phys(new->pages[0]);
439 return new;
440}
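A hypothetical caller sketch (the register names are illustrative, not from this file): a driver allocates the 4-page ARGB cursor through the generic AGP API and programs the physical address recorded above into its cursor registers.

	struct agp_memory *cursor =
		agp_allocate_memory(agp_bridge, 4, AGP_PHYS_MEMORY);
	if (cursor)
		writel(cursor->physical | CURSOR_ENABLE,	/* assumed regs */
		       mmio + CURSOR_BASE);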
441
442static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
443{
444 struct agp_memory *new;
445
446 if (type == AGP_DCACHE_MEMORY) {
447 if (pg_count != intel_private.num_dcache_entries)
448 return NULL;
449
450 new = agp_create_memory(1);
451 if (new == NULL)
452 return NULL;
453
454 new->type = AGP_DCACHE_MEMORY;
455 new->page_count = pg_count;
456 new->num_scratch_pages = 0;
457 agp_free_page_array(new);
458 return new;
459 }
460 if (type == AGP_PHYS_MEMORY)
461 return alloc_agpphysmem_i8xx(pg_count, type);
462 return NULL;
463}
464
465static void intel_i810_free_by_type(struct agp_memory *curr)
466{
467 agp_free_key(curr->key);
468 if (curr->type == AGP_PHYS_MEMORY) {
469 if (curr->page_count == 4)
470 i8xx_destroy_pages(curr->pages[0]);
471 else {
472 agp_bridge->driver->agp_destroy_page(curr->pages[0],
473 AGP_PAGE_DESTROY_UNMAP);
474 agp_bridge->driver->agp_destroy_page(curr->pages[0],
475 AGP_PAGE_DESTROY_FREE);
476 }
477 agp_free_page_array(curr);
478 }
479 kfree(curr);
480}
481
482static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
483 dma_addr_t addr, int type)
484{
485 /* Type checking must be done elsewhere */
486 return addr | bridge->driver->masks[type].mask;
487}
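A quick worked example of the OR-mask scheme, with I810_PTE_VALID being bit 0: a page at physical address 0x01234000 inserted with mask type 0 produces the table entry

	0x01234000 | I810_PTE_VALID == 0x01234001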
488
489static struct aper_size_info_fixed intel_i830_sizes[] =
490{
491 {128, 32768, 5},
492 /* The 64M mode still requires a 128k gatt */
493 {64, 16384, 5},
494 {256, 65536, 6},
495 {512, 131072, 7},
496};
497
498static void intel_i830_init_gtt_entries(void)
499{
500 u16 gmch_ctrl;
501 int gtt_entries = 0;
502 u8 rdct;
503 int local = 0;
504 static const int ddt[4] = { 0, 16, 32, 64 };
505 int size; /* reserved space (in kb) at the top of stolen memory */
506
507 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
508
509 if (IS_I965) {
510 u32 pgetbl_ctl;
511 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
512
513 /* The 965 has a field telling us the size of the GTT,
514 * which may be larger than what is necessary to map the
515 * aperture.
516 */
517 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
518 case I965_PGETBL_SIZE_128KB:
519 size = 128;
520 break;
521 case I965_PGETBL_SIZE_256KB:
522 size = 256;
523 break;
524 case I965_PGETBL_SIZE_512KB:
525 size = 512;
526 break;
527 case I965_PGETBL_SIZE_1MB:
528 size = 1024;
529 break;
530 case I965_PGETBL_SIZE_2MB:
531 size = 2048;
532 break;
533 case I965_PGETBL_SIZE_1_5MB:
534 size = 1024 + 512;
535 break;
536 default:
537 dev_info(&intel_private.pcidev->dev,
538 "unknown page table size, assuming 512KB\n");
539 size = 512;
540 }
541 size += 4; /* add in BIOS popup space */
542 } else if (IS_G33 && !IS_PINEVIEW) {
543 /* G33's GTT size is defined in gmch_ctrl */
544 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
545 case G33_PGETBL_SIZE_1M:
546 size = 1024;
547 break;
548 case G33_PGETBL_SIZE_2M:
549 size = 2048;
550 break;
551 default:
552 dev_info(&agp_bridge->dev->dev,
553 "unknown page table size 0x%x, assuming 512KB\n",
554 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
555 size = 512;
556 }
557 size += 4;
558 } else if (IS_G4X || IS_PINEVIEW) {
559 /* On 4 series hardware, GTT stolen memory is separate from graphics
560 * stolen memory, so ignore it when counting stolen gtt entries.
561 * However, 4KB of the stolen memory doesn't get mapped to the GTT.
562 */
563 size = 4;
564 } else {
565 /* On previous hardware, the GTT size was just what was
566 * required to map the aperture.
567 */
568 size = agp_bridge->driver->fetch_size() + 4;
569 }
570
571 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
572 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
573 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
574 case I830_GMCH_GMS_STOLEN_512:
575 gtt_entries = KB(512) - KB(size);
576 break;
577 case I830_GMCH_GMS_STOLEN_1024:
578 gtt_entries = MB(1) - KB(size);
579 break;
580 case I830_GMCH_GMS_STOLEN_8192:
581 gtt_entries = MB(8) - KB(size);
582 break;
583 case I830_GMCH_GMS_LOCAL:
584 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
585 gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
586 MB(ddt[I830_RDRAM_DDT(rdct)]);
587 local = 1;
588 break;
589 default:
590 gtt_entries = 0;
591 break;
592 }
593 } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
594 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
595 /*
596 * SandyBridge has a new memory control register at offset 0x50 (word)
597 */
598 u16 snb_gmch_ctl;
599 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
600 switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
601 case SNB_GMCH_GMS_STOLEN_32M:
602 gtt_entries = MB(32) - KB(size);
603 break;
604 case SNB_GMCH_GMS_STOLEN_64M:
605 gtt_entries = MB(64) - KB(size);
606 break;
607 case SNB_GMCH_GMS_STOLEN_96M:
608 gtt_entries = MB(96) - KB(size);
609 break;
610 case SNB_GMCH_GMS_STOLEN_128M:
611 gtt_entries = MB(128) - KB(size);
612 break;
613 case SNB_GMCH_GMS_STOLEN_160M:
614 gtt_entries = MB(160) - KB(size);
615 break;
616 case SNB_GMCH_GMS_STOLEN_192M:
617 gtt_entries = MB(192) - KB(size);
618 break;
619 case SNB_GMCH_GMS_STOLEN_224M:
620 gtt_entries = MB(224) - KB(size);
621 break;
622 case SNB_GMCH_GMS_STOLEN_256M:
623 gtt_entries = MB(256) - KB(size);
624 break;
625 case SNB_GMCH_GMS_STOLEN_288M:
626 gtt_entries = MB(288) - KB(size);
627 break;
628 case SNB_GMCH_GMS_STOLEN_320M:
629 gtt_entries = MB(320) - KB(size);
630 break;
631 case SNB_GMCH_GMS_STOLEN_352M:
632 gtt_entries = MB(352) - KB(size);
633 break;
634 case SNB_GMCH_GMS_STOLEN_384M:
635 gtt_entries = MB(384) - KB(size);
636 break;
637 case SNB_GMCH_GMS_STOLEN_416M:
638 gtt_entries = MB(416) - KB(size);
639 break;
640 case SNB_GMCH_GMS_STOLEN_448M:
641 gtt_entries = MB(448) - KB(size);
642 break;
643 case SNB_GMCH_GMS_STOLEN_480M:
644 gtt_entries = MB(480) - KB(size);
645 break;
646 case SNB_GMCH_GMS_STOLEN_512M:
647 gtt_entries = MB(512) - KB(size);
648 break;
649 }
650 } else {
651 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
652 case I855_GMCH_GMS_STOLEN_1M:
653 gtt_entries = MB(1) - KB(size);
654 break;
655 case I855_GMCH_GMS_STOLEN_4M:
656 gtt_entries = MB(4) - KB(size);
657 break;
658 case I855_GMCH_GMS_STOLEN_8M:
659 gtt_entries = MB(8) - KB(size);
660 break;
661 case I855_GMCH_GMS_STOLEN_16M:
662 gtt_entries = MB(16) - KB(size);
663 break;
664 case I855_GMCH_GMS_STOLEN_32M:
665 gtt_entries = MB(32) - KB(size);
666 break;
667 case I915_GMCH_GMS_STOLEN_48M:
668 /* Check it's really I915G */
669 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
670 gtt_entries = MB(48) - KB(size);
671 else
672 gtt_entries = 0;
673 break;
674 case I915_GMCH_GMS_STOLEN_64M:
675 /* Check it's really I915G */
676 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
677 gtt_entries = MB(64) - KB(size);
678 else
679 gtt_entries = 0;
680 break;
681 case G33_GMCH_GMS_STOLEN_128M:
682 if (IS_G33 || IS_I965 || IS_G4X)
683 gtt_entries = MB(128) - KB(size);
684 else
685 gtt_entries = 0;
686 break;
687 case G33_GMCH_GMS_STOLEN_256M:
688 if (IS_G33 || IS_I965 || IS_G4X)
689 gtt_entries = MB(256) - KB(size);
690 else
691 gtt_entries = 0;
692 break;
693 case INTEL_GMCH_GMS_STOLEN_96M:
694 if (IS_I965 || IS_G4X)
695 gtt_entries = MB(96) - KB(size);
696 else
697 gtt_entries = 0;
698 break;
699 case INTEL_GMCH_GMS_STOLEN_160M:
700 if (IS_I965 || IS_G4X)
701 gtt_entries = MB(160) - KB(size);
702 else
703 gtt_entries = 0;
704 break;
705 case INTEL_GMCH_GMS_STOLEN_224M:
706 if (IS_I965 || IS_G4X)
707 gtt_entries = MB(224) - KB(size);
708 else
709 gtt_entries = 0;
710 break;
711 case INTEL_GMCH_GMS_STOLEN_352M:
712 if (IS_I965 || IS_G4X)
713 gtt_entries = MB(352) - KB(size);
714 else
715 gtt_entries = 0;
716 break;
717 default:
718 gtt_entries = 0;
719 break;
720 }
721 }
722 if (gtt_entries > 0) {
723 dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
724 gtt_entries / KB(1), local ? "local" : "stolen");
725 gtt_entries /= KB(4);
726 } else {
727 dev_info(&agp_bridge->dev->dev,
728 "no pre-allocated video memory detected\n");
729 gtt_entries = 0;
730 }
731
732 intel_private.gtt_entries = gtt_entries;
733}
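To make the arithmetic above concrete, a worked example with assumed values: an i965 whose PGETBL_CTL reports a 1MB page table and whose GMCH reports 8MB of stolen memory.

	size = 1024 + 4;		/* KB: GTT itself plus BIOS popup */
	gtt_entries = MB(8) - KB(size);	/* bytes of stolen memory left */
	gtt_entries /= KB(4);		/* 4KB per entry -> 1791 entries */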
734
735static void intel_i830_fini_flush(void)
736{
737 kunmap(intel_private.i8xx_page);
738 intel_private.i8xx_flush_page = NULL;
739 unmap_page_from_agp(intel_private.i8xx_page);
740
741 __free_page(intel_private.i8xx_page);
742 intel_private.i8xx_page = NULL;
743}
744
745static void intel_i830_setup_flush(void)
746{
747 /* return if we've already set the flush mechanism up */
748 if (intel_private.i8xx_page)
749 return;
750
751 intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
752 if (!intel_private.i8xx_page)
753 return;
754
755 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
756 if (!intel_private.i8xx_flush_page)
757 intel_i830_fini_flush();
758}
759
760/* The chipset_flush interface needs to push data that the CPU has
761 * already flushed out of its caches all the way to main memory, because
762 * the GPU doesn't snoop those buffers.
763 *
764 * The 8xx series doesn't have the same lovely interface for flushing the
765 * chipset write buffers that the later chips do. According to the 865
766 * specs, the buffer is 64 octwords (64 * 16 bytes = 1KB). To force
767 * earlier writes out, we fill 1KB and clflush it, on the assumption
768 * that it'll push whatever was in there out. It appears to work.
769 */
770static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
771{
772 unsigned int *pg = intel_private.i8xx_flush_page;
773
774 memset(pg, 0, 1024);
775
776 if (cpu_has_clflush)
777 clflush_cache_range(pg, 1024);
778 else if (wbinvd_on_all_cpus() != 0)
779 printk(KERN_ERR "Timed out waiting for cache flush.\n");
780}
781
782/* The intel i830 automatically initializes the agp aperture during POST.
783 * Use the memory already set aside for the GTT.
784 */
785static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
786{
787 int page_order;
788 struct aper_size_info_fixed *size;
789 int num_entries;
790 u32 temp;
791
792 size = agp_bridge->current_size;
793 page_order = size->page_order;
794 num_entries = size->num_entries;
795 agp_bridge->gatt_table_real = NULL;
796
797 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
798 temp &= 0xfff80000;
799
800 intel_private.registers = ioremap(temp, 128 * 4096);
801 if (!intel_private.registers)
802 return -ENOMEM;
803
804 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
805 global_cache_flush(); /* FIXME: ?? */
806
807 /* we have to call this as early as possible after the MMIO base address is known */
808 intel_i830_init_gtt_entries();
809
810 agp_bridge->gatt_table = NULL;
811
812 agp_bridge->gatt_bus_addr = temp;
813
814 return 0;
815}
816
817/* Nothing to free here: the GTT lives at the top of stolen
818 * memory and was set up by the BIOS.
819 */
820static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
821{
822 return 0;
823}
824
825static int intel_i830_fetch_size(void)
826{
827 u16 gmch_ctrl;
828 struct aper_size_info_fixed *values;
829
830 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
831
832 if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
833 agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
834 /* 855GM/852GM/865G has 128MB aperture size */
835 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
836 agp_bridge->aperture_size_idx = 0;
837 return values[0].size;
838 }
839
840 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
841
842 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
843 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
844 agp_bridge->aperture_size_idx = 0;
845 return values[0].size;
846 } else {
847 agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
848 agp_bridge->aperture_size_idx = 1;
849 return values[1].size;
850 }
851
852 return 0;
853}
854
855static int intel_i830_configure(void)
856{
857 struct aper_size_info_fixed *current_size;
858 u32 temp;
859 u16 gmch_ctrl;
860 int i;
861
862 current_size = A_SIZE_FIX(agp_bridge->current_size);
863
864 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
865 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
866
867 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
868 gmch_ctrl |= I830_GMCH_ENABLED;
869 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
870
871 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
872 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
873
874 if (agp_bridge->driver->needs_scratch_page) {
875 for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
876 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
877 }
878 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
879 }
880
881 global_cache_flush();
882
883 intel_i830_setup_flush();
884 return 0;
885}
886
887static void intel_i830_cleanup(void)
888{
889 iounmap(intel_private.registers);
890}
891
892static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
893 int type)
894{
895 int i, j, num_entries;
896 void *temp;
897 int ret = -EINVAL;
898 int mask_type;
899
900 if (mem->page_count == 0)
901 goto out;
902
903 temp = agp_bridge->current_size;
904 num_entries = A_SIZE_FIX(temp)->num_entries;
905
906 if (pg_start < intel_private.gtt_entries) {
907 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
908 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
909 pg_start, intel_private.gtt_entries);
910
911 dev_info(&intel_private.pcidev->dev,
912 "trying to insert into local/stolen memory\n");
913 goto out_err;
914 }
915
916 if ((pg_start + mem->page_count) > num_entries)
917 goto out_err;
918
919 /* The i830 can't check the GTT for entries since it's read only;
920 * depend on the caller to make the correct offset decisions.
921 */
922
923 if (type != mem->type)
924 goto out_err;
925
926 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
927
928 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
929 mask_type != INTEL_AGP_CACHED_MEMORY)
930 goto out_err;
931
932 if (!mem->is_flushed)
933 global_cache_flush();
934
935 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
936 writel(agp_bridge->driver->mask_memory(agp_bridge,
937 page_to_phys(mem->pages[i]), mask_type),
938 intel_private.registers+I810_PTE_BASE+(j*4));
939 }
940 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
941 agp_bridge->driver->tlb_flush(mem);
942
943out:
944 ret = 0;
945out_err:
946 mem->is_flushed = true;
947 return ret;
948}
949
950static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
951 int type)
952{
953 int i;
954
955 if (mem->page_count == 0)
956 return 0;
957
958 if (pg_start < intel_private.gtt_entries) {
959 dev_info(&intel_private.pcidev->dev,
960 "trying to disable local/stolen memory\n");
961 return -EINVAL;
962 }
963
964 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
965 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
966 }
967 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
968
969 agp_bridge->driver->tlb_flush(mem);
970 return 0;
971}
972
973static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
974{
975 if (type == AGP_PHYS_MEMORY)
976 return alloc_agpphysmem_i8xx(pg_count, type);
977 /* always return NULL for other allocation types for now */
978 return NULL;
979}
980
981static int intel_alloc_chipset_flush_resource(void)
982{
983 int ret;
984 ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
985 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
986 pcibios_align_resource, agp_bridge->dev);
987
988 return ret;
989}
990
991static void intel_i915_setup_chipset_flush(void)
992{
993 int ret;
994 u32 temp;
995
996 pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
997 if (!(temp & 0x1)) {
998 intel_alloc_chipset_flush_resource();
999 intel_private.resource_valid = 1;
1000 pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1001 } else {
1002 temp &= ~1;
1003
1004 intel_private.resource_valid = 1;
1005 intel_private.ifp_resource.start = temp;
1006 intel_private.ifp_resource.end = temp + PAGE_SIZE;
1007 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1008 /* some BIOSes reserve this area as a PnP resource, some don't */
1009 if (ret)
1010 intel_private.resource_valid = 0;
1011 }
1012}
1013
1014static void intel_i965_g33_setup_chipset_flush(void)
1015{
1016 u32 temp_hi, temp_lo;
1017 int ret;
1018
1019 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
1020 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
1021
1022 if (!(temp_lo & 0x1)) {
1023
1024 intel_alloc_chipset_flush_resource();
1025
1026 intel_private.resource_valid = 1;
1027 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
1028 upper_32_bits(intel_private.ifp_resource.start));
1029 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1030 } else {
1031 u64 l64;
1032
1033 temp_lo &= ~0x1;
1034 l64 = ((u64)temp_hi << 32) | temp_lo;
1035
1036 intel_private.resource_valid = 1;
1037 intel_private.ifp_resource.start = l64;
1038 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1039 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1040 /* some BIOSes reserve this area as a PnP resource, some don't */
1041 if (ret)
1042 intel_private.resource_valid = 0;
1043 }
1044}
1045
1046static void intel_i9xx_setup_flush(void)
1047{
1048 /* return if already configured */
1049 if (intel_private.ifp_resource.start)
1050 return;
1051
1052 if (IS_SNB)
1053 return;
1054
1055 /* setup a resource for this object */
1056 intel_private.ifp_resource.name = "Intel Flush Page";
1057 intel_private.ifp_resource.flags = IORESOURCE_MEM;
1058
1059 /* Set up the chipset flush mechanism (915 and later) */
1060 if (IS_I965 || IS_G33 || IS_G4X) {
1061 intel_i965_g33_setup_chipset_flush();
1062 } else {
1063 intel_i915_setup_chipset_flush();
1064 }
1065
1066 if (intel_private.ifp_resource.start) {
1067 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1068 if (!intel_private.i9xx_flush_page)
1069 dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing\n");
1070 }
1071}
1072
1073static int intel_i915_configure(void)
1074{
1075 struct aper_size_info_fixed *current_size;
1076 u32 temp;
1077 u16 gmch_ctrl;
1078 int i;
1079
1080 current_size = A_SIZE_FIX(agp_bridge->current_size);
1081
1082 pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
1083
1084 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1085
1086 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1087 gmch_ctrl |= I830_GMCH_ENABLED;
1088 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
1089
1090 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
1091 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
1092
1093 if (agp_bridge->driver->needs_scratch_page) {
1094 for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
1095 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1096 }
1097 readl(intel_private.gtt+i-1); /* PCI Posting. */
1098 }
1099
1100 global_cache_flush();
1101
1102 intel_i9xx_setup_flush();
1103
1104 return 0;
1105}
1106
1107static void intel_i915_cleanup(void)
1108{
1109 if (intel_private.i9xx_flush_page)
1110 iounmap(intel_private.i9xx_flush_page);
1111 if (intel_private.resource_valid)
1112 release_resource(&intel_private.ifp_resource);
1113 intel_private.ifp_resource.start = 0;
1114 intel_private.resource_valid = 0;
1115 iounmap(intel_private.gtt);
1116 iounmap(intel_private.registers);
1117}
1118
1119static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
1120{
1121 if (intel_private.i9xx_flush_page)
1122 writel(1, intel_private.i9xx_flush_page);
1123}
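A minimal usage sketch, assuming the generic agp_flush_chipset() helper of this kernel generation: a caller that has clflushed its buffers asks the bridge to drain the chipset write buffer, which lands in the i915 hook above.

	/* after clflush'ing the pages in question ... */
	agp_flush_chipset(agp_bridge);	/* dispatches to ->chipset_flush() */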
1124
1125static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
1126 int type)
1127{
1128 int num_entries;
1129 void *temp;
1130 int ret = -EINVAL;
1131 int mask_type;
1132
1133 if (mem->page_count == 0)
1134 goto out;
1135
1136 temp = agp_bridge->current_size;
1137 num_entries = A_SIZE_FIX(temp)->num_entries;
1138
1139 if (pg_start < intel_private.gtt_entries) {
1140 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1141 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1142 pg_start, intel_private.gtt_entries);
1143
1144 dev_info(&intel_private.pcidev->dev,
1145 "trying to insert into local/stolen memory\n");
1146 goto out_err;
1147 }
1148
1149 if ((pg_start + mem->page_count) > num_entries)
1150 goto out_err;
1151
1152 /* The i915 can't check the GTT for entries since it's read only;
1153 * depend on the caller to make the correct offset decisions.
1154 */
1155
1156 if (type != mem->type)
1157 goto out_err;
1158
1159 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1160
1161 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1162 mask_type != INTEL_AGP_CACHED_MEMORY)
1163 goto out_err;
1164
1165 if (!mem->is_flushed)
1166 global_cache_flush();
1167
1168 intel_agp_insert_sg_entries(mem, pg_start, mask_type);
1169 agp_bridge->driver->tlb_flush(mem);
1170
1171 out:
1172 ret = 0;
1173 out_err:
1174 mem->is_flushed = true;
1175 return ret;
1176}
1177
1178static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
1179 int type)
1180{
1181 int i;
1182
1183 if (mem->page_count == 0)
1184 return 0;
1185
1186 if (pg_start < intel_private.gtt_entries) {
1187 dev_info(&intel_private.pcidev->dev,
1188 "trying to disable local/stolen memory\n");
1189 return -EINVAL;
1190 }
1191
1192 for (i = pg_start; i < (mem->page_count + pg_start); i++)
1193 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1194
1195 readl(intel_private.gtt+i-1);
1196
1197 agp_bridge->driver->tlb_flush(mem);
1198 return 0;
1199}
1200
1201/* Return the aperture size by just checking the resource length. The only
1202 * effect of the MSAC registers, as described in the spec, is to change
1203 * the resource size.
1204 */
1205static int intel_i9xx_fetch_size(void)
1206{
1207 int num_sizes = ARRAY_SIZE(intel_i830_sizes);
1208 int aper_size; /* size in megabytes */
1209 int i;
1210
1211 aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
1212
1213 for (i = 0; i < num_sizes; i++) {
1214 if (aper_size == intel_i830_sizes[i].size) {
1215 agp_bridge->current_size = intel_i830_sizes + i;
1216 agp_bridge->previous_size = agp_bridge->current_size;
1217 return aper_size;
1218 }
1219 }
1220
1221 return 0;
1222}
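For example, a 256MB aperture BAR resolves against the intel_i830_sizes table defined earlier:

	aper_size = 268435456 / MB(1);	/* == 256 */
	/* matches intel_i830_sizes[2] == {256, 65536, 6} */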
1223
1224/* The intel i915 automatically initializes the agp aperture during POST.
1225 * Use the memory already set aside for the GTT.
1226 */
1227static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
1228{
1229 int page_order;
1230 struct aper_size_info_fixed *size;
1231 int num_entries;
1232 u32 temp, temp2;
1233 int gtt_map_size = 256 * 1024;
1234
1235 size = agp_bridge->current_size;
1236 page_order = size->page_order;
1237 num_entries = size->num_entries;
1238 agp_bridge->gatt_table_real = NULL;
1239
1240 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1241 pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
1242
1243 if (IS_G33)
1244 gtt_map_size = 1024 * 1024; /* 1M on G33 */
1245 intel_private.gtt = ioremap(temp2, gtt_map_size);
1246 if (!intel_private.gtt)
1247 return -ENOMEM;
1248
1249 intel_private.gtt_total_size = gtt_map_size / 4;
1250
1251 temp &= 0xfff80000;
1252
1253 intel_private.registers = ioremap(temp, 128 * 4096);
1254 if (!intel_private.registers) {
1255 iounmap(intel_private.gtt);
1256 return -ENOMEM;
1257 }
1258
1259 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1260 global_cache_flush(); /* FIXME: ? */
1261
1262 /* we have to call this as early as possible after the MMIO base address is known */
1263 intel_i830_init_gtt_entries();
1264
1265 agp_bridge->gatt_table = NULL;
1266
1267 agp_bridge->gatt_bus_addr = temp;
1268
1269 return 0;
1270}
1271
1272/*
1273 * The i965 supports 36-bit physical addresses, but to keep
1274 * the format of the GTT the same, the bits that don't fit
1275 * in a 32-bit word are shifted down to bits 4..7.
1276 *
1277 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1278 * is always zero on 32-bit architectures, so no need to make
1279 * this conditional.
1280 */
1281static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1282 dma_addr_t addr, int type)
1283{
1284 /* Shift high bits down */
1285 addr |= (addr >> 28) & 0xf0;
1286
1287 /* Type checking must be done elsewhere */
1288 return addr | bridge->driver->masks[type].mask;
1289}
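A worked example of the shift: the 36-bit address 0x223456000 has bit 33 set, so (0x223456000 >> 28) & 0xf0 == 0x20, and for mask type 0 the 32-bit entry written is

	0x23456000 | 0x20 | I810_PTE_VALID == 0x23456021

with the original address bits 35:32 recoverable from entry bits 7:4.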
1290
1291static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1292{
1293 u16 snb_gmch_ctl;
1294
1295 switch (agp_bridge->dev->device) {
1296 case PCI_DEVICE_ID_INTEL_GM45_HB:
1297 case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
1298 case PCI_DEVICE_ID_INTEL_Q45_HB:
1299 case PCI_DEVICE_ID_INTEL_G45_HB:
1300 case PCI_DEVICE_ID_INTEL_G41_HB:
1301 case PCI_DEVICE_ID_INTEL_B43_HB:
1302 case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
1303 case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
1304 case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
1305 case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
1306 *gtt_offset = *gtt_size = MB(2);
1307 break;
1308 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
1309 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
1310 *gtt_offset = MB(2);
1311
1312 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1313 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
1314 default:
1315 case SNB_GTT_SIZE_0M:
1316 printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
1317 *gtt_size = MB(0);
1318 break;
1319 case SNB_GTT_SIZE_1M:
1320 *gtt_size = MB(1);
1321 break;
1322 case SNB_GTT_SIZE_2M:
1323 *gtt_size = MB(2);
1324 break;
1325 }
1326 break;
1327 default:
1328 *gtt_offset = *gtt_size = KB(512);
1329 }
1330}
1331
1332/* The intel i965 automatically initializes the agp aperture during POST.
1333 * Use the memory already set aside for the GTT.
1334 */
1335static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
1336{
1337 int page_order;
1338 struct aper_size_info_fixed *size;
1339 int num_entries;
1340 u32 temp;
1341 int gtt_offset, gtt_size;
1342
1343 size = agp_bridge->current_size;
1344 page_order = size->page_order;
1345 num_entries = size->num_entries;
1346 agp_bridge->gatt_table_real = NULL;
1347
1348 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1349
1350 temp &= 0xfff00000;
1351
1352 intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
1353
1354 intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
1355
1356 if (!intel_private.gtt)
1357 return -ENOMEM;
1358
1359 intel_private.gtt_total_size = gtt_size / 4;
1360
1361 intel_private.registers = ioremap(temp, 128 * 4096);
1362 if (!intel_private.registers) {
1363 iounmap(intel_private.gtt);
1364 return -ENOMEM;
1365 }
1366
1367 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1368 global_cache_flush(); /* FIXME: ? */
1369
1370 /* we have to call this as early as possible after the MMIO base address is known */
1371 intel_i830_init_gtt_entries();
1372
1373 agp_bridge->gatt_table = NULL;
1374
1375 agp_bridge->gatt_bus_addr = temp;
1376
1377 return 0;
1378}
1379
1380static const struct agp_bridge_driver intel_810_driver = {
1381 .owner = THIS_MODULE,
1382 .aperture_sizes = intel_i810_sizes,
1383 .size_type = FIXED_APER_SIZE,
1384 .num_aperture_sizes = 2,
1385 .needs_scratch_page = true,
1386 .configure = intel_i810_configure,
1387 .fetch_size = intel_i810_fetch_size,
1388 .cleanup = intel_i810_cleanup,
1389 .tlb_flush = intel_i810_tlbflush,
1390 .mask_memory = intel_i810_mask_memory,
1391 .masks = intel_i810_masks,
1392 .agp_enable = intel_i810_agp_enable,
1393 .cache_flush = global_cache_flush,
1394 .create_gatt_table = agp_generic_create_gatt_table,
1395 .free_gatt_table = agp_generic_free_gatt_table,
1396 .insert_memory = intel_i810_insert_entries,
1397 .remove_memory = intel_i810_remove_entries,
1398 .alloc_by_type = intel_i810_alloc_by_type,
1399 .free_by_type = intel_i810_free_by_type,
1400 .agp_alloc_page = agp_generic_alloc_page,
1401 .agp_alloc_pages = agp_generic_alloc_pages,
1402 .agp_destroy_page = agp_generic_destroy_page,
1403 .agp_destroy_pages = agp_generic_destroy_pages,
1404 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1405};
1406
1407static const struct agp_bridge_driver intel_830_driver = {
1408 .owner = THIS_MODULE,
1409 .aperture_sizes = intel_i830_sizes,
1410 .size_type = FIXED_APER_SIZE,
1411 .num_aperture_sizes = 4,
1412 .needs_scratch_page = true,
1413 .configure = intel_i830_configure,
1414 .fetch_size = intel_i830_fetch_size,
1415 .cleanup = intel_i830_cleanup,
1416 .tlb_flush = intel_i810_tlbflush,
1417 .mask_memory = intel_i810_mask_memory,
1418 .masks = intel_i810_masks,
1419 .agp_enable = intel_i810_agp_enable,
1420 .cache_flush = global_cache_flush,
1421 .create_gatt_table = intel_i830_create_gatt_table,
1422 .free_gatt_table = intel_i830_free_gatt_table,
1423 .insert_memory = intel_i830_insert_entries,
1424 .remove_memory = intel_i830_remove_entries,
1425 .alloc_by_type = intel_i830_alloc_by_type,
1426 .free_by_type = intel_i810_free_by_type,
1427 .agp_alloc_page = agp_generic_alloc_page,
1428 .agp_alloc_pages = agp_generic_alloc_pages,
1429 .agp_destroy_page = agp_generic_destroy_page,
1430 .agp_destroy_pages = agp_generic_destroy_pages,
1431 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1432 .chipset_flush = intel_i830_chipset_flush,
1433};
1434
1435static const struct agp_bridge_driver intel_915_driver = {
1436 .owner = THIS_MODULE,
1437 .aperture_sizes = intel_i830_sizes,
1438 .size_type = FIXED_APER_SIZE,
1439 .num_aperture_sizes = 4,
1440 .needs_scratch_page = true,
1441 .configure = intel_i915_configure,
1442 .fetch_size = intel_i9xx_fetch_size,
1443 .cleanup = intel_i915_cleanup,
1444 .tlb_flush = intel_i810_tlbflush,
1445 .mask_memory = intel_i810_mask_memory,
1446 .masks = intel_i810_masks,
1447 .agp_enable = intel_i810_agp_enable,
1448 .cache_flush = global_cache_flush,
1449 .create_gatt_table = intel_i915_create_gatt_table,
1450 .free_gatt_table = intel_i830_free_gatt_table,
1451 .insert_memory = intel_i915_insert_entries,
1452 .remove_memory = intel_i915_remove_entries,
1453 .alloc_by_type = intel_i830_alloc_by_type,
1454 .free_by_type = intel_i810_free_by_type,
1455 .agp_alloc_page = agp_generic_alloc_page,
1456 .agp_alloc_pages = agp_generic_alloc_pages,
1457 .agp_destroy_page = agp_generic_destroy_page,
1458 .agp_destroy_pages = agp_generic_destroy_pages,
1459 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1460 .chipset_flush = intel_i915_chipset_flush,
1461#ifdef USE_PCI_DMA_API
1462 .agp_map_page = intel_agp_map_page,
1463 .agp_unmap_page = intel_agp_unmap_page,
1464 .agp_map_memory = intel_agp_map_memory,
1465 .agp_unmap_memory = intel_agp_unmap_memory,
1466#endif
1467};
1468
1469static const struct agp_bridge_driver intel_i965_driver = {
1470 .owner = THIS_MODULE,
1471 .aperture_sizes = intel_i830_sizes,
1472 .size_type = FIXED_APER_SIZE,
1473 .num_aperture_sizes = 4,
1474 .needs_scratch_page = true,
1475 .configure = intel_i915_configure,
1476 .fetch_size = intel_i9xx_fetch_size,
1477 .cleanup = intel_i915_cleanup,
1478 .tlb_flush = intel_i810_tlbflush,
1479 .mask_memory = intel_i965_mask_memory,
1480 .masks = intel_i810_masks,
1481 .agp_enable = intel_i810_agp_enable,
1482 .cache_flush = global_cache_flush,
1483 .create_gatt_table = intel_i965_create_gatt_table,
1484 .free_gatt_table = intel_i830_free_gatt_table,
1485 .insert_memory = intel_i915_insert_entries,
1486 .remove_memory = intel_i915_remove_entries,
1487 .alloc_by_type = intel_i830_alloc_by_type,
1488 .free_by_type = intel_i810_free_by_type,
1489 .agp_alloc_page = agp_generic_alloc_page,
1490 .agp_alloc_pages = agp_generic_alloc_pages,
1491 .agp_destroy_page = agp_generic_destroy_page,
1492 .agp_destroy_pages = agp_generic_destroy_pages,
1493 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1494 .chipset_flush = intel_i915_chipset_flush,
1495#ifdef USE_PCI_DMA_API
1496 .agp_map_page = intel_agp_map_page,
1497 .agp_unmap_page = intel_agp_unmap_page,
1498 .agp_map_memory = intel_agp_map_memory,
1499 .agp_unmap_memory = intel_agp_unmap_memory,
1500#endif
1501};
1502
1503static const struct agp_bridge_driver intel_g33_driver = {
1504 .owner = THIS_MODULE,
1505 .aperture_sizes = intel_i830_sizes,
1506 .size_type = FIXED_APER_SIZE,
1507 .num_aperture_sizes = 4,
1508 .needs_scratch_page = true,
1509 .configure = intel_i915_configure,
1510 .fetch_size = intel_i9xx_fetch_size,
1511 .cleanup = intel_i915_cleanup,
1512 .tlb_flush = intel_i810_tlbflush,
1513 .mask_memory = intel_i965_mask_memory,
1514 .masks = intel_i810_masks,
1515 .agp_enable = intel_i810_agp_enable,
1516 .cache_flush = global_cache_flush,
1517 .create_gatt_table = intel_i915_create_gatt_table,
1518 .free_gatt_table = intel_i830_free_gatt_table,
1519 .insert_memory = intel_i915_insert_entries,
1520 .remove_memory = intel_i915_remove_entries,
1521 .alloc_by_type = intel_i830_alloc_by_type,
1522 .free_by_type = intel_i810_free_by_type,
1523 .agp_alloc_page = agp_generic_alloc_page,
1524 .agp_alloc_pages = agp_generic_alloc_pages,
1525 .agp_destroy_page = agp_generic_destroy_page,
1526 .agp_destroy_pages = agp_generic_destroy_pages,
1527 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1528 .chipset_flush = intel_i915_chipset_flush,
1529#ifdef USE_PCI_DMA_API
1530 .agp_map_page = intel_agp_map_page,
1531 .agp_unmap_page = intel_agp_unmap_page,
1532 .agp_map_memory = intel_agp_map_memory,
1533 .agp_unmap_memory = intel_agp_unmap_memory,
1534#endif
1535};