Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
 -rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 418
 1 file changed, 365 insertions(+), 53 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index df470b5e8d36..35fec1e61346 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,19 +28,67 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+typedef uint32_t gtt_pte_t;
+
+/* PPGTT stuff */
+#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID			(1 << 0)
+/* gen6+ has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID			(1 << 0)
+#define GEN6_PTE_UNCACHED		(1 << 1)
+#define HSW_PTE_UNCACHED		(0)
+#define GEN6_PTE_CACHE_LLC		(2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
+#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
+
+static inline gtt_pte_t pte_encode(struct drm_device *dev,
+				   dma_addr_t addr,
+				   enum i915_cache_level level)
+{
+	gtt_pte_t pte = GEN6_PTE_VALID;
+	pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+	switch (level) {
+	case I915_CACHE_LLC_MLC:
+		/* Haswell doesn't set L3 this way */
+		if (IS_HASWELL(dev))
+			pte |= GEN6_PTE_CACHE_LLC;
+		else
+			pte |= GEN6_PTE_CACHE_LLC_MLC;
+		break;
+	case I915_CACHE_LLC:
+		pte |= GEN6_PTE_CACHE_LLC;
+		break;
+	case I915_CACHE_NONE:
+		if (IS_HASWELL(dev))
+			pte |= HSW_PTE_UNCACHED;
+		else
+			pte |= GEN6_PTE_UNCACHED;
+		break;
+	default:
+		BUG();
+	}
+
+
+	return pte;
+}
+
 /* PPGTT support for Sandybdrige/Gen6 and later */
 static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
 				   unsigned first_entry,
 				   unsigned num_entries)
 {
-	uint32_t *pt_vaddr;
-	uint32_t scratch_pte;
+	gtt_pte_t *pt_vaddr;
+	gtt_pte_t scratch_pte;
 	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
 	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	unsigned last_pte, i;
 
-	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
-	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
+	scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
+				 I915_CACHE_LLC);
 
 	while (num_entries) {
 		last_pte = first_pte + num_entries;
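
The new pte_encode helper leans on 4 KiB page alignment: bits 11:0 of a page address are always zero, so GEN6_GTT_ADDR_ENCODE reuses PTE bits 11:4 to carry physical-address bits 39:32, letting a 40-bit address fit a 32-bit PTE. A minimal standalone sketch of the same fold (illustration only, not driver code; the sample address is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Same fold as GEN6_GTT_ADDR_ENCODE: addr bits 39:32 land in bits 11:4. */
    #define GTT_ADDR_ENCODE(addr) ((uint32_t)((addr) | (((addr) >> 28) & 0xff0)))

    int main(void)
    {
            uint64_t addr = 0x1234567000ULL;          /* 40-bit, page-aligned */
            uint32_t pte = GTT_ADDR_ENCODE(addr) | 1; /* 1 == GEN6_PTE_VALID */

            printf("pte = 0x%08x\n", pte);                           /* 0x34567121 */
            printf("addr bits 39:32 = 0x%02x\n", (pte >> 4) & 0xff); /* 0x12 */
            return 0;
    }
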
@@ -77,6 +125,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 	if (!ppgtt)
 		return ret;
 
+	ppgtt->dev = dev;
 	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
 	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
 				  GFP_KERNEL);
@@ -118,7 +167,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 	i915_ppgtt_clear_range(ppgtt, 0,
 			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
 
-	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
+	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
 
 	dev_priv->mm.aliasing_ppgtt = ppgtt;
 
@@ -168,9 +217,9 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
 static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
 					 const struct sg_table *pages,
 					 unsigned first_entry,
-					 uint32_t pte_flags)
+					 enum i915_cache_level cache_level)
 {
-	uint32_t *pt_vaddr, pte;
+	gtt_pte_t *pt_vaddr;
 	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
 	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	unsigned i, j, m, segment_len;
@@ -188,8 +237,8 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
 
 	for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
 		page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-		pte = GEN6_PTE_ADDR_ENCODE(page_addr);
-		pt_vaddr[j] = pte | pte_flags;
+		pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
+					 cache_level);
 
 		/* grab the next page */
 		if (++m == segment_len) {
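
Both PPGTT loops above address a two-level structure: a linear GTT page index is split into a page-directory slot (act_pd) and a PTE offset within that table (first_pte). With 4 KiB page tables holding 1024 four-byte PTEs, each directory entry spans 4 MiB of GPU address space. A worked sketch of that index math (assuming I915_PPGTT_PT_ENTRIES is 1024, its value in this era's i915_drv.h):

    #include <stdio.h>

    #define I915_PPGTT_PT_ENTRIES 1024  /* 4 KiB table / 4-byte PTE */

    int main(void)
    {
            unsigned first_entry = 1500; /* object bound at GTT page 1500 */
            unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;    /* 1 */
            unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; /* 476 */

            /* PD slot 1, PTE 476; each PD slot covers 1024 * 4 KiB = 4 MiB. */
            printf("act_pd=%u first_pte=%u\n", act_pd, first_pte);
            return 0;
    }
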
@@ -213,29 +262,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 			    struct drm_i915_gem_object *obj,
 			    enum i915_cache_level cache_level)
 {
-	uint32_t pte_flags = GEN6_PTE_VALID;
-
-	switch (cache_level) {
-	case I915_CACHE_LLC_MLC:
-		pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
-		break;
-	case I915_CACHE_LLC:
-		pte_flags |= GEN6_PTE_CACHE_LLC;
-		break;
-	case I915_CACHE_NONE:
-		if (IS_HASWELL(obj->base.dev))
-			pte_flags |= HSW_PTE_UNCACHED;
-		else
-			pte_flags |= GEN6_PTE_UNCACHED;
-		break;
-	default:
-		BUG();
-	}
-
 	i915_ppgtt_insert_sg_entries(ppgtt,
 				     obj->pages,
 				     obj->gtt_space->start >> PAGE_SHIFT,
-				     pte_flags);
+				     cache_level);
 }
 
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
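
With the switch statement gone from i915_ppgtt_bind_object, both the PPGTT and GGTT paths now take their cache-control bits from pte_encode. On non-Haswell gen6+ parts the three cache levels land in PTE bits 2:1 as below (a toy print-out under that assumption, not driver code):

    #include <stdio.h>

    /* Cache-control encodings from the new header block (PTE bits 2:1). */
    #define GEN6_PTE_UNCACHED       (1 << 1)
    #define GEN6_PTE_CACHE_LLC      (2 << 1)
    #define GEN6_PTE_CACHE_LLC_MLC  (3 << 1)

    int main(void)
    {
            printf("I915_CACHE_NONE    -> 0x%x\n", GEN6_PTE_UNCACHED);      /* 0x2 */
            printf("I915_CACHE_LLC     -> 0x%x\n", GEN6_PTE_CACHE_LLC);     /* 0x4 */
            printf("I915_CACHE_LLC_MLC -> 0x%x\n", GEN6_PTE_CACHE_LLC_MLC); /* 0x6 */
            return 0;
    }
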
@@ -246,23 +276,65 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 			    obj->base.size >> PAGE_SHIFT);
 }
 
-/* XXX kill agp_type! */
-static unsigned int cache_level_to_agp_type(struct drm_device *dev,
-					    enum i915_cache_level cache_level)
-{
-	switch (cache_level) {
-	case I915_CACHE_LLC_MLC:
-		if (INTEL_INFO(dev)->gen >= 6)
-			return AGP_USER_CACHED_MEMORY_LLC_MLC;
-		/* Older chipsets do not have this extra level of CPU
-		 * cacheing, so fallthrough and request the PTE simply
-		 * as cached.
-		 */
-	case I915_CACHE_LLC:
-		return AGP_USER_CACHED_MEMORY;
-	default:
-	case I915_CACHE_NONE:
-		return AGP_USER_MEMORY;
+void i915_gem_init_ppgtt(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t pd_offset;
+	struct intel_ring_buffer *ring;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	uint32_t __iomem *pd_addr;
+	uint32_t pd_entry;
+	int i;
+
+	if (!dev_priv->mm.aliasing_ppgtt)
+		return;
+
+
+	pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		dma_addr_t pt_addr;
+
+		if (dev_priv->mm.gtt->needs_dmar)
+			pt_addr = ppgtt->pt_dma_addr[i];
+		else
+			pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+		pd_entry |= GEN6_PDE_VALID;
+
+		writel(pd_entry, pd_addr + i);
+	}
+	readl(pd_addr);
+
+	pd_offset = ppgtt->pd_offset;
+	pd_offset /= 64; /* in cachelines, */
+	pd_offset <<= 16;
+
+	if (INTEL_INFO(dev)->gen == 6) {
+		uint32_t ecochk, gab_ctl, ecobits;
+
+		ecobits = I915_READ(GAC_ECO_BITS);
+		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+		gab_ctl = I915_READ(GAB_CTL);
+		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+		ecochk = I915_READ(GAM_ECOCHK);
+		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+				       ECOCHK_PPGTT_CACHE64B);
+		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+	} else if (INTEL_INFO(dev)->gen >= 7) {
+		I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
+		/* GFX_MODE is per-ring on gen7+ */
+	}
+
+	for_each_ring(ring, dev_priv, i) {
+		if (INTEL_INFO(dev)->gen >= 7)
+			I915_WRITE(RING_MODE_GEN7(ring),
+				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
 	}
 }
 
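
The divide-by-64 and shift-by-16 above convert the page directory's byte offset in the GTT into the form PP_DIR_BASE expects: an offset counted in 64-byte GTT cachelines (16 four-byte PDEs each), carried in the register's upper 16 bits. A worked sketch of the same arithmetic (the example offset is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t pd_offset = 0x1ff000; /* hypothetical PD byte offset in the GTT */

            pd_offset /= 64;   /* now in cachelines: 0x7fc0 */
            pd_offset <<= 16;  /* PP_DIR_BASE carries it in bits 31:16 */

            printf("PP_DIR_BASE = 0x%08x\n", pd_offset); /* 0x7fc00000 */
            return 0;
    }
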
@@ -288,13 +360,38 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 	dev_priv->mm.interruptible = interruptible;
 }
 
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+				  unsigned first_entry,
+				  unsigned num_entries)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	gtt_pte_t scratch_pte;
+	volatile void __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
+	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+
+	if (INTEL_INFO(dev)->gen < 6) {
+		intel_gtt_clear_range(first_entry, num_entries);
+		return;
+	}
+
+	if (WARN(num_entries > max_entries,
+		 "First entry = %d; Num entries = %d (max=%d)\n",
+		 first_entry, num_entries, max_entries))
+		num_entries = max_entries;
+
+	scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
+	memset_io(gtt_base, scratch_pte, num_entries * sizeof(scratch_pte));
+	readl(gtt_base);
+}
+
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	/* First fill our portion of the GTT with scratch pages */
-	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+	i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
 			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@@ -302,7 +399,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 	}
 
-	intel_gtt_chipset_flush();
+	i915_gem_chipset_flush(dev);
 }
 
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
@@ -318,21 +415,76 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+/*
+ * Binds an object into the global gtt with the specified cache level. The object
+ * will be accessible to the GPU via commands whose operands reference offsets
+ * within the global GTT as well as accessible by the GPU through the GMADR
+ * mapped BAR (dev_priv->mm.gtt->gtt).
+ */
+static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
+				  enum i915_cache_level level)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct sg_table *st = obj->pages;
+	struct scatterlist *sg = st->sgl;
+	const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
+	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+	gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+	int unused, i = 0;
+	unsigned int len, m = 0;
+	dma_addr_t addr;
+
+	for_each_sg(st->sgl, sg, st->nents, unused) {
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		for (m = 0; m < len; m++) {
+			addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+			iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
+			i++;
+		}
+	}
+
+	BUG_ON(i > max_entries);
+	BUG_ON(i != obj->base.size / PAGE_SIZE);
+
+	/* XXX: This serves as a posting read to make sure that the PTE has
+	 * actually been updated. There is some concern that even though
+	 * registers and PTEs are within the same BAR that they are potentially
+	 * of NUMA access patterns. Therefore, even with the way we assume
+	 * hardware should work, we must keep this posting read for paranoia.
+	 */
+	if (i != 0)
+		WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
+
+	/* This next bit makes the above posting read even more important. We
+	 * want to flush the TLBs only after we're certain all the PTE updates
+	 * have finished.
+	 */
+	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 			      enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
-	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+	if (INTEL_INFO(dev)->gen < 6) {
+		unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+			AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+		intel_gtt_insert_sg_entries(obj->pages,
+					    obj->gtt_space->start >> PAGE_SHIFT,
+					    flags);
+	} else {
+		gen6_ggtt_bind_object(obj, cache_level);
+	}
 
-	intel_gtt_insert_sg_entries(obj->pages,
-				    obj->gtt_space->start >> PAGE_SHIFT,
-				    agp_type);
 	obj->has_global_gtt_mapping = 1;
 }
 
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
-	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+	i915_ggtt_clear_range(obj->base.dev,
+			      obj->gtt_space->start >> PAGE_SHIFT,
 			      obj->base.size >> PAGE_SHIFT);
 
 	obj->has_global_gtt_mapping = 0;
@@ -390,5 +542,165 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
 
 	/* ... but ensure that we clear the entire range. */
-	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+	i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+}
+
+static int setup_scratch_page(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct page *page;
+	dma_addr_t dma_addr;
+
+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+	if (page == NULL)
+		return -ENOMEM;
+	get_page(page);
+	set_pages_uc(page, 1);
+
+#ifdef CONFIG_INTEL_IOMMU
+	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
+				PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(dev->pdev, dma_addr))
+		return -EINVAL;
+#else
+	dma_addr = page_to_phys(page);
+#endif
+	dev_priv->mm.gtt->scratch_page = page;
+	dev_priv->mm.gtt->scratch_page_dma = dma_addr;
+
+	return 0;
+}
+
+static void teardown_scratch_page(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
+	pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
+		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	put_page(dev_priv->mm.gtt->scratch_page);
+	__free_page(dev_priv->mm.gtt->scratch_page);
+}
+
+static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+	return snb_gmch_ctl << 20;
+}
+
+static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
+	return snb_gmch_ctl << 25; /* 32 MB units */
+}
+
+static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
+{
+	static const int stolen_decoder[] = {
+		0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
+	snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
+	snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
+	return stolen_decoder[snb_gmch_ctl] << 20;
+}
+
+int i915_gem_gtt_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	phys_addr_t gtt_bus_addr;
+	u16 snb_gmch_ctl;
+	u32 tmp;
+	int ret;
+
+	/* On modern platforms we need not worry ourself with the legacy
+	 * hostbridge query stuff. Skip it entirely
+	 */
+	if (INTEL_INFO(dev)->gen < 6) {
+		ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
+		if (!ret) {
+			DRM_ERROR("failed to set up gmch\n");
+			return -EIO;
+		}
+
+		dev_priv->mm.gtt = intel_gtt_get();
+		if (!dev_priv->mm.gtt) {
+			DRM_ERROR("Failed to initialize GTT\n");
+			intel_gmch_remove();
+			return -ENODEV;
+		}
+		return 0;
+	}
+
+	dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
+	if (!dev_priv->mm.gtt)
+		return -ENOMEM;
+
+	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
+		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+
+	pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_0, &tmp);
+	/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
+	gtt_bus_addr = (tmp & PCI_BASE_ADDRESS_MEM_MASK) + (2<<20);
+
+	pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_2, &tmp);
+	dev_priv->mm.gtt->gma_bus_addr = tmp & PCI_BASE_ADDRESS_MEM_MASK;
+
+	/* i9xx_setup */
+	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+	dev_priv->mm.gtt->gtt_total_entries =
+		gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
+	if (INTEL_INFO(dev)->gen < 7)
+		dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
+	else
+		dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
+
+	dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
+	/* 64/512MB is the current min/max we actually know of, but this is just a
+	 * coarse sanity check.
+	 */
+	if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
+	    dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
+		DRM_ERROR("Unknown GMADR entries (%d)\n",
+			  dev_priv->mm.gtt->gtt_mappable_entries);
+		ret = -ENXIO;
+		goto err_out;
+	}
+
+	ret = setup_scratch_page(dev);
+	if (ret) {
+		DRM_ERROR("Scratch setup failed\n");
+		goto err_out;
+	}
+
+	dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
+					   dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
+	if (!dev_priv->mm.gtt->gtt) {
+		DRM_ERROR("Failed to map the gtt page table\n");
+		teardown_scratch_page(dev);
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
+	/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
+	DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
+	DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
+	DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
+
+	return 0;
+
+err_out:
+	kfree(dev_priv->mm.gtt);
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gmch_remove();
+	return ret;
+}
+
+void i915_gem_gtt_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	iounmap(dev_priv->mm.gtt->gtt);
+	teardown_scratch_page(dev);
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gmch_remove();
+	kfree(dev_priv->mm.gtt);
 }
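
The GMCH control word read in i915_gem_gtt_init packs both sizes as small enumerations: the gen6 GGMS (GTT size) field is in 1 MiB units, hence the << 20, and the gen6 GMS (stolen memory) field in 32 MiB units, hence the << 25; gen7 GMS values instead index the stolen_decoder table. A standalone decode of a made-up register value, assuming this era's field layout (GGMS at bits 9:8, GMS at bits 7:3):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed SNB_GMCH_CTRL field layout (SNB_GMCH_GGMS_SHIFT/MASK = 8/0x3,
     * SNB_GMCH_GMS_SHIFT/MASK = 3/0x1f in i915_drv.h of this period). */
    static unsigned int gen6_gtt_size(uint16_t ctl)
    {
            return ((ctl >> 8) & 0x3) << 20;  /* 1 MiB units */
    }

    static unsigned int gen6_stolen_size(uint16_t ctl)
    {
            return ((ctl >> 3) & 0x1f) << 25; /* 32 MiB units */
    }

    int main(void)
    {
            uint16_t ctl = 0x0211; /* made up: GGMS=2, GMS=2 */

            printf("GTT = %u MiB (%u PTEs)\n",
                   gen6_gtt_size(ctl) >> 20,
                   gen6_gtt_size(ctl) / 4); /* sizeof(gtt_pte_t) == 4 */
            printf("stolen = %u MiB\n", gen6_stolen_size(ctl) >> 20);
            return 0;
    }
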