author     Ben Widawsky <ben@bwidawsk.net>         2012-11-04 12:21:27 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2012-11-11 17:51:42 -0500
commit     e76e9aebcdbfebae8f4cd147e3c0f800d36e97f3 (patch)
tree       94272c5f3ee021931bfe43136d81076b10a26c77
parent     b3fcabb15bb83202fb5e4e5b296711b91c4942a3 (diff)
drm/i915: Stop using AGP layer for GEN6+
As a quick hack we make the old intel_gtt structure mutable so we can
fool a bunch of the existing code which depends on elements in that data
structure. We can/should try to remove this in a subsequent patch. This
should preserve the old gtt init behavior which upon writing these
patches seems incorrect. The next patch will fix these things.

The one exception is VLV which doesn't have the preserved flush control
write behavior. Since we want to do that for all GEN6+ stuff, we'll
handle that in a later patch. Mainstream VLV support doesn't actually
exist yet anyway.

v2: Update the comment to remove the "voodoo"
Check that the last pte written matches what we readback

v3: actually kill cache_level_to_agp_type since most of the flags will
disappear in an upcoming patch

v4: v3 was actually not what we wanted (Daniel)
Make the ggtt bind assertions better and stricter (Chris)
Fix some uncaught errors at gtt init (Chris)
Some other random stuff that Chris wanted

v5: check for i==0 in gen6_ggtt_bind_object to shut up gcc (Ben)

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by [v4]: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: Make the cache_level -> agp_flags conversion for pre-gen6 a
tad more robust by mapping everything != CACHE_NONE to the cached agp
flag - we have a 1:1 uncached mapping, but different modes of cacheable
(at least on later generations). Suggested by Chris Wilson.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
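Context note: pte_encode(), which the new gen6 GGTT paths below call, already
lives in i915_gem_gtt.c and is not touched by this diff. The following is a
minimal sketch of what it produces, reconstructed from the gen6 GGTT PTE
layout for illustration only; macro names and the Haswell special cases are
elided, so treat the details as an assumption rather than code from this
patch.

/* Sketch only -- reconstruction of the gen6 PTE encoding relied on below,
 * not part of this diff. Bit 0 is the valid bit, bits 2:1 select
 * cacheability, bits 31:12 carry address bits 31:12, and address bits
 * 39:32 fold into PTE bits 11:4.
 */
static gtt_pte_t pte_encode(struct drm_device *dev, dma_addr_t addr,
			    enum i915_cache_level level)
{
	gtt_pte_t pte = 1 << 0;			/* valid */

	/* addr is page aligned; high bits truncate on the u32 assignment */
	pte |= addr | ((addr >> 28) & 0xff0);

	switch (level) {
	case I915_CACHE_LLC_MLC:
		pte |= 3 << 1;			/* LLC+MLC */
		break;
	case I915_CACHE_LLC:
		pte |= 2 << 1;			/* LLC only */
		break;
	default:
	case I915_CACHE_NONE:
		pte |= 1 << 1;			/* uncached */
		break;
	}
	return pte;
}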
-rw-r--r--  drivers/char/agp/intel-gtt.c                  2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c              16
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h              10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c              12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c    2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c         257
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h               6
-rw-r--r--  include/drm/intel-gtt.h                       3
8 files changed, 257 insertions, 51 deletions
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 38390f7c6ab6..4dfbb80f0fd5 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1686,7 +1686,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-const struct intel_gtt *intel_gtt_get(void)
+struct intel_gtt *intel_gtt_get(void)
 {
 	return &intel_private.base;
 }
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ff06e3239ada..1eea5be43617 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1496,19 +1496,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto free_priv;
 	}
 
-	ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
-	if (!ret) {
-		DRM_ERROR("failed to set up gmch\n");
-		ret = -EIO;
-		goto put_bridge;
-	}
-
-	dev_priv->mm.gtt = intel_gtt_get();
-	if (!dev_priv->mm.gtt) {
-		DRM_ERROR("Failed to initialize GTT\n");
-		ret = -ENODEV;
-		goto put_gmch;
-	}
+	ret = i915_gem_gtt_init(dev);
+	if (ret)
+		goto put_bridge;
 
 	i915_kick_out_firmware_fb(dev_priv);
 
@@ -1683,7 +1673,7 @@ out_mtrrfree:
 out_rmmap:
 	pci_iounmap(dev->pdev, dev_priv->regs);
 put_gmch:
-	intel_gmch_remove();
+	i915_gem_gtt_fini(dev);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 free_priv:
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c4339c2b1b57..f316916fe65e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -746,7 +746,7 @@ typedef struct drm_i915_private {
 
 	struct {
 		/** Bridge to intel-gtt-ko */
-		const struct intel_gtt *gtt;
+		struct intel_gtt *gtt;
 		/** Memory allocator for GTT stolen memory */
 		struct drm_mm stolen;
 		/** Memory allocator for GTT */
@@ -1538,6 +1538,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 				  unsigned long start,
 				  unsigned long mappable_end,
 				  unsigned long end);
+int i915_gem_gtt_init(struct drm_device *dev);
+void i915_gem_gtt_fini(struct drm_device *dev);
+extern inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gtt_chipset_flush();
+}
+
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d8eaebfea93e..c161fdbd830f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -845,12 +845,12 @@ out:
 	 * domain anymore. */
 	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 		i915_gem_clflush_object(obj);
-		intel_gtt_chipset_flush();
+		i915_gem_chipset_flush(dev);
 		}
 	}
 
 	if (needs_clflush_after)
-		intel_gtt_chipset_flush();
+		i915_gem_chipset_flush(dev);
 
 	return ret;
 }
@@ -3058,7 +3058,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 		return;
 
 	i915_gem_clflush_object(obj);
-	intel_gtt_chipset_flush();
+	i915_gem_chipset_flush(obj->base.dev);
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
@@ -3959,7 +3959,7 @@ i915_gem_init_hw(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
-	if (!intel_enable_gtt())
+	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
 
 	if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
@@ -4294,7 +4294,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 			page_cache_release(page);
 		}
 	}
-	intel_gtt_chipset_flush();
+	i915_gem_chipset_flush(dev);
 
 	obj->phys_obj->cur_obj = NULL;
 	obj->phys_obj = NULL;
@@ -4381,7 +4381,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
 		return -EFAULT;
 	}
 
-	intel_gtt_chipset_flush();
+	i915_gem_chipset_flush(dev);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 91d43d5c4526..d80e9dd00c48 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -672,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 	}
 
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		intel_gtt_chipset_flush();
+		i915_gem_chipset_flush(ring->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 06202fd6dbdd..e74be0c2c6a5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -262,26 +262,6 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 			      obj->base.size >> PAGE_SHIFT);
 }
 
-/* XXX kill agp_type! */
-static unsigned int cache_level_to_agp_type(struct drm_device *dev,
-					    enum i915_cache_level cache_level)
-{
-	switch (cache_level) {
-	case I915_CACHE_LLC_MLC:
-		/* Older chipsets do not have this extra level of CPU
-		 * cacheing, so fallthrough and request the PTE simply
-		 * as cached.
-		 */
-		if (INTEL_INFO(dev)->gen >= 6 && !IS_HASWELL(dev))
-			return AGP_USER_CACHED_MEMORY_LLC_MLC;
-	case I915_CACHE_LLC:
-		return AGP_USER_CACHED_MEMORY;
-	default:
-	case I915_CACHE_NONE:
-		return AGP_USER_MEMORY;
-	}
-}
-
 static bool do_idling(struct drm_i915_private *dev_priv)
 {
 	bool ret = dev_priv->mm.interruptible;
@@ -304,13 +284,38 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 	dev_priv->mm.interruptible = interruptible;
 }
 
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+				 unsigned first_entry,
+				 unsigned num_entries)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	gtt_pte_t scratch_pte;
+	volatile void __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
+	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+
+	if (INTEL_INFO(dev)->gen < 6) {
+		intel_gtt_clear_range(first_entry, num_entries);
+		return;
+	}
+
+	if (WARN(num_entries > max_entries,
+		 "First entry = %d; Num entries = %d (max=%d)\n",
+		 first_entry, num_entries, max_entries))
+		num_entries = max_entries;
+
+	scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
+	memset_io(gtt_base, scratch_pte, num_entries * sizeof(scratch_pte));
+	readl(gtt_base);
+}
+
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	/* First fill our portion of the GTT with scratch pages */
-	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+	i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
 			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@@ -318,7 +323,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 	}
 
-	intel_gtt_chipset_flush();
+	i915_gem_chipset_flush(dev);
 }
 
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
@@ -334,21 +339,69 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+/*
+ * Binds an object into the global gtt with the specified cache level. The object
+ * will be accessible to the GPU via commands whose operands reference offsets
+ * within the global GTT as well as accessible by the GPU through the GMADR
+ * mapped BAR (dev_priv->mm.gtt->gtt).
+ */
+static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
+				  enum i915_cache_level level)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct sg_table *st = obj->pages;
+	struct scatterlist *sg = st->sgl;
+	const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
+	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+	gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+	int unused, i = 0;
+	unsigned int len, m = 0;
+	dma_addr_t addr;
+
+	for_each_sg(st->sgl, sg, st->nents, unused) {
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		for (m = 0; m < len; m++) {
+			addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+			gtt_entries[i] = pte_encode(dev, addr, level);
+			i++;
+		}
+	}
+
+	BUG_ON(i > max_entries);
+	BUG_ON(i != obj->base.size / PAGE_SIZE);
+
+	/* XXX: This serves as a posting read to make sure that the PTE has
+	 * actually been updated. There is some concern that even though
+	 * registers and PTEs are within the same BAR that they are potentially
+	 * of NUMA access patterns. Therefore, even with the way we assume
+	 * hardware should work, we must keep this posting read for paranoia.
+	 */
+	if (i != 0)
+		WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
+}
+
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 			      enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
-	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+	if (INTEL_INFO(dev)->gen < 6) {
+		unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+			AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+		intel_gtt_insert_sg_entries(obj->pages,
+					    obj->gtt_space->start >> PAGE_SHIFT,
+					    flags);
+	} else {
+		gen6_ggtt_bind_object(obj, cache_level);
+	}
 
-	intel_gtt_insert_sg_entries(obj->pages,
-				    obj->gtt_space->start >> PAGE_SHIFT,
-				    agp_type);
 	obj->has_global_gtt_mapping = 1;
 }
 
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
-	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+	i915_ggtt_clear_range(obj->base.dev,
+			      obj->gtt_space->start >> PAGE_SHIFT,
 			      obj->base.size >> PAGE_SHIFT);
 
 	obj->has_global_gtt_mapping = 0;
@@ -406,5 +459,153 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
 
 	/* ... but ensure that we clear the entire range. */
-	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+	i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+}
+
+static int setup_scratch_page(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct page *page;
+	dma_addr_t dma_addr;
+
+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+	if (page == NULL)
+		return -ENOMEM;
+	get_page(page);
+	set_pages_uc(page, 1);
+
+#ifdef CONFIG_INTEL_IOMMU
+	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
+				PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(dev->pdev, dma_addr))
+		return -EINVAL;
+#else
+	dma_addr = page_to_phys(page);
+#endif
+	dev_priv->mm.gtt->scratch_page = page;
+	dev_priv->mm.gtt->scratch_page_dma = dma_addr;
+
+	return 0;
+}
+
+static void teardown_scratch_page(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
+	pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
+		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	put_page(dev_priv->mm.gtt->scratch_page);
+	__free_page(dev_priv->mm.gtt->scratch_page);
+}
+
+static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+	return snb_gmch_ctl << 20;
+}
+
+static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
+	return snb_gmch_ctl << 25; /* 32 MB units */
+}
+
+int i915_gem_gtt_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	phys_addr_t gtt_bus_addr;
+	u16 snb_gmch_ctl;
+	u32 tmp;
+	int ret;
+
+	/* On modern platforms we need not worry ourself with the legacy
+	 * hostbridge query stuff. Skip it entirely
+	 */
+	if (INTEL_INFO(dev)->gen < 6) {
+		ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
+		if (!ret) {
+			DRM_ERROR("failed to set up gmch\n");
+			return -EIO;
+		}
+
+		dev_priv->mm.gtt = intel_gtt_get();
+		if (!dev_priv->mm.gtt) {
+			DRM_ERROR("Failed to initialize GTT\n");
+			intel_gmch_remove();
+			return -ENODEV;
+		}
+		return 0;
+	}
+
+	dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
+	if (!dev_priv->mm.gtt)
+		return -ENOMEM;
+
+	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
+		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+
+	pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_0, &tmp);
+	/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
+	gtt_bus_addr = (tmp & PCI_BASE_ADDRESS_MEM_MASK) + (2<<20);
+
+	pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_2, &tmp);
+	dev_priv->mm.gtt->gma_bus_addr = tmp & PCI_BASE_ADDRESS_MEM_MASK;
+
+	/* i9xx_setup */
+	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+	dev_priv->mm.gtt->gtt_total_entries =
+		gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
+	dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
+
+	dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
+	/* 64/512MB is the current min/max we actually know of, but this is just a
+	 * coarse sanity check.
+	 */
+	if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
+	    dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
+		DRM_ERROR("Unknown GMADR entries (%d)\n",
+			  dev_priv->mm.gtt->gtt_mappable_entries);
+		ret = -ENXIO;
+		goto err_out;
+	}
+
+	ret = setup_scratch_page(dev);
+	if (ret) {
+		DRM_ERROR("Scratch setup failed\n");
+		goto err_out;
+	}
+
+	dev_priv->mm.gtt->gtt = ioremap(gtt_bus_addr,
+					dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
+	if (!dev_priv->mm.gtt->gtt) {
+		DRM_ERROR("Failed to map the gtt page table\n");
+		teardown_scratch_page(dev);
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
+	/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
+	DRM_INFO("Memory Usable by graphics device = %dK\n", dev_priv->mm.gtt->gtt_total_entries >> 10);
+	DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
+	DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
+
+	return 0;
+
+err_out:
+	kfree(dev_priv->mm.gtt);
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gmch_remove();
+	return ret;
+}
+
+void i915_gem_gtt_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	iounmap(dev_priv->mm.gtt->gtt);
+	teardown_scratch_page(dev);
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gmch_remove();
+	kfree(dev_priv->mm.gtt);
 }
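To make the new size probing concrete, here is a hypothetical decode of
SNB_GMCH_CTRL using gen6_get_total_gtt_size()/gen6_get_stolen_size() from the
hunk above; the register value 0x0210 is made up purely for illustration.

/* Hypothetical SNB_GMCH_CTRL readback, for illustration only. */
u16 snb_gmch_ctl = 0x0210;

/* GGMS (bits 9:8) = 2  ->  2 << 20 = 2 MB of PTEs, i.e.
 * 2 MB / sizeof(gtt_pte_t) = 524288 entries = 2 GB of GGTT address space. */
unsigned int gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

/* GMS (bits 7:3) = 2  ->  2 << 25 = 64 MB of BIOS-stolen memory. */
unsigned int stolen_size = gen6_get_stolen_size(snb_gmch_ctl);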
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0866ac3d0a3f..449403f60e4f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -41,6 +41,12 @@
  */
 #define INTEL_GMCH_CTRL		0x52
 #define INTEL_GMCH_VGA_DISABLE	(1 << 1)
+#define SNB_GMCH_CTRL		0x50
+#define SNB_GMCH_GGMS_SHIFT	8 /* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK	0x3
+#define SNB_GMCH_GMS_SHIFT	3 /* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK	0x1f
+
 
 /* PCI config space */
 
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 2e37e9f02e71..94e8f2c7f9e1 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -3,7 +3,7 @@
 #ifndef _DRM_INTEL_GTT_H
 #define _DRM_INTEL_GTT_H
 
-const struct intel_gtt {
+struct intel_gtt {
 	/* Size of memory reserved for graphics by the BIOS */
 	unsigned int stolen_size;
 	/* Total number of gtt entries. */
@@ -17,6 +17,7 @@ const struct intel_gtt {
 	unsigned int do_idle_maps : 1;
 	/* Share the scratch page dma with ppgtts. */
 	dma_addr_t scratch_page_dma;
+	struct page *scratch_page;
 	/* for ppgtt PDE access */
 	u32 __iomem *gtt;
 	/* needed for ioremap in drm/i915 */