-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h     |  21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 121
2 files changed, 83 insertions(+), 59 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bffe222e1616..fb0815bebe0f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -365,6 +365,12 @@ struct intel_device_info {
 	u8 has_llc:1;
 };
 
+enum i915_cache_level {
+	I915_CACHE_NONE = 0,
+	I915_CACHE_LLC,
+	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
  * collateral associated with any va->pa translations GEN hardware also has a
@@ -386,6 +392,15 @@ struct i915_gtt {
 	bool do_idle_maps;
 	dma_addr_t scratch_page_dma;
 	struct page *scratch_page;
+
+	/* global gtt ops */
+	void (*gtt_clear_range)(struct drm_device *dev,
+				unsigned int first_entry,
+				unsigned int num_entries);
+	void (*gtt_insert_entries)(struct drm_device *dev,
+				   struct sg_table *st,
+				   unsigned int pg_start,
+				   enum i915_cache_level cache_level);
 };
 
 #define I915_PPGTT_PD_ENTRIES 512
@@ -1023,12 +1038,6 @@ enum hdmi_force_audio {
 	HDMI_AUDIO_ON,		/* force turn on HDMI audio */
 };
 
-enum i915_cache_level {
-	I915_CACHE_NONE = 0,
-	I915_CACHE_LLC,
-	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
-};
-
 #define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
 
 struct drm_i915_gem_object_ops {
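
The two function pointers added to struct i915_gtt above replace scattered
INTEL_INFO(dev)->gen checks with a small per-platform vtable that is filled
in once at init time. A rough, self-contained userspace sketch of the same
dispatch pattern follows; the simplified types and the printf-based backends
are illustrative stand-ins, not i915 code:

	#include <stdio.h>

	/* Simplified stand-ins for the i915 types (illustrative only). */
	enum cache_level { CACHE_NONE = 0, CACHE_LLC };

	struct gtt_ops {
		/* Mirrors the two hooks added to struct i915_gtt. */
		void (*clear_range)(unsigned int first, unsigned int num);
		void (*insert_entries)(unsigned int pg_start,
				       enum cache_level level);
	};

	/* Pre-gen6 backend: the real code defers to the intel-gtt helpers. */
	static void legacy_clear_range(unsigned int first, unsigned int num)
	{
		printf("legacy clear: [%u, %u)\n", first, first + num);
	}

	static void legacy_insert_entries(unsigned int pg_start,
					  enum cache_level level)
	{
		printf("legacy insert at %u, cached=%d\n",
		       pg_start, level != CACHE_NONE);
	}

	/* Gen6+ backend: the real code writes PTEs through the GSM BAR. */
	static void gen6_clear_range(unsigned int first, unsigned int num)
	{
		printf("gen6 clear: [%u, %u)\n", first, first + num);
	}

	static void gen6_insert_entries(unsigned int pg_start,
					enum cache_level level)
	{
		printf("gen6 insert at %u, cached=%d\n",
		       pg_start, level != CACHE_NONE);
	}

	int main(void)
	{
		int gen = 6;	/* normally probed from the device */
		struct gtt_ops gtt;

		/* Chosen once, as the two i915_gem_gtt_init() paths do below. */
		if (gen < 6) {
			gtt.clear_range = legacy_clear_range;
			gtt.insert_entries = legacy_insert_entries;
		} else {
			gtt.clear_range = gen6_clear_range;
			gtt.insert_entries = gen6_insert_entries;
		}

		/* Callers no longer branch on the hardware generation. */
		gtt.insert_entries(0, CACHE_LLC);
		gtt.clear_range(0, 16);
		return 0;
	}
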
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a0ba4a9e53c7..4712626f8dc1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -367,40 +367,14 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 	dev_priv->mm.interruptible = interruptible;
 }
 
-static void i915_ggtt_clear_range(struct drm_device *dev,
-				  unsigned first_entry,
-				  unsigned num_entries)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	gtt_pte_t scratch_pte;
-	gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
-	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
-	int i;
-
-	if (INTEL_INFO(dev)->gen < 6) {
-		intel_gtt_clear_range(first_entry, num_entries);
-		return;
-	}
-
-	if (WARN(num_entries > max_entries,
-		 "First entry = %d; Num entries = %d (max=%d)\n",
-		 first_entry, num_entries, max_entries))
-		num_entries = max_entries;
-
-	scratch_pte = pte_encode(dev, dev_priv->gtt.scratch_page_dma, I915_CACHE_LLC);
-	for (i = 0; i < num_entries; i++)
-		iowrite32(scratch_pte, &gtt_base[i]);
-	readl(gtt_base);
-}
-
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	/* First fill our portion of the GTT with scratch pages */
-	i915_ggtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
-			      dev_priv->gtt.total / PAGE_SIZE);
+	dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
+				      dev_priv->gtt.total / PAGE_SIZE);
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
 		i915_gem_clflush_object(obj);
@@ -429,15 +403,13 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
  * within the global GTT as well as accessible by the GPU through the GMADR
  * mapped BAR (dev_priv->mm.gtt->gtt).
  */
-static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
-				  enum i915_cache_level level)
+static void gen6_ggtt_insert_entries(struct drm_device *dev,
+				     struct sg_table *st,
+				     unsigned int first_entry,
+				     enum i915_cache_level level)
 {
-	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct sg_table *st = obj->pages;
 	struct scatterlist *sg = st->sgl;
-	const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
-	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
 	gtt_pte_t __iomem *gtt_entries =
 		(gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
 	int unused, i = 0;
@@ -453,9 +425,6 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
 		}
 	}
 
-	BUG_ON(i > max_entries);
-	BUG_ON(i != obj->base.size / PAGE_SIZE);
-
 	/* XXX: This serves as a posting read to make sure that the PTE has
 	 * actually been updated. There is some concern that even though
 	 * registers and PTEs are within the same BAR that they are potentially
@@ -473,28 +442,69 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
 
+static void gen6_ggtt_clear_range(struct drm_device *dev,
+				  unsigned int first_entry,
+				  unsigned int num_entries)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	gtt_pte_t scratch_pte;
+	gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+	int i;
+
+	if (WARN(num_entries > max_entries,
+		 "First entry = %d; Num entries = %d (max=%d)\n",
+		 first_entry, num_entries, max_entries))
+		num_entries = max_entries;
+
+	scratch_pte = pte_encode(dev, dev_priv->gtt.scratch_page_dma, I915_CACHE_LLC);
+	for (i = 0; i < num_entries; i++)
+		iowrite32(scratch_pte, &gtt_base[i]);
+	readl(gtt_base);
+}
+
+
+static void i915_ggtt_insert_entries(struct drm_device *dev,
+				     struct sg_table *st,
+				     unsigned int pg_start,
+				     enum i915_cache_level cache_level)
+{
+	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+	intel_gtt_insert_sg_entries(st, pg_start, flags);
+
+}
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+				  unsigned int first_entry,
+				  unsigned int num_entries)
+{
+	intel_gtt_clear_range(first_entry, num_entries);
+}
+
+
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 			      enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
-	if (INTEL_INFO(dev)->gen < 6) {
-		unsigned int flags = (cache_level == I915_CACHE_NONE) ?
-			AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-		intel_gtt_insert_sg_entries(obj->pages,
-					    obj->gtt_space->start >> PAGE_SHIFT,
-					    flags);
-	} else {
-		gen6_ggtt_bind_object(obj, cache_level);
-	}
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
+					 obj->gtt_space->start >> PAGE_SHIFT,
+					 cache_level);
 
 	obj->has_global_gtt_mapping = 1;
 }
 
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
-	i915_ggtt_clear_range(obj->base.dev,
-			      obj->gtt_space->start >> PAGE_SHIFT,
-			      obj->base.size >> PAGE_SHIFT);
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->gtt.gtt_clear_range(obj->base.dev,
+				      obj->gtt_space->start >> PAGE_SHIFT,
+				      obj->base.size >> PAGE_SHIFT);
 
 	obj->has_global_gtt_mapping = 0;
 }
@@ -570,13 +580,12 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 			     hole_start, hole_end) {
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
 			      hole_start, hole_end);
-		i915_ggtt_clear_range(dev,
-				      hole_start / PAGE_SIZE,
-				      (hole_end-hole_start) / PAGE_SIZE);
+		dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
+					      (hole_end-hole_start) / PAGE_SIZE);
 	}
 
 	/* And finally clear the reserved guard page */
-	i915_ggtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+	dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
 }
 
 static bool
@@ -718,6 +727,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
 
 	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev);
 
+	dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
+	dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
+
 	return 0;
 }
 
@@ -771,6 +783,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
 	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", dev_priv->gtt.mappable_end >> 20);
 	DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
 
+	dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
+	dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
+
 	return 0;
 
 err_out:
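
Design note: with both branches of i915_gem_gtt_init() filling in the hooks,
the old runtime checks (the gen < 6 early-out in the removed
i915_ggtt_clear_range() and the if/else in i915_gem_gtt_bind_object())
disappear. Callers such as i915_gem_restore_gtt_mappings() and
i915_gem_setup_global_gtt() now dispatch through dev_priv->gtt
unconditionally, and supporting a new PTE layout only requires supplying
another clear_range/insert_entries pair.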