aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2016-08-19 11:54:27 -0400
committerChris Wilson <chris@chris-wilson.co.uk>2016-08-19 12:13:35 -0400
commitf7bbe7883c3f119714fd09a8ceaac8075ba04dfe (patch)
tree3a9ebf6c9bb5899ee1c1dab1d8e83430af43b1d9
parentcafaf14a5d8f152ed3c984ecd48dee6e824446bc (diff)
drm/i915: Embed the io-mapping struct inside drm_i915_private
As io_mapping.h now always allocates the struct, we can avoid that allocation and extra pointer dance by embedding the struct inside drm_i915_private Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/20160819155428.1670-5-chris@chris-wilson.co.uk
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c11
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c4
6 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0e1f5dde2e87..5398af7f7580 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -891,7 +891,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
 	 * and write to user memory which may result into page
 	 * faults, and so we cannot perform this under struct_mutex.
 	 */
-	if (slow_user_access(ggtt->mappable, page_base,
+	if (slow_user_access(&ggtt->mappable, page_base,
 			     page_offset, user_data,
 			     page_length, false)) {
 		ret = -EFAULT;
@@ -1187,11 +1187,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 	 * If the object is non-shmem backed, we retry again with the
 	 * path that handles page fault.
 	 */
-	if (fast_user_write(ggtt->mappable, page_base,
+	if (fast_user_write(&ggtt->mappable, page_base,
 			    page_offset, user_data, page_length)) {
 		hit_slow_path = true;
 		mutex_unlock(&dev->struct_mutex);
-		if (slow_user_access(ggtt->mappable,
+		if (slow_user_access(&ggtt->mappable,
 				     page_base,
 				     page_offset, user_data,
 				     page_length, true)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 4192066ff60e..601156c353cc 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -474,7 +474,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 		offset += page << PAGE_SHIFT;
 	}

-	vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable, offset);
+	vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
 	cache->page = page;
 	cache->vaddr = (unsigned long)vaddr;

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a18363a0d8c5..b90fdcee992a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2794,7 +2794,6 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)

 	if (dev_priv->mm.aliasing_ppgtt) {
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
 		ppgtt->base.cleanup(&ppgtt->base);
 		kfree(ppgtt);
 	}
@@ -2811,7 +2810,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 	ggtt->base.cleanup(&ggtt->base);

 	arch_phys_wc_del(ggtt->mtrr);
-	io_mapping_free(ggtt->mappable);
+	io_mapping_fini(&ggtt->mappable);
 }

 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3209,9 +3208,9 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 	if (!HAS_LLC(dev_priv))
 		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;

-	ggtt->mappable =
-		io_mapping_create_wc(ggtt->mappable_base, ggtt->mappable_end);
-	if (!ggtt->mappable) {
+	if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
+				dev_priv->ggtt.mappable_base,
+				dev_priv->ggtt.mappable_end)) {
 		ret = -EIO;
 		goto out_gtt_cleanup;
 	}
@@ -3681,7 +3680,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)

 	ptr = vma->iomap;
 	if (ptr == NULL) {
-		ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
+		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
 					vma->node.start,
 					vma->node.size);
 		if (ptr == NULL)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index a15cea73f729..a9aec25535ac 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -439,13 +439,13 @@ struct i915_address_space {
 	 */
 struct i915_ggtt {
 	struct i915_address_space base;
+	struct io_mapping mappable; /* Mapping to our CPU mappable region */

 	size_t stolen_size;		/* Total size of stolen memory */
 	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
 	size_t stolen_reserved_base;
 	size_t stolen_reserved_size;
 	u64 mappable_end;		/* End offset that we can CPU map */
-	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
 	phys_addr_t mappable_base;	/* PA of our GMADR */

 	/** "Graphics Stolen Memory" holds the global PTEs */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 84dd5bc06db3..41ec7a183c73 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -729,7 +729,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 			 * captures what the GPU read.
 			 */

-			s = io_mapping_map_atomic_wc(ggtt->mappable,
+			s = io_mapping_map_atomic_wc(&ggtt->mappable,
 						     reloc_offset);
 			memcpy_fromio(d, s, PAGE_SIZE);
 			io_mapping_unmap_atomic(s);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 3cf8d02064a8..a24bc8c7889f 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
 	if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
 		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
 	else
-		regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+		regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
 					 overlay->flip_addr,
 					 PAGE_SIZE);

@@ -1489,7 +1489,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 		regs = (struct overlay_registers __iomem *)
 			overlay->reg_bo->phys_handle->vaddr;
 	else
-		regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+		regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
 						overlay->flip_addr);

 	return regs;