about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
diff options
context:
space:
mode:
authorBen Widawsky <ben@bwidawsk.net>2013-07-05 17:41:04 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2013-07-08 16:04:34 -0400
commitf343c5f6477354967ee1e331a68a56b9fece2f36 (patch)
tree71bcf1f5c511b3fa13369badf81e074d8d603543 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent338710e7aff3428dc8170a03704a8ae981b58dcd (diff)
drm/i915: Getter/setter for object attributes
Soon we want to gut a lot of our existing assumptions how many address spaces an object can live in, and in doing so, embed the drm_mm_node in the object (and later the VMA). It's possible in the future we'll want to add more getter/setter methods, but for now this is enough to enable the VMAs. v2: Reworked commit message (Ben) Added comments to the main functions (Ben) sed -i "s/i915_gem_obj_set_color/i915_gem_obj_ggtt_set_color/" drivers/gpu/drm/i915/*.[ch] sed -i "s/i915_gem_obj_bound/i915_gem_obj_ggtt_bound/" drivers/gpu/drm/i915/*.[ch] sed -i "s/i915_gem_obj_size/i915_gem_obj_ggtt_size/" drivers/gpu/drm/i915/*.[ch] sed -i "s/i915_gem_obj_offset/i915_gem_obj_ggtt_offset/" drivers/gpu/drm/i915/*.[ch] (Daniel) v3: Rebased on new reserve_node patch Changed DRM_DEBUG_KMS to actually work (will need fixing later) Signed-off-by: Ben Widawsky <ben@bwidawsk.net> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c12
1 file changed, 6 insertions, 6 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e51ab552046c..54495df2403e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -424,14 +424,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
424 * registers with the above sequence (the readback of the HEAD registers 424 * registers with the above sequence (the readback of the HEAD registers
425 * also enforces ordering), otherwise the hw might lose the new ring 425 * also enforces ordering), otherwise the hw might lose the new ring
426 * register values. */ 426 * register values. */
427 I915_WRITE_START(ring, obj->gtt_offset); 427 I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
428 I915_WRITE_CTL(ring, 428 I915_WRITE_CTL(ring,
429 ((ring->size - PAGE_SIZE) & RING_NR_PAGES) 429 ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
430 | RING_VALID); 430 | RING_VALID);
431 431
432 /* If the head is still not zero, the ring is dead */ 432 /* If the head is still not zero, the ring is dead */
433 if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && 433 if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
434 I915_READ_START(ring) == obj->gtt_offset && 434 I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
435 (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { 435 (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
436 DRM_ERROR("%s initialization failed " 436 DRM_ERROR("%s initialization failed "
437 "ctl %08x head %08x tail %08x start %08x\n", 437 "ctl %08x head %08x tail %08x start %08x\n",
@@ -489,7 +489,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
489 if (ret) 489 if (ret)
490 goto err_unref; 490 goto err_unref;
491 491
492 pc->gtt_offset = obj->gtt_offset; 492 pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
493 pc->cpu_page = kmap(sg_page(obj->pages->sgl)); 493 pc->cpu_page = kmap(sg_page(obj->pages->sgl));
494 if (pc->cpu_page == NULL) { 494 if (pc->cpu_page == NULL) {
495 ret = -ENOMEM; 495 ret = -ENOMEM;
@@ -1129,7 +1129,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
1129 intel_ring_advance(ring); 1129 intel_ring_advance(ring);
1130 } else { 1130 } else {
1131 struct drm_i915_gem_object *obj = ring->private; 1131 struct drm_i915_gem_object *obj = ring->private;
1132 u32 cs_offset = obj->gtt_offset; 1132 u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
1133 1133
1134 if (len > I830_BATCH_LIMIT) 1134 if (len > I830_BATCH_LIMIT)
1135 return -ENOSPC; 1135 return -ENOSPC;
@@ -1214,7 +1214,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
1214 goto err_unref; 1214 goto err_unref;
1215 } 1215 }
1216 1216
1217 ring->status_page.gfx_addr = obj->gtt_offset; 1217 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1218 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); 1218 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
1219 if (ring->status_page.page_addr == NULL) { 1219 if (ring->status_page.page_addr == NULL) {
1220 ret = -ENOMEM; 1220 ret = -ENOMEM;
@@ -1308,7 +1308,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1308 goto err_unpin; 1308 goto err_unpin;
1309 1309
1310 ring->virtual_start = 1310 ring->virtual_start =
1311 ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, 1311 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
1312 ring->size); 1312 ring->size);
1313 if (ring->virtual_start == NULL) { 1313 if (ring->virtual_start == NULL) {
1314 DRM_ERROR("Failed to map ringbuffer.\n"); 1314 DRM_ERROR("Failed to map ringbuffer.\n");