path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
author     Chris Wilson <chris@chris-wilson.co.uk>    2012-06-01 10:20:22 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2012-09-20 08:22:57 -0400
commit     9da3da660d8c19a54f6e93361d147509be3fff84 (patch)
tree       3479ef13a733975a02dd679e3fc9ae0872e3be25 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent     f60d7f0c1d55a935475ab394955cafddefaa6533 (diff)
drm/i915: Replace the array of pages with a scatterlist
Rather than have multiple data structures for describing our page layout in conjunction with the array of pages, we can migrate all users over to a scatterlist.

One major advantage this offers, other than unifying the page tracking structures, is that we replace the vmalloc'ed array (which can be up to a megabyte in size) with a chain of individual pages, which helps reduce memory pressure.

The disadvantage is that we no longer have a simple array to iterate over or to access randomly. The common case for this is the relocation processing, which will typically fit within a single scatterlist page and so costs almost the same as the simple array. For iterating over the pages, the extra function call could be optimised away, but in reality it is an insignificant cost compared to either binding the pages or performing the pwrite/pread.

v2: Fix drm_clflush_sg() to not invoke wbinvd as well! And fix the trivial compile error from rebasing.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
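In intel_ringbuffer.c the conversion is mechanical: every kmap(obj->pages[0]) becomes kmap(sg_page(obj->pages->sgl)), i.e. the first backing page is now reached through the head entry of the object's sg_table. As a rough illustration of the random-access cost mentioned above, here is a minimal sketch (not code from this patch) of looking up the n-th backing page by walking the chain with for_each_sg(); the helper name and the assumption of one page per scatterlist entry are illustrative, not taken from the patch.

#include <linux/scatterlist.h>

/*
 * Hypothetical helper, for illustration only: find the n-th page tracked
 * by an sg_table, assuming each scatterlist entry holds exactly one page.
 * Unlike obj->pages[n], this is a linear walk over the chained entries.
 */
static struct page *sketch_nth_page(struct sg_table *st, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(st->sgl, sg, st->nents, i) {
		if (i == n)
			return sg_page(sg);
	}

	return NULL;
}

In the single-page uses touched below (the pipe control page and the hardware status page), the walk degenerates to sg_page(obj->pages->sgl), which is exactly what the hunks substitute for obj->pages[0].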
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  9
1 file changed, 5 insertions, 4 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 55cdb4d30a16..984a0c5fbf5d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -464,7 +464,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
 		goto err_unref;
 
 	pc->gtt_offset = obj->gtt_offset;
-	pc->cpu_page = kmap(obj->pages[0]);
+	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
 	if (pc->cpu_page == NULL)
 		goto err_unpin;
 
@@ -491,7 +491,8 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
 		return;
 
 	obj = pc->obj;
-	kunmap(obj->pages[0]);
+
+	kunmap(sg_page(obj->pages->sgl));
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 
@@ -1026,7 +1027,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
 	if (obj == NULL)
 		return;
 
-	kunmap(obj->pages[0]);
+	kunmap(sg_page(obj->pages->sgl));
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 	ring->status_page.obj = NULL;
@@ -1053,7 +1054,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
 	}
 
 	ring->status_page.gfx_addr = obj->gtt_offset;
-	ring->status_page.page_addr = kmap(obj->pages[0]);
+	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
 	if (ring->status_page.page_addr == NULL) {
 		ret = -ENOMEM;
 		goto err_unpin;