author     Imre Deak <imre.deak@intel.com>          2013-02-18 12:28:02 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>   2013-03-23 07:16:36 -0400
commit     67d5a50c0480d5d41e0423e6fa55984f9fd3381e (patch)
tree       39fb959984569314651ea53a7e386e758225adeb
parent     5bd4687e57bbacec20930f580d025aee9fa1f4d8 (diff)
drm/i915: handle walking compact dma scatter lists
So far the assumption was that each dma scatter list entry contains only
a single page. This might not hold in the future, when we'll introduce
compact scatter lists, so prepare for this everywhere in the i915 code
where we walk such a list.

We'll fix the place _creating_ these lists separately in the next patch
to help the reviewing/bisectability.

Reference: http://www.spinics.net/lists/dri-devel/msg33917.html
Signed-off-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
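
For illustration only (not part of the patch): a minimal sketch of the two
walking styles, written against the <linux/scatterlist.h> API as of this
patch, where struct sg_page_iter exposes the current page directly as
sg_iter.page (later kernels use the sg_page_iter_page() accessor instead).
The walk_entries/walk_pages helper names are made up for the example.

	#include <linux/scatterlist.h>

	/* Old style: iterates sg *entries* and assumes one page per entry.
	 * sg_page() returns only the first page of an entry, so any further
	 * pages of a compact (multi-page) entry are silently skipped. */
	static void walk_entries(struct sg_table *st)
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(st->sgl, sg, st->nents, i) {
			struct page *page = sg_page(sg); /* first page only */
			/* ... use page ... */
		}
	}

	/* New style: iterates *pages*, yielding every page of every entry,
	 * so it stays correct however the entries are coalesced.  The last
	 * argument is the page offset at which to start the walk. */
	static void walk_pages(struct sg_table *st)
	{
		struct sg_page_iter sg_iter;

		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
			struct page *page = sg_iter.page;
			/* ... use page ... */
		}
	}

The start-offset argument is what lets the shmem_pread/pwrite hunks below
drop their open-coded "if (i < offset >> PAGE_SHIFT) continue;" skip logic
and pass offset >> PAGE_SHIFT to the iterator directly.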
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c  | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  | 18
4 files changed, 31 insertions(+), 41 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 71f285c56f1e..1246a31c0243 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1528,17 +1528,12 @@ void i915_gem_lastclose(struct drm_device *dev);
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
-	struct scatterlist *sg = obj->pages->sgl;
-	int nents = obj->pages->nents;
-	while (nents > SG_MAX_SINGLE_ALLOC) {
-		if (n < SG_MAX_SINGLE_ALLOC - 1)
-			break;
-
-		sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
-		n -= SG_MAX_SINGLE_ALLOC - 1;
-		nents -= SG_MAX_SINGLE_ALLOC - 1;
-	}
-	return sg_page(sg+n);
+	struct sg_page_iter sg_iter;
+
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
+		return sg_iter.page;
+
+	return NULL;
 }
 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 30117dc52212..5cf6140354db 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -411,8 +411,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 	int prefaulted = 0;
 	int needs_clflush = 0;
-	struct scatterlist *sg;
-	int i;
+	struct sg_page_iter sg_iter;
 
 	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;
@@ -441,11 +440,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
 	offset = args->offset;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page;
-
-		if (i < offset >> PAGE_SHIFT)
-			continue;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+			 offset >> PAGE_SHIFT) {
+		struct page *page = sg_iter.page;
 
 		if (remain <= 0)
 			break;
@@ -460,7 +457,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - shmem_page_offset;
 
-		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
@@ -732,8 +728,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	int hit_slowpath = 0;
 	int needs_clflush_after = 0;
 	int needs_clflush_before = 0;
-	int i;
-	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
 
 	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;
@@ -768,13 +763,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	offset = args->offset;
 	obj->dirty = 1;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+			 offset >> PAGE_SHIFT) {
+		struct page *page = sg_iter.page;
 		int partial_cacheline_write;
 
-		if (i < offset >> PAGE_SHIFT)
-			continue;
-
 		if (remain <= 0)
 			break;
 
@@ -796,7 +789,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 			((shmem_page_offset | page_length)
 				& (boot_cpu_data.x86_clflush_size - 1));
 
-		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 6a5af6828624..898615d2d5e2 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -62,7 +62,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	src = obj->pages->sgl;
 	dst = st->sgl;
 	for (i = 0; i < obj->pages->nents; i++) {
-		sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+		sg_set_page(dst, sg_page(src), src->length, 0);
 		dst = sg_next(dst);
 		src = sg_next(src);
 	}
@@ -105,7 +105,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
 	struct drm_i915_gem_object *obj = dma_buf->priv;
 	struct drm_device *dev = obj->base.dev;
-	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
 	struct page **pages;
 	int ret, i;
 
@@ -124,14 +124,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 
 	ret = -ENOMEM;
 
-	pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
 	if (pages == NULL)
 		goto error;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
-		pages[i] = sg_page(sg);
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+		pages[i++] = sg_iter.page;
 
-	obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
 	drm_free_large(pages);
 
 	if (!obj->dma_buf_vmapping)
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index abcba2f5a788..f799708bcb85 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -473,28 +473,29 @@ i915_gem_swizzle_page(struct page *page)
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct scatterlist *sg;
-	int page_count = obj->base.size >> PAGE_SHIFT;
+	struct sg_page_iter sg_iter;
 	int i;
 
 	if (obj->bit_17 == NULL)
 		return;
 
-	for_each_sg(obj->pages->sgl, sg, page_count, i) {
-		struct page *page = sg_page(sg);
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_iter.page;
 		char new_bit_17 = page_to_phys(page) >> 17;
 		if ((new_bit_17 & 0x1) !=
 		    (test_bit(i, obj->bit_17) != 0)) {
 			i915_gem_swizzle_page(page);
 			set_page_dirty(page);
 		}
+		i++;
 	}
 }
 
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
@@ -508,11 +509,12 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 		}
 	}
 
-	for_each_sg(obj->pages->sgl, sg, page_count, i) {
-		struct page *page = sg_page(sg);
-		if (page_to_phys(page) & (1 << 17))
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		if (page_to_phys(sg_iter.page) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
 			__clear_bit(i, obj->bit_17);
+		i++;
 	}
 }