Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/drm_cache.c             2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c     6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  4
6 files changed, 11 insertions(+), 13 deletions(-)
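
Every hunk below applies the same substitution: instead of reaching into the iterator's internals (sg_iter.page, or sg_dma_address(sg_iter.sg) plus sg_iter.sg_pgoffset << PAGE_SHIFT), callers go through the sg_page_iter_page() and sg_page_iter_dma_address() accessors. A minimal sketch of the resulting loop shape follows; the wrapper function name is illustrative only and not part of the patch.

#include <linux/scatterlist.h>

/* Illustrative only: walk each page of a scatter-gather table using the
 * accessor helpers rather than dereferencing struct sg_page_iter fields. */
static void walk_sg_pages(struct sg_table *st)
{
	struct sg_page_iter sg_iter;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		/* was: sg_iter.page */
		struct page *page = sg_page_iter_page(&sg_iter);
		/* was: sg_dma_address(sg_iter.sg) +
		 *      (sg_iter.sg_pgoffset << PAGE_SHIFT) */
		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);

		(void)page;
		(void)addr;
	}
}
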
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index bc8edbeca3fd..bb8f58012189 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -109,7 +109,7 @@ drm_clflush_sg(struct sg_table *st)
 
 	mb();
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-		drm_clflush_page(sg_iter.page);
+		drm_clflush_page(sg_page_iter_page(&sg_iter));
 	mb();
 
 	return;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1d091ea12fad..f69538508d8c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1543,7 +1543,7 @@ static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *
 	struct sg_page_iter sg_iter;
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
-		return sg_iter.page;
+		return sg_page_iter_page(&sg_iter);
 
 	return NULL;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a1123a32dc27..911bd40ef513 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -442,7 +442,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
 			 offset >> PAGE_SHIFT) {
-		struct page *page = sg_iter.page;
+		struct page *page = sg_page_iter_page(&sg_iter);
 
 		if (remain <= 0)
 			break;
@@ -765,7 +765,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
 			 offset >> PAGE_SHIFT) {
-		struct page *page = sg_iter.page;
+		struct page *page = sg_page_iter_page(&sg_iter);
 		int partial_cacheline_write;
 
 		if (remain <= 0)
@@ -1647,7 +1647,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	obj->dirty = 0;
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		struct page *page = sg_iter.page;
+		struct page *page = sg_page_iter_page(&sg_iter);
 
 		if (obj->dirty)
 			set_page_dirty(page);
@@ -1827,7 +1827,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 err_pages:
 	sg_mark_end(sg);
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-		page_cache_release(sg_iter.page);
+		page_cache_release(sg_page_iter_page(&sg_iter));
 	sg_free_table(st);
 	kfree(st);
 	return PTR_ERR(page);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 898615d2d5e2..c6dfc1466e3a 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -130,7 +130,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 
 	i = 0;
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0);
-		pages[i++] = sg_iter.page;
+		pages[i++] = sg_page_iter_page(&sg_iter);
 
 	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
 	drm_free_large(pages);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 4cbae7bbb833..24a23b31b55f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -123,8 +123,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 		dma_addr_t page_addr;
 
-		page_addr = sg_dma_address(sg_iter.sg) +
-			(sg_iter.sg_pgoffset << PAGE_SHIFT);
+		page_addr = sg_page_iter_dma_address(&sg_iter);
 		pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
 						    cache_level);
 		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
@@ -424,8 +423,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
 	dma_addr_t addr;
 
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
-		addr = sg_dma_address(sg_iter.sg) +
-			(sg_iter.sg_pgoffset << PAGE_SHIFT);
+		addr = sg_page_iter_dma_address(&sg_iter);
 		iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
 		i++;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index f799708bcb85..c807eb93755b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -481,7 +481,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 
 	i = 0;
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		struct page *page = sg_iter.page;
+		struct page *page = sg_page_iter_page(&sg_iter);
 		char new_bit_17 = page_to_phys(page) >> 17;
 		if ((new_bit_17 & 0x1) !=
 		    (test_bit(i, obj->bit_17) != 0)) {
@@ -511,7 +511,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 
 	i = 0;
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		if (page_to_phys(sg_iter.page) & (1 << 17))
+		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
 			__clear_bit(i, obj->bit_17);