author	Dave Gordon <david.s.gordon@intel.com>	2016-05-20 06:54:06 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2016-05-20 08:43:00 -0400
commit	85d1225ec066b2ef46fbd0ed1bae78ae1f3e6c91 (patch)
tree	249529f6b6dd1ed857d890771caca91bfd8aa9c9
parent	b338fa473e16c9be208b0aec7ec4e710a8a5f9ee (diff)
drm/i915: Introduce & use new lightweight SGL iterators
The existing for_each_sg_page() iterator is somewhat heavyweight, and is
limiting i915 driver performance in a few benchmarks. So here we
introduce somewhat lighter weight iterators, primarily for use with GEM
objects or other cases where we need only deal with whole aligned pages.

Unlike the old iterator, the new iterators use an internal state
structure which is not intended to be accessed by the caller; instead
each takes as a parameter an output variable which is set before each
iteration. This makes them particularly simple to use :)

One of the new iterators provides the caller with the DMA address of
each page in turn; the other provides the 'struct page' pointer required
by many memory management operations.

Various uses of for_each_sg_page() are then converted to the new macros.

v2: Force inlining of the sg_iter constructor and make the union
anonymous.

Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1463741647-15666-4-git-send-email-chris@chris-wilson.co.uk
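As a usage sketch (illustrative only, not part of the patch; the wrapper
function, its name and the pr_debug() call are assumptions), a converted
caller looks like:

/*
 * Hypothetical caller, sketched to show the intended usage of the two
 * new iterators on a populated, DMA-mapped sg_table.
 */
static void sgt_walk_example(struct sg_table *sgt)
{
	struct sgt_iter sgt_iter;	/* internal state; never inspected by the caller */
	struct page *page;
	dma_addr_t addr;

	/* Yields one 'struct page *' per whole aligned page. */
	for_each_sgt_page(page, sgt_iter, sgt)
		set_page_dirty(page);

	/* Yields the DMA address of each page, e.g. for encoding PTEs. */
	for_each_sgt_dma(addr, sgt_iter, sgt)
		pr_debug("page at DMA address %pad\n", &addr);
}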
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h		58
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c		20
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_fence.c	14
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c	76
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_userptr.c	7
5 files changed, 112 insertions(+), 63 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6894d8e0a4d2..63ff5fa2b2bd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2251,9 +2251,56 @@ struct drm_i915_gem_object {
 };
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
-		       struct drm_i915_gem_object *new,
-		       unsigned frontbuffer_bits);
+/*
+ * Optimised SGL iterator for GEM objects
+ */
+static __always_inline struct sgt_iter {
+	struct scatterlist *sgp;
+	union {
+		unsigned long pfn;
+		dma_addr_t dma;
+	};
+	unsigned int curr;
+	unsigned int max;
+} __sgt_iter(struct scatterlist *sgl, bool dma) {
+	struct sgt_iter s = { .sgp = sgl };
+
+	if (s.sgp) {
+		s.max = s.curr = s.sgp->offset;
+		s.max += s.sgp->length;
+		if (dma)
+			s.dma = sg_dma_address(s.sgp);
+		else
+			s.pfn = page_to_pfn(sg_page(s.sgp));
+	}
+
+	return s;
+}
+
+/**
+ * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
+ * @__dmap:	DMA address (output)
+ * @__iter:	'struct sgt_iter' (iterator state, internal)
+ * @__sgt:	sg_table to iterate over (input)
+ */
+#define for_each_sgt_dma(__dmap, __iter, __sgt)				\
+	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
+	     ((__dmap) = (__iter).dma + (__iter).curr);			\
+	     (((__iter).curr += PAGE_SIZE) < (__iter).max) ||		\
+	     ((__iter) = __sgt_iter(sg_next((__iter).sgp), true), 0))
+
+/**
+ * for_each_sgt_page - iterate over the pages of the given sg_table
+ * @__pp:	page pointer (output)
+ * @__iter:	'struct sgt_iter' (iterator state, internal)
+ * @__sgt:	sg_table to iterate over (input)
+ */
+#define for_each_sgt_page(__pp, __iter, __sgt)				\
+	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
+	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
+	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
+	     (((__iter).curr += PAGE_SIZE) < (__iter).max) ||		\
+	     ((__iter) = __sgt_iter(sg_next((__iter).sgp), false), 0))
 
 /**
  * Request queue structure.
@@ -3108,6 +3155,11 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_mode_create_dumb *args);
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
 		      uint32_t handle, uint64_t *offset);
+
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+		       struct drm_i915_gem_object *new,
+		       unsigned frontbuffer_bits);
+
 /**
  * Returns true if seq1 is later than seq2.
  */
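A note on loop termination, with a sketch below: both macros end when the
output expression evaluates to zero. Once a chunk is exhausted, the step
expression rebuilds the iterator from sg_next(); when sg_next() returns
NULL, __sgt_iter() returns an all-zero state, so the next dma/pfn computes
to 0 (NULL for the page variant) and the for-loop condition fails. This
presumes that a genuine DMA address or PFN of zero never occurs. A rough
expansion of for_each_sgt_dma(), for illustration only:

/* Illustrative expansion of for_each_sgt_dma(); not part of the patch. */
struct sgt_iter it = __sgt_iter(sgt->sgl, true);
dma_addr_t dma;

while ((dma = it.dma + it.curr)) {			/* 0 ends the loop */
	/* ... body runs once per PAGE_SIZE step within the chunk ... */
	if ((it.curr += PAGE_SIZE) >= it.max)		/* chunk exhausted */
		it = __sgt_iter(sg_next(it.sgp), true);	/* NULL => zeroes */
}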
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fd3be2b385cb..3251decf10f5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2165,7 +2165,8 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
+	struct page *page;
 	int ret;
 
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -2187,9 +2188,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;
 
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-
+	for_each_sgt_page(page, sgt_iter, obj->pages) {
 		if (obj->dirty)
 			set_page_dirty(page);
 
@@ -2246,7 +2245,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct address_space *mapping;
 	struct sg_table *st;
 	struct scatterlist *sg;
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
 	struct page *page;
 	unsigned long last_pfn = 0;	/* suppress gcc warning */
 	int ret;
@@ -2343,8 +2342,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 
 err_pages:
 	sg_mark_end(sg);
-	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-		put_page(sg_page_iter_page(&sg_iter));
+	for_each_sgt_page(page, sgt_iter, st)
+		put_page(page);
 	sg_free_table(st);
 	kfree(st);
 
@@ -2403,7 +2402,8 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
 {
 	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
 	struct sg_table *sgt = obj->pages;
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
+	struct page *page;
 	struct page *stack_pages[32];
 	struct page **pages = stack_pages;
 	unsigned long i = 0;
@@ -2420,8 +2420,8 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
 		return NULL;
 	}
 
-	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0)
-		pages[i++] = sg_page_iter_page(&sg_iter);
+	for_each_sgt_page(page, sgt_iter, sgt)
+		pages[i++] = page;
 
 	/* Check that we have the expected number of pages */
 	GEM_BUG_ON(i != n_pages);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index a2b938ec01a7..2b6bdc267fb5 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page)
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
+	struct page *page;
 	int i;
 
 	if (obj->bit_17 == NULL)
 		return;
 
 	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
+	for_each_sgt_page(page, sgt_iter, obj->pages) {
 		char new_bit_17 = page_to_phys(page) >> 17;
 		if ((new_bit_17 & 0x1) !=
 		    (test_bit(i, obj->bit_17) != 0)) {
@@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
+	struct page *page;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
@@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 	}
 
 	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+
+	for_each_sgt_page(page, sgt_iter, obj->pages) {
+		if (page_to_phys(page) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
 			__clear_bit(i, obj->bit_17);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7eab619a3eb2..46684779d4d6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1839,20 +1839,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 				      enum i915_cache_level cache_level, u32 flags)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-	gen6_pte_t *pt_vaddr;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned act_pt = first_entry / GEN6_PTES;
 	unsigned act_pte = first_entry % GEN6_PTES;
-	struct sg_page_iter sg_iter;
+	gen6_pte_t *pt_vaddr = NULL;
+	struct sgt_iter sgt_iter;
+	dma_addr_t addr;
 
-	pt_vaddr = NULL;
-	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+	for_each_sgt_dma(addr, sgt_iter, pages) {
 		if (pt_vaddr == NULL)
 			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
 
 		pt_vaddr[act_pte] =
-			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
-				       cache_level, true, flags);
+			vm->pte_encode(addr, cache_level, true, flags);
 
 		if (++act_pte == GEN6_PTES) {
 			kunmap_px(ppgtt, pt_vaddr);
@@ -1861,6 +1860,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 			act_pte = 0;
 		}
 	}
+
 	if (pt_vaddr)
 		kunmap_px(ppgtt, pt_vaddr);
 }
@@ -2362,22 +2362,20 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 {
 	struct drm_i915_private *dev_priv = to_i915(vm->dev);
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	unsigned first_entry = start >> PAGE_SHIFT;
-	gen8_pte_t __iomem *gtt_entries =
-		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
-	int i = 0;
-	struct sg_page_iter sg_iter;
-	dma_addr_t addr = 0; /* shut up gcc */
+	struct sgt_iter sgt_iter;
+	gen8_pte_t __iomem *gtt_entries;
+	gen8_pte_t gtt_entry;
+	dma_addr_t addr;
 	int rpm_atomic_seq;
+	int i = 0;
 
 	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
-	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
-		addr = sg_dma_address(sg_iter.sg) +
-			(sg_iter.sg_pgoffset << PAGE_SHIFT);
-		gen8_set_pte(&gtt_entries[i],
-			     gen8_pte_encode(addr, level, true));
-		i++;
+	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
+
+	for_each_sgt_dma(addr, sgt_iter, st) {
+		gtt_entry = gen8_pte_encode(addr, level, true);
+		gen8_set_pte(&gtt_entries[i++], gtt_entry);
 	}
 
 	/*
@@ -2388,8 +2386,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	 * hardware should work, we must keep this posting read for paranoia.
 	 */
 	if (i != 0)
-		WARN_ON(readq(&gtt_entries[i-1])
-			!= gen8_pte_encode(addr, level, true));
+		WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);
 
 	/* This next bit makes the above posting read even more important. We
 	 * want to flush the TLBs only after we're certain all the PTE updates
@@ -2440,20 +2437,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 {
 	struct drm_i915_private *dev_priv = to_i915(vm->dev);
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	unsigned first_entry = start >> PAGE_SHIFT;
-	gen6_pte_t __iomem *gtt_entries =
-		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
-	int i = 0;
-	struct sg_page_iter sg_iter;
-	dma_addr_t addr = 0;
+	struct sgt_iter sgt_iter;
+	gen6_pte_t __iomem *gtt_entries;
+	gen6_pte_t gtt_entry;
+	dma_addr_t addr;
 	int rpm_atomic_seq;
+	int i = 0;
 
 	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
-	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
-		addr = sg_page_iter_dma_address(&sg_iter);
-		iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
-		i++;
+	gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
+
+	for_each_sgt_dma(addr, sgt_iter, st) {
+		gtt_entry = vm->pte_encode(addr, level, true, flags);
+		iowrite32(gtt_entry, &gtt_entries[i++]);
 	}
 
 	/* XXX: This serves as a posting read to make sure that the PTE has
@@ -2462,10 +2459,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	 * of NUMA access patterns. Therefore, even with the way we assume
 	 * hardware should work, we must keep this posting read for paranoia.
 	 */
-	if (i != 0) {
-		unsigned long gtt = readl(&gtt_entries[i-1]);
-		WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
-	}
+	if (i != 0)
+		WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);
 
 	/* This next bit makes the above posting read even more important. We
 	 * want to flush the TLBs only after we're certain all the PTE updates
@@ -3399,9 +3394,11 @@ static struct sg_table *
 intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
 			  struct drm_i915_gem_object *obj)
 {
+	const size_t n_pages = obj->base.size / PAGE_SIZE;
 	unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
 	unsigned int size_pages_uv;
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
+	dma_addr_t dma_addr;
 	unsigned long i;
 	dma_addr_t *page_addr_list;
 	struct sg_table *st;
@@ -3410,7 +3407,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
 	int ret = -ENOMEM;
 
 	/* Allocate a temporary list of source pages for random access. */
-	page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE,
+	page_addr_list = drm_malloc_gfp(n_pages,
 					sizeof(dma_addr_t),
 					GFP_TEMPORARY);
 	if (!page_addr_list)
@@ -3433,11 +3430,10 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
 
 	/* Populate source page list from the object. */
 	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
-		i++;
-	}
+	for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
+		page_addr_list[i++] = dma_addr;
 
+	GEM_BUG_ON(i != n_pages);
 	st->nents = 0;
 	sg = st->sgl;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index a84625b71226..2314c88323e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 static void
 i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 {
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
+	struct page *page;
 
 	BUG_ON(obj->userptr.work != NULL);
 	__i915_gem_userptr_set_active(obj, false);
@@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 
 	i915_gem_gtt_finish_object(obj);
 
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-
+	for_each_sgt_page(page, sgt_iter, obj->pages) {
 		if (obj->dirty)
 			set_page_dirty(page);
 