 drivers/gpu/drm/i915/i915_gem.c | 31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5cf6140354db..8a2cbee491a2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1625,9 +1625,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
-	int page_count = obj->base.size / PAGE_SIZE;
-	struct scatterlist *sg;
-	int ret, i;
+	struct sg_page_iter sg_iter;
+	int ret;
 
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
@@ -1647,8 +1646,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;
 
-	for_each_sg(obj->pages->sgl, sg, page_count, i) {
-		struct page *page = sg_page(sg);
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_iter.page;
 
 		if (obj->dirty)
 			set_page_dirty(page);
@@ -1749,7 +1748,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct address_space *mapping;
 	struct sg_table *st;
 	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
 	struct page *page;
+	unsigned long last_pfn = 0;	/* suppress gcc warning */
 	gfp_t gfp;
 
 	/* Assert that the object is not currently in any GPU domain. As it
@@ -1779,7 +1780,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	gfp = mapping_gfp_mask(mapping);
 	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
 	gfp &= ~(__GFP_IO | __GFP_WAIT);
-	for_each_sg(st->sgl, sg, page_count, i) {
+	sg = st->sgl;
+	st->nents = 0;
+	for (i = 0; i < page_count; i++) {
 		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 		if (IS_ERR(page)) {
 			i915_gem_purge(dev_priv, page_count);
@@ -1802,9 +1805,18 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			gfp &= ~(__GFP_IO | __GFP_WAIT);
 		}
 
-		sg_set_page(sg, page, PAGE_SIZE, 0);
+		if (!i || page_to_pfn(page) != last_pfn + 1) {
+			if (i)
+				sg = sg_next(sg);
+			st->nents++;
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+		} else {
+			sg->length += PAGE_SIZE;
+		}
+		last_pfn = page_to_pfn(page);
 	}
 
+	sg_mark_end(sg);
 	obj->pages = st;
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -1813,8 +1825,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	return 0;
 
 err_pages:
-	for_each_sg(st->sgl, sg, i, page_count)
-		page_cache_release(sg_page(sg));
+	sg_mark_end(sg);
+	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+		page_cache_release(sg_iter.page);
 	sg_free_table(st);
 	kfree(st);
 	return PTR_ERR(page);
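
The core of the change is the coalescing loop in i915_gem_object_get_pages_gtt(): instead of emitting one scatterlist entry per page, the new code extends the current entry whenever the next page's pfn is exactly last_pfn + 1, and only advances to a fresh entry otherwise. The standalone userspace sketch below models that run-length merge with a plain array of pfns and a hypothetical "struct seg" standing in for a scatterlist entry; it is only an illustration of the idea under those assumptions, not kernel code.

    #include <stdio.h>

    /* Hypothetical stand-in for a scatterlist entry: a run of
     * physically contiguous pages starting at start_pfn. */
    struct seg {
    	unsigned long start_pfn;
    	unsigned long npages;
    };

    /* Coalesce an array of page frame numbers into contiguous runs,
     * mirroring the "pfn == last_pfn + 1 ? extend : start new entry"
     * test the patch adds. Returns the number of runs written to out. */
    static size_t coalesce(const unsigned long *pfns, size_t count,
    		       struct seg *out)
    {
    	size_t nsegs = 0;
    	unsigned long last_pfn = 0;	/* suppress gcc warning */

    	for (size_t i = 0; i < count; i++) {
    		if (i == 0 || pfns[i] != last_pfn + 1) {
    			out[nsegs].start_pfn = pfns[i];	/* start a new run */
    			out[nsegs].npages = 1;
    			nsegs++;
    		} else {
    			out[nsegs - 1].npages++;	/* extend current run */
    		}
    		last_pfn = pfns[i];
    	}
    	return nsegs;
    }

    int main(void)
    {
    	unsigned long pfns[] = { 100, 101, 102, 200, 300, 301 };
    	struct seg segs[6];
    	size_t n = coalesce(pfns, 6, segs);

    	/* Expect 3 runs: 100 x 3 pages, 200 x 1 page, 300 x 2 pages. */
    	for (size_t i = 0; i < n; i++)
    		printf("seg %zu: pfn %lu, %lu page(s)\n",
    		       i, segs[i].start_pfn, segs[i].npages);
    	return 0;
    }

The expected payoff is a shorter sg table whenever shmem hands back physically contiguous pages, which is presumably also why the other walks in the patch switch from a fixed page count to obj->pages->nents / st->nents and to for_each_sg_page(), which iterates pages within each (possibly multi-page) entry.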