author    Chris Wilson <chris@chris-wilson.co.uk>    2012-06-07 10:38:42 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>     2012-09-20 08:22:55 -0400
commit 37e680a15ff6375ff02773161f817e90a38c51f7 (patch)
tree   d1b6fae7751c04ce051016019b4d4ebd0ee0d48c /drivers/gpu/drm/i915/i915_gem.c
parent 8c0bd3c02d52eff11396e81b4d217ee668e03528 (diff)
drm/i915: Introduce drm_i915_gem_object_ops
In order to specialise functions depending upon the type of object, we
can attach vfuncs to each object via a new ->ops pointer.

For instance, this will be used in future patches to only bind pages from
a dma-buf for the duration that the object is used by the GPU - and so
prevent them from pinning those pages for the entirety of the object's
lifetime.

v2: Bonus comments.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
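The new vtable itself is only a pair of page-management hooks. A minimal sketch of the structure this patch relies on (the definition lives in i915_drv.h rather than in this file, so the layout shown here is inferred from the ops->get_pages()/ops->put_pages() calls and the static ops table in the diff below):

    /* Sketch, not the literal header change: field names and signatures are
     * taken from how the hooks are used in i915_gem.c below.
     */
    struct drm_i915_gem_object_ops {
            /* Gather and pin the backing pages into the object. */
            int (*get_pages)(struct drm_i915_gem_object *obj);
            /* Release the backing pages (shrinker pressure or final free). */
            void (*put_pages)(struct drm_i915_gem_object *obj);
    };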
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--   drivers/gpu/drm/i915/i915_gem.c   78
1 file changed, 55 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 87a64e5f28fb..4b2ee7ce8b15 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1650,18 +1650,12 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
         return obj->madv == I915_MADV_DONTNEED;
 }
 
-static int
+static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
         int page_count = obj->base.size / PAGE_SIZE;
         int ret, i;
 
-        BUG_ON(obj->gtt_space);
-
-        if (obj->pages == NULL)
-                return 0;
-
-        BUG_ON(obj->gtt_space);
         BUG_ON(obj->madv == __I915_MADV_PURGED);
 
         ret = i915_gem_object_set_to_cpu_domain(obj, true);
@@ -1693,9 +1687,21 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 
         drm_free_large(obj->pages);
         obj->pages = NULL;
+}
 
-        list_del(&obj->gtt_list);
+static int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+        const struct drm_i915_gem_object_ops *ops = obj->ops;
+
+        if (obj->sg_table || obj->pages == NULL)
+                return 0;
+
+        BUG_ON(obj->gtt_space);
 
+        ops->put_pages(obj);
+
+        list_del(&obj->gtt_list);
         if (i915_gem_object_is_purgeable(obj))
                 i915_gem_object_truncate(obj);
 
@@ -1712,7 +1718,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
                                  &dev_priv->mm.unbound_list,
                                  gtt_list) {
                 if (i915_gem_object_is_purgeable(obj) &&
-                    i915_gem_object_put_pages_gtt(obj) == 0) {
+                    i915_gem_object_put_pages(obj) == 0) {
                         count += obj->base.size >> PAGE_SHIFT;
                         if (count >= target)
                                 return count;
@@ -1724,7 +1730,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
                                  mm_list) {
                 if (i915_gem_object_is_purgeable(obj) &&
                     i915_gem_object_unbind(obj) == 0 &&
-                    i915_gem_object_put_pages_gtt(obj) == 0) {
+                    i915_gem_object_put_pages(obj) == 0) {
                         count += obj->base.size >> PAGE_SHIFT;
                         if (count >= target)
                                 return count;
@@ -1742,10 +1748,10 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
         i915_gem_evict_everything(dev_priv->dev);
 
         list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
-                i915_gem_object_put_pages_gtt(obj);
+                i915_gem_object_put_pages(obj);
 }
 
-int
+static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -1754,9 +1760,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         struct page *page;
         gfp_t gfp;
 
-        if (obj->pages || obj->sg_table)
-                return 0;
-
         /* Assert that the object is not currently in any GPU domain. As it
          * wasn't in the GTT, there shouldn't be any way it could have been in
          * a GPU cache
@@ -1806,7 +1809,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         if (i915_gem_object_needs_bit17_swizzle(obj))
                 i915_gem_object_do_bit_17_swizzle(obj);
 
-        list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
         return 0;
 
 err_pages:
@@ -1818,6 +1820,31 @@ err_pages:
         return PTR_ERR(page);
 }
 
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_get_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+        const struct drm_i915_gem_object_ops *ops = obj->ops;
+        int ret;
+
+        if (obj->sg_table || obj->pages)
+                return 0;
+
+        ret = ops->get_pages(obj);
+        if (ret)
+                return ret;
+
+        list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+        return 0;
+}
+
 void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                                struct intel_ring_buffer *ring,
@@ -2071,7 +2098,6 @@ void i915_gem_reset(struct drm_device *dev)
                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
         }
 
-
         /* The fence registers are invalidated so clear them out */
         i915_gem_reset_fences(dev);
 }
@@ -2871,7 +2897,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                 return -E2BIG;
         }
 
-        ret = i915_gem_object_get_pages_gtt(obj);
+        ret = i915_gem_object_get_pages(obj);
         if (ret)
                 return ret;
 
@@ -3610,15 +3636,16 @@ unlock:
         return ret;
 }
 
-void i915_gem_object_init(struct drm_i915_gem_object *obj)
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+                          const struct drm_i915_gem_object_ops *ops)
 {
-        obj->base.driver_private = NULL;
-
         INIT_LIST_HEAD(&obj->mm_list);
         INIT_LIST_HEAD(&obj->gtt_list);
         INIT_LIST_HEAD(&obj->ring_list);
         INIT_LIST_HEAD(&obj->exec_list);
 
+        obj->ops = ops;
+
         obj->fence_reg = I915_FENCE_REG_NONE;
         obj->madv = I915_MADV_WILLNEED;
         /* Avoid an unnecessary call to unbind on the first bind. */
@@ -3627,6 +3654,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj)
         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
 }
 
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+        .get_pages = i915_gem_object_get_pages_gtt,
+        .put_pages = i915_gem_object_put_pages_gtt,
+};
+
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                   size_t size)
 {
@@ -3653,7 +3685,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
         mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
         mapping_set_gfp_mask(mapping, mask);
 
-        i915_gem_object_init(obj);
+        i915_gem_object_init(obj, &i915_gem_object_ops);
 
         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -3711,7 +3743,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
                 dev_priv->mm.interruptible = was_interruptible;
         }
 
-        i915_gem_object_put_pages_gtt(obj);
+        i915_gem_object_put_pages(obj);
         i915_gem_object_free_mmap_offset(obj);
 
         drm_gem_object_release(&obj->base);
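Because every caller now goes through obj->ops, a different backing store only has to supply its own hook table. As a purely illustrative sketch (these function names are hypothetical; the dma-buf conversion mentioned in the commit message lands in a later patch), a dma-buf backed object could be wired up like this:

    /* Hypothetical follow-up, not part of this commit: ops for an object
     * whose pages come from an imported dma-buf, so the pages are only
     * held while the GPU actually needs them.
     */
    static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
    {
            /* map the exporter's scatterlist and publish the pages on obj */
            return 0;
    }

    static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
    {
            /* unmap the scatterlist and drop the pages from obj */
    }

    static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
            .get_pages = i915_gem_object_get_pages_dmabuf,
            .put_pages = i915_gem_object_put_pages_dmabuf,
    };

    /* The prime import path would then register the table via:
     *         i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
     */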