Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 36 +++++++++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 9 deletions(-)
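
Note: the diffstat above is limited to i915_gem.c, but the new i915_gem_release_mmap() helper is non-static, so the full commit presumably also adds a matching prototype to the driver header. A minimal sketch of that declaration follows; its exact placement in i915_drv.h is an assumption, since that file is outside this file-limited view.

/* Assumed companion declaration, likely in i915_drv.h (not shown by this diffstat). */
void i915_gem_release_mmap(struct drm_gem_object *obj);
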
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 876b65cb762..5bf420378b6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1252,6 +1252,31 @@ out_free_list:
 	return ret;
 }
 
+/**
+ * i915_gem_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmaping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ *
+ * It is vital that we remove the page mapping if we have mapped a tiled
+ * object through the GTT and then lose the fence register due to
+ * resource pressure. Similarly if the object has been moved out of the
+ * aperture, then pages mapped into userspace must be revoked. Removing the
+ * mapping will then trigger a page fault on the next user access, allowing
+ * fixup by i915_gem_fault().
+ */
+void
+i915_gem_release_mmap(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	if (dev->dev_mapping)
+		unmap_mapping_range(dev->dev_mapping,
+				    obj_priv->mmap_offset, obj->size, 1);
+}
+
 static void
 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
@@ -1861,7 +1886,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
-	loff_t offset;
 	int ret = 0;
 
 #if WATCH_BUF
@@ -1898,9 +1922,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	BUG_ON(obj_priv->active);
 
 	/* blow away mappings if mapped through GTT */
-	offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
-	if (dev->dev_mapping)
-		unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
+	i915_gem_release_mmap(obj);
 
 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
 		i915_gem_clear_fence_reg(obj);
@@ -2222,7 +2244,6 @@ try_again:
 	/* None available, try to steal one or wait for a user to finish */
 	if (i == dev_priv->num_fence_regs) {
 		uint32_t seqno = dev_priv->mm.next_gem_seqno;
-		loff_t offset;
 
 		if (avail == 0)
 			return -ENOSPC;
@@ -2274,10 +2295,7 @@ try_again:
 		 * Zap this virtual mapping so we can set up a fence again
 		 * for this object next time we need it.
 		 */
-		offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
-		if (dev->dev_mapping)
-			unmap_mapping_range(dev->dev_mapping, offset,
-					    reg->obj->size, 1);
+		i915_gem_release_mmap(reg->obj);
 		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	}
 
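
The kernel-doc above relies on the fault path to repopulate the mapping after i915_gem_release_mmap() has zapped it, but the patch does not touch i915_gem_fault() itself. The sketch below is therefore only an illustration of how a GTT fault handler of this era re-binds an evicted object and re-inserts the backing PFN; the name example_gtt_fault(), the simplified error handling, and the omission of fence setup are assumptions, not the driver's actual code. It assumes the i915 driver context (headers already included by i915_gem.c) and the helpers visible in that file, such as i915_gem_object_bind_to_gtt().

/*
 * Illustrative sketch only -- not part of this patch.  After
 * i915_gem_release_mmap() tears down the PTEs, the next user access
 * faults and a handler along these lines fixes the mapping up again.
 */
static int example_gtt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_device *dev = obj->dev;
	unsigned long address = (unsigned long)vmf->virtual_address;
	pgoff_t page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
	unsigned long pfn;
	int ret;

	mutex_lock(&dev->struct_mutex);

	/* If the object was evicted from the aperture, bind it again. */
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
	}

	/* Re-insert the PFN backing this page of the GTT mapping. */
	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;
	ret = vm_insert_pfn(vma, address, pfn);

	mutex_unlock(&dev->struct_mutex);

	return ret ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
}

The refactor itself is a straightforward consolidation: the same unmap_mapping_range() call was previously open-coded in both i915_gem_object_unbind() and the fence-stealing path, and pulling it into i915_gem_release_mmap() keeps the offset/size bookkeeping in one place, using the cached obj_priv->mmap_offset instead of recomputing the offset from map_list.hash.key at each call site.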