Diffstat (limited to 'drivers')
 drivers/gpu/drm/drm_gem.c           | 13 -------------
 drivers/gpu/drm/i915/i915_debugfs.c |  2 +-
 drivers/gpu/drm/i915/i915_drv.h     |  2 +-
 drivers/gpu/drm/i915/i915_gem.c     | 54 +++++++++++++++++-------------------------------------
 4 files changed, 19 insertions(+), 52 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e9dbb481c469..8bf3770f294e 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
 	if (IS_ERR(obj->filp))
 		goto free;
 
-	/* Basically we want to disable the OOM killer and handle ENOMEM
-	 * ourselves by sacrificing pages from cached buffers.
-	 * XXX shmem_file_[gs]et_gfp_mask()
-	 */
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
-			     GFP_HIGHUSER |
-			     __GFP_COLD |
-			     __GFP_FS |
-			     __GFP_RECLAIMABLE |
-			     __GFP_NORETRY |
-			     __GFP_NOWARN |
-			     __GFP_NOMEMALLOC);
-
 	kref_init(&obj->refcount);
 	kref_init(&obj->handlecount);
 	obj->size = size;
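The block removed above stamped a single gfp mask onto the object's shmem mapping at creation time, so every later page-cache allocation for the object inherited __GFP_NORETRY and could fail under transient memory pressure, even on paths with no eviction fallback; any caller that wanted a blocking allocation had to rewrite the mapping-wide mask around its call (see the i915 helpers deleted further down). The rest of the patch moves that policy into a per-call argument. A minimal sketch of the two call styles that replace it, both of which appear verbatim in the i915 hunks below:

/* Fail-fast attempt: no allocator retry loop, no allocation-failure
 * warning; the caller is expected to evict a buffer and try again. */
ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

/* Default behaviour: block and reclaim like any other page-cache
 * allocation. */
ret = i915_gem_object_get_pages(obj, 0);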
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9c9998c4dceb..a894ade03093 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
 		obj = obj_priv->obj;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			ret = i915_gem_object_get_pages(obj);
+			ret = i915_gem_object_get_pages(obj, 0);
 			if (ret) {
 				DRM_ERROR("Failed to get pages: %d\n", ret);
 				spin_unlock(&dev_priv->mm.active_list_lock);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c1669488b5a..aaf934d96f21 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -872,7 +872,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj);
+int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
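With the extra gfp_t parameter, the allocation policy is chosen per call site rather than per mapping. A hypothetical wrapper (illustration only; try_pin_pages_fast() is not part of this patch) showing how a caller might use the fail-fast mode:

/* Hypothetical helper, not in the patch: attempt a cheap, silent
 * allocation and report -EAGAIN so the caller can make room first. */
static int try_pin_pages_fast(struct drm_gem_object *obj)
{
	int ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

	return ret == -ENOMEM ? -EAGAIN : ret;
}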
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c67924ca80c..dda787aafcc6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
@@ -321,40 +321,24 @@ fail_unlock:
 	return ret;
 }
 
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
-	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
 static int
 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
 {
 	int ret;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 
 	/* If we've insufficient memory to map in the pages, attempt
 	 * to make some space by throwing out some old buffers.
 	 */
 	if (ret == -ENOMEM) {
 		struct drm_device *dev = obj->dev;
-		gfp_t gfp;
 
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret)
 			return ret;
 
-		gfp = i915_gem_object_get_page_gfp_mask(obj);
-		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
-		ret = i915_gem_object_get_pages(obj);
-		i915_gem_object_set_page_gfp_mask (obj, gfp);
+		ret = i915_gem_object_get_pages(obj, 0);
 	}
 
 	return ret;
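Read as a whole, the function now implements a two-pass strategy: an optimistic fail-fast allocation, then eviction plus a fully blocking retry, with no mutation of shared mapping state. Reconstructed from the hunk above for readability (context lines included):

static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
	int ret;

	/* First pass: fail fast and stay silent rather than push the
	 * whole system into reclaim. */
	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

	/* If we've insufficient memory to map in the pages, attempt
	 * to make some space by throwing out some old buffers. */
	if (ret == -ENOMEM) {
		struct drm_device *dev = obj->dev;

		ret = i915_gem_evict_something(dev, obj->size);
		if (ret)
			return ret;

		/* Second pass: normal blocking, retrying allocation. */
		ret = i915_gem_object_get_pages(obj, 0);
	}

	return ret;
}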
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
@@ -2230,7 +2214,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 }
 
 int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+			  gfp_t gfpmask)
 {
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int page_count, i;
@@ -2256,7 +2241,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
 	inode = obj->filp->f_path.dentry->d_inode;
 	mapping = inode->i_mapping;
 	for (i = 0; i < page_count; i++) {
-		page = read_mapping_page(mapping, i, NULL);
+		page = read_cache_page_gfp(mapping, i,
+					   mapping_gfp_mask (mapping) |
+					   __GFP_COLD |
+					   gfpmask);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			i915_gem_object_put_pages(obj);
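read_cache_page_gfp() behaves like read_mapping_page(), but takes the gfp flags for the page allocation explicitly instead of relying only on the mask stored in the mapping. The caller's bits are OR'ed on top of the mapping default (for shmem typically GFP_HIGHUSER_MOVABLE; an assumption about the mapping, not something this patch sets), so the two modes compose like this:

/* gfpmask == 0:
 *   mapping default | __GFP_COLD             -> blocking, retrying
 * gfpmask == __GFP_NORETRY | __GFP_NOWARN:
 *   same bits, plus fail fast and no warning on failure
 */
page = read_cache_page_gfp(mapping, i,
			   mapping_gfp_mask(mapping) | __GFP_COLD | gfpmask);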
@@ -2579,7 +2567,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	struct drm_mm_node *free_space;
-	bool retry_alloc = false;
+	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
 	int ret;
 
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
@@ -2623,15 +2611,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 		DRM_INFO("Binding object of size %zd at 0x%08x\n",
 			 obj->size, obj_priv->gtt_offset);
 #endif
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-						   i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
-	}
-	ret = i915_gem_object_get_pages(obj);
-	if (retry_alloc) {
-		i915_gem_object_set_page_gfp_mask (obj,
-						   i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
-	}
+	ret = i915_gem_object_get_pages(obj, gfpmask);
 	if (ret) {
 		drm_mm_put_block(obj_priv->gtt_space);
 		obj_priv->gtt_space = NULL;
@@ -2641,9 +2621,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret) {
 			/* now try to shrink everyone else */
-			if (! retry_alloc) {
-				retry_alloc = true;
+			if (gfpmask) {
+				gfpmask = 0;
 				goto search_free;
 			}
 
 			return ret;
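Taken together with the two hunks above, the gfp mask in i915_gem_object_bind_to_gtt() doubles as the "already retried?" flag. A simplified sketch of the control flow (the success path after eviction is inferred from the surrounding context, not shown in the hunks):

gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;	/* first pass: fail fast */

search_free:
	/* ... find GTT space for the object ... */
	ret = i915_gem_object_get_pages(obj, gfpmask);
	if (ret == -ENOMEM) {
		/* first try to clear up some space of our own in the GTT */
		if (i915_gem_evict_something(dev, obj->size) == 0)
			goto search_free;
		/* now try to shrink everyone else */
		if (gfpmask) {
			gfpmask = 0;	/* second pass: full reclaim allowed */
			goto search_free;
		}
		return ret;	/* nothing left to evict; give up */
	}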
@@ -4946,7 +4926,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 	if (!obj_priv->phys_obj)
 		return;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret)
 		goto out;
 
@@ -5004,7 +4984,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
 	obj_priv->phys_obj->cur_obj = obj;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret) {
 		DRM_ERROR("failed to get page list\n");
 		goto out;
