about summary refs log tree commit diff stats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2012-12-07 15:37:07 -0500
committerDaniel Vetter <daniel.vetter@ffwll.ch>2012-12-18 16:02:29 -0500
commitdc9dd7a20fde95aa81a8307cde79c2dff9f83f3d (patch)
treea88f8658040d07f5bf3e86e71f2888749250447d /drivers/gpu
parentb81034506fc9b879cb726feb01342be0cdbe6e25 (diff)
drm/i915: Preallocate the drm_mm_node prior to manipulating the GTT drm_mm manager
As we may reap neighbouring objects in order to free up pages for allocations, we need to be careful not to allocate in the middle of the drm_mm manager. To accomplish this, we can simply allocate the drm_mm_node up front and then use the combined search & insert drm_mm routines, reducing our code footprint in the process.

Fixes (partially) i-g-t/gem_tiled_swapping

Reported-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
[danvet: Again fixup atomic bikeshed.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c62
1 files changed, 25 insertions, 37 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c1f691958f8..5feda1f6704 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2890,7 +2890,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2890{ 2890{
2891 struct drm_device *dev = obj->base.dev; 2891 struct drm_device *dev = obj->base.dev;
2892 drm_i915_private_t *dev_priv = dev->dev_private; 2892 drm_i915_private_t *dev_priv = dev->dev_private;
2893 struct drm_mm_node *free_space; 2893 struct drm_mm_node *node;
2894 u32 size, fence_size, fence_alignment, unfenced_alignment; 2894 u32 size, fence_size, fence_alignment, unfenced_alignment;
2895 bool mappable, fenceable; 2895 bool mappable, fenceable;
2896 int ret; 2896 int ret;
@@ -2936,66 +2936,54 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2936 2936
2937 i915_gem_object_pin_pages(obj); 2937 i915_gem_object_pin_pages(obj);
2938 2938
2939 node = kzalloc(sizeof(*node), GFP_KERNEL);
2940 if (node == NULL) {
2941 i915_gem_object_unpin_pages(obj);
2942 return -ENOMEM;
2943 }
2944
2939 search_free: 2945 search_free:
2940 if (map_and_fenceable) 2946 if (map_and_fenceable)
2941 free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space, 2947 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
2942 size, alignment, obj->cache_level, 2948 size, alignment, obj->cache_level,
2943 0, dev_priv->mm.gtt_mappable_end, 2949 0, dev_priv->mm.gtt_mappable_end);
2944 false);
2945 else 2950 else
2946 free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space, 2951 ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
2947 size, alignment, obj->cache_level, 2952 size, alignment, obj->cache_level);
2948 false); 2953 if (ret) {
2949
2950 if (free_space != NULL) {
2951 if (map_and_fenceable)
2952 free_space =
2953 drm_mm_get_block_range_generic(free_space,
2954 size, alignment, obj->cache_level,
2955 0, dev_priv->mm.gtt_mappable_end,
2956 false);
2957 else
2958 free_space =
2959 drm_mm_get_block_generic(free_space,
2960 size, alignment, obj->cache_level,
2961 false);
2962 }
2963 if (free_space == NULL) {
2964 ret = i915_gem_evict_something(dev, size, alignment, 2954 ret = i915_gem_evict_something(dev, size, alignment,
2965 obj->cache_level, 2955 obj->cache_level,
2966 map_and_fenceable, 2956 map_and_fenceable,
2967 nonblocking); 2957 nonblocking);
2968 if (ret) { 2958 if (ret == 0)
2969 i915_gem_object_unpin_pages(obj); 2959 goto search_free;
2970 return ret;
2971 }
2972 2960
2973 goto search_free; 2961 i915_gem_object_unpin_pages(obj);
2962 kfree(node);
2963 return ret;
2974 } 2964 }
2975 if (WARN_ON(!i915_gem_valid_gtt_space(dev, 2965 if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
2976 free_space,
2977 obj->cache_level))) {
2978 i915_gem_object_unpin_pages(obj); 2966 i915_gem_object_unpin_pages(obj);
2979 drm_mm_put_block(free_space); 2967 drm_mm_put_block(node);
2980 return -EINVAL; 2968 return -EINVAL;
2981 } 2969 }
2982 2970
2983 ret = i915_gem_gtt_prepare_object(obj); 2971 ret = i915_gem_gtt_prepare_object(obj);
2984 if (ret) { 2972 if (ret) {
2985 i915_gem_object_unpin_pages(obj); 2973 i915_gem_object_unpin_pages(obj);
2986 drm_mm_put_block(free_space); 2974 drm_mm_put_block(node);
2987 return ret; 2975 return ret;
2988 } 2976 }
2989 2977
2990 list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); 2978 list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
2991 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 2979 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2992 2980
2993 obj->gtt_space = free_space; 2981 obj->gtt_space = node;
2994 obj->gtt_offset = free_space->start; 2982 obj->gtt_offset = node->start;
2995 2983
2996 fenceable = 2984 fenceable =
2997 free_space->size == fence_size && 2985 node->size == fence_size &&
2998 (free_space->start & (fence_alignment - 1)) == 0; 2986 (node->start & (fence_alignment - 1)) == 0;
2999 2987
3000 mappable = 2988 mappable =
3001 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; 2989 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;