author     Chris Wilson <chris@chris-wilson.co.uk>     2017-02-02 16:04:38 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>      2017-02-03 05:10:32 -0500
commit     4e64e5539d152e202ad6eea2b6f65f3ab58d9428 (patch)
tree       14f90fc609bef7c9e51c6e218cff06062f4f8d6a /drivers
parent     17aad8a340e6f98b62c2482d02bc3814eebde9a5 (diff)
drm: Improve drm_mm search (and fix topdown allocation) with rbtrees
The drm_mm range manager claimed to support top-down insertion, but it was
neither searching for the top-most hole that could fit the allocation request
nor fitting the request to the hole correctly.

In order to search the range efficiently, we create a secondary index for the
holes using either their size or their address. This index allows us to find
the smallest hole or the hole at the bottom or top of the range efficiently,
whilst keeping the hole stack to rapidly service evictions.

v2: Search for holes both high and low. Rename flags to mode.
v3: Discover rb_entry_safe() and use it!
v4: Kerneldoc for enum drm_mm_insert_mode.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Russell King <rmk+kernel@armlinux.org.uk>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: Lucas Stach <l.stach@pengutronix.de>
Cc: Christian Gmeiner <christian.gmeiner@gmail.com>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Thierry Reding <thierry.reding@gmail.com>
Cc: Stephen Warren <swarren@wwwdotorg.org>
Cc: Alexandre Courbot <gnurou@gmail.com>
Cc: Eric Anholt <eric@anholt.net>
Cc: Sinclair Yeh <syeh@vmware.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com> # vmwgfx
Reviewed-by: Lucas Stach <l.stach@pengutronix.de> #etnaviv
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20170202210438.28702-1-chris@chris-wilson.co.uk
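All of the driver conversions below follow one pattern: the old search-flag and
allocator-flag pair collapses into a single enum drm_mm_insert_mode argument of
drm_mm_insert_node_in_range(). A minimal sketch of the new calling convention,
modelled on the TTM and amdgpu hunks in this patch (the helper name and its
parameters are illustrative placeholders, not code from any single driver):

/*
 * Illustrative sketch only: pick a placement policy, then insert @node into
 * the [fpfn, lpfn) part of @mm. Uses only the API introduced by this patch.
 */
static int example_alloc_range(struct drm_mm *mm, struct drm_mm_node *node,
                               u64 num_pages, u64 alignment,
                               u64 fpfn, u64 lpfn, bool topdown)
{
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST; /* smallest fitting hole */

        if (topdown)
                mode = DRM_MM_INSERT_HIGH; /* fit against the top of the range */

        return drm_mm_insert_node_in_range(mm, node, num_pages, alignment, 0,
                                           fpfn, lpfn, mode);
}

DRM_MM_INSERT_LOW places at the bottom of the range instead, and
DRM_MM_INSERT_EVICT reuses the most recently freed holes, which is what the
eviction paths switch to after a scan.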
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c   |  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c  |  20
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c           |   4
-rw-r--r--  drivers/gpu/drm/drm_mm.c                      | 488
-rw-r--r--  drivers/gpu/drm/drm_vma_manager.c             |   3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c         |  11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c               |  10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c         |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c    |   5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c           |  39
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c        |   6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c                 |   3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c             |   3
-rw-r--r--  drivers/gpu/drm/selftests/test-drm_mm.c       |  58
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c                  |   6
-rw-r--r--  drivers/gpu/drm/tegra/gem.c                   |   4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c          |  18
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c                |   2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hvs.c                 |   3
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c               |   6
-rw-r--r--  drivers/gpu/drm/via/via_mm.c                  |   4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c        |  10
22 files changed, 378 insertions(+), 350 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index e4eb6dd3798a..0335c2f331e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -97,8 +97,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 {
         struct amdgpu_gtt_mgr *mgr = man->priv;
         struct drm_mm_node *node = mem->mm_node;
-        enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
-        enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+        enum drm_mm_insert_mode mode;
         unsigned long fpfn, lpfn;
         int r;
 
@@ -115,15 +114,14 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
         else
                 lpfn = man->size;
 
-        if (place && place->flags & TTM_PL_FLAG_TOPDOWN) {
-                sflags = DRM_MM_SEARCH_BELOW;
-                aflags = DRM_MM_CREATE_TOP;
-        }
+        mode = DRM_MM_INSERT_BEST;
+        if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
+                mode = DRM_MM_INSERT_HIGH;
 
         spin_lock(&mgr->lock);
-        r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages,
-                                                mem->page_alignment, 0,
-                                                fpfn, lpfn, sflags, aflags);
+        r = drm_mm_insert_node_in_range(&mgr->mm, node,
+                                        mem->num_pages, mem->page_alignment, 0,
+                                        fpfn, lpfn, mode);
         spin_unlock(&mgr->lock);
 
         if (!r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index ac9007986c11..9e577e3d3147 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -97,8 +97,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
         struct amdgpu_vram_mgr *mgr = man->priv;
         struct drm_mm *mm = &mgr->mm;
         struct drm_mm_node *nodes;
-        enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
-        enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+        enum drm_mm_insert_mode mode;
         unsigned long lpfn, num_nodes, pages_per_node, pages_left;
         unsigned i;
         int r;
@@ -121,10 +120,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
         if (!nodes)
                 return -ENOMEM;
 
-        if (place->flags & TTM_PL_FLAG_TOPDOWN) {
-                sflags = DRM_MM_SEARCH_BELOW;
-                aflags = DRM_MM_CREATE_TOP;
-        }
+        mode = DRM_MM_INSERT_BEST;
+        if (place->flags & TTM_PL_FLAG_TOPDOWN)
+                mode = DRM_MM_INSERT_HIGH;
 
         pages_left = mem->num_pages;
 
@@ -135,13 +133,11 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 
                 if (pages == pages_per_node)
                         alignment = pages_per_node;
-                else
-                        sflags |= DRM_MM_SEARCH_BEST;
 
-                r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
-                                                        alignment, 0,
-                                                        place->fpfn, lpfn,
-                                                        sflags, aflags);
+                r = drm_mm_insert_node_in_range(mm, &nodes[i],
+                                                pages, alignment, 0,
+                                                place->fpfn, lpfn,
+                                                mode);
                 if (unlikely(r))
                         goto error;
 
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index a293c8be232c..560d416deab2 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -148,8 +148,8 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
                 return -ENOSPC;
 
         mutex_lock(&priv->linear_lock);
-        ret = drm_mm_insert_node(&priv->linear, node, size, align,
-                                 DRM_MM_SEARCH_DEFAULT);
+        ret = drm_mm_insert_node_generic(&priv->linear, node,
+                                         size, align, 0, 0);
         mutex_unlock(&priv->linear_lock);
         if (ret) {
                 kfree(node);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index e51876e588d6..8bfb0b327267 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -97,14 +97,6 @@
97 * locking would be fully redundant. 97 * locking would be fully redundant.
98 */ 98 */
99 99
100static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
101 u64 size,
102 u64 alignment,
103 unsigned long color,
104 u64 start,
105 u64 end,
106 enum drm_mm_search_flags flags);
107
108#ifdef CONFIG_DRM_DEBUG_MM 100#ifdef CONFIG_DRM_DEBUG_MM
109#include <linux/stackdepot.h> 101#include <linux/stackdepot.h>
110 102
@@ -226,69 +218,151 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
226 &drm_mm_interval_tree_augment); 218 &drm_mm_interval_tree_augment);
227} 219}
228 220
229static void drm_mm_insert_helper(struct drm_mm_node *hole_node, 221#define RB_INSERT(root, member, expr) do { \
230 struct drm_mm_node *node, 222 struct rb_node **link = &root.rb_node, *rb = NULL; \
231 u64 size, u64 alignment, 223 u64 x = expr(node); \
232 unsigned long color, 224 while (*link) { \
233 u64 range_start, u64 range_end, 225 rb = *link; \
234 enum drm_mm_allocator_flags flags) 226 if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
227 link = &rb->rb_left; \
228 else \
229 link = &rb->rb_right; \
230 } \
231 rb_link_node(&node->member, rb, link); \
232 rb_insert_color(&node->member, &root); \
233} while (0)
234
235#define HOLE_SIZE(NODE) ((NODE)->hole_size)
236#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
237
238static void add_hole(struct drm_mm_node *node)
235{ 239{
236 struct drm_mm *mm = hole_node->mm; 240 struct drm_mm *mm = node->mm;
237 u64 hole_start = drm_mm_hole_node_start(hole_node);
238 u64 hole_end = drm_mm_hole_node_end(hole_node);
239 u64 adj_start = hole_start;
240 u64 adj_end = hole_end;
241 241
242 DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node) || node->allocated); 242 node->hole_size =
243 __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
244 DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
243 245
244 if (mm->color_adjust) 246 RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
245 mm->color_adjust(hole_node, color, &adj_start, &adj_end); 247 RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
246 248
247 adj_start = max(adj_start, range_start); 249 list_add(&node->hole_stack, &mm->hole_stack);
248 adj_end = min(adj_end, range_end); 250}
249 251
250 if (flags & DRM_MM_CREATE_TOP) 252static void rm_hole(struct drm_mm_node *node)
251 adj_start = adj_end - size; 253{
254 DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
252 255
253 if (alignment) { 256 list_del(&node->hole_stack);
254 u64 rem; 257 rb_erase(&node->rb_hole_size, &node->mm->holes_size);
258 rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
259 node->hole_size = 0;
255 260
256 div64_u64_rem(adj_start, alignment, &rem); 261 DRM_MM_BUG_ON(drm_mm_hole_follows(node));
257 if (rem) { 262}
258 if (flags & DRM_MM_CREATE_TOP) 263
259 adj_start -= rem; 264static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
260 else 265{
261 adj_start += alignment - rem; 266 return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
267}
268
269static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
270{
271 return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
272}
273
274static inline u64 rb_hole_size(struct rb_node *rb)
275{
276 return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
277}
278
279static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
280{
281 struct rb_node *best = NULL;
282 struct rb_node **link = &mm->holes_size.rb_node;
283
284 while (*link) {
285 struct rb_node *rb = *link;
286
287 if (size <= rb_hole_size(rb)) {
288 link = &rb->rb_left;
289 best = rb;
290 } else {
291 link = &rb->rb_right;
262 } 292 }
263 } 293 }
264 294
265 if (adj_start == hole_start) { 295 return rb_hole_size_to_node(best);
266 hole_node->hole_follows = 0; 296}
267 list_del(&hole_node->hole_stack); 297
298static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
299{
300 struct drm_mm_node *node = NULL;
301 struct rb_node **link = &mm->holes_addr.rb_node;
302
303 while (*link) {
304 u64 hole_start;
305
306 node = rb_hole_addr_to_node(*link);
307 hole_start = __drm_mm_hole_node_start(node);
308
309 if (addr < hole_start)
310 link = &node->rb_hole_addr.rb_left;
311 else if (addr > hole_start + node->hole_size)
312 link = &node->rb_hole_addr.rb_right;
313 else
314 break;
268 } 315 }
269 316
270 node->start = adj_start; 317 return node;
271 node->size = size; 318}
272 node->mm = mm;
273 node->color = color;
274 node->allocated = 1;
275 319
276 list_add(&node->node_list, &hole_node->node_list); 320static struct drm_mm_node *
321first_hole(struct drm_mm *mm,
322 u64 start, u64 end, u64 size,
323 enum drm_mm_insert_mode mode)
324{
325 if (RB_EMPTY_ROOT(&mm->holes_size))
326 return NULL;
277 327
278 drm_mm_interval_tree_add_node(hole_node, node); 328 switch (mode) {
329 default:
330 case DRM_MM_INSERT_BEST:
331 return best_hole(mm, size);
279 332
280 DRM_MM_BUG_ON(node->start < range_start); 333 case DRM_MM_INSERT_LOW:
281 DRM_MM_BUG_ON(node->start < adj_start); 334 return find_hole(mm, start);
282 DRM_MM_BUG_ON(node->start + node->size > adj_end);
283 DRM_MM_BUG_ON(node->start + node->size > range_end);
284 335
285 node->hole_follows = 0; 336 case DRM_MM_INSERT_HIGH:
286 if (__drm_mm_hole_node_start(node) < hole_end) { 337 return find_hole(mm, end);
287 list_add(&node->hole_stack, &mm->hole_stack); 338
288 node->hole_follows = 1; 339 case DRM_MM_INSERT_EVICT:
340 return list_first_entry_or_null(&mm->hole_stack,
341 struct drm_mm_node,
342 hole_stack);
289 } 343 }
344}
290 345
291 save_stack(node); 346static struct drm_mm_node *
347next_hole(struct drm_mm *mm,
348 struct drm_mm_node *node,
349 enum drm_mm_insert_mode mode)
350{
351 switch (mode) {
352 default:
353 case DRM_MM_INSERT_BEST:
354 return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
355
356 case DRM_MM_INSERT_LOW:
357 return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
358
359 case DRM_MM_INSERT_HIGH:
360 return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
361
362 case DRM_MM_INSERT_EVICT:
363 node = list_next_entry(node, hole_stack);
364 return &node->hole_stack == &mm->hole_stack ? NULL : node;
365 }
292} 366}
293 367
294/** 368/**
@@ -317,21 +391,12 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
317 return -ENOSPC; 391 return -ENOSPC;
318 392
319 /* Find the relevant hole to add our node to */ 393 /* Find the relevant hole to add our node to */
320 hole = drm_mm_interval_tree_iter_first(&mm->interval_tree, 394 hole = find_hole(mm, node->start);
321 node->start, ~(u64)0); 395 if (!hole)
322 if (hole) {
323 if (hole->start < end)
324 return -ENOSPC;
325 } else {
326 hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
327 }
328
329 hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
330 if (!drm_mm_hole_follows(hole))
331 return -ENOSPC; 396 return -ENOSPC;
332 397
333 adj_start = hole_start = __drm_mm_hole_node_start(hole); 398 adj_start = hole_start = __drm_mm_hole_node_start(hole);
334 adj_end = hole_end = __drm_mm_hole_node_end(hole); 399 adj_end = hole_end = hole_start + hole->hole_size;
335 400
336 if (mm->color_adjust) 401 if (mm->color_adjust)
337 mm->color_adjust(hole, node->color, &adj_start, &adj_end); 402 mm->color_adjust(hole, node->color, &adj_start, &adj_end);
@@ -340,70 +405,130 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
340 return -ENOSPC; 405 return -ENOSPC;
341 406
342 node->mm = mm; 407 node->mm = mm;
343 node->allocated = 1;
344 408
345 list_add(&node->node_list, &hole->node_list); 409 list_add(&node->node_list, &hole->node_list);
346
347 drm_mm_interval_tree_add_node(hole, node); 410 drm_mm_interval_tree_add_node(hole, node);
411 node->allocated = true;
412 node->hole_size = 0;
348 413
349 if (node->start == hole_start) { 414 rm_hole(hole);
350 hole->hole_follows = 0; 415 if (node->start > hole_start)
351 list_del(&hole->hole_stack); 416 add_hole(hole);
352 } 417 if (end < hole_end)
353 418 add_hole(node);
354 node->hole_follows = 0;
355 if (end != hole_end) {
356 list_add(&node->hole_stack, &mm->hole_stack);
357 node->hole_follows = 1;
358 }
359 419
360 save_stack(node); 420 save_stack(node);
361
362 return 0; 421 return 0;
363} 422}
364EXPORT_SYMBOL(drm_mm_reserve_node); 423EXPORT_SYMBOL(drm_mm_reserve_node);
365 424
366/** 425/**
367 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node 426 * drm_mm_insert_node_in_range - ranged search for space and insert @node
368 * @mm: drm_mm to allocate from 427 * @mm: drm_mm to allocate from
369 * @node: preallocate node to insert 428 * @node: preallocate node to insert
370 * @size: size of the allocation 429 * @size: size of the allocation
371 * @alignment: alignment of the allocation 430 * @alignment: alignment of the allocation
372 * @color: opaque tag value to use for this node 431 * @color: opaque tag value to use for this node
373 * @start: start of the allowed range for this node 432 * @range_start: start of the allowed range for this node
374 * @end: end of the allowed range for this node 433 * @range_end: end of the allowed range for this node
375 * @sflags: flags to fine-tune the allocation search 434 * @mode: fine-tune the allocation search and placement
376 * @aflags: flags to fine-tune the allocation behavior
377 * 435 *
378 * The preallocated @node must be cleared to 0. 436 * The preallocated @node must be cleared to 0.
379 * 437 *
380 * Returns: 438 * Returns:
381 * 0 on success, -ENOSPC if there's no suitable hole. 439 * 0 on success, -ENOSPC if there's no suitable hole.
382 */ 440 */
383int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, 441int drm_mm_insert_node_in_range(struct drm_mm * const mm,
384 u64 size, u64 alignment, 442 struct drm_mm_node * const node,
385 unsigned long color, 443 u64 size, u64 alignment,
386 u64 start, u64 end, 444 unsigned long color,
387 enum drm_mm_search_flags sflags, 445 u64 range_start, u64 range_end,
388 enum drm_mm_allocator_flags aflags) 446 enum drm_mm_insert_mode mode)
389{ 447{
390 struct drm_mm_node *hole_node; 448 struct drm_mm_node *hole;
449 u64 remainder_mask;
391 450
392 if (WARN_ON(size == 0)) 451 DRM_MM_BUG_ON(range_start >= range_end);
393 return -EINVAL;
394 452
395 hole_node = drm_mm_search_free_in_range_generic(mm, 453 if (unlikely(size == 0 || range_end - range_start < size))
396 size, alignment, color,
397 start, end, sflags);
398 if (!hole_node)
399 return -ENOSPC; 454 return -ENOSPC;
400 455
401 drm_mm_insert_helper(hole_node, node, 456 if (alignment <= 1)
402 size, alignment, color, 457 alignment = 0;
403 start, end, aflags); 458
404 return 0; 459 remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
460 for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
461 hole = next_hole(mm, hole, mode)) {
462 u64 hole_start = __drm_mm_hole_node_start(hole);
463 u64 hole_end = hole_start + hole->hole_size;
464 u64 adj_start, adj_end;
465 u64 col_start, col_end;
466
467 if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
468 break;
469
470 if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
471 break;
472
473 col_start = hole_start;
474 col_end = hole_end;
475 if (mm->color_adjust)
476 mm->color_adjust(hole, color, &col_start, &col_end);
477
478 adj_start = max(col_start, range_start);
479 adj_end = min(col_end, range_end);
480
481 if (adj_end <= adj_start || adj_end - adj_start < size)
482 continue;
483
484 if (mode == DRM_MM_INSERT_HIGH)
485 adj_start = adj_end - size;
486
487 if (alignment) {
488 u64 rem;
489
490 if (likely(remainder_mask))
491 rem = adj_start & remainder_mask;
492 else
493 div64_u64_rem(adj_start, alignment, &rem);
494 if (rem) {
495 adj_start -= rem;
496 if (mode != DRM_MM_INSERT_HIGH)
497 adj_start += alignment;
498
499 if (adj_start < max(col_start, range_start) ||
500 min(col_end, range_end) - adj_start < size)
501 continue;
502
503 if (adj_end <= adj_start ||
504 adj_end - adj_start < size)
505 continue;
506 }
507 }
508
509 node->mm = mm;
510 node->size = size;
511 node->start = adj_start;
512 node->color = color;
513 node->hole_size = 0;
514
515 list_add(&node->node_list, &hole->node_list);
516 drm_mm_interval_tree_add_node(hole, node);
517 node->allocated = true;
518
519 rm_hole(hole);
520 if (adj_start > hole_start)
521 add_hole(hole);
522 if (adj_start + size < hole_end)
523 add_hole(node);
524
525 save_stack(node);
526 return 0;
527 }
528
529 return -ENOSPC;
405} 530}
406EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic); 531EXPORT_SYMBOL(drm_mm_insert_node_in_range);
407 532
408/** 533/**
409 * drm_mm_remove_node - Remove a memory node from the allocator. 534 * drm_mm_remove_node - Remove a memory node from the allocator.
@@ -421,92 +546,20 @@ void drm_mm_remove_node(struct drm_mm_node *node)
421 DRM_MM_BUG_ON(!node->allocated); 546 DRM_MM_BUG_ON(!node->allocated);
422 DRM_MM_BUG_ON(node->scanned_block); 547 DRM_MM_BUG_ON(node->scanned_block);
423 548
424 prev_node = 549 prev_node = list_prev_entry(node, node_list);
425 list_entry(node->node_list.prev, struct drm_mm_node, node_list);
426
427 if (drm_mm_hole_follows(node)) {
428 DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
429 __drm_mm_hole_node_end(node));
430 list_del(&node->hole_stack);
431 } else {
432 DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
433 __drm_mm_hole_node_end(node));
434 }
435 550
436 if (!drm_mm_hole_follows(prev_node)) { 551 if (drm_mm_hole_follows(node))
437 prev_node->hole_follows = 1; 552 rm_hole(node);
438 list_add(&prev_node->hole_stack, &mm->hole_stack);
439 } else
440 list_move(&prev_node->hole_stack, &mm->hole_stack);
441 553
442 drm_mm_interval_tree_remove(node, &mm->interval_tree); 554 drm_mm_interval_tree_remove(node, &mm->interval_tree);
443 list_del(&node->node_list); 555 list_del(&node->node_list);
444 node->allocated = 0; 556 node->allocated = false;
445}
446EXPORT_SYMBOL(drm_mm_remove_node);
447
448static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
449{
450 if (end - start < size)
451 return 0;
452
453 if (alignment) {
454 u64 rem;
455
456 div64_u64_rem(start, alignment, &rem);
457 if (rem)
458 start += alignment - rem;
459 }
460 557
461 return end >= start + size; 558 if (drm_mm_hole_follows(prev_node))
462} 559 rm_hole(prev_node);
463 560 add_hole(prev_node);
464static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
465 u64 size,
466 u64 alignment,
467 unsigned long color,
468 u64 start,
469 u64 end,
470 enum drm_mm_search_flags flags)
471{
472 struct drm_mm_node *entry;
473 struct drm_mm_node *best;
474 u64 adj_start;
475 u64 adj_end;
476 u64 best_size;
477
478 DRM_MM_BUG_ON(mm->scan_active);
479
480 best = NULL;
481 best_size = ~0UL;
482
483 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
484 flags & DRM_MM_SEARCH_BELOW) {
485 u64 hole_size = adj_end - adj_start;
486
487 if (mm->color_adjust) {
488 mm->color_adjust(entry, color, &adj_start, &adj_end);
489 if (adj_end <= adj_start)
490 continue;
491 }
492
493 adj_start = max(adj_start, start);
494 adj_end = min(adj_end, end);
495
496 if (!check_free_hole(adj_start, adj_end, size, alignment))
497 continue;
498
499 if (!(flags & DRM_MM_SEARCH_BEST))
500 return entry;
501
502 if (hole_size < best_size) {
503 best = entry;
504 best_size = hole_size;
505 }
506 }
507
508 return best;
509} 561}
562EXPORT_SYMBOL(drm_mm_remove_node);
510 563
511/** 564/**
512 * drm_mm_replace_node - move an allocation from @old to @new 565 * drm_mm_replace_node - move an allocation from @old to @new
@@ -521,18 +574,23 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
521{ 574{
522 DRM_MM_BUG_ON(!old->allocated); 575 DRM_MM_BUG_ON(!old->allocated);
523 576
577 *new = *old;
578
524 list_replace(&old->node_list, &new->node_list); 579 list_replace(&old->node_list, &new->node_list);
525 list_replace(&old->hole_stack, &new->hole_stack);
526 rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree); 580 rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
527 new->hole_follows = old->hole_follows; 581
528 new->mm = old->mm; 582 if (drm_mm_hole_follows(old)) {
529 new->start = old->start; 583 list_replace(&old->hole_stack, &new->hole_stack);
530 new->size = old->size; 584 rb_replace_node(&old->rb_hole_size,
531 new->color = old->color; 585 &new->rb_hole_size,
532 new->__subtree_last = old->__subtree_last; 586 &old->mm->holes_size);
533 587 rb_replace_node(&old->rb_hole_addr,
534 old->allocated = 0; 588 &new->rb_hole_addr,
535 new->allocated = 1; 589 &old->mm->holes_addr);
590 }
591
592 old->allocated = false;
593 new->allocated = true;
536} 594}
537EXPORT_SYMBOL(drm_mm_replace_node); 595EXPORT_SYMBOL(drm_mm_replace_node);
538 596
@@ -577,7 +635,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
577 * @color: opaque tag value to use for the allocation 635 * @color: opaque tag value to use for the allocation
578 * @start: start of the allowed range for the allocation 636 * @start: start of the allowed range for the allocation
579 * @end: end of the allowed range for the allocation 637 * @end: end of the allowed range for the allocation
580 * @flags: flags to specify how the allocation will be performed afterwards 638 * @mode: fine-tune the allocation search and placement
581 * 639 *
582 * This simply sets up the scanning routines with the parameters for the desired 640 * This simply sets up the scanning routines with the parameters for the desired
583 * hole. 641 * hole.
@@ -593,7 +651,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
593 unsigned long color, 651 unsigned long color,
594 u64 start, 652 u64 start,
595 u64 end, 653 u64 end,
596 unsigned int flags) 654 enum drm_mm_insert_mode mode)
597{ 655{
598 DRM_MM_BUG_ON(start >= end); 656 DRM_MM_BUG_ON(start >= end);
599 DRM_MM_BUG_ON(!size || size > end - start); 657 DRM_MM_BUG_ON(!size || size > end - start);
@@ -608,7 +666,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
608 scan->alignment = alignment; 666 scan->alignment = alignment;
609 scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0; 667 scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
610 scan->size = size; 668 scan->size = size;
611 scan->flags = flags; 669 scan->mode = mode;
612 670
613 DRM_MM_BUG_ON(end <= start); 671 DRM_MM_BUG_ON(end <= start);
614 scan->range_start = start; 672 scan->range_start = start;
@@ -667,7 +725,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
667 if (adj_end <= adj_start || adj_end - adj_start < scan->size) 725 if (adj_end <= adj_start || adj_end - adj_start < scan->size)
668 return false; 726 return false;
669 727
670 if (scan->flags == DRM_MM_CREATE_TOP) 728 if (scan->mode == DRM_MM_INSERT_HIGH)
671 adj_start = adj_end - scan->size; 729 adj_start = adj_end - scan->size;
672 730
673 if (scan->alignment) { 731 if (scan->alignment) {
@@ -679,7 +737,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
679 div64_u64_rem(adj_start, scan->alignment, &rem); 737 div64_u64_rem(adj_start, scan->alignment, &rem);
680 if (rem) { 738 if (rem) {
681 adj_start -= rem; 739 adj_start -= rem;
682 if (scan->flags != DRM_MM_CREATE_TOP) 740 if (scan->mode != DRM_MM_INSERT_HIGH)
683 adj_start += scan->alignment; 741 adj_start += scan->alignment;
684 if (adj_start < max(col_start, scan->range_start) || 742 if (adj_start < max(col_start, scan->range_start) ||
685 min(col_end, scan->range_end) - adj_start < scan->size) 743 min(col_end, scan->range_end) - adj_start < scan->size)
@@ -775,7 +833,7 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
775 833
776 hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack); 834 hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
777 hole_start = __drm_mm_hole_node_start(hole); 835 hole_start = __drm_mm_hole_node_start(hole);
778 hole_end = __drm_mm_hole_node_end(hole); 836 hole_end = hole_start + hole->hole_size;
779 837
780 DRM_MM_BUG_ON(hole_start > scan->hit_start); 838 DRM_MM_BUG_ON(hole_start > scan->hit_start);
781 DRM_MM_BUG_ON(hole_end < scan->hit_end); 839 DRM_MM_BUG_ON(hole_end < scan->hit_end);
@@ -802,21 +860,22 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
802{ 860{
803 DRM_MM_BUG_ON(start + size <= start); 861 DRM_MM_BUG_ON(start + size <= start);
804 862
863 mm->color_adjust = NULL;
864
805 INIT_LIST_HEAD(&mm->hole_stack); 865 INIT_LIST_HEAD(&mm->hole_stack);
806 mm->scan_active = 0; 866 mm->interval_tree = RB_ROOT;
867 mm->holes_size = RB_ROOT;
868 mm->holes_addr = RB_ROOT;
807 869
808 /* Clever trick to avoid a special case in the free hole tracking. */ 870 /* Clever trick to avoid a special case in the free hole tracking. */
809 INIT_LIST_HEAD(&mm->head_node.node_list); 871 INIT_LIST_HEAD(&mm->head_node.node_list);
810 mm->head_node.allocated = 0; 872 mm->head_node.allocated = false;
811 mm->head_node.hole_follows = 1;
812 mm->head_node.mm = mm; 873 mm->head_node.mm = mm;
813 mm->head_node.start = start + size; 874 mm->head_node.start = start + size;
814 mm->head_node.size = start - mm->head_node.start; 875 mm->head_node.size = -size;
815 list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack); 876 add_hole(&mm->head_node);
816 877
817 mm->interval_tree = RB_ROOT; 878 mm->scan_active = 0;
818
819 mm->color_adjust = NULL;
820} 879}
821EXPORT_SYMBOL(drm_mm_init); 880EXPORT_SYMBOL(drm_mm_init);
822 881
@@ -837,20 +896,17 @@ EXPORT_SYMBOL(drm_mm_takedown);
837 896
838static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry) 897static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
839{ 898{
840 u64 hole_start, hole_end, hole_size; 899 u64 start, size;
841 900
842 if (entry->hole_follows) { 901 size = entry->hole_size;
843 hole_start = drm_mm_hole_node_start(entry); 902 if (size) {
844 hole_end = drm_mm_hole_node_end(entry); 903 start = drm_mm_hole_node_start(entry);
845 hole_size = hole_end - hole_start; 904 drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
846 drm_printf(p, "%#018llx-%#018llx: %llu: free\n", hole_start, 905 start, start + size, size);
847 hole_end, hole_size);
848 return hole_size;
849 } 906 }
850 907
851 return 0; 908 return size;
852} 909}
853
854/** 910/**
855 * drm_mm_print - print allocator state 911 * drm_mm_print - print allocator state
856 * @mm: drm_mm allocator to print 912 * @mm: drm_mm allocator to print
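The heart of the drm_mm.c rewrite above is a pair of auxiliary rbtrees:
holes_size orders the free space by hole size and holes_addr by hole address,
while the pre-existing hole_stack list keeps the most recently freed holes at
the front for DRM_MM_INSERT_EVICT. The size-tree lookup used by
DRM_MM_INSERT_BEST is a lower-bound descent; a simplified restatement of the
patch's best_hole(), using the same struct fields, for readers skimming the
hunks above:

/* Find the smallest hole that can still hold @size (NULL if none). */
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
        struct rb_node *rb = mm->holes_size.rb_node;
        struct rb_node *best = NULL;

        while (rb) {
                u64 hole_size =
                        rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;

                if (size <= hole_size) {
                        best = rb;         /* fits; remember it, try a tighter fit */
                        rb = rb->rb_left;
                } else {
                        rb = rb->rb_right; /* too small; only larger holes can work */
                }
        }

        return best ? rb_entry(best, struct drm_mm_node, rb_hole_size) : NULL;
}

DRM_MM_INSERT_LOW and DRM_MM_INSERT_HIGH instead walk holes_addr, starting from
find_hole(mm, start) upwards or find_hole(mm, end) downwards, which is what
makes top-down allocation actually search from the top of the range.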
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 20cc33d1bfc1..d9100b565198 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -212,8 +212,7 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
                 goto out_unlock;
         }
 
-        ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
-                                 pages, 0, DRM_MM_SEARCH_DEFAULT);
+        ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node, pages);
         if (ret)
                 goto out_unlock;
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index ff826c16fb89..f103e787de94 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -108,6 +108,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                                    struct drm_mm_node *node, size_t size)
 {
         struct etnaviv_vram_mapping *free = NULL;
+        enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
         int ret;
 
         lockdep_assert_held(&mmu->lock);
@@ -119,9 +120,9 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                 bool found;
 
                 ret = drm_mm_insert_node_in_range(&mmu->mm, node,
-                                                  size, 0, mmu->last_iova, ~0UL,
-                                                  DRM_MM_SEARCH_DEFAULT);
-
+                                                  size, 0, 0,
+                                                  mmu->last_iova, U64_MAX,
+                                                  mode);
                 if (ret != -ENOSPC)
                         break;
 
@@ -136,7 +137,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                 }
 
                 /* Try to retire some entries */
-                drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, 0);
+                drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
 
                 found = 0;
                 INIT_LIST_HEAD(&list);
@@ -188,6 +189,8 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                         list_del_init(&m->scan_node);
                 }
 
+                mode = DRM_MM_INSERT_EVICT;
+
                 /*
                  * We removed enough mappings so that the new allocation will
                  * succeed, retry the allocation one more time.
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a07b62732923..c8689892a89f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -69,12 +69,10 @@ insert_mappable_node(struct i915_ggtt *ggtt,
                      struct drm_mm_node *node, u32 size)
 {
         memset(node, 0, sizeof(*node));
-        return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
-                                                   size, 0,
-                                                   I915_COLOR_UNEVICTABLE,
-                                                   0, ggtt->mappable_end,
-                                                   DRM_MM_SEARCH_DEFAULT,
-                                                   DRM_MM_CREATE_DEFAULT);
+        return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
+                                           size, 0, I915_COLOR_UNEVICTABLE,
+                                           0, ggtt->mappable_end,
+                                           DRM_MM_INSERT_LOW);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index a43e44e18042..c181b1bb3d2c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -109,6 +109,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
         }, **phase;
         struct i915_vma *vma, *next;
         struct drm_mm_node *node;
+        enum drm_mm_insert_mode mode;
         int ret;
 
         lockdep_assert_held(&vm->i915->drm.struct_mutex);
@@ -127,10 +128,14 @@ i915_gem_evict_something(struct i915_address_space *vm,
          * On each list, the oldest objects lie at the HEAD with the freshest
          * object on the TAIL.
          */
+        mode = DRM_MM_INSERT_BEST;
+        if (flags & PIN_HIGH)
+                mode = DRM_MM_INSERT_HIGH;
+        if (flags & PIN_MAPPABLE)
+                mode = DRM_MM_INSERT_LOW;
         drm_mm_scan_init_with_range(&scan, &vm->mm,
                                     min_size, alignment, cache_level,
-                                    start, end,
-                                    flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0);
+                                    start, end, mode);
 
         /* Retire before we search the active list. Although we have
          * reasonable accuracy in our retirement lists, we may have
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index c66e90571031..57bec08e80c5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -436,12 +436,11 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
                                                PIN_MAPPABLE | PIN_NONBLOCK);
         if (IS_ERR(vma)) {
                 memset(&cache->node, 0, sizeof(cache->node));
-                ret = drm_mm_insert_node_in_range_generic
+                ret = drm_mm_insert_node_in_range
                         (&ggtt->base.mm, &cache->node,
                          PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
                          0, ggtt->mappable_end,
-                         DRM_MM_SEARCH_DEFAULT,
-                         DRM_MM_CREATE_DEFAULT);
+                         DRM_MM_INSERT_LOW);
                 if (ret) /* no inactive aperture space, use cpu reloc */
                         return NULL;
         } else {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e808aad203d8..30d8dbd04f0b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2748,12 +2748,10 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
                 return ret;
 
         /* Reserve a mappable slot for our lockless error capture */
-        ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
-                                                  &ggtt->error_capture,
-                                                  PAGE_SIZE, 0,
-                                                  I915_COLOR_UNEVICTABLE,
-                                                  0, ggtt->mappable_end,
-                                                  0, 0);
+        ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
+                                          PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
+                                          0, ggtt->mappable_end,
+                                          DRM_MM_INSERT_LOW);
         if (ret)
                 return ret;
 
@@ -3663,7 +3661,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
                         u64 size, u64 alignment, unsigned long color,
                         u64 start, u64 end, unsigned int flags)
 {
-        u32 search_flag, alloc_flag;
+        enum drm_mm_insert_mode mode;
         u64 offset;
         int err;
 
@@ -3684,13 +3682,11 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
         if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
                 return -ENOSPC;
 
-        if (flags & PIN_HIGH) {
-                search_flag = DRM_MM_SEARCH_BELOW;
-                alloc_flag = DRM_MM_CREATE_TOP;
-        } else {
-                search_flag = DRM_MM_SEARCH_DEFAULT;
-                alloc_flag = DRM_MM_CREATE_DEFAULT;
-        }
+        mode = DRM_MM_INSERT_BEST;
+        if (flags & PIN_HIGH)
+                mode = DRM_MM_INSERT_HIGH;
+        if (flags & PIN_MAPPABLE)
+                mode = DRM_MM_INSERT_LOW;
 
         /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
          * so we know that we always have a minimum alignment of 4096.
@@ -3702,10 +3698,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
         if (alignment <= I915_GTT_MIN_ALIGNMENT)
                 alignment = 0;
 
-        err = drm_mm_insert_node_in_range_generic(&vm->mm, node,
-                                                  size, alignment, color,
-                                                  start, end,
-                                                  search_flag, alloc_flag);
+        err = drm_mm_insert_node_in_range(&vm->mm, node,
+                                          size, alignment, color,
+                                          start, end, mode);
         if (err != -ENOSPC)
                 return err;
 
@@ -3743,9 +3738,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
         if (err)
                 return err;
 
-        search_flag = DRM_MM_SEARCH_DEFAULT;
-        return drm_mm_insert_node_in_range_generic(&vm->mm, node,
-                                                   size, alignment, color,
-                                                   start, end,
-                                                   search_flag, alloc_flag);
+        return drm_mm_insert_node_in_range(&vm->mm, node,
+                                           size, alignment, color,
+                                           start, end, DRM_MM_INSERT_EVICT);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 127d698e7c84..ec7c5d80fe4f 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -55,9 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
                 return -ENODEV;
 
         mutex_lock(&dev_priv->mm.stolen_lock);
-        ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
-                                          alignment, start, end,
-                                          DRM_MM_SEARCH_DEFAULT);
+        ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
+                                          size, alignment, 0,
+                                          start, end, DRM_MM_INSERT_BEST);
         mutex_unlock(&dev_priv->mm.stolen_lock);
 
         return ret;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8098677a3916..c3b43f4d4f1f 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -54,8 +54,7 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
         if (!p)
                 return ERR_PTR(-ENOMEM);
 
-        ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
-                        npages, 0, DRM_MM_SEARCH_DEFAULT);
+        ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
         if (ret) {
                 drm_free_large(p);
                 return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index a311d26ccb21..b654eca7636a 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -45,8 +45,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
         if (WARN_ON(drm_mm_node_allocated(&vma->node)))
                 return 0;
 
-        ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages,
-                        0, DRM_MM_SEARCH_DEFAULT);
+        ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
         if (ret)
                 return ret;
 
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 6df53e6c1308..bb5b7480e0b4 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -22,23 +22,24 @@ static unsigned int max_iterations = 8192;
 static unsigned int max_prime = 128;
 
 enum {
-        DEFAULT,
-        TOPDOWN,
         BEST,
+        BOTTOMUP,
+        TOPDOWN,
+        EVICT,
 };
 
 static const struct insert_mode {
         const char *name;
-        unsigned int search_flags;
-        unsigned int create_flags;
+        enum drm_mm_insert_mode mode;
 } insert_modes[] = {
-        [DEFAULT] = { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
-        [TOPDOWN] = { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
-        [BEST] = { "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT },
+        [BEST] = { "best", DRM_MM_INSERT_BEST },
+        [BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
+        [TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
+        [EVICT] = { "evict", DRM_MM_INSERT_EVICT },
         {}
 }, evict_modes[] = {
-        { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
-        { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
+        { "bottom-up", DRM_MM_INSERT_LOW },
+        { "top-down", DRM_MM_INSERT_HIGH },
         {}
 };
 
@@ -526,8 +527,7 @@ static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
 
         err = drm_mm_insert_node_generic(mm, node,
                                          size, alignment, color,
-                                         mode->search_flags,
-                                         mode->create_flags);
+                                         mode->mode);
         if (err) {
                 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
                        size, alignment, color, mode->name, err);
@@ -547,7 +547,7 @@ static bool expect_insert_fail(struct drm_mm *mm, u64 size)
         struct drm_mm_node tmp = {};
         int err;
 
-        err = drm_mm_insert_node(mm, &tmp, size, 0, DRM_MM_SEARCH_DEFAULT);
+        err = drm_mm_insert_node(mm, &tmp, size);
         if (likely(err == -ENOSPC))
                 return true;
 
@@ -753,11 +753,10 @@ static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
 {
         int err;
 
-        err = drm_mm_insert_node_in_range_generic(mm, node,
+        err = drm_mm_insert_node_in_range(mm, node,
                                           size, alignment, color,
                                           range_start, range_end,
-                                          mode->search_flags,
-                                          mode->create_flags);
+                                          mode->mode);
         if (err) {
                 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
                        size, alignment, color, mode->name,
@@ -781,11 +780,10 @@ static bool expect_insert_in_range_fail(struct drm_mm *mm,
         struct drm_mm_node tmp = {};
         int err;
 
-        err = drm_mm_insert_node_in_range_generic(mm, &tmp,
+        err = drm_mm_insert_node_in_range(mm, &tmp,
                                           size, 0, 0,
                                           range_start, range_end,
-                                          DRM_MM_SEARCH_DEFAULT,
-                                          DRM_MM_CREATE_DEFAULT);
+                                          0);
         if (likely(err == -ENOSPC))
                 return true;
 
@@ -1324,7 +1322,7 @@ static int evict_something(struct drm_mm *mm,
         drm_mm_scan_init_with_range(&scan, mm,
                                     size, alignment, 0,
                                     range_start, range_end,
-                                    mode->create_flags);
+                                    mode->mode);
         if (!evict_nodes(&scan,
                          nodes, order, count, false,
                          &evict_list))
@@ -1332,8 +1330,7 @@
 
         memset(&tmp, 0, sizeof(tmp));
         err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
-                                         mode->search_flags,
-                                         mode->create_flags);
+                                         DRM_MM_INSERT_EVICT);
         if (err) {
                 pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
                        size, alignment);
@@ -1408,8 +1405,7 @@ static int igt_evict(void *ignored)
         ret = -EINVAL;
         drm_mm_init(&mm, 0, size);
         for (n = 0; n < size; n++) {
-                err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
-                                         DRM_MM_SEARCH_DEFAULT);
+                err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
                 if (err) {
                         pr_err("insert failed, step %d\n", n);
                         ret = err;
@@ -1517,8 +1513,7 @@ static int igt_evict_range(void *ignored)
         ret = -EINVAL;
         drm_mm_init(&mm, 0, size);
         for (n = 0; n < size; n++) {
-                err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
-                                         DRM_MM_SEARCH_DEFAULT);
+                err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
                 if (err) {
                         pr_err("insert failed, step %d\n", n);
                         ret = err;
@@ -1904,7 +1899,7 @@ static int evict_color(struct drm_mm *mm,
         drm_mm_scan_init_with_range(&scan, mm,
                                     size, alignment, color,
                                     range_start, range_end,
-                                    mode->create_flags);
+                                    mode->mode);
         if (!evict_nodes(&scan,
                          nodes, order, count, true,
                          &evict_list))
@@ -1912,8 +1907,7 @@
 
         memset(&tmp, 0, sizeof(tmp));
         err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
-                                         mode->search_flags,
-                                         mode->create_flags);
+                                         DRM_MM_INSERT_EVICT);
         if (err) {
                 pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
                        size, alignment, color, err);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 03defda77766..1622db24cd39 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -109,8 +109,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
         if (pool == AGP_TYPE) {
                 retval = drm_mm_insert_node(&dev_priv->agp_mm,
                                             &item->mm_node,
-                                            mem->size, 0,
-                                            DRM_MM_SEARCH_DEFAULT);
+                                            mem->size);
                 offset = item->mm_node.start;
         } else {
 #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
@@ -122,8 +121,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
 #else
                 retval = drm_mm_insert_node(&dev_priv->vram_mm,
                                             &item->mm_node,
-                                            mem->size, 0,
-                                            DRM_MM_SEARCH_DEFAULT);
+                                            mem->size);
                 offset = item->mm_node.start;
 #endif
         }
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 7d853e6b5ff0..b523a5d4a38c 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -128,8 +128,8 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
         if (!bo->mm)
                 return -ENOMEM;
 
-        err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
-                                         PAGE_SIZE, 0, 0, 0);
+        err = drm_mm_insert_node_generic(&tegra->mm,
+                                         bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
         if (err < 0) {
                 dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
                         err);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 988c48d1cf3e..90a6c0b03afc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -54,9 +54,8 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 {
         struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
         struct drm_mm *mm = &rman->mm;
-        struct drm_mm_node *node = NULL;
-        enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
-        enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+        struct drm_mm_node *node;
+        enum drm_mm_insert_mode mode;
         unsigned long lpfn;
         int ret;
 
@@ -68,16 +67,15 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
         if (!node)
                 return -ENOMEM;
 
-        if (place->flags & TTM_PL_FLAG_TOPDOWN) {
-                sflags = DRM_MM_SEARCH_BELOW;
-                aflags = DRM_MM_CREATE_TOP;
-        }
+        mode = DRM_MM_INSERT_BEST;
+        if (place->flags & TTM_PL_FLAG_TOPDOWN)
+                mode = DRM_MM_INSERT_HIGH;
 
         spin_lock(&rman->lock);
-        ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
+        ret = drm_mm_insert_node_in_range(mm, node,
+                                          mem->num_pages,
                                           mem->page_alignment, 0,
-                                          place->fpfn, lpfn,
-                                          sflags, aflags);
+                                          place->fpfn, lpfn, mode);
         spin_unlock(&rman->lock);
 
         if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 63239b5b5092..a0cd4ea15f07 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -593,7 +593,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 
         spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
         ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
-                                 dlist_count, 1, 0);
+                                 dlist_count);
         spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
         if (ret)
                 return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index fc68b1b4da52..f7f7677f6d8d 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -141,8 +141,7 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
         int ret, i;
         u32 __iomem *dst_kernel;
 
-        ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS, 1,
-                                 0);
+        ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
         if (ret) {
                 DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
                           ret);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 110d1518f5d5..c1f06897136b 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -514,9 +514,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
         if (lbm_size) {
                 if (!vc4_state->lbm.allocated) {
                         spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
-                        ret = drm_mm_insert_node(&vc4->hvs->lbm_mm,
+                        ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
                                                  &vc4_state->lbm,
-                                                 lbm_size, 32, 0);
+                                                 lbm_size, 32, 0, 0);
                         spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
                 } else {
                         WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index a04ef1c992d9..4217d66a5cc6 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
         if (mem->type == VIA_MEM_AGP)
                 retval = drm_mm_insert_node(&dev_priv->agp_mm,
                                             &item->mm_node,
-                                            tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
+                                            tmpSize);
         else
                 retval = drm_mm_insert_node(&dev_priv->vram_mm,
                                             &item->mm_node,
-                                            tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
+                                            tmpSize);
         if (retval)
                 goto fail_alloc;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index aa04fb0159a7..77cb7c627e09 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -673,16 +673,10 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 
         memset(info->node, 0, sizeof(*info->node));
         spin_lock_bh(&man->lock);
-        ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
-                                         0, 0,
-                                         DRM_MM_SEARCH_DEFAULT,
-                                         DRM_MM_CREATE_DEFAULT);
+        ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
         if (ret) {
                 vmw_cmdbuf_man_process(man);
-                ret = drm_mm_insert_node_generic(&man->mm, info->node,
-                                                 info->page_size, 0, 0,
-                                                 DRM_MM_SEARCH_DEFAULT,
-                                                 DRM_MM_CREATE_DEFAULT);
+                ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
         }
 
         spin_unlock_bh(&man->lock);