commit bbba96931762bcad8a691dfbf8d1520b71831c3a
Author:    Chris Wilson <chris@chris-wilson.co.uk>   2017-02-04 06:19:13 -0500
Committer: Daniel Vetter <daniel.vetter@ffwll.ch>    2017-02-06 10:57:37 -0500
Tree:      f3e4eaf765b4b1dcb1207a30532e40f6478e5117
Parent:    c5a828148f302c72a137af7f5487a4e52f19f4c9
drm: Micro-optimise drm_mm_for_each_node_in_range()
As we require valid start/end parameters, we can replace the initial
potential NULL with a pointer to the drm_mm.head_node and so reduce the
test on every iteration from a NULL + address comparison to just an
address comparison.
add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-26 (-26)
function old new delta
i915_gem_evict_for_node 719 693 -26
(No other users outside of the test harness.)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20170204111913.12416-1-chris@chris-wilson.co.uk
 drivers/gpu/drm/drm_mm.c               |  2 +-
 drivers/gpu/drm/selftests/test-drm_mm.c | 10 ++++++----
 include/drm/drm_mm.h                    |  5 ++++-
 3 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 8bfb0b327267..f794089d30ac 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -170,7 +170,7 @@ struct drm_mm_node *
 __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
 {
 	return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
-					       start, last);
+					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
 }
 EXPORT_SYMBOL(__drm_mm_interval_first);
 
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 1e71bc182ca9..2958f596081e 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -839,16 +839,18 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
 		n++;
 	}
 
-	drm_mm_for_each_node_in_range(node, mm, 0, start) {
-		if (node) {
+	if (start > 0) {
+		node = __drm_mm_interval_first(mm, 0, start - 1);
+		if (node->allocated) {
 			pr_err("node before start: node=%llx+%llu, start=%llx\n",
 			       node->start, node->size, start);
 			return false;
 		}
 	}
 
-	drm_mm_for_each_node_in_range(node, mm, end, U64_MAX) {
-		if (node) {
+	if (end < U64_MAX) {
+		node = __drm_mm_interval_first(mm, end, U64_MAX);
+		if (node->allocated) {
 			pr_err("node after end: node=%llx+%llu, end=%llx\n",
 			       node->start, node->size, end);
 			return false;
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index d81b0ba9921f..f262da180117 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -459,10 +459,13 @@ __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);
  * but using the internal interval tree to accelerate the search for the
  * starting node, and so not safe against removal of elements. It assumes
  * that @end is within (or is the upper limit of) the drm_mm allocator.
+ * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk
+ * over the special _unallocated_ &drm_mm.head_node, and may even continue
+ * indefinitely.
  */
 #define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
 	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
-	     node__ && node__->start < (end__);				\
+	     node__->start < (end__);					\
 	     node__ = list_next_entry(node__, node_list))
 
 void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,