path: root/include/drm/drm_mm.h
author    Lauri Kasanen <cand@gmx.com>    2014-04-02 13:03:57 -0400
committer Dave Airlie <airlied@redhat.com>    2014-04-03 19:28:14 -0400
commit    62347f9e0f81d50e9b0923ec8a192f60ab7a1801 (patch)
tree      b7d6db268abfe08ff52742a47902474877bfa0a4 /include/drm/drm_mm.h
parent    2614dc66837c2a6fd0e14e3f8e72343782c8b784 (diff)
drm: Add support for two-ended allocation, v3
Clients like i915 need to segregate cache domains within the GTT which
can lead to small amounts of fragmentation. By allocating the uncached
buffers from the bottom and the cacheable buffers from the top, we can
reduce the amount of wasted space and also optimize allocation of the
mappable portion of the GTT to only those buffers that require CPU
access through the GTT.

For other drivers, allocating small bos from one end and large ones
from the other helps improve the quality of fragmentation.

Based on drm_mm work by Chris Wilson.

v3: Changed to use a TTM placement flag
v2: Updated kerneldoc

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <ben@bwidawsk.net>
Cc: Christian König <deathsimple@vodafone.de>
Signed-off-by: Lauri Kasanen <cand@gmx.com>
Signed-off-by: David Airlie <airlied@redhat.com>
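A hypothetical caller sketch (not part of this patch): place_buffer() and its
parameters are made up for illustration, while the DRM_MM_BOTTOMUP /
DRM_MM_TOPDOWN flag pairs and the extended drm_mm_insert_node_generic()
signature are the ones introduced by the header change below. Each convenience
macro expands to a search flag plus an allocator flag, so it fills both new
arguments at once.

#include <drm/drm_mm.h>

/* Keep CPU-visible buffers bottom-up so the mappable range stays dense,
 * and push everything else to the top of the managed range. */
static int place_buffer(struct drm_mm *mm, struct drm_mm_node *node,
			unsigned long size, bool needs_cpu_access)
{
	if (needs_cpu_access)
		return drm_mm_insert_node_generic(mm, node, size, 0, 0,
						  DRM_MM_BOTTOMUP);

	/* DRM_MM_TOPDOWN expands to DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP */
	return drm_mm_insert_node_generic(mm, node, size, 0, 0,
					  DRM_MM_TOPDOWN);
}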
Diffstat (limited to 'include/drm/drm_mm.h')
-rw-r--r--  include/drm/drm_mm.h  32
1 file changed, 28 insertions(+), 4 deletions(-)
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 8b6981ab3fcf..a24addfdfcec 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -47,8 +47,17 @@
 enum drm_mm_search_flags {
 	DRM_MM_SEARCH_DEFAULT = 0,
 	DRM_MM_SEARCH_BEST = 1 << 0,
+	DRM_MM_SEARCH_BELOW = 1 << 1,
 };
 
+enum drm_mm_allocator_flags {
+	DRM_MM_CREATE_DEFAULT = 0,
+	DRM_MM_CREATE_TOP = 1 << 0,
+};
+
+#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
+#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
+
 struct drm_mm_node {
 	struct list_head node_list;
 	struct list_head hole_stack;
@@ -186,6 +195,9 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
  * Implementation Note:
  * We need to inline list_for_each_entry in order to be able to set hole_start
  * and hole_end on each iteration while keeping the macro sane.
+ *
+ * The __drm_mm_for_each_hole version is similar, but with added support for
+ * going backwards.
  */
 #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
 	for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
@@ -195,6 +207,14 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 	     1 : 0; \
 	     entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
 
+#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
+	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+	     &entry->hole_stack != &(mm)->hole_stack ? \
+	     hole_start = drm_mm_hole_node_start(entry), \
+	     hole_end = drm_mm_hole_node_end(entry), \
+	     1 : 0; \
+	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
 /*
  * Basic range manager support (drm_mm.c)
  */
@@ -205,7 +225,8 @@ int drm_mm_insert_node_generic(struct drm_mm *mm,
 			       unsigned long size,
 			       unsigned alignment,
 			       unsigned long color,
-			       enum drm_mm_search_flags flags);
+			       enum drm_mm_search_flags sflags,
+			       enum drm_mm_allocator_flags aflags);
 /**
  * drm_mm_insert_node - search for space and insert @node
  * @mm: drm_mm to allocate from
@@ -228,7 +249,8 @@ static inline int drm_mm_insert_node(struct drm_mm *mm,
 				  unsigned alignment,
 				  enum drm_mm_search_flags flags)
 {
-	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
+	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
+					  DRM_MM_CREATE_DEFAULT);
 }
 
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
@@ -238,7 +260,8 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
 					unsigned long color,
 					unsigned long start,
 					unsigned long end,
-					enum drm_mm_search_flags flags);
+					enum drm_mm_search_flags sflags,
+					enum drm_mm_allocator_flags aflags);
 /**
  * drm_mm_insert_node_in_range - ranged search for space and insert @node
  * @mm: drm_mm to allocate from
@@ -266,7 +289,8 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
 					   enum drm_mm_search_flags flags)
 {
 	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
-						   0, start, end, flags);
+						   0, start, end, flags,
+						   DRM_MM_CREATE_DEFAULT);
 }
 
 void drm_mm_remove_node(struct drm_mm_node *node);
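
To make the new backwards iterator concrete, a hedged usage sketch (again not
part of the patch; fits_somewhere_topdown() and its arguments are made up):
passing 1 as the last argument walks mm->hole_stack in reverse, which is the
order a DRM_MM_SEARCH_BELOW search would visit holes, and with
DRM_MM_CREATE_TOP the allocator then places the node at the top of the chosen
hole rather than at hole_start.

#include <drm/drm_mm.h>

/* Hypothetical helper: report whether any hole could hold "size" bytes,
 * scanning the hole list in reverse as a top-down search would. */
static bool fits_somewhere_topdown(struct drm_mm *mm, unsigned long size)
{
	struct drm_mm_node *entry;
	unsigned long hole_start, hole_end;

	/* backwards = 1: start from hole_stack.prev and walk towards the head */
	__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 1) {
		if (hole_end - hole_start >= size)
			return true;
	}
	return false;
}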