author     Daniel Vetter <daniel.vetter@ffwll.ch>    2016-12-29 15:48:23 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2016-12-30 06:53:51 -0500
commit     05fc03217e08b90bff1ff22792d5f86dd32f15a6
tree       6d8b566bfe4eec689beefd4a91b963b0eefe9fa0
parent     a8182863438232dce79f76cc511d752a219ff33a
drm/mm: Some doc polish
Added some boilerplate for the structs, documented members where they are
relevant and plenty of markup for hyperlinks all over. And a few small
wording polishes.

Note that the intro needs some more love after the DRM_MM_INSERT_* patch
from Chris has landed.

v2: Spelling fixes (Chris).

v3: Use &struct foo instead of &foo structure (Chris).

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1483044517-5770-3-git-send-email-daniel.vetter@ffwll.ch
-rw-r--r--    Documentation/gpu/drm-mm.rst    2
-rw-r--r--    drivers/gpu/drm/drm_mm.c       41
-rw-r--r--    include/drm/drm_mm.h           84
3 files changed, 89 insertions(+), 38 deletions(-)
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index 0285b68f6201..d3c6d77246cd 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -449,7 +449,7 @@ LRU Scan/Eviction Support
 -------------------------
 
 .. kernel-doc:: drivers/gpu/drm/drm_mm.c
-   :doc: lru scan roaster
+   :doc: lru scan roster
 
 DRM MM Range Allocator Function References
 ------------------------------------------
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index e54aa3fa538f..229b3f525dee 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -59,8 +59,8 @@
  *
  * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
  * Drivers are free to embed either of them into their own suitable
- * datastructures. drm_mm itself will not do any allocations of its own, so if
- * drivers choose not to embed nodes they need to still allocate them
+ * datastructures. drm_mm itself will not do any memory allocations of its own,
+ * so if drivers choose not to embed nodes they need to still allocate them
  * themselves.
  *
  * The range allocator also supports reservation of preallocated blocks. This is
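
For illustration, embedding both structs in driver-private objects might look
like the following minimal sketch (struct foo_device and struct foo_buffer are
hypothetical names, not part of the DRM API):

    #include <drm/drm_mm.h>

    struct foo_device {
            struct drm_mm mm;               /* embedded allocator */
    };

    struct foo_buffer {
            struct drm_mm_node node;        /* embedded allocation record */
    };

Because drm_mm never allocates memory itself, a driver that does not embed the
node this way has to kzalloc() it before insertion and kfree() it after removal.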
@@ -78,7 +78,7 @@
  * steep cliff not a real concern. Removing a node again is O(1).
  *
  * drm_mm supports a few features: Alignment and range restrictions can be
- * supplied. Further more every &drm_mm_node has a color value (which is just an
+ * supplied. Furthermore every &drm_mm_node has a color value (which is just an
  * opaque unsigned long) which in conjunction with a driver callback can be used
  * to implement sophisticated placement restrictions. The i915 DRM driver uses
  * this to implement guard pages between incompatible caching domains in the
@@ -296,11 +296,11 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
  * @mm: drm_mm allocator to insert @node into
  * @node: drm_mm_node to insert
  *
- * This functions inserts an already set-up drm_mm_node into the allocator,
- * meaning that start, size and color must be set by the caller. This is useful
- * to initialize the allocator with preallocated objects which must be set-up
- * before the range allocator can be set-up, e.g. when taking over a firmware
- * framebuffer.
+ * This function inserts an already set-up &drm_mm_node into the allocator,
+ * meaning that start, size and color must be set by the caller. All other
+ * fields must be cleared to 0. This is useful to initialize the allocator with
+ * preallocated objects which must be set-up before the range allocator can be
+ * set-up, e.g. when taking over a firmware framebuffer.
  *
  * Returns:
  * 0 on success, -ENOSPC if there's no hole where @node is.
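
A hedged sketch of such a takeover, assuming the firmware scanout range is
already known (fw_fb_base, fw_fb_size and dev_priv are hypothetical driver
variables; error unwinding is trimmed):

    struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

    if (!node)
            return -ENOMEM;

    /* only start, size and color may be non-zero, as documented above */
    node->start = fw_fb_base;
    node->size = fw_fb_size;

    ret = drm_mm_reserve_node(&dev_priv->mm, node);
    if (ret)        /* -ENOSPC: no hole covers [start, start + size) */
            kfree(node);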
@@ -375,7 +375,7 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
  * @sflags: flags to fine-tune the allocation search
  * @aflags: flags to fine-tune the allocation behavior
  *
- * The preallocated node must be cleared to 0.
+ * The preallocated @node must be cleared to 0.
  *
  * Returns:
  * 0 on success, -ENOSPC if there's no suitable hole.
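
In practice that means allocating the node with kzalloc() (or memset()ing an
embedded one) before calling drm_mm_insert_node_generic(), the function
documented here. A sketch, using the search and creation flags this header
defines (size, alignment, color and dev_priv are caller-supplied):

    struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

    if (!node)
            return -ENOMEM;

    /* node is all-zero as required; let the allocator pick a hole */
    ret = drm_mm_insert_node_generic(&dev_priv->mm, node, size, alignment,
                                     color, DRM_MM_SEARCH_DEFAULT,
                                     DRM_MM_CREATE_DEFAULT);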
@@ -537,7 +537,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 EXPORT_SYMBOL(drm_mm_replace_node);
 
 /**
- * DOC: lru scan roaster
+ * DOC: lru scan roster
  *
  * Very often GPUs need to have continuous allocations for a given object. When
  * evicting objects to make space for a new one it is therefore not most
@@ -549,9 +549,11 @@ EXPORT_SYMBOL(drm_mm_replace_node);
  * The DRM range allocator supports this use-case through the scanning
  * interfaces. First a scan operation needs to be initialized with
  * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
- * objects to the roster (probably by walking an LRU list, but this can be
- * freely implemented) (using drm_mm_scan_add_block()) until a suitable hole
- * is found or there are no further evictable objects.
+ * objects to the roster, probably by walking an LRU list, but this can be
+ * freely implemented. Eviction candidates are added using
+ * drm_mm_scan_add_block() until a suitable hole is found or there are no
+ * further evictable objects. Eviction roster metadata is tracked in
+ * &struct drm_mm_scan.
  *
  * The driver must walk through all objects again in exactly the reverse
  * order to restore the allocator state. Note that while the allocator is used
@@ -559,7 +561,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
  *
  * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
  * reported true) in the scan, and any overlapping nodes after color adjustment
- * (drm_mm_scan_evict_color()). Adding and removing an object is O(1), and
+ * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
  * since freeing a node is also O(1) the overall complexity is
  * O(scanned_objects). So like the free stack which needs to be walked before a
  * scan operation even begins this is linear in the number of objects. It
@@ -705,14 +707,15 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
  * @scan: the active drm_mm scanner
  * @node: drm_mm_node to remove
  *
- * Nodes _must_ be removed in exactly the reverse order from the scan list as
- * they have been added (e.g. using list_add as they are added and then
- * list_for_each over that eviction list to remove), otherwise the internal
+ * Nodes **must** be removed in exactly the reverse order from the scan list as
+ * they have been added (e.g. using list_add() as they are added and then
+ * list_for_each() over that eviction list to remove), otherwise the internal
  * state of the memory manager will be corrupted.
  *
  * When the scan list is empty, the selected memory nodes can be freed. An
- * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
- * return the just freed block (because its at the top of the free_stack list).
+ * immediately following drm_mm_insert_node_in_range_generic() or one of the
+ * simpler versions of that function with !DRM_MM_SEARCH_BEST will then return
+ * the just freed block (because it's at the top of the free_stack list).
  *
  * Returns:
  * True if this block should be evicted, false otherwise. Will always
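
Putting the two halves of the protocol together, an eviction loop built on
these helpers might look like the following sketch. struct foo_obj, its list
links and foo_evict() are hypothetical driver constructs, and the exact
drm_mm_scan_init() parameters can differ between kernel versions:

    static int foo_evict_for_hole(struct foo_device *foo, u64 size,
                                  u64 alignment, unsigned long color)
    {
            struct drm_mm_scan scan;
            struct foo_obj *obj, *next;
            LIST_HEAD(evict_list);
            bool found = false;

            drm_mm_scan_init(&scan, &foo->mm, size, alignment, color, 0);

            /* build the roster by walking the driver's LRU list */
            list_for_each_entry(obj, &foo->lru_list, lru_link) {
                    list_add(&obj->evict_link, &evict_list);
                    if (drm_mm_scan_add_block(&scan, &obj->node)) {
                            found = true;   /* a suitable hole exists */
                            break;
                    }
            }

            /*
             * Unwind: list_add() prepends, so this walk visits the nodes in
             * exactly the reverse order they were added, as required. Objects
             * the scan did not select are dropped from the eviction list.
             */
            list_for_each_entry_safe(obj, next, &evict_list, evict_link)
                    if (!drm_mm_scan_remove_block(&scan, &obj->node))
                            list_del(&obj->evict_link);

            /* the scan is over, the selected nodes can now be freed */
            list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
                    list_del(&obj->evict_link);
                    foo_evict(obj);         /* removes and frees obj->node */
            }

            return found ? 0 : -ENOSPC;
    }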
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 1383ac2328b8..3bddca8fd2b5 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -67,16 +67,29 @@ enum drm_mm_allocator_flags {
 #define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
 #define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
 
+/**
+ * struct drm_mm_node - allocated block in the DRM allocator
+ *
+ * This represents an allocated block in a &drm_mm allocator. Except for
+ * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is
+ * entirely opaque and should only be accessed through the provided functions.
+ * Since allocation of these nodes is entirely handled by the driver they can be
+ * embedded.
+ */
 struct drm_mm_node {
+        /** @color: Opaque driver-private tag. */
+        unsigned long color;
+        /** @start: Start address of the allocated block. */
+        u64 start;
+        /** @size: Size of the allocated block. */
+        u64 size;
+        /* private: */
         struct list_head node_list;
         struct list_head hole_stack;
         struct rb_node rb;
         unsigned hole_follows : 1;
         unsigned allocated : 1;
         bool scanned_block : 1;
-        unsigned long color;
-        u64 start;
-        u64 size;
         u64 __subtree_last;
         struct drm_mm *mm;
 #ifdef CONFIG_DRM_DEBUG_MM
@@ -84,7 +97,29 @@ struct drm_mm_node {
 #endif
 };
 
+/**
+ * struct drm_mm - DRM allocator
+ *
+ * DRM range allocator with a few special functions and features geared towards
+ * managing GPU memory. Except for the @color_adjust callback the structure is
+ * entirely opaque and should only be accessed through the provided functions
+ * and macros. This structure can be embedded into larger driver structures.
+ */
 struct drm_mm {
+        /**
+         * @color_adjust:
+         *
+         * Optional driver callback to further apply restrictions on a hole. The
+         * node argument points at the node containing the hole from which the
+         * block would be allocated (see drm_mm_hole_follows() and friends). The
+         * other arguments are the size of the block to be allocated. The driver
+         * can adjust the start and end as needed to e.g. insert guard pages.
+         */
+        void (*color_adjust)(const struct drm_mm_node *node,
+                             unsigned long color,
+                             u64 *start, u64 *end);
+
+        /* private: */
         /* List of all memory nodes that immediately precede a free hole. */
         struct list_head hole_stack;
         /* head_node.node_list is the list of all memory nodes, ordered
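
As an illustration, a guard-page callback in the spirit of what i915 does
could be sketched like this (assuming PAGE_SIZE guard pages; it also peeks at
the node following the hole via the private node_list, as i915 did at the
time):

    static void foo_color_adjust(const struct drm_mm_node *node,
                                 unsigned long color,
                                 u64 *start, u64 *end)
    {
            /* keep a guard page after a node of a different caching domain */
            if (node->allocated && node->color != color)
                    *start += PAGE_SIZE;

            /* and one before the next allocated node, if it differs too */
            node = list_next_entry(node, node_list);
            if (node->allocated && node->color != color)
                    *end -= PAGE_SIZE;
    }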
@@ -93,14 +128,20 @@ struct drm_mm {
         /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
         struct rb_root interval_tree;
 
-        void (*color_adjust)(const struct drm_mm_node *node,
-                             unsigned long color,
-                             u64 *start, u64 *end);
-
         unsigned long scan_active;
 };
 
+/**
+ * struct drm_mm_scan - DRM allocator eviction roster data
+ *
+ * This structure tracks data needed for the eviction roster set up using
+ * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and
+ * drm_mm_scan_remove_block(). The structure is entirely opaque and should only
+ * be accessed through the provided functions and macros. It is meant to be
+ * allocated temporarily by the driver on the stack.
+ */
 struct drm_mm_scan {
+        /* private: */
         struct drm_mm *mm;
 
         u64 size;
@@ -159,7 +200,8 @@ static inline bool drm_mm_initialized(const struct drm_mm *mm)
  *
  * Holes are embedded into the drm_mm using the tail of a drm_mm_node.
  * If you wish to know whether a hole follows this particular node,
- * query this function.
+ * query this function. See also drm_mm_hole_node_start() and
+ * drm_mm_hole_node_end().
  *
  * Returns:
  * True if a hole follows the @node.
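
Together these three helpers allow a small debug sketch along these lines
(the function documented here is drm_mm_hole_follows()):

    if (drm_mm_hole_follows(node))
            pr_debug("hole after node: [%llx, %llx)\n",
                     drm_mm_hole_node_start(node),
                     drm_mm_hole_node_end(node));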
@@ -228,23 +270,23 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
 
 /**
  * drm_mm_for_each_node - iterator to walk over all allocated nodes
- * @entry: drm_mm_node structure to assign to in each iteration step
- * @mm: drm_mm allocator to walk
+ * @entry: &struct drm_mm_node to assign to in each iteration step
+ * @mm: &drm_mm allocator to walk
  *
  * This iterator walks over all nodes in the range allocator. It is implemented
- * with list_for_each, so not save against removal of elements.
+ * with list_for_each(), so not safe against removal of elements.
  */
 #define drm_mm_for_each_node(entry, mm) \
         list_for_each_entry(entry, drm_mm_nodes(mm), node_list)
 
 /**
  * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
- * @entry: drm_mm_node structure to assign to in each iteration step
- * @next: drm_mm_node structure to store the next step
- * @mm: drm_mm allocator to walk
+ * @entry: &struct drm_mm_node to assign to in each iteration step
+ * @next: &struct drm_mm_node to store the next step
+ * @mm: &drm_mm allocator to walk
  *
  * This iterator walks over all nodes in the range allocator. It is implemented
- * with list_for_each_safe, so save against removal of elements.
+ * with list_for_each_safe(), so safe against removal of elements.
  */
 #define drm_mm_for_each_node_safe(entry, next, mm) \
         list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
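
The safe variant is what a driver teardown path would typically use to release
any remaining allocations, e.g. this sketch (assuming the nodes were
kzalloc()ed rather than embedded):

    struct drm_mm_node *node, *next;

    drm_mm_for_each_node_safe(node, next, &mm) {
            drm_mm_remove_node(node);
            kfree(node);
    }
    drm_mm_takedown(&mm);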
@@ -259,13 +301,13 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
 
 /**
  * drm_mm_for_each_hole - iterator to walk over all holes
- * @entry: drm_mm_node used internally to track progress
- * @mm: drm_mm allocator to walk
+ * @entry: &drm_mm_node used internally to track progress
+ * @mm: &drm_mm allocator to walk
  * @hole_start: ulong variable to assign the hole start to on each iteration
  * @hole_end: ulong variable to assign the hole end to on each iteration
  *
  * This iterator walks over all holes in the range allocator. It is implemented
- * with list_for_each, so not save against removal of elements. @entry is used
+ * with list_for_each(), so not safe against removal of elements. @entry is used
  * internally and will not reflect a real drm_mm_node for the very first hole.
  * Hence users of this iterator may not access it.
  *
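
A minimal use of this iterator, e.g. for a debug dump of free space
(hole_start and hole_end are u64 values despite the older "ulong" wording
above):

    struct drm_mm_node *entry;
    u64 hole_start, hole_end;

    drm_mm_for_each_hole(entry, &mm, hole_start, hole_end)
            pr_info("hole: [%llx, %llx)\n", hole_start, hole_end);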
@@ -334,6 +376,9 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
  * @sflags: flags to fine-tune the allocation search
  * @aflags: flags to fine-tune the allocation behavior
  *
+ * This is a simplified version of drm_mm_insert_node_in_range_generic() with no
+ * range restrictions applied.
+ *
  * The preallocated node must be cleared to 0.
  *
  * Returns:
@@ -434,6 +479,9 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
  * @color: opaque tag value to use for the allocation
  * @flags: flags to specify how the allocation will be performed afterwards
  *
+ * This is a simplified version of drm_mm_scan_init_with_range() with no range
+ * restrictions applied.
+ *
  * This simply sets up the scanning routines with the parameters for the desired
  * hole.
  *