author    Lauri Kasanen <cand@gmx.com>      2014-04-02 13:03:57 -0400
committer Dave Airlie <airlied@redhat.com>  2014-04-03 19:28:14 -0400
commit    62347f9e0f81d50e9b0923ec8a192f60ab7a1801 (patch)
tree      b7d6db268abfe08ff52742a47902474877bfa0a4
parent    2614dc66837c2a6fd0e14e3f8e72343782c8b784 (diff)
drm: Add support for two-ended allocation, v3
Clients like i915 need to segregate cache domains within the GTT which
can lead to small amounts of fragmentation. By allocating the uncached
buffers from the bottom and the cacheable buffers from the top, we can
reduce the amount of wasted space and also optimize allocation of the
mappable portion of the GTT to only those buffers that require CPU
access through the GTT.

For other drivers, allocating small bos from one end and large ones
from the other helps improve the quality of fragmentation.

Based on drm_mm work by Chris Wilson.

v3: Changed to use a TTM placement flag
v2: Updated kerneldoc

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <ben@bwidawsk.net>
Cc: Christian König <deathsimple@vodafone.de>
Signed-off-by: Lauri Kasanen <cand@gmx.com>
Signed-off-by: David Airlie <airlied@redhat.com>
-rw-r--r--  drivers/gpu/drm/drm_mm.c              66
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c        3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c    3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c  11
-rw-r--r--  include/drm/drm_mm.h                  32
-rw-r--r--  include/drm/ttm/ttm_placement.h        3
6 files changed, 92 insertions, 26 deletions
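
To make the new interface concrete, here is a minimal sketch (not part of the commit) of two-ended allocation at the drm_mm level, using the DRM_MM_TOPDOWN and DRM_MM_BOTTOMUP convenience macros added to include/drm/drm_mm.h below; the range size, node sizes, and variable names are invented for the example:

    /*
     * Illustrative sketch only. Place a large node at the top of a managed
     * range and a small node at the bottom, so the two classes of buffers
     * stay at opposite ends and fragmentation in between is reduced.
     */
    struct drm_mm mm;
    struct drm_mm_node big_node = {}, small_node = {};   /* nodes must be cleared to 0 */
    int ret;

    drm_mm_init(&mm, 0, 1024 * 1024);   /* manage a 1 MiB range */

    /* Large buffer: search the hole list backwards and create at the top. */
    ret = drm_mm_insert_node_generic(&mm, &big_node, 256 * 1024, 0, 0,
                                     DRM_MM_TOPDOWN);

    /* Small buffer: default bottom-up search and placement. */
    if (!ret)
            ret = drm_mm_insert_node_generic(&mm, &small_node, 4096, 0, 0,
                                             DRM_MM_BOTTOMUP);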
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index a2d45b748f86..8f64be44bfe7 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -82,6 +82,10 @@
  * this to implement guard pages between incompatible caching domains in the
  * graphics TT.
  *
+ * Two behaviors are supported for searching and allocating: bottom-up and top-down.
+ * The default is bottom-up. Top-down allocation can be used if the memory area
+ * has different restrictions, or just to reduce fragmentation.
+ *
  * Finally iteration helpers to walk all nodes and all holes are provided as are
  * some basic allocator dumpers for debugging.
  */
@@ -102,7 +106,8 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                  struct drm_mm_node *node,
                                  unsigned long size, unsigned alignment,
-                                 unsigned long color)
+                                 unsigned long color,
+                                 enum drm_mm_allocator_flags flags)
 {
         struct drm_mm *mm = hole_node->mm;
         unsigned long hole_start = drm_mm_hole_node_start(hole_node);
@@ -115,12 +120,22 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
         if (mm->color_adjust)
                 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
+        if (flags & DRM_MM_CREATE_TOP)
+                adj_start = adj_end - size;
+
         if (alignment) {
                 unsigned tmp = adj_start % alignment;
-                if (tmp)
-                        adj_start += alignment - tmp;
+                if (tmp) {
+                        if (flags & DRM_MM_CREATE_TOP)
+                                adj_start -= tmp;
+                        else
+                                adj_start += alignment - tmp;
+                }
         }
 
+        BUG_ON(adj_start < hole_start);
+        BUG_ON(adj_end > hole_end);
+
         if (adj_start == hole_start) {
                 hole_node->hole_follows = 0;
                 list_del(&hole_node->hole_stack);
@@ -205,7 +220,8 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
  * @size: size of the allocation
  * @alignment: alignment of the allocation
  * @color: opaque tag value to use for this node
- * @flags: flags to fine-tune the allocation
+ * @sflags: flags to fine-tune the allocation search
+ * @aflags: flags to fine-tune the allocation behavior
  *
  * The preallocated node must be cleared to 0.
  *
@@ -215,16 +231,17 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                                unsigned long size, unsigned alignment,
                                unsigned long color,
-                               enum drm_mm_search_flags flags)
+                               enum drm_mm_search_flags sflags,
+                               enum drm_mm_allocator_flags aflags)
 {
         struct drm_mm_node *hole_node;
 
         hole_node = drm_mm_search_free_generic(mm, size, alignment,
-                                               color, flags);
+                                               color, sflags);
         if (!hole_node)
                 return -ENOSPC;
 
-        drm_mm_insert_helper(hole_node, node, size, alignment, color);
+        drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
         return 0;
 }
 EXPORT_SYMBOL(drm_mm_insert_node_generic);
@@ -233,7 +250,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                        struct drm_mm_node *node,
                                        unsigned long size, unsigned alignment,
                                        unsigned long color,
-                                       unsigned long start, unsigned long end)
+                                       unsigned long start, unsigned long end,
+                                       enum drm_mm_allocator_flags flags)
 {
         struct drm_mm *mm = hole_node->mm;
         unsigned long hole_start = drm_mm_hole_node_start(hole_node);
@@ -248,13 +266,20 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
         if (adj_end > end)
                 adj_end = end;
 
+        if (flags & DRM_MM_CREATE_TOP)
+                adj_start = adj_end - size;
+
         if (mm->color_adjust)
                 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
         if (alignment) {
                 unsigned tmp = adj_start % alignment;
-                if (tmp)
-                        adj_start += alignment - tmp;
+                if (tmp) {
+                        if (flags & DRM_MM_CREATE_TOP)
+                                adj_start -= tmp;
+                        else
+                                adj_start += alignment - tmp;
+                }
         }
 
         if (adj_start == hole_start) {
@@ -271,6 +296,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
         INIT_LIST_HEAD(&node->hole_stack);
         list_add(&node->node_list, &hole_node->node_list);
 
+        BUG_ON(node->start < start);
+        BUG_ON(node->start < adj_start);
         BUG_ON(node->start + node->size > adj_end);
         BUG_ON(node->start + node->size > end);
 
@@ -290,7 +317,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
  * @color: opaque tag value to use for this node
  * @start: start of the allowed range for this node
  * @end: end of the allowed range for this node
- * @flags: flags to fine-tune the allocation
+ * @sflags: flags to fine-tune the allocation search
+ * @aflags: flags to fine-tune the allocation behavior
  *
  * The preallocated node must be cleared to 0.
  *
@@ -298,21 +326,23 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
  * 0 on success, -ENOSPC if there's no suitable hole.
  */
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
-                                        unsigned long size, unsigned alignment, unsigned long color,
+                                        unsigned long size, unsigned alignment,
+                                        unsigned long color,
                                         unsigned long start, unsigned long end,
-                                        enum drm_mm_search_flags flags)
+                                        enum drm_mm_search_flags sflags,
+                                        enum drm_mm_allocator_flags aflags)
 {
         struct drm_mm_node *hole_node;
 
         hole_node = drm_mm_search_free_in_range_generic(mm,
                                                         size, alignment, color,
-                                                        start, end, flags);
+                                                        start, end, sflags);
         if (!hole_node)
                 return -ENOSPC;
 
         drm_mm_insert_helper_range(hole_node, node,
                                    size, alignment, color,
-                                   start, end);
+                                   start, end, aflags);
         return 0;
 }
 EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
@@ -391,7 +421,8 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
         best = NULL;
         best_size = ~0UL;
 
-        drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+        __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+                               flags & DRM_MM_SEARCH_BELOW) {
                 if (mm->color_adjust) {
                         mm->color_adjust(entry, color, &adj_start, &adj_end);
                         if (adj_end <= adj_start)
@@ -432,7 +463,8 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
         best = NULL;
         best_size = ~0UL;
 
-        drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+        __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+                               flags & DRM_MM_SEARCH_BELOW) {
                 if (adj_start < start)
                         adj_start = start;
                 if (adj_end > end)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 33bbaa0d4412..404a5456bf3a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3264,7 +3264,8 @@ search_free:
         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
                                                   size, alignment,
                                                   obj->cache_level, 0, gtt_max,
-                                                  DRM_MM_SEARCH_DEFAULT);
+                                                  DRM_MM_SEARCH_DEFAULT,
+                                                  DRM_MM_CREATE_DEFAULT);
         if (ret) {
                 ret = i915_gem_evict_something(dev, vm, size, alignment,
                                                obj->cache_level, flags);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ee535514aa41..2b3c79923d90 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1074,7 +1074,8 @@ alloc:
                                                   &ppgtt->node, GEN6_PD_SIZE,
                                                   GEN6_PD_ALIGN, 0,
                                                   0, dev_priv->gtt.base.total,
-                                                  DRM_MM_SEARCH_DEFAULT);
+                                                  DRM_MM_SEARCH_DEFAULT,
+                                                  DRM_MM_CREATE_DEFAULT);
         if (ret == -ENOSPC && !retried) {
                 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
                                                GEN6_PD_SIZE, GEN6_PD_ALIGN,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index c58eba33bd5f..bd850c9f4bca 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -55,6 +55,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
         struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
         struct drm_mm *mm = &rman->mm;
         struct drm_mm_node *node = NULL;
+        enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
         unsigned long lpfn;
         int ret;
 
@@ -66,11 +67,15 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
         if (!node)
                 return -ENOMEM;
 
+        if (bo->mem.placement & TTM_PL_FLAG_TOPDOWN)
+                aflags = DRM_MM_CREATE_TOP;
+
         spin_lock(&rman->lock);
-        ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
-                                          mem->page_alignment,
-                                          placement->fpfn, lpfn,
-                                          DRM_MM_SEARCH_BEST);
+        ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
+                                                  mem->page_alignment, 0,
+                                                  placement->fpfn, lpfn,
+                                                  DRM_MM_SEARCH_BEST,
+                                                  aflags);
         spin_unlock(&rman->lock);
 
         if (unlikely(ret)) {
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 8b6981ab3fcf..a24addfdfcec 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -47,8 +47,17 @@
 enum drm_mm_search_flags {
         DRM_MM_SEARCH_DEFAULT =         0,
         DRM_MM_SEARCH_BEST =            1 << 0,
+        DRM_MM_SEARCH_BELOW =           1 << 1,
 };
 
+enum drm_mm_allocator_flags {
+        DRM_MM_CREATE_DEFAULT =         0,
+        DRM_MM_CREATE_TOP =             1 << 0,
+};
+
+#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
+#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
+
 struct drm_mm_node {
         struct list_head node_list;
         struct list_head hole_stack;
@@ -186,6 +195,9 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
  * Implementation Note:
  * We need to inline list_for_each_entry in order to be able to set hole_start
  * and hole_end on each iteration while keeping the macro sane.
+ *
+ * The __drm_mm_for_each_hole version is similar, but with added support for
+ * going backwards.
  */
 #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
         for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
@@ -195,6 +207,14 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
              1 : 0; \
              entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
 
+#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
+        for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+             &entry->hole_stack != &(mm)->hole_stack ? \
+             hole_start = drm_mm_hole_node_start(entry), \
+             hole_end = drm_mm_hole_node_end(entry), \
+             1 : 0; \
+             entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
 /*
  * Basic range manager support (drm_mm.c)
  */
@@ -205,7 +225,8 @@ int drm_mm_insert_node_generic(struct drm_mm *mm,
                                unsigned long size,
                                unsigned alignment,
                                unsigned long color,
-                               enum drm_mm_search_flags flags);
+                               enum drm_mm_search_flags sflags,
+                               enum drm_mm_allocator_flags aflags);
 /**
  * drm_mm_insert_node - search for space and insert @node
  * @mm: drm_mm to allocate from
@@ -228,7 +249,8 @@ static inline int drm_mm_insert_node(struct drm_mm *mm,
                                       unsigned alignment,
                                       enum drm_mm_search_flags flags)
 {
-        return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
+        return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
+                                          DRM_MM_CREATE_DEFAULT);
 }
 
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
@@ -238,7 +260,8 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
                                         unsigned long color,
                                         unsigned long start,
                                         unsigned long end,
-                                        enum drm_mm_search_flags flags);
+                                        enum drm_mm_search_flags sflags,
+                                        enum drm_mm_allocator_flags aflags);
 /**
  * drm_mm_insert_node_in_range - ranged search for space and insert @node
  * @mm: drm_mm to allocate from
@@ -266,7 +289,8 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
                                             enum drm_mm_search_flags flags)
 {
         return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
-                                                   0, start, end, flags);
+                                                   0, start, end, flags,
+                                                   DRM_MM_CREATE_DEFAULT);
 }
 
 void drm_mm_remove_node(struct drm_mm_node *node);
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index c84ff153a564..8ed44f9bbdfb 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -65,6 +65,8 @@
  * reference the buffer.
  * TTM_PL_FLAG_NO_EVICT means that the buffer may never
  * be evicted to make room for other buffers.
+ * TTM_PL_FLAG_TOPDOWN requests to be placed from the
+ * top of the memory area, instead of the bottom.
  */
 
 #define TTM_PL_FLAG_CACHED      (1 << 16)
@@ -72,6 +74,7 @@
 #define TTM_PL_FLAG_WC          (1 << 18)
 #define TTM_PL_FLAG_SHARED      (1 << 20)
 #define TTM_PL_FLAG_NO_EVICT    (1 << 21)
+#define TTM_PL_FLAG_TOPDOWN     (1 << 22)
 
 #define TTM_PL_MASK_CACHING     (TTM_PL_FLAG_CACHED | \
                                  TTM_PL_FLAG_UNCACHED | \
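
For completeness, a hedged sketch of how a TTM-based driver could opt into the new flag; the placement below is illustrative (variable names invented, field layout following the ttm_placement API of this kernel generation), not code from this patch:

    /*
     * Illustrative sketch only. Mark a buffer that never needs CPU access so
     * the range manager allocates it from the top of VRAM, keeping the
     * CPU-mappable bottom of the aperture free for buffers that do need it.
     */
    static const uint32_t vram_topdown_flags[] = {
            TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_TOPDOWN,
    };

    static struct ttm_placement vram_topdown_placement = {
            .num_placement      = 1,
            .placement          = vram_topdown_flags,
            .num_busy_placement = 1,
            .busy_placement     = vram_topdown_flags,
    };

When the buffer's placement carries TTM_PL_FLAG_TOPDOWN, the updated ttm_bo_man_get_node() above selects DRM_MM_CREATE_TOP for the range allocation.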