-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h                 |   2
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c                 |  20
-rw-r--r--  drivers/gpu/drm/bochs/bochs.h                 |   2
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c              |  20
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h           |   2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c           |  17
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h         |   2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c         |  20
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c          |  52
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h          |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c         |   9
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h                 |   2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c              |  17
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c                 |   8
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h               |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c        |  71
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c           |  25
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c           |   8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c                  |  93
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c          |   9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c        | 136
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c        |  22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c            |  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c |   3
-rw-r--r--  include/drm/ttm/ttm_bo_api.h                  |  40
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h               |   3
26 files changed, 346 insertions, 253 deletions
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 957d4fabf1e1..cb91c2acc3cb 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -316,7 +316,7 @@ struct ast_bo {
 	struct ttm_placement placement;
 	struct ttm_bo_kmap_obj kmap;
 	struct drm_gem_object gem;
-	u32 placements[3];
+	struct ttm_place placements[3];
 	int pin_count;
 };
 #define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem)
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index b8246227bab0..8008ea0bc76c 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -293,18 +293,22 @@ void ast_mm_fini(struct ast_private *ast)
 void ast_ttm_placement(struct ast_bo *bo, int domain)
 {
 	u32 c = 0;
-	bo->placement.fpfn = 0;
-	bo->placement.lpfn = 0;
+	unsigned i;
+
 	bo->placement.placement = bo->placements;
 	bo->placement.busy_placement = bo->placements;
 	if (domain & TTM_PL_FLAG_VRAM)
-		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+		bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
 	if (domain & TTM_PL_FLAG_SYSTEM)
-		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 	if (!c)
-		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 	bo->placement.num_placement = c;
 	bo->placement.num_busy_placement = c;
+	for (i = 0; i < c; ++i) {
+		bo->placements[i].fpfn = 0;
+		bo->placements[i].lpfn = 0;
+	}
 }
 
 int ast_bo_create(struct drm_device *dev, int size, int align,
@@ -360,7 +364,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 	ast_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
@@ -383,7 +387,7 @@ int ast_bo_unpin(struct ast_bo *bo)
 		return 0;
 
 	for (i = 0; i < bo->placement.num_placement ; i++)
-		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
@@ -407,7 +411,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
 
 	ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
 	for (i = 0; i < bo->placement.num_placement ; i++)
-		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret) {
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 7eb52dd44b01..4f6e7b3a3635 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -99,7 +99,7 @@ struct bochs_bo {
 	struct ttm_placement placement;
 	struct ttm_bo_kmap_obj kmap;
 	struct drm_gem_object gem;
-	u32 placements[3];
+	struct ttm_place placements[3];
 	int pin_count;
 };
 
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 1728a1b0b813..2af30e7607d7 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -257,20 +257,26 @@ void bochs_mm_fini(struct bochs_device *bochs)
 
 static void bochs_ttm_placement(struct bochs_bo *bo, int domain)
 {
+	unsigned i;
 	u32 c = 0;
-	bo->placement.fpfn = 0;
-	bo->placement.lpfn = 0;
 	bo->placement.placement = bo->placements;
 	bo->placement.busy_placement = bo->placements;
 	if (domain & TTM_PL_FLAG_VRAM) {
-		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED
+		bo->placements[c++].flags = TTM_PL_FLAG_WC
+			| TTM_PL_FLAG_UNCACHED
 			| TTM_PL_FLAG_VRAM;
 	}
 	if (domain & TTM_PL_FLAG_SYSTEM) {
-		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		bo->placements[c++].flags = TTM_PL_MASK_CACHING
+			| TTM_PL_FLAG_SYSTEM;
 	}
 	if (!c) {
-		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		bo->placements[c++].flags = TTM_PL_MASK_CACHING
+			| TTM_PL_FLAG_SYSTEM;
+	}
+	for (i = 0; i < c; ++i) {
+		bo->placements[i].fpfn = 0;
+		bo->placements[i].lpfn = 0;
 	}
 	bo->placement.num_placement = c;
 	bo->placement.num_busy_placement = c;
@@ -294,7 +300,7 @@ int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 	bochs_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
@@ -319,7 +325,7 @@ int bochs_bo_unpin(struct bochs_bo *bo)
 		return 0;
 
 	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 401c890b6c6a..dd2cfc9024aa 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -163,7 +163,7 @@ struct cirrus_bo {
 	struct ttm_placement placement;
 	struct ttm_bo_kmap_obj kmap;
 	struct drm_gem_object gem;
-	u32 placements[3];
+	struct ttm_place placements[3];
 	int pin_count;
 };
 #define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 92e6b7786097..3e7d758330a9 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -298,18 +298,21 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
 void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
 {
 	u32 c = 0;
-	bo->placement.fpfn = 0;
-	bo->placement.lpfn = 0;
+	unsigned i;
 	bo->placement.placement = bo->placements;
 	bo->placement.busy_placement = bo->placements;
 	if (domain & TTM_PL_FLAG_VRAM)
-		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+		bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
 	if (domain & TTM_PL_FLAG_SYSTEM)
-		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 	if (!c)
-		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 	bo->placement.num_placement = c;
 	bo->placement.num_busy_placement = c;
+	for (i = 0; i < c; ++i) {
+		bo->placements[i].fpfn = 0;
+		bo->placements[i].lpfn = 0;
+	}
 }
 
 int cirrus_bo_create(struct drm_device *dev, int size, int align,
@@ -365,7 +368,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 	cirrus_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
@@ -392,7 +395,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
 
 	cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
 	for (i = 0; i < bo->placement.num_placement ; i++)
-		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret) {
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 80de23d9b9c9..2e2b76aa4e17 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -224,7 +224,7 @@ struct mgag200_bo {
 	struct ttm_placement placement;
 	struct ttm_bo_kmap_obj kmap;
 	struct drm_gem_object gem;
-	u32 placements[3];
+	struct ttm_place placements[3];
 	int pin_count;
 };
 #define gem_to_mga_bo(gobj) container_of((gobj), struct mgag200_bo, gem)
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 5a00e90696de..be883ef5a1d3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -293,18 +293,22 @@ void mgag200_mm_fini(struct mga_device *mdev)
 void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
 {
 	u32 c = 0;
-	bo->placement.fpfn = 0;
-	bo->placement.lpfn = 0;
+	unsigned i;
+
 	bo->placement.placement = bo->placements;
 	bo->placement.busy_placement = bo->placements;
 	if (domain & TTM_PL_FLAG_VRAM)
-		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+		bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
 	if (domain & TTM_PL_FLAG_SYSTEM)
-		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 	if (!c)
-		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 	bo->placement.num_placement = c;
 	bo->placement.num_busy_placement = c;
+	for (i = 0; i < c; ++i) {
+		bo->placements[i].fpfn = 0;
+		bo->placements[i].lpfn = 0;
+	}
 }
 
 int mgag200_bo_create(struct drm_device *dev, int size, int align,
@@ -361,7 +365,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 	mgag200_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
@@ -384,7 +388,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
 		return 0;
 
 	for (i = 0; i < bo->placement.num_placement ; i++)
-		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret)
 		return ret;
@@ -408,7 +412,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
 
 	mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
 	for (i = 0; i < bo->placement.num_placement ; i++)
-		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 	if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 01da508625f2..0591ca0734e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -241,16 +241,16 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 }
 
 static void
-set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
+set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
 {
 	*n = 0;
 
 	if (type & TTM_PL_FLAG_VRAM)
-		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
+		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
 	if (type & TTM_PL_FLAG_TT)
-		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
+		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
 	if (type & TTM_PL_FLAG_SYSTEM)
-		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
+		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
 }
 
 static void
@@ -258,6 +258,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
+	unsigned i, fpfn, lpfn;
 
 	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
 	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
@@ -269,11 +270,19 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 		 * at the same time.
 		 */
 		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
-			nvbo->placement.fpfn = vram_pages / 2;
-			nvbo->placement.lpfn = ~0;
+			fpfn = vram_pages / 2;
+			lpfn = ~0;
 		} else {
-			nvbo->placement.fpfn = 0;
-			nvbo->placement.lpfn = vram_pages / 2;
+			fpfn = 0;
+			lpfn = vram_pages / 2;
+		}
+		for (i = 0; i < nvbo->placement.num_placement; ++i) {
+			nvbo->placements[i].fpfn = fpfn;
+			nvbo->placements[i].lpfn = lpfn;
+		}
+		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
+			nvbo->busy_placements[i].fpfn = fpfn;
+			nvbo->busy_placements[i].lpfn = lpfn;
 		}
 	}
 }
@@ -1041,12 +1050,15 @@ static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
-	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+	struct ttm_place placement_memtype = {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+	};
 	struct ttm_placement placement;
 	struct ttm_mem_reg tmp_mem;
 	int ret;
 
-	placement.fpfn = placement.lpfn = 0;
 	placement.num_placement = placement.num_busy_placement = 1;
 	placement.placement = placement.busy_placement = &placement_memtype;
 
@@ -1074,12 +1086,15 @@ static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
-	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+	struct ttm_place placement_memtype = {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+	};
 	struct ttm_placement placement;
 	struct ttm_mem_reg tmp_mem;
 	int ret;
 
-	placement.fpfn = placement.lpfn = 0;
 	placement.num_placement = placement.num_busy_placement = 1;
 	placement.placement = placement.busy_placement = &placement_memtype;
 
@@ -1294,7 +1309,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nvif_device *device = &drm->device;
 	u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
-	int ret;
+	int i, ret;
 
 	/* as long as the bo isn't in vram, and isn't tiled, we've got
 	 * nothing to do here.
@@ -1319,9 +1334,16 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	    bo->mem.start + bo->mem.num_pages < mappable)
 		return 0;
 
+	for (i = 0; i < nvbo->placement.num_placement; ++i) {
+		nvbo->placements[i].fpfn = 0;
+		nvbo->placements[i].lpfn = mappable;
+	}
+
+	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
+		nvbo->busy_placements[i].fpfn = 0;
+		nvbo->busy_placements[i].lpfn = mappable;
+	}
 
-	nvbo->placement.fpfn = 0;
-	nvbo->placement.lpfn = mappable;
 	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
 	return nouveau_bo_validate(nvbo, false, false);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index ff17c1f432fc..4ef88e84a694 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -9,8 +9,8 @@ struct nouveau_bo {
 	struct ttm_buffer_object bo;
 	struct ttm_placement placement;
 	u32 valid_domains;
-	u32 placements[3];
-	u32 busy_placements[3];
+	struct ttm_place placements[3];
+	struct ttm_place busy_placements[3];
 	struct ttm_bo_kmap_obj kmap;
 	struct list_head head;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 53874b76b031..e81d086577ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -71,8 +71,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
 static int
 nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
-			 struct ttm_placement *placement,
-			 uint32_t flags,
+			 const struct ttm_place *place,
 			 struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
@@ -158,8 +157,7 @@ nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
 static int
 nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
-			 struct ttm_placement *placement,
-			 uint32_t flags,
+			 const struct ttm_place *place,
 			 struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -239,8 +237,7 @@ nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
 static int
 nv04_gart_manager_new(struct ttm_mem_type_manager *man,
 		      struct ttm_buffer_object *bo,
-		      struct ttm_placement *placement,
-		      uint32_t flags,
+		      const struct ttm_place *place,
 		      struct ttm_mem_reg *mem)
 {
 	struct nouveau_mem *node;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 36ed40ba773f..f6022b703645 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -106,7 +106,7 @@ struct qxl_bo {
 	/* Protected by gem.mutex */
 	struct list_head list;
 	/* Protected by tbo.reserved */
-	u32 placements[3];
+	struct ttm_place placements[3];
 	struct ttm_placement placement;
 	struct ttm_buffer_object tbo;
 	struct ttm_bo_kmap_obj kmap;
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index b95f144f0b49..adad12d30372 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -55,21 +55,24 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
 {
 	u32 c = 0;
 	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
+	unsigned i;
 
-	qbo->placement.fpfn = 0;
-	qbo->placement.lpfn = 0;
 	qbo->placement.placement = qbo->placements;
 	qbo->placement.busy_placement = qbo->placements;
 	if (domain == QXL_GEM_DOMAIN_VRAM)
-		qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
+		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
 	if (domain == QXL_GEM_DOMAIN_SURFACE)
-		qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
+		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
 	if (domain == QXL_GEM_DOMAIN_CPU)
-		qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
+		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
 	if (!c)
-		qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 	qbo->placement.num_placement = c;
 	qbo->placement.num_busy_placement = c;
+	for (i = 0; i < c; ++i) {
+		qbo->placements[i].fpfn = 0;
+		qbo->placements[i].lpfn = 0;
+	}
 }
 
 
@@ -259,7 +262,7 @@ int qxl_bo_unpin(struct qxl_bo *bo)
 	if (bo->pin_count)
 		return 0;
 	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (unlikely(r != 0))
 		dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 71a1baeac14e..f66c59b222f1 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -188,11 +188,13 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
 			    struct ttm_placement *placement)
 {
 	struct qxl_bo *qbo;
-	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	static struct ttm_place placements = {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+	};
 
 	if (!qxl_ttm_bo_is_qxl_bo(bo)) {
-		placement->fpfn = 0;
-		placement->lpfn = 0;
 		placement->placement = &placements;
 		placement->busy_placement = &placements;
 		placement->num_placement = 1;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b321ad4dcafd..bb01dab513dd 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -469,7 +469,7 @@ struct radeon_bo {
 	struct list_head list;
 	/* Protected by tbo.reserved */
 	u32 initial_domain;
-	u32 placements[3];
+	struct ttm_place placements[3];
 	struct ttm_placement placement;
 	struct ttm_buffer_object tbo;
 	struct ttm_bo_kmap_obj kmap;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 287523807989..0129c7efae3b 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -97,40 +97,56 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 {
 	u32 c = 0, i;
 
-	rbo->placement.fpfn = 0;
-	rbo->placement.lpfn = 0;
 	rbo->placement.placement = rbo->placements;
 	rbo->placement.busy_placement = rbo->placements;
 	if (domain & RADEON_GEM_DOMAIN_VRAM)
-		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-					TTM_PL_FLAG_VRAM;
+		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+					     TTM_PL_FLAG_UNCACHED |
+					     TTM_PL_FLAG_VRAM;
+
 	if (domain & RADEON_GEM_DOMAIN_GTT) {
 		if (rbo->flags & RADEON_GEM_GTT_UC) {
-			rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
+			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_TT;
+
 		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
 			   (rbo->rdev->flags & RADEON_IS_AGP)) {
-			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+				TTM_PL_FLAG_UNCACHED |
 				TTM_PL_FLAG_TT;
 		} else {
-			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
+				TTM_PL_FLAG_TT;
 		}
 	}
+
 	if (domain & RADEON_GEM_DOMAIN_CPU) {
 		if (rbo->flags & RADEON_GEM_GTT_UC) {
-			rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
+			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_SYSTEM;
+
 		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
 		    rbo->rdev->flags & RADEON_IS_AGP) {
-			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+				TTM_PL_FLAG_UNCACHED |
 				TTM_PL_FLAG_SYSTEM;
 		} else {
-			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
+				TTM_PL_FLAG_SYSTEM;
 		}
 	}
 	if (!c)
-		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
+			TTM_PL_FLAG_SYSTEM;
+
 	rbo->placement.num_placement = c;
 	rbo->placement.num_busy_placement = c;
 
+	for (i = 0; i < c; ++i) {
+		rbo->placements[i].fpfn = 0;
+		rbo->placements[i].lpfn = 0;
+	}
+
 	/*
 	 * Use two-ended allocation depending on the buffer size to
 	 * improve fragmentation quality.
@@ -138,7 +154,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	 */
 	if (rbo->tbo.mem.size > 512 * 1024) {
 		for (i = 0; i < c; i++) {
-			rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN;
+			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
 		}
 	}
 }
@@ -287,21 +303,22 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 		return 0;
 	}
 	radeon_ttm_placement_from_domain(bo, domain);
-	if (domain == RADEON_GEM_DOMAIN_VRAM) {
+	for (i = 0; i < bo->placement.num_placement; i++) {
+		unsigned lpfn = 0;
+
 		/* force to pin into visible video ram */
-		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
-	}
-	if (max_offset) {
-		u64 lpfn = max_offset >> PAGE_SHIFT;
+		if (bo->placements[i].flags & TTM_PL_FLAG_VRAM)
+			lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+		else
+			lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; /* ??? */
 
-		if (!bo->placement.lpfn)
-			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
+		if (max_offset)
+			lpfn = min (lpfn, (unsigned)(max_offset >> PAGE_SHIFT));
 
-		if (lpfn < bo->placement.lpfn)
-			bo->placement.lpfn = lpfn;
+		bo->placements[i].lpfn = lpfn;
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	}
-	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
@@ -333,8 +350,10 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 	bo->pin_count--;
 	if (bo->pin_count)
 		return 0;
-	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	for (i = 0; i < bo->placement.num_placement; i++) {
+		bo->placements[i].lpfn = 0;
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+	}
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (likely(r == 0)) {
 		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
@@ -735,7 +754,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
 	/* hurrah the memory is not visible ! */
 	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-	rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
 	r = ttm_bo_validate(bo, &rbo->placement, false, false);
 	if (unlikely(r == -ENOMEM)) {
 		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 12e37b1ddc40..822eb3630045 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -178,12 +178,15 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 static void radeon_evict_flags(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement)
 {
+	static struct ttm_place placements = {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+	};
+
 	struct radeon_bo *rbo;
-	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 
 	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
-		placement->fpfn = 0;
-		placement->lpfn = 0;
 		placement->placement = &placements;
 		placement->busy_placement = &placements;
 		placement->num_placement = 1;
@@ -286,20 +289,20 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_reg tmp_mem;
-	u32 placements;
+	struct ttm_place placements;
 	struct ttm_placement placement;
 	int r;
 
 	rdev = radeon_get_rdev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	placement.fpfn = 0;
-	placement.lpfn = 0;
 	placement.num_placement = 1;
 	placement.placement = &placements;
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
-	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	placements.fpfn = 0;
+	placements.lpfn = 0;
+	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 			     interruptible, no_wait_gpu);
 	if (unlikely(r)) {
@@ -334,19 +337,19 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_reg tmp_mem;
 	struct ttm_placement placement;
-	u32 placements;
+	struct ttm_place placements;
 	int r;
 
 	rdev = radeon_get_rdev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	placement.fpfn = 0;
-	placement.lpfn = 0;
 	placement.num_placement = 1;
 	placement.placement = &placements;
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
-	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	placements.fpfn = 0;
+	placements.lpfn = 0;
+	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 			     interruptible, no_wait_gpu);
 	if (unlikely(r)) {
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 341848a14376..25c8a1fd152c 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -233,8 +233,12 @@ int radeon_uvd_resume(struct radeon_device *rdev)
 
 void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
 {
-	rbo->placement.fpfn = 0 >> PAGE_SHIFT;
-	rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+	int i;
+
+	for (i = 0; i < rbo->placement.num_placement; ++i) {
+		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
+		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+	}
 }
 
 void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3da89d5dab60..b992ec3c318a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -53,12 +53,13 @@ static struct attribute ttm_bo_count = {
 	.mode = S_IRUGO
 };
 
-static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+static inline int ttm_mem_type_from_place(const struct ttm_place *place,
+					  uint32_t *mem_type)
 {
 	int i;
 
 	for (i = 0; i <= TTM_PL_PRIV5; i++)
-		if (flags & (1 << i)) {
+		if (place->flags & (1 << i)) {
 			*mem_type = i;
 			return 0;
 		}
@@ -89,12 +90,12 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 		bo, bo->mem.num_pages, bo->mem.size >> 10,
 		bo->mem.size >> 20);
 	for (i = 0; i < placement->num_placement; i++) {
-		ret = ttm_mem_type_from_flags(placement->placement[i],
+		ret = ttm_mem_type_from_place(&placement->placement[i],
 					      &mem_type);
 		if (ret)
 			return;
 		pr_err("  placement[%d]=0x%08X (%d)\n",
-		       i, placement->placement[i], mem_type);
+		       i, placement->placement[i].flags, mem_type);
 		ttm_mem_type_debug(bo->bdev, mem_type);
 	}
 }
@@ -685,8 +686,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	evict_mem.bus.io_reserved_vm = false;
 	evict_mem.bus.io_reserved_count = 0;
 
-	placement.fpfn = 0;
-	placement.lpfn = 0;
 	placement.num_placement = 0;
 	placement.num_busy_placement = 0;
 	bdev->driver->evict_flags(bo, &placement);
@@ -774,7 +773,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
  */
 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 				  uint32_t mem_type,
-				  struct ttm_placement *placement,
+				  const struct ttm_place *place,
 				  struct ttm_mem_reg *mem,
 				  bool interruptible,
 				  bool no_wait_gpu)
@@ -784,7 +783,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	int ret;
 
 	do {
-		ret = (*man->func->get_node)(man, bo, placement, 0, mem);
+		ret = (*man->func->get_node)(man, bo, place, mem);
 		if (unlikely(ret != 0))
 			return ret;
 		if (mem->mm_node)
@@ -827,18 +826,18 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 				 uint32_t mem_type,
-				 uint32_t proposed_placement,
+				 const struct ttm_place *place,
 				 uint32_t *masked_placement)
 {
 	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 
-	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
+	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
 		return false;
 
-	if ((proposed_placement & man->available_caching) == 0)
+	if ((place->flags & man->available_caching) == 0)
 		return false;
 
-	cur_flags |= (proposed_placement & man->available_caching);
+	cur_flags |= (place->flags & man->available_caching);
 
 	*masked_placement = cur_flags;
 	return true;
@@ -869,15 +868,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 	mem->mm_node = NULL;
 	for (i = 0; i < placement->num_placement; ++i) {
-		ret = ttm_mem_type_from_flags(placement->placement[i],
-					      &mem_type);
+		const struct ttm_place *place = &placement->placement[i];
+
+		ret = ttm_mem_type_from_place(place, &mem_type);
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
 
-		type_ok = ttm_bo_mt_compatible(man,
-					       mem_type,
-					       placement->placement[i],
+		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
 					       &cur_flags);
 
 		if (!type_ok)
@@ -889,7 +887,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		 * Use the access and other non-mapping-related flag bits from
 		 * the memory placement flags to the current flags
 		 */
-		ttm_flag_masked(&cur_flags, placement->placement[i],
+		ttm_flag_masked(&cur_flags, place->flags,
 				~TTM_PL_MASK_MEMTYPE);
 
 		if (mem_type == TTM_PL_SYSTEM)
@@ -897,8 +895,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			ret = (*man->func->get_node)(man, bo, placement,
-						     cur_flags, mem);
+			ret = (*man->func->get_node)(man, bo, place, mem);
 			if (unlikely(ret))
 				return ret;
 		}
@@ -916,17 +913,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		return -EINVAL;
 
 	for (i = 0; i < placement->num_busy_placement; ++i) {
-		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
-					      &mem_type);
+		const struct ttm_place *place = &placement->busy_placement[i];
+
+		ret = ttm_mem_type_from_place(place, &mem_type);
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
 		if (!man->has_type)
 			continue;
-		if (!ttm_bo_mt_compatible(man,
-					  mem_type,
-					  placement->busy_placement[i],
-					  &cur_flags))
+		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
 			continue;
 
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
@@ -935,7 +930,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		 * Use the access and other non-mapping-related flag bits from
 		 * the memory placement flags to the current flags
 		 */
-		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+		ttm_flag_masked(&cur_flags, place->flags,
 				~TTM_PL_MASK_MEMTYPE);
 
 		if (mem_type == TTM_PL_SYSTEM) {
@@ -945,7 +940,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			return 0;
 		}
 
-		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
 						interruptible, no_wait_gpu);
 		if (ret == 0 && mem->mm_node) {
 			mem->placement = cur_flags;
@@ -1006,20 +1001,27 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
 {
 	int i;
 
-	if (mem->mm_node && placement->lpfn != 0 &&
-	    (mem->start < placement->fpfn ||
-	     mem->start + mem->num_pages > placement->lpfn))
-		return false;
-
 	for (i = 0; i < placement->num_placement; i++) {
-		*new_flags = placement->placement[i];
+		const struct ttm_place *heap = &placement->placement[i];
+		if (mem->mm_node && heap->lpfn != 0 &&
+		    (mem->start < heap->fpfn ||
+		     mem->start + mem->num_pages > heap->lpfn))
+			continue;
+
+		*new_flags = heap->flags;
 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
 			return true;
 	}
 
 	for (i = 0; i < placement->num_busy_placement; i++) {
-		*new_flags = placement->busy_placement[i];
+		const struct ttm_place *heap = &placement->busy_placement[i];
+		if (mem->mm_node && heap->lpfn != 0 &&
+		    (mem->start < heap->fpfn ||
+		     mem->start + mem->num_pages > heap->lpfn))
+			continue;
+
+		*new_flags = heap->flags;
 		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
 		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
 			return true;
@@ -1037,11 +1039,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	uint32_t new_flags;
 
 	lockdep_assert_held(&bo->resv->lock.base);
-	/* Check that range is valid */
-	if (placement->lpfn || placement->fpfn)
-		if (placement->fpfn > placement->lpfn ||
-		    (placement->lpfn - placement->fpfn) < bo->num_pages)
-			return -EINVAL;
 	/*
 	 * Check whether we need to move buffer.
 	 */
@@ -1070,15 +1067,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_validate);
 
-int ttm_bo_check_placement(struct ttm_buffer_object *bo,
-			   struct ttm_placement *placement)
-{
-	BUG_ON((placement->fpfn || placement->lpfn) &&
-	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
-
-	return 0;
-}
-
 int ttm_bo_init(struct ttm_bo_device *bdev,
 		struct ttm_buffer_object *bo,
 		unsigned long size,
@@ -1147,15 +1135,12 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	atomic_inc(&bo->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
 
-	ret = ttm_bo_check_placement(bo, placement);
-
 	/*
 	 * For ttm_bo_type_device buffers, allocate
 	 * address space from the device.
 	 */
-	if (likely(!ret) &&
-	    (bo->type == ttm_bo_type_device ||
-	     bo->type == ttm_bo_type_sg))
+	if (bo->type == ttm_bo_type_device ||
+	    bo->type == ttm_bo_type_sg)
 		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
 					 bo->mem.num_pages);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 9e103a4875c8..964387fc5c8f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -49,8 +49,7 @@ struct ttm_range_manager {
 
 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 			       struct ttm_buffer_object *bo,
-			       struct ttm_placement *placement,
-			       uint32_t flags,
+			       const struct ttm_place *place,
 			       struct ttm_mem_reg *mem)
 {
 	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
@@ -60,7 +59,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	unsigned long lpfn;
 	int ret;
 
-	lpfn = placement->lpfn;
+	lpfn = place->lpfn;
 	if (!lpfn)
 		lpfn = man->size;
 
@@ -68,13 +67,13 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	if (!node)
 		return -ENOMEM;
 
-	if (flags & TTM_PL_FLAG_TOPDOWN)
+	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		aflags = DRM_MM_CREATE_TOP;
 
 	spin_lock(&rman->lock);
 	ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
 					  mem->page_alignment, 0,
-					  placement->fpfn, lpfn,
+					  place->fpfn, lpfn,
 					  DRM_MM_SEARCH_BEST,
 					  aflags);
 	spin_unlock(&rman->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 6327cfc36805..37c093c0c7b8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -30,66 +30,101 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_page_alloc.h>
 
-static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
-	TTM_PL_FLAG_CACHED;
-
-static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
-	TTM_PL_FLAG_CACHED |
-	TTM_PL_FLAG_NO_EVICT;
+static struct ttm_place vram_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+};
 
-static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
-	TTM_PL_FLAG_CACHED;
+static struct ttm_place vram_ne_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
 
-static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
-	TTM_PL_FLAG_CACHED |
-	TTM_PL_FLAG_NO_EVICT;
+static struct ttm_place sys_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+};
 
-static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
-	TTM_PL_FLAG_CACHED;
+static struct ttm_place sys_ne_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
 
-static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
-	TTM_PL_FLAG_CACHED |
-	TTM_PL_FLAG_NO_EVICT;
+static struct ttm_place gmr_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
 
-static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB |
-	TTM_PL_FLAG_CACHED;
+static struct ttm_place gmr_ne_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
 
-struct ttm_placement vmw_vram_placement = {
-	.fpfn = 0,
-	.lpfn = 0,
+static struct ttm_place mob_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_vram_placement = {
 	.num_placement = 1,
 	.placement = &vram_placement_flags,
 	.num_busy_placement = 1,
 	.busy_placement = &vram_placement_flags
 };
 
-static uint32_t vram_gmr_placement_flags[] = {
-	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
-	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+static struct ttm_place vram_gmr_placement_flags[] = {
+	{
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+	}, {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+	}
 };
 
-static uint32_t gmr_vram_placement_flags[] = {
-	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
-	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+static struct ttm_place gmr_vram_placement_flags[] = {
+	{
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+	}, {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+	}
 };
 
 struct ttm_placement vmw_vram_gmr_placement = {
-	.fpfn = 0,
-	.lpfn = 0,
 	.num_placement = 2,
 	.placement = vram_gmr_placement_flags,
 	.num_busy_placement = 1,
 	.busy_placement = &gmr_placement_flags
 };
 
-static uint32_t vram_gmr_ne_placement_flags[] = {
-	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
-	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+static struct ttm_place vram_gmr_ne_placement_flags[] = {
+	{
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
+			 TTM_PL_FLAG_NO_EVICT
+	}, {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
+			 TTM_PL_FLAG_NO_EVICT
+	}
 };
 
 struct ttm_placement vmw_vram_gmr_ne_placement = {
-	.fpfn = 0,
-	.lpfn = 0,
 	.num_placement = 2,
 	.placement = vram_gmr_ne_placement_flags,
 	.num_busy_placement = 1,
@@ -97,8 +132,6 @@ struct ttm_placement vmw_vram_gmr_ne_placement = {
97}; 132};
98 133
99struct ttm_placement vmw_vram_sys_placement = { 134struct ttm_placement vmw_vram_sys_placement = {
100 .fpfn = 0,
101 .lpfn = 0,
102 .num_placement = 1, 135 .num_placement = 1,
103 .placement = &vram_placement_flags, 136 .placement = &vram_placement_flags,
104 .num_busy_placement = 1, 137 .num_busy_placement = 1,
@@ -106,8 +139,6 @@ struct ttm_placement vmw_vram_sys_placement = {
106}; 139};
107 140
108struct ttm_placement vmw_vram_ne_placement = { 141struct ttm_placement vmw_vram_ne_placement = {
109 .fpfn = 0,
110 .lpfn = 0,
111 .num_placement = 1, 142 .num_placement = 1,
112 .placement = &vram_ne_placement_flags, 143 .placement = &vram_ne_placement_flags,
113 .num_busy_placement = 1, 144 .num_busy_placement = 1,
@@ -115,8 +146,6 @@ struct ttm_placement vmw_vram_ne_placement = {
115}; 146};
116 147
117struct ttm_placement vmw_sys_placement = { 148struct ttm_placement vmw_sys_placement = {
118 .fpfn = 0,
119 .lpfn = 0,
120 .num_placement = 1, 149 .num_placement = 1,
121 .placement = &sys_placement_flags, 150 .placement = &sys_placement_flags,
122 .num_busy_placement = 1, 151 .num_busy_placement = 1,
@@ -124,24 +153,33 @@ struct ttm_placement vmw_sys_placement = {
124}; 153};
125 154
126struct ttm_placement vmw_sys_ne_placement = { 155struct ttm_placement vmw_sys_ne_placement = {
127 .fpfn = 0,
128 .lpfn = 0,
129 .num_placement = 1, 156 .num_placement = 1,
130 .placement = &sys_ne_placement_flags, 157 .placement = &sys_ne_placement_flags,
131 .num_busy_placement = 1, 158 .num_busy_placement = 1,
132 .busy_placement = &sys_ne_placement_flags 159 .busy_placement = &sys_ne_placement_flags
133}; 160};
134 161
135static uint32_t evictable_placement_flags[] = { 162static struct ttm_place evictable_placement_flags[] = {
136 TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, 163 {
137 TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, 164 .fpfn = 0,
138 VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, 165 .lpfn = 0,
139 VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED 166 .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
167 }, {
168 .fpfn = 0,
169 .lpfn = 0,
170 .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
171 }, {
172 .fpfn = 0,
173 .lpfn = 0,
174 .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
175 }, {
176 .fpfn = 0,
177 .lpfn = 0,
178 .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
179 }
140}; 180};
141 181
142struct ttm_placement vmw_evictable_placement = { 182struct ttm_placement vmw_evictable_placement = {
143 .fpfn = 0,
144 .lpfn = 0,
145 .num_placement = 4, 183 .num_placement = 4,
146 .placement = evictable_placement_flags, 184 .placement = evictable_placement_flags,
147 .num_busy_placement = 1, 185 .num_busy_placement = 1,
@@ -149,8 +187,6 @@ struct ttm_placement vmw_evictable_placement = {
149}; 187};
150 188
151struct ttm_placement vmw_srf_placement = { 189struct ttm_placement vmw_srf_placement = {
152 .fpfn = 0,
153 .lpfn = 0,
154 .num_placement = 1, 190 .num_placement = 1,
155 .num_busy_placement = 2, 191 .num_busy_placement = 2,
156 .placement = &gmr_placement_flags, 192 .placement = &gmr_placement_flags,
@@ -158,8 +194,6 @@ struct ttm_placement vmw_srf_placement = {
158}; 194};
159 195
160struct ttm_placement vmw_mob_placement = { 196struct ttm_placement vmw_mob_placement = {
161 .fpfn = 0,
162 .lpfn = 0,
163 .num_placement = 1, 197 .num_placement = 1,
164 .num_busy_placement = 1, 198 .num_busy_placement = 1,
165 .placement = &mob_placement_flags, 199 .placement = &mob_placement_flags,
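The bulk of this file is the mechanical conversion of uint32_t flag words into struct ttm_place initializers with fpfn = lpfn = 0, i.e. no range restriction. Purely as a sketch (this helper is not part of the patch), the repeated open-coded initializers could be expressed through a local macro:

/* Hypothetical helper: fpfn/lpfn default to 0, meaning "no range limit". */
#define VMW_PLACE(f)	{ .fpfn = 0, .lpfn = 0, .flags = (f) }

static struct ttm_place example_vram_gmr_places[] = {
	VMW_PLACE(TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED),
	VMW_PLACE(VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED),
};

The open-coded form used in the patch has the advantage of matching the other converted drivers line for line.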
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index ed1d51006ab1..914b375763dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -198,13 +198,19 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
198{ 198{
199 struct ttm_buffer_object *bo = &buf->base; 199 struct ttm_buffer_object *bo = &buf->base;
200 struct ttm_placement placement; 200 struct ttm_placement placement;
201 struct ttm_place place;
201 int ret = 0; 202 int ret = 0;
202 203
203 if (pin) 204 if (pin)
204 placement = vmw_vram_ne_placement; 205 place = vmw_vram_ne_placement.placement[0];
205 else 206 else
206 placement = vmw_vram_placement; 207 place = vmw_vram_placement.placement[0];
207 placement.lpfn = bo->num_pages; 208 place.lpfn = bo->num_pages;
209
210 placement.num_placement = 1;
211 placement.placement = &place;
212 placement.num_busy_placement = 1;
213 placement.busy_placement = &place;
208 214
209 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); 215 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
210 if (unlikely(ret != 0)) 216 if (unlikely(ret != 0))
@@ -293,21 +299,23 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
293 */ 299 */
294void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) 300void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
295{ 301{
296 uint32_t pl_flags; 302 struct ttm_place pl;
297 struct ttm_placement placement; 303 struct ttm_placement placement;
298 uint32_t old_mem_type = bo->mem.mem_type; 304 uint32_t old_mem_type = bo->mem.mem_type;
299 int ret; 305 int ret;
300 306
301 lockdep_assert_held(&bo->resv->lock.base); 307 lockdep_assert_held(&bo->resv->lock.base);
302 308
303 pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB 309 pl.fpfn = 0;
310 pl.lpfn = 0;
311 pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
304 | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; 312 | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
305 if (pin) 313 if (pin)
306 pl_flags |= TTM_PL_FLAG_NO_EVICT; 314 pl.flags |= TTM_PL_FLAG_NO_EVICT;
307 315
308 memset(&placement, 0, sizeof(placement)); 316 memset(&placement, 0, sizeof(placement));
309 placement.num_placement = 1; 317 placement.num_placement = 1;
310 placement.placement = &pl_flags; 318 placement.placement = &pl;
311 319
312 ret = ttm_bo_validate(bo, &placement, false, true); 320 ret = ttm_bo_validate(bo, &placement, false, true);
313 321
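Because ttm_placement no longer carries a global fpfn/lpfn pair, callers that used to copy a placement and tweak its lpfn now copy one ttm_place, adjust it, and build a small placement around it on the stack; the same pattern recurs in the vmwgfx_fb.c hunk below. A condensed sketch of the new calling convention (the helper name is hypothetical; the bo must already be reserved):

static int example_pin_to_first_pages(struct ttm_buffer_object *bo,
				      unsigned long num_pages)
{
	struct ttm_place place = vmw_vram_ne_placement.placement[0];
	struct ttm_placement placement;

	place.lpfn = num_pages;			/* per-place upper bound */

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	return ttm_bo_validate(bo, &placement, false, false);
}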
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index b031b48dbb3c..0a474f391fad 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -374,10 +374,16 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
374 size_t size, struct vmw_dma_buffer **out) 374 size_t size, struct vmw_dma_buffer **out)
375{ 375{
376 struct vmw_dma_buffer *vmw_bo; 376 struct vmw_dma_buffer *vmw_bo;
377 struct ttm_placement ne_placement = vmw_vram_ne_placement; 377 struct ttm_place ne_place = vmw_vram_ne_placement.placement[0];
378 struct ttm_placement ne_placement;
378 int ret; 379 int ret;
379 380
380 ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 381 ne_placement.num_placement = 1;
382 ne_placement.placement = &ne_place;
383 ne_placement.num_busy_placement = 1;
384 ne_placement.busy_placement = &ne_place;
385
386 ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
381 387
382 (void) ttm_write_lock(&vmw_priv->reservation_sem, false); 388 (void) ttm_write_lock(&vmw_priv->reservation_sem, false);
383 389
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 26f8bdde3529..170b61be1e4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -46,8 +46,7 @@ struct vmwgfx_gmrid_man {
46 46
47static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, 47static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
48 struct ttm_buffer_object *bo, 48 struct ttm_buffer_object *bo,
49 struct ttm_placement *placement, 49 const struct ttm_place *place,
50 uint32_t flags,
51 struct ttm_mem_reg *mem) 50 struct ttm_mem_reg *mem)
52{ 51{
53 struct vmwgfx_gmrid_man *gman = 52 struct vmwgfx_gmrid_man *gman =
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 7526c5bf5610..e3d39c80a091 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -45,12 +45,24 @@ struct ttm_bo_device;
45 45
46struct drm_mm_node; 46struct drm_mm_node;
47 47
48/**
49 * struct ttm_place
50 *
51 * @fpfn: first valid page frame number to put the object
52 * @lpfn: last valid page frame number to put the object
53 * @flags: memory domain and caching flags for the object
54 *
55 * Structure indicating a possible place to put an object.
56 */
57struct ttm_place {
58 unsigned fpfn;
59 unsigned lpfn;
60 uint32_t flags;
61};
48 62
49/** 63/**
50 * struct ttm_placement 64 * struct ttm_placement
51 * 65 *
52 * @fpfn: first valid page frame number to put the object
53 * @lpfn: last valid page frame number to put the object
54 * @num_placement: number of preferred placements 66 * @num_placement: number of preferred placements
55 * @placement: preferred placements 67 * @placement: preferred placements
56 * @num_busy_placement: number of preferred placements when need to evict buffer 68 * @num_busy_placement: number of preferred placements when need to evict buffer
@@ -59,12 +71,10 @@ struct drm_mm_node;
59 * Structure indicating the placement you request for an object. 71 * Structure indicating the placement you request for an object.
60 */ 72 */
61struct ttm_placement { 73struct ttm_placement {
62 unsigned fpfn; 74 unsigned num_placement;
63 unsigned lpfn; 75 const struct ttm_place *placement;
64 unsigned num_placement; 76 unsigned num_busy_placement;
65 const uint32_t *placement; 77 const struct ttm_place *busy_placement;
66 unsigned num_busy_placement;
67 const uint32_t *busy_placement;
68}; 78};
69 79
70/** 80/**
@@ -519,20 +529,6 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev,
519 struct ttm_buffer_object **p_bo); 529 struct ttm_buffer_object **p_bo);
520 530
521/** 531/**
522 * ttm_bo_check_placement
523 *
524 * @bo: the buffer object.
525 * @placement: placements
526 *
527 * Performs minimal validity checking on an intended change of
528 * placement flags.
529 * Returns
530 * -EINVAL: Intended change is invalid or not allowed.
531 */
532extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
533 struct ttm_placement *placement);
534
535/**
536 * ttm_bo_init_mm 532 * ttm_bo_init_mm
537 * 533 *
538 * @bdev: Pointer to a ttm_bo_device struct. 534 * @bdev: Pointer to a ttm_bo_device struct.
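The header change is the core of the patch: the range limits move out of ttm_placement and into each ttm_place, and the ttm_bo_check_placement() declaration goes away. Under the new layout a driver can, for example, prefer a bounded VRAM entry but fall back to unrestricted system memory when under pressure; the names and values below are illustrative only:

static struct ttm_place example_places[] = {
	{
		.fpfn = 0,
		.lpfn = 0x40000,	/* first 1 GiB with 4 KiB pages */
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	}, {
		.fpfn = 0,
		.lpfn = 0,		/* unrestricted */
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
	}
};

static struct ttm_placement example_placement = {
	.num_placement = 1,
	.placement = &example_places[0],	/* prefer bounded VRAM */
	.num_busy_placement = 2,
	.busy_placement = example_places,	/* accept either when contended */
};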
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 1d9f0f1ff52d..5c8bb5699a6f 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -208,8 +208,7 @@ struct ttm_mem_type_manager_func {
208 */ 208 */
209 int (*get_node)(struct ttm_mem_type_manager *man, 209 int (*get_node)(struct ttm_mem_type_manager *man,
210 struct ttm_buffer_object *bo, 210 struct ttm_buffer_object *bo,
211 struct ttm_placement *placement, 211 const struct ttm_place *place,
212 uint32_t flags,
213 struct ttm_mem_reg *mem); 212 struct ttm_mem_reg *mem);
214 213
215 /** 214 /**
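The get_node() callback drops its separate flags argument: the single const struct ttm_place already carries the flags together with the range, which is what the ttm_range_manager and vmwgfx gmrid manager hunks above rely on. A skeleton of a driver-specific manager under the new prototype (hypothetical names, allocation policy elided):

static int example_man_get_node(struct ttm_mem_type_manager *man,
				struct ttm_buffer_object *bo,
				const struct ttm_place *place,
				struct ttm_mem_reg *mem)
{
	unsigned long lpfn = place->lpfn ? place->lpfn : man->size;

	/*
	 * Allocate between place->fpfn and lpfn, steered by
	 * place->flags (e.g. TTM_PL_FLAG_TOPDOWN). Returning 0 with
	 * mem->mm_node left NULL tells TTM this manager had no space
	 * and the busy placements should be tried instead.
	 */
	(void)lpfn;
	mem->mm_node = NULL;
	return 0;
}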