author    Dave Airlie <airlied@redhat.com>  2014-10-01 05:27:38 -0400
committer Dave Airlie <airlied@redhat.com>  2014-10-01 05:27:38 -0400
commit    7a42e83d36d2d0a68622320900dc4e880b1d920a (patch)
tree      3671f64e59894284cb7a946d9ab981a1f71e669b
parent    c5939a7360a69fc415bcfff6b10ced5227495a48 (diff)
parent    bb6178b04f5ef6f62990306713fb6afdf5d8bc56 (diff)
Merge branch 'for-airlied-next' of git://people.freedesktop.org/~mlankhorst/linux into drm-next
fixups for nouveau and fencing
* 'for-airlied-next' of git://people.freedesktop.org/~mlankhorst/linux:
drm/nouveau: export reservation_object from dmabuf to ttm
drm/ttm: add reservation_object as argument to ttm_bo_init
drm: Pass dma-buf as argument to gem_prime_import_sg_table
drm/nouveau: assign fence_chan->name correctly
drm/nouveau: specify if interruptible wait is desired in nouveau_fence_sync
drm/nouveau: bump driver patchlevel to 1.2.1
36 files changed, 126 insertions(+), 63 deletions(-)
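Taken together, the series makes two API changes that ripple through every driver below: ttm_bo_init() (and nouveau's nouveau_bo_new() wrapper) gains a struct reservation_object *resv parameter, and the gem_prime_import_sg_table() hook now receives the struct dma_buf_attachment instead of a bare size. What follows is a hedged sketch, not code from this commit, of how a driver that does not care about shared reservation objects keeps the old behaviour by passing NULL; the mydrv_* names are hypothetical.

#include <linux/slab.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* hypothetical driver wrappers, for illustration only */
struct mydrv_device {
	struct ttm_bo_device bdev;
};

struct mydrv_bo {
	struct ttm_buffer_object bo;
	struct ttm_placement placement;
};

static void mydrv_bo_ttm_destroy(struct ttm_buffer_object *bo)
{
	/* the bo was embedded in a kmalloc'd mydrv_bo, free the container */
	kfree(container_of(bo, struct mydrv_bo, bo));
}

static int mydrv_bo_create(struct mydrv_device *mdev, unsigned long size,
			   size_t acc_size, struct mydrv_bo *mbo)
{
	/* NULL for the new resv argument: TTM allocates and locks its own
	 * reservation object, exactly as it did before this series. */
	return ttm_bo_init(&mdev->bdev, &mbo->bo, size,
			   ttm_bo_type_device, &mbo->placement,
			   0, false, NULL, acc_size,
			   NULL /* sg */, NULL /* resv */,
			   mydrv_bo_ttm_destroy);
}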
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index c65d432f42c4..08f82eae6939 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -339,7 +339,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
 	ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
 			  ttm_bo_type_device, &astbo->placement,
 			  align >> PAGE_SHIFT, false, NULL, acc_size,
-			  NULL, ast_bo_ttm_destroy);
+			  NULL, NULL, ast_bo_ttm_destroy);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 324f5a09a0a1..66286ff518d4 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -377,7 +377,7 @@ static int bochs_bo_create(struct drm_device *dev, int size, int align,
 	ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size,
 			  ttm_bo_type_device, &bochsbo->placement,
 			  align >> PAGE_SHIFT, false, NULL, acc_size,
-			  NULL, bochs_bo_ttm_destroy);
+			  NULL, NULL, bochs_bo_ttm_destroy);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index d3c615f9b183..dfffd528517a 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -343,7 +343,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
 	ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
 			  ttm_bo_type_device, &cirrusbo->placement,
 			  align >> PAGE_SHIFT, false, NULL, acc_size,
-			  NULL, cirrus_bo_ttm_destroy);
+			  NULL, NULL, cirrus_bo_ttm_destroy);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index e467e67af6e7..0316310e2cc4 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -316,7 +316,8 @@ out:
 EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
 
 struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
+drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
+				  struct dma_buf_attachment *attach,
 				  struct sg_table *sgt)
 {
 	struct drm_gem_cma_object *cma_obj;
@@ -325,14 +326,14 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
 		return ERR_PTR(-EINVAL);
 
 	/* Create a CMA GEM buffer. */
-	cma_obj = __drm_gem_cma_create(dev, size);
+	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
 	if (IS_ERR(cma_obj))
 		return ERR_CAST(cma_obj);
 
 	cma_obj->paddr = sg_dma_address(sgt->sgl);
 	cma_obj->sgt = sgt;
 
-	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, size);
+	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);
 
 	return &cma_obj->base;
 }
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7826de9da276..78ca30808422 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -525,7 +525,7 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
 		goto fail_detach;
 	}
 
-	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
+	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
 	if (IS_ERR(obj)) {
 		ret = PTR_ERR(obj);
 		goto fail_unmap;
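With the attachment now passed through, an import callback can pull the size (and, where wanted, the exporter's reservation object) out of attach->dmabuf instead of receiving the size as a separate argument. Below is a minimal hedged sketch of a driver-side implementation against the new hook signature; mydrv_import_with_resv() and the surrounding driver are hypothetical, and <linux/dma-buf.h> is needed for struct dma_buf, just as the msm, radeon and nouveau hunks below add it.

#include <drm/drmP.h>
#include <linux/dma-buf.h>
#include <linux/reservation.h>

/* hypothetical driver helper that creates the GEM object */
struct drm_gem_object *mydrv_import_with_resv(struct drm_device *dev,
					      size_t size, struct sg_table *sgt,
					      struct reservation_object *resv);

static struct drm_gem_object *
mydrv_gem_prime_import_sg_table(struct drm_device *dev,
				struct dma_buf_attachment *attach,
				struct sg_table *sgt)
{
	/* both pieces of information now come from the attachment */
	size_t size = attach->dmabuf->size;
	struct reservation_object *resv = attach->dmabuf->resv;

	return mydrv_import_with_resv(dev, size, sgt, resv);
}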
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 8ac70626df6c..d16964ea0ed4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -339,7 +339,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
 	ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
 			  ttm_bo_type_device, &mgabo->placement,
 			  align >> PAGE_SHIFT, false, NULL, acc_size,
-			  NULL, mgag200_bo_ttm_destroy);
+			  NULL, NULL, mgag200_bo_ttm_destroy);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index afaafd42dee7..67f9d0a2332c 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -171,7 +171,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
-		size_t size, struct sg_table *sg);
+		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
 void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index d48f9fc5129b..ad772fe36115 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -18,6 +18,7 @@
 #include "msm_drv.h"
 #include "msm_gem.h"
 
+#include <linux/dma-buf.h>
 
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
@@ -37,9 +38,9 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 }
 
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
-		size_t size, struct sg_table *sg)
+		struct dma_buf_attachment *attach, struct sg_table *sg)
 {
-	return msm_gem_import(dev, size, sg);
+	return msm_gem_import(dev, attach->dmabuf->size, sg);
 }
 
 int msm_gem_prime_pin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index b90aa5c1f90a..fca6a1f9c20c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1127,7 +1127,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
 	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
 
 	ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
+			     0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index eea74b127b03..3d474ac03f88 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -181,7 +181,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 int
 nouveau_bo_new(struct drm_device *dev, int size, int align,
 	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
-	       struct sg_table *sg,
+	       struct sg_table *sg, struct reservation_object *robj,
 	       struct nouveau_bo **pnvbo)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
@@ -230,7 +230,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
 			  type, &nvbo->placement,
 			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
-			  nouveau_bo_del_ttm);
+			  robj, nouveau_bo_del_ttm);
 	if (ret) {
 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
 		return ret;
@@ -970,7 +970,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	}
 
 	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
-	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true);
+	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
 	if (ret == 0) {
 		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
 		if (ret == 0) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index f238def41a92..22d2c764d80b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -70,6 +70,7 @@ extern struct ttm_bo_driver nouveau_bo_driver;
 void nouveau_bo_move_init(struct nouveau_drm *);
 int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
 		   u32 tile_mode, u32 tile_flags, struct sg_table *sg,
+		   struct reservation_object *robj,
 		   struct nouveau_bo **);
 int nouveau_bo_pin(struct nouveau_bo *, u32 flags);
 int nouveau_bo_unpin(struct nouveau_bo *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 9a362ddd8225..977fb8f15d97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -106,7 +106,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 	if (nouveau_vram_pushbuf)
 		target = TTM_PL_FLAG_VRAM;
 
-	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
+	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
			     &chan->push.buffer);
 	if (ret == 0) {
 		ret = nouveau_bo_pin(chan->push.buffer, target);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 6d0a3cdc752b..334db3c6e40c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -658,7 +658,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	/* Synchronize with the old framebuffer */
-	ret = nouveau_fence_sync(old_bo, chan, false);
+	ret = nouveau_fence_sync(old_bo, chan, false, false);
 	if (ret)
 		goto fail;
 
@@ -722,7 +722,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		goto fail_unpin;
 
 	/* synchronise rendering channel with the kernel's channel */
-	ret = nouveau_fence_sync(new_bo, chan, false);
+	ret = nouveau_fence_sync(new_bo, chan, false, true);
 	if (ret) {
 		ttm_bo_unreserve(&new_bo->bo);
 		goto fail_unpin;
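The extra bool on nouveau_fence_sync() lets each call site state whether the fence_wait() fallback may be interrupted by signals: the flip emission above passes false (the flip is already queued), the ioctl-driven flip setup passes true, and the command-submission path later in this merge also passes true. A small hedged illustration of the convention, not code from the commit, assuming the nouveau internals declared in "nouveau_fence.h":

/* called with the flip already queued: must not return -ERESTARTSYS */
static int flip_sync(struct nouveau_bo *bo, struct nouveau_channel *chan)
{
	return nouveau_fence_sync(bo, chan, false, false);
}

/* ioctl context: interruptible sleep is fine; wait on all fences only
 * when the buffer will be written */
static int ioctl_sync(struct nouveau_bo *bo, struct nouveau_channel *chan,
		      bool write)
{
	return nouveau_fence_sync(bo, chan, write, true);
}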
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index b02b02452c85..8ae36f265fb8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -10,7 +10,7 @@
 
 #define DRIVER_MAJOR 1
 #define DRIVER_MINOR 2
-#define DRIVER_PATCHLEVEL 0
+#define DRIVER_PATCHLEVEL 1
 
 /*
  * 1.1.1:
@@ -26,6 +26,8 @@
  * 1.2.0:
  * - object api exposed to userspace
  * - fermi,kepler,maxwell zbc
+ * 1.2.1:
+ * - allow concurrent access to bo's mapped read/write.
  */
 
 #include <nvif/client.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index decfe6c4ac07..515cd9aebb99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -101,6 +101,18 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 }
 
 static void
+nouveau_fence_context_put(struct kref *fence_ref)
+{
+	kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
+}
+
+void
+nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
+{
+	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
+}
+
+static void
 nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
 	struct nouveau_fence *fence;
@@ -141,6 +153,7 @@ void
 nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
 	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
+	struct nouveau_cli *cli = (void *)nvif_client(chan->object);
 	int ret;
 
 	INIT_LIST_HEAD(&fctx->flip);
@@ -148,6 +161,14 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
 	spin_lock_init(&fctx->lock);
 	fctx->context = priv->context_base + chan->chid;
 
+	if (chan == chan->drm->cechan)
+		strcpy(fctx->name, "copy engine channel");
+	else if (chan == chan->drm->channel)
+		strcpy(fctx->name, "generic kernel channel");
+	else
+		strcpy(fctx->name, nvkm_client(&cli->base)->name);
+
+	kref_init(&fctx->fence_ref);
 	if (!priv->uevent)
 		return;
 
@@ -195,8 +216,12 @@ nouveau_fence_work(struct fence *fence,
 
 	work = kmalloc(sizeof(*work), GFP_KERNEL);
 	if (!work) {
+		/*
+		 * this might not be a nouveau fence any more,
+		 * so force a lazy wait here
+		 */
 		WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
-					   false, false));
+					   true, false));
 		goto err;
 	}
 
@@ -226,12 +251,11 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 
 	if (priv->uevent)
 		fence_init(&fence->base, &nouveau_fence_ops_uevent,
-			   &fctx->lock,
-			   priv->context_base + chan->chid, ++fctx->sequence);
+			   &fctx->lock, fctx->context, ++fctx->sequence);
 	else
 		fence_init(&fence->base, &nouveau_fence_ops_legacy,
-			   &fctx->lock,
-			   priv->context_base + chan->chid, ++fctx->sequence);
+			   &fctx->lock, fctx->context, ++fctx->sequence);
+	kref_get(&fctx->fence_ref);
 
 	trace_fence_emit(&fence->base);
 	ret = fctx->emit(fence);
@@ -342,7 +366,7 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 }
 
 int
-nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive)
+nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
 {
 	struct nouveau_fence_chan *fctx = chan->fence;
 	struct fence *fence;
@@ -369,7 +393,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 			prev = f->channel;
 
 		if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
-			ret = fence_wait(fence, true);
+			ret = fence_wait(fence, intr);
 
 		return ret;
 	}
@@ -387,8 +411,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 		if (f)
 			prev = f->channel;
 
-		if (!prev || (ret = fctx->sync(f, prev, chan)))
-			ret = fence_wait(fence, true);
+		if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+			ret = fence_wait(fence, intr);
 
 		if (ret)
 			break;
@@ -482,13 +506,22 @@ static bool nouveau_fence_no_signaling(struct fence *f)
 	return true;
 }
 
+static void nouveau_fence_release(struct fence *f)
+{
+	struct nouveau_fence *fence = from_fence(f);
+	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+
+	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
+	fence_free(&fence->base);
+}
+
 static const struct fence_ops nouveau_fence_ops_legacy = {
 	.get_driver_name = nouveau_fence_get_get_driver_name,
 	.get_timeline_name = nouveau_fence_get_timeline_name,
 	.enable_signaling = nouveau_fence_no_signaling,
 	.signaled = nouveau_fence_is_signaled,
 	.wait = nouveau_fence_wait_legacy,
-	.release = NULL
+	.release = nouveau_fence_release
 };
 
 static bool nouveau_fence_enable_signaling(struct fence *f)
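The new fence_ref kref decouples the lifetime of a nouveau_fence_chan from the channel that created it: the context starts with one reference, nouveau_fence_emit() takes another for each emitted fence, nouveau_fence_release() drops it, and the *_fence_context_del() paths now end with nouveau_fence_context_free() instead of kfree() so the structure survives until the last fence is gone. A generic hedged sketch of that pattern with illustrative names, not the driver's code:

#include <linux/kref.h>
#include <linux/slab.h>

struct example_ctx {
	struct kref ref;	/* mirrors nouveau_fence_chan::fence_ref */
};

static void example_ctx_release(struct kref *ref)
{
	kfree(container_of(ref, struct example_ctx, ref));
}

static void example_fence_emit(struct example_ctx *ctx)
{
	kref_get(&ctx->ref);		/* one reference per outstanding fence */
}

static void example_fence_release(struct example_ctx *ctx)
{
	kref_put(&ctx->ref, example_ctx_release);
}

static void example_channel_del(struct example_ctx *ctx)
{
	/* drops the initial reference taken by kref_init(); the kfree()
	 * only happens once every fence has also been released */
	kref_put(&ctx->ref, example_ctx_release);
}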
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 986c8135e564..943b0b17b1fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -26,10 +26,12 @@ int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
 void nouveau_fence_work(struct fence *, void (*)(void *), void *);
 int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
-int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive);
+int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
 
 struct nouveau_fence_chan {
 	spinlock_t lock;
+	struct kref fence_ref;
+
 	struct list_head pending;
 	struct list_head flip;
 
@@ -42,7 +44,7 @@ struct nouveau_fence_chan {
 
 	u32 sequence;
 	u32 context;
-	char name[24];
+	char name[32];
 
 	struct nvif_notify notify;
 	int notify_ref;
@@ -63,6 +65,7 @@ struct nouveau_fence_priv {
 
 void nouveau_fence_context_new(struct nouveau_channel *, struct nouveau_fence_chan *);
 void nouveau_fence_context_del(struct nouveau_fence_chan *);
+void nouveau_fence_context_free(struct nouveau_fence_chan *);
 
 int nv04_fence_create(struct nouveau_drm *);
 int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index b7dbd16904e0..36951ee4b157 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -165,7 +165,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
 		flags |= TTM_PL_FLAG_SYSTEM;
 
 	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
-			     tile_flags, NULL, pnvbo);
+			     tile_flags, NULL, NULL, pnvbo);
 	if (ret)
 		return ret;
 	nvbo = *pnvbo;
@@ -459,7 +459,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 			return ret;
 		}
 
-		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains);
+		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
 		if (unlikely(ret)) {
 			if (ret != -ERESTARTSYS)
 				NV_PRINTK(error, cli, "fail post-validate sync\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index ddab762d81fe..e4049faca780 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -39,7 +39,7 @@ struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *);
 extern void nouveau_gem_prime_unpin(struct drm_gem_object *);
 extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
 extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
-	struct drm_device *, size_t size, struct sg_table *);
+	struct drm_device *, struct dma_buf_attachment *, struct sg_table *);
 extern void *nouveau_gem_prime_vmap(struct drm_gem_object *);
 extern void nouveau_gem_prime_vunmap(struct drm_gem_object *, void *);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 1f51008e4d26..228226ab27fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -23,6 +23,7 @@
  */
 
 #include <drm/drmP.h>
+#include <linux/dma-buf.h>
 
 #include "nouveau_drm.h"
 #include "nouveau_gem.h"
@@ -56,17 +57,20 @@ void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 }
 
 struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
-							 size_t size,
+							 struct dma_buf_attachment *attach,
							 struct sg_table *sg)
 {
 	struct nouveau_bo *nvbo;
+	struct reservation_object *robj = attach->dmabuf->resv;
 	u32 flags = 0;
 	int ret;
 
 	flags = TTM_PL_FLAG_TT;
 
-	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
-			     sg, &nvbo);
+	ww_mutex_lock(&robj->lock, NULL);
+	ret = nouveau_bo_new(dev, attach->dmabuf->size, 0, flags, 0, 0,
+			     sg, robj, &nvbo);
+	ww_mutex_unlock(&robj->lock);
 	if (ret)
 		return ERR_PTR(ret);
 
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 4484131d826a..f9859deb108a 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -67,7 +67,7 @@ nv04_fence_context_del(struct nouveau_channel *chan)
 	struct nv04_fence_chan *fctx = chan->fence;
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
-	kfree(fctx);
+	nouveau_fence_context_free(&fctx->base);
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 737d066ffc60..5e1ea1cdce75 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -63,7 +63,7 @@ nv10_fence_context_del(struct nouveau_channel *chan)
 		nvif_object_fini(&fctx->head[i]);
 	nvif_object_fini(&fctx->sema);
 	chan->fence = NULL;
-	kfree(fctx);
+	nouveau_fence_context_free(&fctx->base);
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 6f9a1f8e2d0f..40b461c7d5c5 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -129,7 +129,7 @@ nv17_fence_create(struct nouveau_drm *drm)
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &priv->bo);
+			     0, 0x0000, NULL, NULL, &priv->bo);
 	if (!ret) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
 		if (!ret) {
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index fdb3e1adea1e..ae873d1a8d46 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1383,7 +1383,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	drm_mode_crtc_set_gamma_size(crtc, 256);
 
 	ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &head->base.lut.nvbo);
+			     0, 0x0000, NULL, NULL, &head->base.lut.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret) {
@@ -1406,7 +1406,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
 		goto out;
 
 	ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &head->base.cursor.nvbo);
+			     0, 0x0000, NULL, NULL, &head->base.cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
 		if (!ret) {
@@ -2468,7 +2468,7 @@ nv50_display_create(struct drm_device *dev)
 
 	/* small shared memory area we use for notifiers and semaphores */
 	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &disp->sync);
+			     0, 0x0000, NULL, NULL, &disp->sync);
 	if (!ret) {
 		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
 		if (!ret) {
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 08fad3668a1c..22d242b37962 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -100,7 +100,7 @@ nv50_fence_create(struct nouveau_drm *drm)
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, &priv->bo);
+			     0, 0x0000, NULL, NULL, &priv->bo);
 	if (!ret) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
 		if (!ret) {
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 1e5017f905db..d6c6c87c3f07 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -125,7 +125,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
 	nouveau_bo_vma_del(priv->bo, &fctx->vma);
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
-	kfree(fctx);
+	nouveau_fence_context_free(&fctx->base);
 }
 
 int
@@ -232,7 +232,7 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.uevent = true;
 
 	ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
-			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
+			     TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL, &priv->bo);
 	if (ret == 0) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
 		if (ret == 0) {
@@ -246,7 +246,7 @@ nv84_fence_create(struct nouveau_drm *drm)
 
 	if (ret == 0)
 		ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
-				     TTM_PL_FLAG_TT, 0, 0, NULL,
+				     TTM_PL_FLAG_TT, 0, 0, NULL, NULL,
 				     &priv->bo_gart);
 	if (ret == 0) {
 		ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index ff0772728eb0..7c6cafe21f5f 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -537,7 +537,7 @@ int qxl_gem_prime_pin(struct drm_gem_object *obj);
 void qxl_gem_prime_unpin(struct drm_gem_object *obj);
 struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *qxl_gem_prime_import_sg_table(
-	struct drm_device *dev, size_t size,
+	struct drm_device *dev, struct dma_buf_attachment *attach,
 	struct sg_table *sgt);
 void *qxl_gem_prime_vmap(struct drm_gem_object *obj);
 void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 69c104c3240f..cdeaf08fdc74 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -110,7 +110,7 @@ int qxl_bo_create(struct qxl_device *qdev,
 
 	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, 0, !kernel, NULL, size,
-			NULL, &qxl_ttm_bo_destroy);
+			NULL, NULL, &qxl_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
 			dev_err(qdev->dev,
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index ba0689c728e8..3d031b50a8fd 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -46,7 +46,7 @@ struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
 }
 
 struct drm_gem_object *qxl_gem_prime_import_sg_table(
-	struct drm_device *dev, size_t size,
+	struct drm_device *dev, struct dma_buf_attachment *attach,
 	struct sg_table *table)
 {
 	WARN_ONCE(1, "not implemented");
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index de108427a197..69c6a835bcd5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -135,7 +135,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
 			    struct drm_mode_create_dumb *args);
 struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
-							size_t size,
+							struct dma_buf_attachment *,
 							struct sg_table *sg);
 int radeon_gem_prime_pin(struct drm_gem_object *obj);
 void radeon_gem_prime_unpin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8abee5fa93bd..0e82f0223fd4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -216,7 +216,7 @@ int radeon_bo_create(struct radeon_device *rdev,
 	down_read(&rdev->pm.mclk_lock);
 	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, page_align, !kernel, NULL,
-			acc_size, sg, &radeon_ttm_bo_destroy);
+			acc_size, sg, NULL, &radeon_ttm_bo_destroy);
 	up_read(&rdev->pm.mclk_lock);
 	if (unlikely(r != 0)) {
 		return r;
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index d5414d42e44b..171daf7fc483 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -27,6 +27,7 @@
 
 #include "radeon.h"
 #include <drm/radeon_drm.h>
+#include <linux/dma-buf.h>
 
 struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
@@ -57,14 +58,14 @@ void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 }
 
 struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
-							size_t size,
+							struct dma_buf_attachment *attach,
							struct sg_table *sg)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_bo *bo;
 	int ret;
 
-	ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
+	ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
			       RADEON_GEM_DOMAIN_GTT, 0, sg, &bo);
 	if (ret)
 		return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a11969acfea5..8f5cec67c47d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1068,6 +1068,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 		struct file *persistent_swap_storage,
 		size_t acc_size,
 		struct sg_table *sg,
+		struct reservation_object *resv,
 		void (*destroy) (struct ttm_buffer_object *))
 {
 	int ret = 0;
@@ -1121,8 +1122,13 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->persistent_swap_storage = persistent_swap_storage;
 	bo->acc_size = acc_size;
 	bo->sg = sg;
-	bo->resv = &bo->ttm_resv;
-	reservation_object_init(bo->resv);
+	if (resv) {
+		bo->resv = resv;
+		lockdep_assert_held(&bo->resv->lock.base);
+	} else {
+		bo->resv = &bo->ttm_resv;
+		reservation_object_init(&bo->ttm_resv);
+	}
 	atomic_inc(&bo->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
 
@@ -1135,13 +1141,19 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
				 bo->mem.num_pages);
 
-	locked = ww_mutex_trylock(&bo->resv->lock);
-	WARN_ON(!locked);
+	/* passed reservation objects should already be locked,
+	 * since otherwise lockdep will be angered in radeon.
+	 */
+	if (!resv) {
+		locked = ww_mutex_trylock(&bo->resv->lock);
+		WARN_ON(!locked);
+	}
 
 	if (likely(!ret))
 		ret = ttm_bo_validate(bo, placement, interruptible, false);
 
-	ttm_bo_unreserve(bo);
+	if (!resv)
+		ttm_bo_unreserve(bo);
 
 	if (unlikely(ret))
 		ttm_bo_unref(&bo);
@@ -1199,7 +1211,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
 	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
-			  NULL, NULL);
+			  NULL, NULL, NULL);
 	if (likely(ret == 0))
 		*p_bo = bo;
 
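The resv parameter comes with a locking contract: a caller that supplies its own reservation object must already hold its ww_mutex (lockdep_assert_held() checks this), and ttm_bo_init() will neither trylock nor unreserve it, leaving the unlock to the caller, which is exactly what nouveau_prime.c does above. Here is a hedged sketch of an importer following that contract; the mydrv_* wrapper is hypothetical and not code from the commit.

#include <linux/dma-buf.h>
#include <linux/reservation.h>
#include <drm/ttm/ttm_bo_api.h>

static int mydrv_import_bo(struct ttm_bo_device *bdev,
			   struct ttm_buffer_object *bo,
			   struct dma_buf *dmabuf,
			   struct ttm_placement *placement,
			   struct sg_table *sg, size_t acc_size)
{
	struct reservation_object *resv = dmabuf->resv;
	int ret;

	/* the caller, not ttm_bo_init(), owns the lock for a shared resv */
	ww_mutex_lock(&resv->lock, NULL);
	ret = ttm_bo_init(bdev, bo, dmabuf->size, ttm_bo_type_sg,
			  placement, 0, false, NULL, acc_size,
			  sg, resv, NULL /* destroy: kfree() */);
	ww_mutex_unlock(&resv->lock);

	return ret;
}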
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 26584316cb78..026de7cea0f6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -430,7 +430,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
-			  NULL, acc_size, NULL, bo_free);
+			  NULL, acc_size, NULL, NULL, bo_free);
 	return ret;
 }
 
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index d2c2b7f3a4e2..53ed87698a74 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -87,6 +87,7 @@ struct drm_gem_object;
 struct device_node;
 struct videomode;
 struct reservation_object;
+struct dma_buf_attachment;
 
 /*
  * 4 debug categories are defined:
@@ -570,7 +571,8 @@ struct drm_driver {
				struct drm_gem_object *obj);
 	struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
 	struct drm_gem_object *(*gem_prime_import_sg_table)(
-				struct drm_device *dev, size_t size,
+				struct drm_device *dev,
+				struct dma_buf_attachment *attach,
				struct sg_table *sgt);
 	void *(*gem_prime_vmap)(struct drm_gem_object *obj);
 	void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 42f11f3a8d39..2ff35f3de9c5 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -45,7 +45,8 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
 
 struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
+drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
+				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt);
 int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 70b44917c368..0ccf7f267ff9 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -460,6 +460,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @acc_size: Accounted size for this object.
+ * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
@@ -487,6 +488,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
			struct file *persistent_swap_storage,
			size_t acc_size,
			struct sg_table *sg,
+			struct reservation_object *resv,
			void (*destroy) (struct ttm_buffer_object *));
 
 /**