 drivers/gpu/drm/nouveau/nouveau_bo.c    |  7 +-
 drivers/gpu/drm/nouveau/nouveau_fence.c | 73 ++++++++++++++++++++++++++++++--
 drivers/gpu/drm/nouveau/nouveau_fence.h |  2 +
 drivers/gpu/drm/nouveau/nouveau_gem.c   | 41 ++++++++++++++---
 4 files changed, 108 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 86eef68517cb..a1cf8255db50 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1550,13 +1550,8 @@ void
 nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 {
 	if (vma->node) {
-		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
-			spin_lock(&nvbo->bo.bdev->fence_lock);
-			ttm_bo_wait(&nvbo->bo, false, false, false);
-			spin_unlock(&nvbo->bo.bdev->fence_lock);
+		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
 			nouveau_vm_unmap(vma);
-		}
-
 		nouveau_vm_put(vma);
 		list_del(&vma->head);
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 6c946837a0aa..1680d9187bab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -35,15 +35,34 @@
 
 #include <engine/fifo.h>
 
+struct fence_work {
+	struct work_struct base;
+	struct list_head head;
+	void (*func)(void *);
+	void *data;
+};
+
+static void
+nouveau_fence_signal(struct nouveau_fence *fence)
+{
+	struct fence_work *work, *temp;
+
+	list_for_each_entry_safe(work, temp, &fence->work, head) {
+		schedule_work(&work->base);
+		list_del(&work->head);
+	}
+
+	fence->channel = NULL;
+	list_del(&fence->head);
+}
+
 void
 nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
 	struct nouveau_fence *fence, *fnext;
 	spin_lock(&fctx->lock);
 	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
-		fence->channel = NULL;
-		list_del(&fence->head);
-		nouveau_fence_unref(&fence);
+		nouveau_fence_signal(fence);
 	}
 	spin_unlock(&fctx->lock);
 }
@@ -57,6 +76,50 @@ nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
 }
 
 static void
+nouveau_fence_work_handler(struct work_struct *kwork)
+{
+	struct fence_work *work = container_of(kwork, typeof(*work), base);
+	work->func(work->data);
+	kfree(work);
+}
+
+void
+nouveau_fence_work(struct nouveau_fence *fence,
+		   void (*func)(void *), void *data)
+{
+	struct nouveau_channel *chan = fence->channel;
+	struct nouveau_fence_chan *fctx;
+	struct fence_work *work = NULL;
+
+	if (nouveau_fence_done(fence)) {
+		func(data);
+		return;
+	}
+
+	fctx = chan->fence;
+	work = kmalloc(sizeof(*work), GFP_KERNEL);
+	if (!work) {
+		WARN_ON(nouveau_fence_wait(fence, false, false));
+		func(data);
+		return;
+	}
+
+	spin_lock(&fctx->lock);
+	if (!fence->channel) {
+		spin_unlock(&fctx->lock);
+		kfree(work);
+		func(data);
+		return;
+	}
+
+	INIT_WORK(&work->base, nouveau_fence_work_handler);
+	work->func = func;
+	work->data = data;
+	list_add(&work->head, &fence->work);
+	spin_unlock(&fctx->lock);
+}
+
+static void
 nouveau_fence_update(struct nouveau_channel *chan)
 {
 	struct nouveau_fence_chan *fctx = chan->fence;
@@ -67,8 +130,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
 		if (fctx->read(chan) < fence->sequence)
 			break;
 
-		fence->channel = NULL;
-		list_del(&fence->head);
+		nouveau_fence_signal(fence);
 		nouveau_fence_unref(&fence);
 	}
 	spin_unlock(&fctx->lock);
@@ -265,6 +327,7 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
 	if (!fence)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&fence->work);
 	fence->sysmem = sysmem;
 	kref_init(&fence->kref);
 
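
The nouveau_fence_work() machinery above is the heart of this patch: a callback either runs immediately (fence already signalled, allocation failure, or dead channel) or is queued on fence->work and dispatched from nouveau_fence_signal() through the kernel workqueue. The following is a minimal single-threaded userspace sketch of that pattern, not the kernel code itself; every name in it (toy_fence, toy_work, toy_fence_work, toy_fence_signal) is invented for illustration, and the plain linked list stands in for the spinlock-protected list and schedule_work().

#include <stdio.h>
#include <stdlib.h>

struct toy_work {
	struct toy_work *next;
	void (*func)(void *);
	void *data;
};

struct toy_fence {
	int done;		/* nouveau checks a sequence number instead */
	struct toy_work *work;	/* pending callbacks, like fence->work */
};

/* Mirrors nouveau_fence_work(): run now if possible, else queue. */
static void toy_fence_work(struct toy_fence *fence,
			   void (*func)(void *), void *data)
{
	struct toy_work *work;

	if (fence->done) {		/* fence already signalled */
		func(data);
		return;
	}

	work = malloc(sizeof(*work));
	if (!work) {			/* the kernel waits synchronously here */
		func(data);
		return;
	}

	work->func = func;
	work->data = data;
	work->next = fence->work;
	fence->work = work;
}

/* Mirrors nouveau_fence_signal(): drain and dispatch queued work. */
static void toy_fence_signal(struct toy_fence *fence)
{
	struct toy_work *work, *next;

	fence->done = 1;
	for (work = fence->work; work; work = next) {
		next = work->next;
		work->func(work->data);	/* schedule_work() in the kernel */
		free(work);
	}
	fence->work = NULL;
}

static void cleanup(void *data)
{
	printf("deferred cleanup: %s\n", (const char *)data);
}

int main(void)
{
	struct toy_fence fence = { 0, NULL };

	toy_fence_work(&fence, cleanup, (void *)"vma");	/* queued: not done */
	toy_fence_signal(&fence);			/* runs "vma" callback */
	toy_fence_work(&fence, cleanup, (void *)"now");	/* done: runs at once */
	return 0;
}
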
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index c89943407b52..c57bb61da58c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -5,6 +5,7 @@ struct nouveau_drm;
 
 struct nouveau_fence {
 	struct list_head head;
+	struct list_head work;
 	struct kref kref;
 
 	bool sysmem;
@@ -22,6 +23,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
 
 int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
+void nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *);
 int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
 int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index b4b4d0c1f4af..c0e324b557c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -101,6 +101,41 @@ out:
 	return ret;
 }
 
+static void
+nouveau_gem_object_delete(void *data)
+{
+	struct nouveau_vma *vma = data;
+	nouveau_vm_unmap(vma);
+	nouveau_vm_put(vma);
+	kfree(vma);
+}
+
+static void
+nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
+{
+	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
+	struct nouveau_fence *fence = NULL;
+
+	list_del(&vma->head);
+
+	if (mapped) {
+		spin_lock(&nvbo->bo.bdev->fence_lock);
+		if (nvbo->bo.sync_obj)
+			fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
+	}
+
+	if (fence) {
+		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
+	} else {
+		if (mapped)
+			nouveau_vm_unmap(vma);
+		nouveau_vm_put(vma);
+		kfree(vma);
+	}
+	nouveau_fence_unref(&fence);
+}
+
 void
 nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
@@ -118,10 +153,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 
 	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
 	if (vma) {
-		if (--vma->refcount == 0) {
-			nouveau_bo_vma_del(nvbo, vma);
-			kfree(vma);
-		}
+		if (--vma->refcount == 0)
+			nouveau_gem_object_unmap(nvbo, vma);
 	}
 	ttm_bo_unreserve(&nvbo->bo);
 }