author     Ben Skeggs <bskeggs@redhat.com>    2013-02-13 22:20:17 -0500
committer  Ben Skeggs <bskeggs@redhat.com>    2013-02-20 01:00:53 -0500
commit     827520ce06568f699dad275dcca61647cce08757 (patch)
tree       707c9ab051ca1bd8a14f14fb153510c7fbf133c3
parent     60e5cb79cbd27a36836fc04177d7c323ee873563 (diff)
drm/nouveau/fence: make internal hooks part of the context
A step towards being able to provide fences from other engines not
connected to PFIFO.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c    |  6
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c    |  6
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c    |  6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c    | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c    | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fence.c    | 19
8 files changed, 48 insertions(+), 45 deletions(-)
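In short, the emit/sync/read hooks move from the driver-wide struct nouveau_fence_priv onto each channel's struct nouveau_fence_chan, so the generic fence code only ever consults the context it was handed, and a non-PFIFO engine could later install its own hooks per context. A minimal stand-alone C sketch of that call pattern (the struct and hook names below are simplified stand-ins, not the real kernel definitions):

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for a per-channel fence context; illustrative only. */
struct fence_chan {
	uint32_t sequence;                              /* last emitted sequence */
	uint32_t (*read)(struct fence_chan *);          /* per-context hook      */
	int      (*emit)(struct fence_chan *, uint32_t);
};

/* One possible backend: a trivial software fence. */
static uint32_t sw_read(struct fence_chan *fctx)             { return fctx->sequence; }
static int      sw_emit(struct fence_chan *fctx, uint32_t s) { fctx->sequence = s; return 0; }

/* Generic code touches only the hooks of the context it was given,
 * mirroring nouveau_fence_emit()/nouveau_fence_update() after this patch. */
static int fence_emit(struct fence_chan *fctx, uint32_t seq)
{
	return fctx->emit(fctx, seq);
}

int main(void)
{
	struct fence_chan fctx = { .read = sw_read, .emit = sw_emit };

	fence_emit(&fctx, 1);
	printf("fence done: %s\n", fctx.read(&fctx) >= 1 ? "yes" : "no");
	return 0;
}
```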
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index b7349a636546..6a7a5b576273 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -61,13 +61,12 @@ nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
 static void
 nouveau_fence_update(struct nouveau_channel *chan)
 {
-	struct nouveau_fence_priv *priv = chan->drm->fence;
 	struct nouveau_fence_chan *fctx = chan->fence;
 	struct nouveau_fence *fence, *fnext;
 
 	spin_lock(&fctx->lock);
 	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
-		if (priv->read(chan) < fence->sequence)
+		if (fctx->read(chan) < fence->sequence)
 			break;
 
 		if (fence->work)
@@ -82,7 +81,6 @@ nouveau_fence_update(struct nouveau_channel *chan)
 int
 nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 {
-	struct nouveau_fence_priv *priv = chan->drm->fence;
 	struct nouveau_fence_chan *fctx = chan->fence;
 	int ret;
 
@@ -90,7 +88,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 	fence->timeout = jiffies + (3 * DRM_HZ);
 	fence->sequence = ++fctx->sequence;
 
-	ret = priv->emit(fence);
+	ret = fctx->emit(fence);
 	if (!ret) {
 		kref_get(&fence->kref);
 		spin_lock(&fctx->lock);
@@ -219,14 +217,14 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 int
 nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
 {
-	struct nouveau_fence_priv *priv = chan->drm->fence;
+	struct nouveau_fence_chan *fctx = chan->fence;
 	struct nouveau_channel *prev;
 	int ret = 0;
 
 	prev = fence ? fence->channel : NULL;
 	if (prev) {
 		if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
-			ret = priv->sync(fence, prev, chan);
+			ret = fctx->sync(fence, prev, chan);
 			if (unlikely(ret))
 				ret = nouveau_fence_wait(fence, true, false);
 		}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index fb0993c3dc39..a5c47e348e22 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -29,6 +29,13 @@ struct nouveau_fence_chan {
 	struct list_head pending;
 	struct list_head flip;
 
+	int (*emit)(struct nouveau_fence *);
+	int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
+		    struct nouveau_channel *);
+	u32 (*read)(struct nouveau_channel *);
+	int (*emit32)(struct nouveau_channel *, u64, u32);
+	int (*sync32)(struct nouveau_channel *, u64, u32);
+
 	spinlock_t lock;
 	u32 sequence;
 };
@@ -39,12 +46,6 @@ struct nouveau_fence_priv {
 	void (*resume)(struct nouveau_drm *);
 	int (*context_new)(struct nouveau_channel *);
 	void (*context_del)(struct nouveau_channel *);
-	int (*emit32)(struct nouveau_channel *, u64, u32);
-	int (*emit)(struct nouveau_fence *);
-	int (*sync32)(struct nouveau_channel *, u64, u32);
-	int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
-		    struct nouveau_channel *);
-	u32 (*read)(struct nouveau_channel *);
 
 	wait_queue_head_t waiting;
 	bool uevent;
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index a220b94ba9f2..94eadd1dd10a 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -78,6 +78,9 @@ nv04_fence_context_new(struct nouveau_channel *chan)
 	struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
 	if (fctx) {
 		nouveau_fence_context_new(&fctx->base);
+		fctx->base.emit = nv04_fence_emit;
+		fctx->base.sync = nv04_fence_sync;
+		fctx->base.read = nv04_fence_read;
 		chan->fence = fctx;
 		return 0;
 	}
@@ -104,8 +107,5 @@ nv04_fence_create(struct nouveau_drm *drm)
 	priv->base.dtor = nv04_fence_destroy;
 	priv->base.context_new = nv04_fence_context_new;
 	priv->base.context_del = nv04_fence_context_del;
-	priv->base.emit = nv04_fence_emit;
-	priv->base.sync = nv04_fence_sync;
-	priv->base.read = nv04_fence_read;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index e4f124a48d4e..06f434f03fba 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -75,6 +75,9 @@ nv10_fence_context_new(struct nouveau_channel *chan)
 		return -ENOMEM;
 
 	nouveau_fence_context_new(&fctx->base);
+	fctx->base.emit = nv10_fence_emit;
+	fctx->base.read = nv10_fence_read;
+	fctx->base.sync = nv10_fence_sync;
 	return 0;
 }
 
@@ -102,9 +105,6 @@ nv10_fence_create(struct nouveau_drm *drm)
 	priv->base.dtor = nv10_fence_destroy;
 	priv->base.context_new = nv10_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
-	priv->base.emit = nv10_fence_emit;
-	priv->base.read = nv10_fence_read;
-	priv->base.sync = nv10_fence_sync;
 	spin_lock_init(&priv->lock);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index fe194451e0ad..8e47a9bae8c3 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -84,6 +84,9 @@ nv17_fence_context_new(struct nouveau_channel *chan)
 		return -ENOMEM;
 
 	nouveau_fence_context_new(&fctx->base);
+	fctx->base.emit = nv10_fence_emit;
+	fctx->base.read = nv10_fence_read;
+	fctx->base.sync = nv17_fence_sync;
 
 	ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
 				 NvSema, 0x0002,
@@ -121,9 +124,6 @@ nv17_fence_create(struct nouveau_drm *drm)
 	priv->base.resume = nv17_fence_resume;
 	priv->base.context_new = nv17_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
-	priv->base.emit = nv10_fence_emit;
-	priv->base.read = nv10_fence_read;
-	priv->base.sync = nv17_fence_sync;
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 72791d658b40..f9701e567db8 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -46,6 +46,9 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 		return -ENOMEM;
 
 	nouveau_fence_context_new(&fctx->base);
+	fctx->base.emit = nv10_fence_emit;
+	fctx->base.read = nv10_fence_read;
+	fctx->base.sync = nv17_fence_sync;
 
 	ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
 				 NvSema, 0x0002,
@@ -88,11 +91,9 @@ nv50_fence_create(struct nouveau_drm *drm)
 		return -ENOMEM;
 
 	priv->base.dtor = nv10_fence_destroy;
+	priv->base.resume = nv17_fence_resume;
 	priv->base.context_new = nv50_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
-	priv->base.emit = nv10_fence_emit;
-	priv->base.read = nv10_fence_read;
-	priv->base.sync = nv17_fence_sync;
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -108,13 +109,11 @@ nv50_fence_create(struct nouveau_drm *drm)
 			nouveau_bo_ref(NULL, &priv->bo);
 	}
 
-	if (ret == 0) {
-		nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
-		priv->base.sync = nv17_fence_sync;
-		priv->base.resume = nv17_fence_resume;
+	if (ret) {
+		nv10_fence_destroy(drm);
+		return ret;
 	}
 
-	if (ret)
-		nv10_fence_destroy(drm);
+	nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
 	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 8a80ad7c0cf5..bc6493c1a1ef 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -80,22 +80,20 @@ int
 nv84_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
-	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx = chan->fence;
 	struct nouveau_fifo_chan *fifo = (void *)chan->object;
 	u64 addr = fctx->vma.offset + fifo->chid * 16;
-	return priv->base.emit32(chan, addr, fence->sequence);
+	return fctx->base.emit32(chan, addr, fence->sequence);
 }
 
 int
 nv84_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
-	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx = chan->fence;
 	struct nouveau_fifo_chan *fifo = (void *)prev->object;
 	u64 addr = fctx->vma.offset + fifo->chid * 16;
-	return priv->base.sync32(chan, addr, fence->sequence);
+	return fctx->base.sync32(chan, addr, fence->sequence);
 }
 
 u32
@@ -139,6 +137,11 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 		return -ENOMEM;
 
 	nouveau_fence_context_new(&fctx->base);
+	fctx->base.emit = nv84_fence_emit;
+	fctx->base.sync = nv84_fence_sync;
+	fctx->base.read = nv84_fence_read;
+	fctx->base.emit32 = nv84_fence_emit32;
+	fctx->base.sync32 = nv84_fence_sync32;
 
 	ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
 	if (ret)
@@ -213,11 +216,6 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.resume = nv84_fence_resume;
 	priv->base.context_new = nv84_fence_context_new;
 	priv->base.context_del = nv84_fence_context_del;
-	priv->base.emit32 = nv84_fence_emit32;
-	priv->base.emit = nv84_fence_emit;
-	priv->base.sync32 = nv84_fence_sync32;
-	priv->base.sync = nv84_fence_sync;
-	priv->base.read = nv84_fence_read;
 
 	init_waitqueue_head(&priv->base.waiting);
 	priv->base.uevent = true;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 8213f7de92fa..b7def390d808 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -66,6 +66,18 @@ nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
 	return ret;
 }
 
+static int
+nvc0_fence_context_new(struct nouveau_channel *chan)
+{
+	int ret = nv84_fence_context_new(chan);
+	if (ret == 0) {
+		struct nv84_fence_chan *fctx = chan->fence;
+		fctx->base.emit32 = nvc0_fence_emit32;
+		fctx->base.sync32 = nvc0_fence_sync32;
+	}
+	return ret;
+}
+
 int
 nvc0_fence_create(struct nouveau_drm *drm)
 {
@@ -80,13 +92,8 @@ nvc0_fence_create(struct nouveau_drm *drm)
 	priv->base.dtor = nv84_fence_destroy;
 	priv->base.suspend = nv84_fence_suspend;
 	priv->base.resume = nv84_fence_resume;
-	priv->base.context_new = nv84_fence_context_new;
+	priv->base.context_new = nvc0_fence_context_new;
 	priv->base.context_del = nv84_fence_context_del;
-	priv->base.emit32 = nvc0_fence_emit32;
-	priv->base.emit = nv84_fence_emit;
-	priv->base.sync32 = nvc0_fence_sync32;
-	priv->base.sync = nv84_fence_sync;
-	priv->base.read = nv84_fence_read;
 
 	init_waitqueue_head(&priv->base.waiting);
 	priv->base.uevent = true;