author		Ben Skeggs <bskeggs@redhat.com>		2013-02-13 18:28:37 -0500
committer	Ben Skeggs <bskeggs@redhat.com>		2013-02-20 01:00:51 -0500
commit		a34caf78f26bda63869471cb3f46f354f4658758 (patch)
tree		dc9b1ce9d37ea3d2f0c6a34f7e4c90cc5be3a8f0
parent		fa531bc8b4278010fd11819c089f6679890addee (diff)
drm/nv84/fence: access fences with full virtual address, not offset
Allows most of the code to be shared between nv84/nvc0 implementations,
and paves the way for doing emit/sync on non-VRAM buffers (multi-gpu,
dma-buf).
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_fence.h	 21
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_display.c	 28
-rw-r--r--	drivers/gpu/drm/nouveau/nv84_fence.c	153
-rw-r--r--	drivers/gpu/drm/nouveau/nvc0_fence.c	138
4 files changed, 149 insertions(+), 191 deletions(-)
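
Note: both the nv84 and nvc0 paths now address a channel's fence slot as vma.offset + chid * 16, a full virtual address inside the shared fence buffer object mapped into each channel's VM, rather than a bare offset into a per-channel DMA object. The standalone C sketch below only models that address arithmetic for illustration; the struct and function names are simplified stand-ins, not the kernel's definitions.

/* Hypothetical, self-contained model of the per-channel fence slot address
 * used by the shared nv84/nvc0 path; not the kernel structures themselves. */
#include <stdint.h>
#include <stdio.h>

struct fence_vma_model {
	uint64_t offset;	/* where the fence BO is mapped in the channel's VM */
};

/* Each FIFO channel owns one 16-byte slot inside the fence buffer object. */
static uint64_t fence_slot_addr(const struct fence_vma_model *vma, int chid)
{
	return vma->offset + (uint64_t)chid * 16;
}

int main(void)
{
	struct fence_vma_model vma = { .offset = 0x20000000ULL };	/* example mapping */

	/* the semaphore WRITE_LONG/ACQUIRE_GEQUAL methods would target this address */
	printf("fence slot for channel 3: 0x%llx\n",
	       (unsigned long long)fence_slot_addr(&vma, 3));
	return 0;
}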
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index be6166eb966d..2324911fc4c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -68,8 +68,27 @@ void nv17_fence_resume(struct nouveau_drm *drm);
 int nv50_fence_create(struct nouveau_drm *);
 int nv84_fence_create(struct nouveau_drm *);
 int nvc0_fence_create(struct nouveau_drm *);
-u64 nvc0_fence_crtc(struct nouveau_channel *, int crtc);
 
 int nouveau_flip_complete(void *chan);
 
+struct nv84_fence_chan {
+	struct nouveau_fence_chan base;
+	struct nouveau_vma vma;
+	struct nouveau_vma dispc_vma[4];
+};
+
+struct nv84_fence_priv {
+	struct nouveau_fence_priv base;
+	struct nouveau_bo *bo;
+	u32 *suspend;
+};
+
+u64 nv84_fence_crtc(struct nouveau_channel *, int);
+u32 nv84_fence_read(struct nouveau_channel *);
+int nv84_fence_context_new(struct nouveau_channel *);
+void nv84_fence_context_del(struct nouveau_channel *);
+bool nv84_fence_suspend(struct nouveau_drm *);
+void nv84_fence_resume(struct nouveau_drm *);
+void nv84_fence_destroy(struct nouveau_drm *);
+
 #endif
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index c9da4f108924..102a8734d377 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -502,7 +502,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	if (ret)
 		return ret;
 
-	if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+	if (nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
 		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
 		OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
 		OUT_RING (chan, sync->sem.offset);
@@ -512,24 +512,36 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		OUT_RING (chan, sync->sem.offset ^ 0x10);
 		OUT_RING (chan, 0x74b1e000);
 		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-		if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS)
-			OUT_RING (chan, NvSema);
-		else
-			OUT_RING (chan, chan->vram);
+		OUT_RING (chan, NvSema);
+	} else
+	if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+		u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
+		offset += sync->sem.offset;
+
+		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING (chan, upper_32_bits(offset));
+		OUT_RING (chan, lower_32_bits(offset));
+		OUT_RING (chan, 0xf00d0000 | sync->sem.value);
+		OUT_RING (chan, 0x00000002);
+		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING (chan, upper_32_bits(offset));
+		OUT_RING (chan, lower_32_bits(offset ^ 0x10));
+		OUT_RING (chan, 0x74b1e000);
+		OUT_RING (chan, 0x00000001);
 	} else {
-		u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
+		u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
 		offset += sync->sem.offset;
 
 		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
 		OUT_RING (chan, upper_32_bits(offset));
 		OUT_RING (chan, lower_32_bits(offset));
 		OUT_RING (chan, 0xf00d0000 | sync->sem.value);
-		OUT_RING (chan, 0x1002);
+		OUT_RING (chan, 0x00001002);
 		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
 		OUT_RING (chan, upper_32_bits(offset));
 		OUT_RING (chan, lower_32_bits(offset ^ 0x10));
 		OUT_RING (chan, 0x74b1e000);
-		OUT_RING (chan, 0x1001);
+		OUT_RING (chan, 0x00001001);
 	}
 
 	FIRE_RING (chan);
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index e64e8154a5af..58c2401b18ff 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -23,6 +23,7 @@
  */
 
 #include <core/object.h>
+#include <core/client.h>
 #include <core/class.h>
 
 #include <engine/fifo.h>
@@ -33,80 +34,96 @@
 
 #include "nv50_display.h"
 
-struct nv84_fence_chan {
-	struct nouveau_fence_chan base;
-};
-
-struct nv84_fence_priv {
-	struct nouveau_fence_priv base;
-	struct nouveau_gpuobj *mem;
-};
+u64
+nv84_fence_crtc(struct nouveau_channel *chan, int crtc)
+{
+	struct nv84_fence_chan *fctx = chan->fence;
+	return fctx->dispc_vma[crtc].offset;
+}
 
 static int
 nv84_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
+	struct nv84_fence_chan *fctx = chan->fence;
 	struct nouveau_fifo_chan *fifo = (void *)chan->object;
-	int ret = RING_SPACE(chan, 8);
+	u64 addr = fctx->vma.offset + fifo->chid * 16;
+	int ret;
+
+	ret = RING_SPACE(chan, 8);
 	if (ret == 0) {
 		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-		OUT_RING (chan, NvSema);
+		OUT_RING (chan, chan->vram);
 		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
-		OUT_RING (chan, upper_32_bits(fifo->chid * 16));
-		OUT_RING (chan, lower_32_bits(fifo->chid * 16));
+		OUT_RING (chan, upper_32_bits(addr));
+		OUT_RING (chan, lower_32_bits(addr));
 		OUT_RING (chan, fence->sequence);
 		OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
 		OUT_RING (chan, 0x00000000);
 		FIRE_RING (chan);
 	}
+
 	return ret;
 }
 
-
 static int
 nv84_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
+	struct nv84_fence_chan *fctx = chan->fence;
 	struct nouveau_fifo_chan *fifo = (void *)prev->object;
-	int ret = RING_SPACE(chan, 7);
+	u64 addr = fctx->vma.offset + fifo->chid * 16;
+	int ret;
+
+	ret = RING_SPACE(chan, 7);
 	if (ret == 0) {
 		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-		OUT_RING (chan, NvSema);
+		OUT_RING (chan, chan->vram);
 		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(fifo->chid * 16));
-		OUT_RING (chan, lower_32_bits(fifo->chid * 16));
+		OUT_RING (chan, upper_32_bits(addr));
+		OUT_RING (chan, lower_32_bits(addr));
 		OUT_RING (chan, fence->sequence);
 		OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
 		FIRE_RING (chan);
 	}
+
 	return ret;
 }
 
-static u32
+u32
 nv84_fence_read(struct nouveau_channel *chan)
 {
 	struct nouveau_fifo_chan *fifo = (void *)chan->object;
 	struct nv84_fence_priv *priv = chan->drm->fence;
-	return nv_ro32(priv->mem, fifo->chid * 16);
+	return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
 }
 
-static void
+void
 nv84_fence_context_del(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->drm->dev;
+	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx = chan->fence;
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+		nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
+	}
+
+	nouveau_bo_vma_del(priv->bo, &fctx->vma);
 	nouveau_fence_context_del(&fctx->base);
 	chan->fence = NULL;
 	kfree(fctx);
 }
 
-static int
+int
 nv84_fence_context_new(struct nouveau_channel *chan)
 {
-	struct drm_device *dev = chan->drm->dev;
 	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	struct nouveau_client *client = nouveau_client(fifo);
 	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx;
-	struct nouveau_object *object;
 	int ret, i;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -115,43 +132,59 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 
 	nouveau_fence_context_new(&fctx->base);
 
-	ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
-				 NvSema, 0x0002,
-				 &(struct nv_dma_class) {
-					.flags = NV_DMA_TARGET_VRAM |
-						 NV_DMA_ACCESS_RDWR,
-					.start = priv->mem->addr,
-					.limit = priv->mem->addr +
-						 priv->mem->size - 1,
-				 }, sizeof(struct nv_dma_class),
-				 &object);
-
-	/* dma objects for display sync channel semaphore blocks */
-	for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
-		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+	ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
+	if (ret)
+		nv84_fence_context_del(chan);
 
-		ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
-					 NvEvoSema0 + i, 0x003d,
-					 &(struct nv_dma_class) {
-						.flags = NV_DMA_TARGET_VRAM |
-							 NV_DMA_ACCESS_RDWR,
-						.start = bo->bo.offset,
-						.limit = bo->bo.offset + 0xfff,
-					 }, sizeof(struct nv_dma_class),
-					 &object);
+	/* map display semaphore buffers into channel's vm */
+	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
+		ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
 	}
 
-	if (ret)
-		nv84_fence_context_del(chan);
-	nv_wo32(priv->mem, fifo->chid * 16, 0x00000000);
+	nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
 	return ret;
 }
 
-static void
+bool
+nv84_fence_suspend(struct nouveau_drm *drm)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+	struct nv84_fence_priv *priv = drm->fence;
+	int i;
+
+	priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
+	if (priv->suspend) {
+		for (i = 0; i <= pfifo->max; i++)
+			priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
+	}
+
+	return priv->suspend != NULL;
+}
+
+void
+nv84_fence_resume(struct nouveau_drm *drm)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+	struct nv84_fence_priv *priv = drm->fence;
+	int i;
+
+	if (priv->suspend) {
+		for (i = 0; i <= pfifo->max; i++)
+			nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
+		vfree(priv->suspend);
+		priv->suspend = NULL;
+	}
+}
+
+void
 nv84_fence_destroy(struct nouveau_drm *drm)
 {
 	struct nv84_fence_priv *priv = drm->fence;
-	nouveau_gpuobj_ref(NULL, &priv->mem);
+	nouveau_bo_unmap(priv->bo);
+	if (priv->bo)
+		nouveau_bo_unpin(priv->bo);
+	nouveau_bo_ref(NULL, &priv->bo);
 	drm->fence = NULL;
 	kfree(priv);
 }
@@ -161,7 +194,6 @@ nv84_fence_create(struct nouveau_drm *drm)
 {
 	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
 	struct nv84_fence_priv *priv;
-	u32 chan = pfifo->max + 1;
 	int ret;
 
 	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -169,6 +201,8 @@ nv84_fence_create(struct nouveau_drm *drm)
 		return -ENOMEM;
 
 	priv->base.dtor = nv84_fence_destroy;
+	priv->base.suspend = nv84_fence_suspend;
+	priv->base.resume = nv84_fence_resume;
 	priv->base.context_new = nv84_fence_context_new;
 	priv->base.context_del = nv84_fence_context_del;
 	priv->base.emit = nv84_fence_emit;
@@ -178,8 +212,19 @@ nv84_fence_create(struct nouveau_drm *drm)
 	init_waitqueue_head(&priv->base.waiting);
 	priv->base.uevent = true;
 
-	ret = nouveau_gpuobj_new(drm->device, NULL, chan * 16, 0x1000, 0,
-				 &priv->mem);
+	ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
+	if (ret == 0) {
+		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+		if (ret == 0) {
+			ret = nouveau_bo_map(priv->bo);
+			if (ret)
+				nouveau_bo_unpin(priv->bo);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &priv->bo);
+	}
+
 	if (ret)
 		nv84_fence_destroy(drm);
 	return ret;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index d8ed2c5f4fab..e4c4ead24805 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -34,30 +34,11 @@
 
 #include "nv50_display.h"
 
-struct nvc0_fence_priv {
-	struct nouveau_fence_priv base;
-	struct nouveau_bo *bo;
-	u32 *suspend;
-};
-
-struct nvc0_fence_chan {
-	struct nouveau_fence_chan base;
-	struct nouveau_vma vma;
-	struct nouveau_vma dispc_vma[4];
-};
-
-u64
-nvc0_fence_crtc(struct nouveau_channel *chan, int crtc)
-{
-	struct nvc0_fence_chan *fctx = chan->fence;
-	return fctx->dispc_vma[crtc].offset;
-}
-
 static int
 nvc0_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
-	struct nvc0_fence_chan *fctx = chan->fence;
+	struct nv84_fence_chan *fctx = chan->fence;
 	struct nouveau_fifo_chan *fifo = (void *)chan->object;
 	u64 addr = fctx->vma.offset + fifo->chid * 16;
 	int ret;
@@ -80,7 +61,7 @@ static int
 nvc0_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
-	struct nvc0_fence_chan *fctx = chan->fence;
+	struct nv84_fence_chan *fctx = chan->fence;
 	struct nouveau_fifo_chan *fifo = (void *)prev->object;
 	u64 addr = fctx->vma.offset + fifo->chid * 16;
 	int ret;
@@ -99,124 +80,25 @@ nvc0_fence_sync(struct nouveau_fence *fence,
 	return ret;
 }
 
-static u32
-nvc0_fence_read(struct nouveau_channel *chan)
-{
-	struct nouveau_fifo_chan *fifo = (void *)chan->object;
-	struct nvc0_fence_priv *priv = chan->drm->fence;
-	return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
-}
-
-static void
-nvc0_fence_context_del(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->drm->dev;
-	struct nvc0_fence_priv *priv = chan->drm->fence;
-	struct nvc0_fence_chan *fctx = chan->fence;
-	int i;
-
-	for (i = 0; i < dev->mode_config.num_crtc; i++) {
-		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
-		nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
-	}
-
-	nouveau_bo_vma_del(priv->bo, &fctx->vma);
-	nouveau_fence_context_del(&fctx->base);
-	chan->fence = NULL;
-	kfree(fctx);
-}
-
-static int
-nvc0_fence_context_new(struct nouveau_channel *chan)
-{
-	struct nouveau_fifo_chan *fifo = (void *)chan->object;
-	struct nouveau_client *client = nouveau_client(fifo);
-	struct nvc0_fence_priv *priv = chan->drm->fence;
-	struct nvc0_fence_chan *fctx;
-	int ret, i;
-
-	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
-	if (!fctx)
-		return -ENOMEM;
-
-	nouveau_fence_context_new(&fctx->base);
-
-	ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
-	if (ret)
-		nvc0_fence_context_del(chan);
-
-	/* map display semaphore buffers into channel's vm */
-	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
-		struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
-		ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
-	}
-
-	nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
-	return ret;
-}
-
-static bool
-nvc0_fence_suspend(struct nouveau_drm *drm)
-{
-	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
-	struct nvc0_fence_priv *priv = drm->fence;
-	int i;
-
-	priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
-	if (priv->suspend) {
-		for (i = 0; i <= pfifo->max; i++)
-			priv->suspend[i] = nouveau_bo_rd32(priv->bo, i);
-	}
-
-	return priv->suspend != NULL;
-}
-
-static void
-nvc0_fence_resume(struct nouveau_drm *drm)
-{
-	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
-	struct nvc0_fence_priv *priv = drm->fence;
-	int i;
-
-	if (priv->suspend) {
-		for (i = 0; i <= pfifo->max; i++)
-			nouveau_bo_wr32(priv->bo, i, priv->suspend[i]);
-		vfree(priv->suspend);
-		priv->suspend = NULL;
-	}
-}
-
-static void
-nvc0_fence_destroy(struct nouveau_drm *drm)
-{
-	struct nvc0_fence_priv *priv = drm->fence;
-	nouveau_bo_unmap(priv->bo);
-	if (priv->bo)
-		nouveau_bo_unpin(priv->bo);
-	nouveau_bo_ref(NULL, &priv->bo);
-	drm->fence = NULL;
-	kfree(priv);
-}
-
 int
 nvc0_fence_create(struct nouveau_drm *drm)
 {
 	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
-	struct nvc0_fence_priv *priv;
+	struct nv84_fence_priv *priv;
 	int ret;
 
 	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	priv->base.dtor = nvc0_fence_destroy;
-	priv->base.suspend = nvc0_fence_suspend;
-	priv->base.resume = nvc0_fence_resume;
-	priv->base.context_new = nvc0_fence_context_new;
-	priv->base.context_del = nvc0_fence_context_del;
+	priv->base.dtor = nv84_fence_destroy;
+	priv->base.suspend = nv84_fence_suspend;
+	priv->base.resume = nv84_fence_resume;
+	priv->base.context_new = nv84_fence_context_new;
+	priv->base.context_del = nv84_fence_context_del;
 	priv->base.emit = nvc0_fence_emit;
 	priv->base.sync = nvc0_fence_sync;
-	priv->base.read = nvc0_fence_read;
+	priv->base.read = nv84_fence_read;
 
 	init_waitqueue_head(&priv->base.waiting);
 	priv->base.uevent = true;
@@ -235,6 +117,6 @@ nvc0_fence_create(struct nouveau_drm *drm)
 	}
 
 	if (ret)
-		nvc0_fence_destroy(drm);
+		nv84_fence_destroy(drm);
 	return ret;
 }