Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_channel.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_channel.c	| 383
1 file changed, 222 insertions(+), 161 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 373950e34814..3960d66d7aba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -38,23 +38,28 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 	int ret;
 
 	if (dev_priv->card_type >= NV_50) {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
-					     dev_priv->vm_end, NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_AGP, &pushbuf);
+		if (dev_priv->card_type < NV_C0) {
+			ret = nouveau_gpuobj_dma_new(chan,
+						     NV_CLASS_DMA_IN_MEMORY, 0,
+						     (1ULL << 40),
+						     NV_MEM_ACCESS_RO,
+						     NV_MEM_TARGET_VM,
+						     &pushbuf);
+		}
 		chan->pushbuf_base = pb->bo.offset;
 	} else
 	if (pb->bo.mem.mem_type == TTM_PL_TT) {
-		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
-						  dev_priv->gart_info.aper_size,
-						  NV_DMA_ACCESS_RO, &pushbuf,
-						  NULL);
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+					     dev_priv->gart_info.aper_size,
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_GART, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else
 	if (dev_priv->card_type != NV_04) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 					     dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_VIDMEM, &pushbuf);
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_VRAM, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else {
 		/* NV04 cmdbuf hack, from original ddx.. not sure of it's
@@ -62,17 +67,16 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 		 * VRAM.
 		 */
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     pci_resource_start(dev->pdev,
-					     1),
+					     pci_resource_start(dev->pdev, 1),
 					     dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_PCI, &pushbuf);
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_PCI, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	}
 
 	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
 	nouveau_gpuobj_ref(NULL, &pushbuf);
-	return 0;
+	return ret;
 }
 
 static struct nouveau_bo *
@@ -100,6 +104,13 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
 		return NULL;
 	}
 
+	ret = nouveau_bo_map(pushbuf);
+	if (ret) {
+		nouveau_bo_unpin(pushbuf);
+		nouveau_bo_ref(NULL, &pushbuf);
+		return NULL;
+	}
+
 	return pushbuf;
 }
 
@@ -107,74 +118,59 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
 int
 nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 		      struct drm_file *file_priv,
-		      uint32_t vram_handle, uint32_t tt_handle)
+		      uint32_t vram_handle, uint32_t gart_handle)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_channel *chan;
-	int channel, user;
+	unsigned long flags;
 	int ret;
 
-	/*
-	 * Alright, here is the full story
-	 * Nvidia cards have multiple hw fifo contexts (praise them for that,
-	 * no complicated crash-prone context switches)
-	 * We allocate a new context for each app and let it write to it
-	 * directly (woo, full userspace command submission !)
-	 * When there are no more contexts, you lost
-	 */
-	for (channel = 0; channel < pfifo->channels; channel++) {
-		if (dev_priv->fifos[channel] == NULL)
+	/* allocate and lock channel structure */
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return -ENOMEM;
+	chan->dev = dev;
+	chan->file_priv = file_priv;
+	chan->vram_handle = vram_handle;
+	chan->gart_handle = gart_handle;
+
+	kref_init(&chan->ref);
+	atomic_set(&chan->users, 1);
+	mutex_init(&chan->mutex);
+	mutex_lock(&chan->mutex);
+
+	/* allocate hw channel id */
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
+		if (!dev_priv->channels.ptr[chan->id]) {
+			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
 			break;
+		}
 	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
-	/* no more fifos. you lost. */
-	if (channel == pfifo->channels)
-		return -EINVAL;
+	if (chan->id == pfifo->channels) {
+		mutex_unlock(&chan->mutex);
+		kfree(chan);
+		return -ENODEV;
+	}
 
-	dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
-					   GFP_KERNEL);
-	if (!dev_priv->fifos[channel])
-		return -ENOMEM;
-	chan = dev_priv->fifos[channel];
+	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
 	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
+	INIT_LIST_HEAD(&chan->nvsw.flip);
 	INIT_LIST_HEAD(&chan->fence.pending);
-	chan->dev = dev;
-	chan->id = channel;
-	chan->file_priv = file_priv;
-	chan->vram_handle = vram_handle;
-	chan->gart_handle = tt_handle;
-
-	NV_INFO(dev, "Allocating FIFO number %d\n", channel);
 
 	/* Allocate DMA push buffer */
 	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
 	if (!chan->pushbuf_bo) {
 		ret = -ENOMEM;
 		NV_ERROR(dev, "pushbuf %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	nouveau_dma_pre_init(chan);
-
-	/* Locate channel's user control regs */
-	if (dev_priv->card_type < NV_40)
-		user = NV03_USER(channel);
-	else
-	if (dev_priv->card_type < NV_50)
-		user = NV40_USER(channel);
-	else
-		user = NV50_USER(channel);
-
-	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
-			     PAGE_SIZE);
-	if (!chan->user) {
-		NV_ERROR(dev, "ioremap of regs failed.\n");
-		nouveau_channel_free(chan);
-		return -ENOMEM;
-	}
 	chan->user_put = 0x40;
 	chan->user_get = 0x44;
 
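The rewritten allocation path above is the heart of the commit: a channel now carries two counts, an atomic `users` count that says whether the channel may still be looked up, and a `ref` kref that keeps the structure alive for anyone still holding a pointer, plus a per-channel mutex that is returned held to the caller. A standalone userspace sketch of the two-counter idea (illustrative names; C11 atomics stand in for the kernel's atomic_t and kref):

/* 'users' gates lookups (a dying object refuses new users), while
 * 'ref' keeps the memory valid for existing pointer holders. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
	atomic_int users;	/* liveness: lookups succeed while > 0 */
	atomic_int ref;		/* memory: freed when this reaches 0 */
};

static struct object *obj_new(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	if (obj) {
		atomic_init(&obj->users, 1);	/* creator's usage count */
		atomic_init(&obj->ref, 1);	/* creator's memory ref */
	}
	return obj;
}

/* like atomic_inc_not_zero(): succeed only if teardown hasn't begun */
static struct object *obj_get(struct object *obj)
{
	int users = atomic_load(&obj->users);

	while (users > 0) {
		if (atomic_compare_exchange_weak(&obj->users, &users,
						 users + 1)) {
			atomic_fetch_add(&obj->ref, 1);
			return obj;
		}
	}
	return NULL;	/* object is on its way out */
}

static void obj_put(struct object *obj)
{
	if (atomic_fetch_sub(&obj->users, 1) == 1)
		printf("last user gone: tear down hw state here\n");
	if (atomic_fetch_sub(&obj->ref, 1) == 1)
		free(obj);	/* last pointer gone: release memory */
}

int main(void)
{
	struct object *obj = obj_new();
	struct object *extra = obj_get(obj);

	obj_put(extra);		/* drops the extra user and ref */
	obj_put(obj);		/* triggers teardown, then the free */
	return 0;
}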
@@ -182,15 +178,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	ret = nouveau_notifier_init_channel(chan);
 	if (ret) {
 		NV_ERROR(dev, "ntfy %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	/* Setup channel's default objects */
-	ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
+	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
 	if (ret) {
 		NV_ERROR(dev, "gpuobj %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
@@ -198,24 +194,17 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
 	if (ret) {
 		NV_ERROR(dev, "pbctxdma %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	/* disable the fifo caches */
 	pfifo->reassign(dev, false);
 
-	/* Create a graphics context for new channel */
-	ret = pgraph->create_context(chan);
-	if (ret) {
-		nouveau_channel_free(chan);
-		return ret;
-	}
-
 	/* Construct inital RAMFC for new channel */
 	ret = pfifo->create_context(chan);
 	if (ret) {
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
@@ -225,83 +214,111 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	if (!ret)
 		ret = nouveau_fence_channel_init(chan);
 	if (ret) {
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	nouveau_debugfs_channel_init(chan);
 
-	NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
+	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
 	*chan_ret = chan;
 	return 0;
 }
 
-/* stops a fifo */
+struct nouveau_channel *
+nouveau_channel_get_unlocked(struct nouveau_channel *ref)
+{
+	struct nouveau_channel *chan = NULL;
+
+	if (likely(ref && atomic_inc_not_zero(&ref->users)))
+		nouveau_channel_ref(ref, &chan);
+
+	return chan;
+}
+
+struct nouveau_channel *
+nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan;
+	unsigned long flags;
+
+	if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
+		return ERR_PTR(-EINVAL);
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+	if (unlikely(!chan))
+		return ERR_PTR(-EINVAL);
+
+	if (unlikely(file_priv && chan->file_priv != file_priv)) {
+		nouveau_channel_put_unlocked(&chan);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_lock(&chan->mutex);
+	return chan;
+}
+
 void
-nouveau_channel_free(struct nouveau_channel *chan)
+nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
 {
+	struct nouveau_channel *chan = *pchan;
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
 	unsigned long flags;
-	int ret;
 
-	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
+	/* decrement the refcount, and we're done if there's still refs */
+	if (likely(!atomic_dec_and_test(&chan->users))) {
+		nouveau_channel_ref(NULL, pchan);
+		return;
+	}
 
+	/* noone wants the channel anymore */
+	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
 	nouveau_debugfs_channel_fini(chan);
 
-	/* Give outstanding push buffers a chance to complete */
-	nouveau_fence_update(chan);
-	if (chan->fence.sequence != chan->fence.sequence_ack) {
-		struct nouveau_fence *fence = NULL;
+	/* give it chance to idle */
+	nouveau_channel_idle(chan);
 
-		ret = nouveau_fence_new(chan, &fence, true);
-		if (ret == 0) {
-			ret = nouveau_fence_wait(fence, NULL, false, false);
-			nouveau_fence_unref((void *)&fence);
-		}
-
-		if (ret)
-			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
-	}
-
-	/* Ensure all outstanding fences are signaled. They should be if the
+	/* ensure all outstanding fences are signaled. they should be if the
 	 * above attempts at idling were OK, but if we failed this'll tell TTM
 	 * we're done with the buffers.
 	 */
 	nouveau_fence_channel_fini(chan);
 
-	/* This will prevent pfifo from switching channels. */
+	/* boot it off the hardware */
 	pfifo->reassign(dev, false);
 
-	/* We want to give pgraph a chance to idle and get rid of all potential
-	 * errors. We need to do this before the lock, otherwise the irq handler
-	 * is unable to process them.
+	/* We want to give pgraph a chance to idle and get rid of all
+	 * potential errors. We need to do this without the context
+	 * switch lock held, otherwise the irq handler is unable to
+	 * process them.
 	 */
 	if (pgraph->channel(dev) == chan)
 		nouveau_wait_for_idle(dev);
 
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
-	pgraph->fifo_access(dev, false);
-	if (pgraph->channel(dev) == chan)
-		pgraph->unload_context(dev);
-	pgraph->destroy_context(chan);
-	pgraph->fifo_access(dev, true);
-
-	if (pfifo->channel_id(dev) == chan->id) {
-		pfifo->disable(dev);
-		pfifo->unload_context(dev);
-		pfifo->enable(dev);
-	}
+	/* destroy the engine specific contexts */
 	pfifo->destroy_context(chan);
+	pgraph->destroy_context(chan);
+	if (pcrypt->destroy_context)
+		pcrypt->destroy_context(chan);
 
 	pfifo->reassign(dev, true);
 
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+	/* aside from its resources, the channel should now be dead,
+	 * remove it from the channel list
+	 */
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
-	/* Release the channel's resources */
+	/* destroy any resources the channel owned */
 	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
 	if (chan->pushbuf_bo) {
 		nouveau_bo_unmap(chan->pushbuf_bo);
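nouveau_channel_get() above reports failure through the returned pointer itself, the kernel's ERR_PTR convention, which is why its callers test IS_ERR() rather than comparing against NULL. A self-contained sketch of how that encoding works (simplified userspace stand-ins, not the kernel's definitions):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

/* negative errno values occupy the top 4095 addresses, a range no
 * valid object pointer can ever fall into */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int the_object = 42;	/* stand-in for a real channel */

static void *object_get(int id)
{
	if (id != 0)
		return ERR_PTR(-EINVAL);	/* error travels in the pointer */
	return &the_object;
}

int main(void)
{
	void *obj = object_get(7);

	if (IS_ERR(obj))
		printf("lookup failed: errno %ld\n", -PTR_ERR(obj));
	return 0;
}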
@@ -310,44 +327,80 @@ nouveau_channel_free(struct nouveau_channel *chan)
 	}
 	nouveau_gpuobj_channel_takedown(chan);
 	nouveau_notifier_takedown_channel(chan);
-	if (chan->user)
-		iounmap(chan->user);
 
-	dev_priv->fifos[chan->id] = NULL;
+	nouveau_channel_ref(NULL, pchan);
+}
+
+void
+nouveau_channel_put(struct nouveau_channel **pchan)
+{
+	mutex_unlock(&(*pchan)->mutex);
+	nouveau_channel_put_unlocked(pchan);
+}
+
+static void
+nouveau_channel_del(struct kref *ref)
+{
+	struct nouveau_channel *chan =
+		container_of(ref, struct nouveau_channel, ref);
+
 	kfree(chan);
 }
 
+void
+nouveau_channel_ref(struct nouveau_channel *chan,
+		    struct nouveau_channel **pchan)
+{
+	if (chan)
+		kref_get(&chan->ref);
+
+	if (*pchan)
+		kref_put(&(*pchan)->ref, nouveau_channel_del);
+
+	*pchan = chan;
+}
+
+void
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_fence *fence = NULL;
+	int ret;
+
+	nouveau_fence_update(chan);
+
+	if (chan->fence.sequence != chan->fence.sequence_ack) {
+		ret = nouveau_fence_new(chan, &fence, true);
+		if (!ret) {
+			ret = nouveau_fence_wait(fence, false, false);
+			nouveau_fence_unref(&fence);
+		}
+
+		if (ret)
+			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+	}
+}
+
 /* cleans up all the fifos from file_priv */
 void
 nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_channel *chan;
 	int i;
 
 	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
 	for (i = 0; i < engine->fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		chan = nouveau_channel_get(dev, file_priv, i);
+		if (IS_ERR(chan))
+			continue;
 
-		if (chan && chan->file_priv == file_priv)
-			nouveau_channel_free(chan);
+		atomic_dec(&chan->users);
+		nouveau_channel_put(&chan);
 	}
 }
 
-int
-nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
-		      int channel)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-
-	if (channel >= engine->fifo.channels)
-		return 0;
-	if (dev_priv->fifos[channel] == NULL)
-		return 0;
-
-	return (dev_priv->fifos[channel]->file_priv == file_priv);
-}
 
 /***********************************
  * ioctls wrapping the functions
@@ -383,36 +436,44 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
 	else
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
 
-	init->subchan[0].handle = NvM2MF;
-	if (dev_priv->card_type < NV_50)
-		init->subchan[0].grclass = 0x0039;
-	else
-		init->subchan[0].grclass = 0x5039;
-	init->subchan[1].handle = NvSw;
-	init->subchan[1].grclass = NV_SW;
-	init->nr_subchan = 2;
+	if (dev_priv->card_type < NV_C0) {
+		init->subchan[0].handle = NvM2MF;
+		if (dev_priv->card_type < NV_50)
+			init->subchan[0].grclass = 0x0039;
+		else
+			init->subchan[0].grclass = 0x5039;
+		init->subchan[1].handle = NvSw;
+		init->subchan[1].grclass = NV_SW;
+		init->nr_subchan = 2;
+	} else {
+		init->subchan[0].handle = 0x9039;
+		init->subchan[0].grclass = 0x9039;
+		init->nr_subchan = 1;
+	}
 
 	/* Named memory object area */
 	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
 				    &init->notifier_handle);
-	if (ret) {
-		nouveau_channel_free(chan);
-		return ret;
-	}
 
-	return 0;
+	if (ret == 0)
+		atomic_inc(&chan->users); /* userspace reference */
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 static int
 nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
-	struct drm_nouveau_channel_free *cfree = data;
+	struct drm_nouveau_channel_free *req = data;
 	struct nouveau_channel *chan;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
-	nouveau_channel_free(chan);
+	atomic_dec(&chan->users);
+	nouveau_channel_put(&chan);
 	return 0;
 }
 
@@ -421,18 +482,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
 ***********************************/
 
 struct drm_ioctl_desc nouveau_ioctls[] = {
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
 };
 
 int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
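Every entry in the table now carries DRM_UNLOCKED: the handlers no longer depend on the global DRM lock for serialization, because channel lookups go through nouveau_channel_get(), which returns the channel with its own mutex held. A userspace sketch of that get-locked/put-unlocked discipline (illustrative only; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct object {
	pthread_mutex_t mutex;
	int value;
};

static struct object table[2] = {
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
};

/* lookup returns the object already locked, like nouveau_channel_get() */
static struct object *object_get(int id)
{
	if (id < 0 || id >= 2)
		return NULL;
	pthread_mutex_lock(&table[id].mutex);
	return &table[id];
}

static void object_put(struct object *obj)
{
	pthread_mutex_unlock(&obj->mutex);
}

static void *worker(void *arg)
{
	struct object *obj = object_get((int)(long)arg);

	if (obj) {
		obj->value++;	/* serialized per object, not globally */
		object_put(obj);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)0L);
	pthread_create(&b, NULL, worker, (void *)1L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("%d %d\n", table[0].value, table[1].value);
	return 0;
}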