author     Ben Skeggs <bskeggs@redhat.com>  2010-10-06 02:16:59 -0400
committer  Ben Skeggs <bskeggs@redhat.com>  2010-12-03 00:05:18 -0500
commit     cff5c1332486ced8ff4180e957e04983cb72a39e (patch)
tree       ec1f6687156277632aef96693c1b8eca0c022b7c /drivers/gpu
parent     6a6b73f254123851f7f73ab5e57344a569d6a0ab (diff)
drm/nouveau: add more fine-grained locking to channel list + structures
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
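
The patch replaces the global dev_priv->fifos[] array with a dev_priv->channels struct (a spinlock plus the channel pointer table), gives each channel a refcount, and routes lookups through nouveau_channel_get()/nouveau_channel_put(), which also take the per-channel mutex. The snippet below is a minimal user-space model of those lifetime rules, not nouveau code: pthreads and C11 atomics stand in for the kernel's spinlock, mutex and atomic_t, and every name in it is invented for the sketch.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

#define MAX_CHANNELS 128

struct channel {
	int id;
	atomic_int refcount;		/* the last put frees the structure */
	pthread_mutex_t mutex;		/* serialises users of one channel */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct channel *table[MAX_CHANNELS];

/* lookup: take a reference under the table lock, then lock the channel */
static struct channel *channel_get(int id)
{
	struct channel *chan;

	pthread_mutex_lock(&table_lock);
	chan = table[id];
	if (!chan || atomic_load(&chan->refcount) == 0) {
		pthread_mutex_unlock(&table_lock);
		return NULL;
	}
	atomic_fetch_add(&chan->refcount, 1);
	pthread_mutex_unlock(&table_lock);

	pthread_mutex_lock(&chan->mutex);	/* safe: we hold a reference */
	return chan;
}

/* release: unlock, drop the reference, tear down on the final put */
static void channel_put(struct channel **pchan)
{
	struct channel *chan = *pchan;

	*pchan = NULL;
	pthread_mutex_unlock(&chan->mutex);
	if (atomic_fetch_sub(&chan->refcount, 1) != 1)
		return;				/* other holders remain */

	pthread_mutex_lock(&table_lock);	/* last ref: unpublish and free */
	table[chan->id] = NULL;
	pthread_mutex_unlock(&table_lock);
	free(chan);
}

The final put unpublishes the entry and frees the structure, which mirrors the shape of the new nouveau_channel_put() in the diff below.
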
Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/nouveau/nouveau_channel.c  | 186
 drivers/gpu/drm/nouveau/nouveau_drv.c      |   7
 drivers/gpu/drm/nouveau/nouveau_drv.h      |  23
 drivers/gpu/drm/nouveau/nouveau_fence.c    |  10
 drivers/gpu/drm/nouveau/nouveau_gem.c      |  28
 drivers/gpu/drm/nouveau/nouveau_irq.c      |  42
 drivers/gpu/drm/nouveau/nouveau_notifier.c |  10
 drivers/gpu/drm/nouveau/nouveau_object.c   |  34
 drivers/gpu/drm/nouveau/nouveau_state.c    |   8
 drivers/gpu/drm/nouveau/nv04_fifo.c        |   4
 drivers/gpu/drm/nouveau/nv04_graph.c       |   4
 drivers/gpu/drm/nouveau/nv10_fifo.c        |   2
 drivers/gpu/drm/nouveau/nv10_graph.c       |   4
 drivers/gpu/drm/nouveau/nv40_fifo.c        |   2
 drivers/gpu/drm/nouveau/nv40_graph.c       |   2
 drivers/gpu/drm/nouveau/nv50_fb.c          |   6
 drivers/gpu/drm/nouveau/nv50_fifo.c        |   9
 drivers/gpu/drm/nouveau/nv50_graph.c       |   2
 drivers/gpu/drm/nouveau/nv50_instmem.c     |  14
19 files changed, 236 insertions(+), 161 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 8636478c477a..47bf2d3d658c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -107,54 +107,54 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
 int
 nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 		      struct drm_file *file_priv,
-		      uint32_t vram_handle, uint32_t tt_handle)
+		      uint32_t vram_handle, uint32_t gart_handle)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_channel *chan;
-	int channel, user;
-	int ret;
-
-	/*
-	 * Alright, here is the full story
-	 * Nvidia cards have multiple hw fifo contexts (praise them for that,
-	 * no complicated crash-prone context switches)
-	 * We allocate a new context for each app and let it write to it
-	 * directly (woo, full userspace command submission !)
-	 * When there are no more contexts, you lost
-	 */
-	for (channel = 0; channel < pfifo->channels; channel++) {
-		if (dev_priv->fifos[channel] == NULL)
-			break;
-	}
-
-	/* no more fifos. you lost. */
-	if (channel == pfifo->channels)
-		return -EINVAL;
+	unsigned long flags;
+	int user, ret;
 
-	dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
-					   GFP_KERNEL);
-	if (!dev_priv->fifos[channel])
+	/* allocate and lock channel structure */
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
 		return -ENOMEM;
-	chan = dev_priv->fifos[channel];
-	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
-	INIT_LIST_HEAD(&chan->fence.pending);
 	chan->dev = dev;
-	chan->id = channel;
 	chan->file_priv = file_priv;
 	chan->vram_handle = vram_handle;
-	chan->gart_handle = tt_handle;
+	chan->gart_handle = gart_handle;
+
+	atomic_set(&chan->refcount, 1);
 	mutex_init(&chan->mutex);
+	mutex_lock(&chan->mutex);
 
-	NV_INFO(dev, "Allocating FIFO number %d\n", channel);
+	/* allocate hw channel id */
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
+		if (!dev_priv->channels.ptr[chan->id]) {
+			dev_priv->channels.ptr[chan->id] = chan;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+	if (chan->id == pfifo->channels) {
+		mutex_unlock(&chan->mutex);
+		kfree(chan);
+		return -ENODEV;
+	}
+
+	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
+	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
+	INIT_LIST_HEAD(&chan->fence.pending);
 
 	/* Allocate DMA push buffer */
 	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
 	if (!chan->pushbuf_bo) {
 		ret = -ENOMEM;
 		NV_ERROR(dev, "pushbuf %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
@@ -162,18 +162,18 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 
 	/* Locate channel's user control regs */
 	if (dev_priv->card_type < NV_40)
-		user = NV03_USER(channel);
+		user = NV03_USER(chan->id);
 	else
 	if (dev_priv->card_type < NV_50)
-		user = NV40_USER(channel);
+		user = NV40_USER(chan->id);
 	else
-		user = NV50_USER(channel);
+		user = NV50_USER(chan->id);
 
 	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
 							PAGE_SIZE);
 	if (!chan->user) {
 		NV_ERROR(dev, "ioremap of regs failed.\n");
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return -ENOMEM;
 	}
 	chan->user_put = 0x40;
@@ -183,15 +183,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	ret = nouveau_notifier_init_channel(chan);
 	if (ret) {
 		NV_ERROR(dev, "ntfy %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	/* Setup channel's default objects */
-	ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
+	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
 	if (ret) {
 		NV_ERROR(dev, "gpuobj %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
@@ -199,7 +199,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
 	if (ret) {
 		NV_ERROR(dev, "pbctxdma %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
@@ -209,14 +209,14 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	/* Create a graphics context for new channel */
 	ret = pgraph->create_context(chan);
 	if (ret) {
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	/* Construct inital RAMFC for new channel */
 	ret = pfifo->create_context(chan);
 	if (ret) {
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
@@ -226,33 +226,70 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	if (!ret)
 		ret = nouveau_fence_channel_init(chan);
 	if (ret) {
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	nouveau_debugfs_channel_init(chan);
 
-	NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
+	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
 	*chan_ret = chan;
 	return 0;
 }
 
-/* stops a fifo */
+struct nouveau_channel *
+nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = ERR_PTR(-ENODEV);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	chan = dev_priv->channels.ptr[id];
+
+	if (unlikely(!chan || atomic_read(&chan->refcount) == 0)) {
+		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (unlikely(file_priv && chan->file_priv != file_priv)) {
+		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+		return ERR_PTR(-EINVAL);
+	}
+
+	atomic_inc(&chan->refcount);
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+	mutex_lock(&chan->mutex);
+	return chan;
+}
+
 void
-nouveau_channel_free(struct nouveau_channel *chan)
+nouveau_channel_put(struct nouveau_channel **pchan)
 {
+	struct nouveau_channel *chan = *pchan;
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	unsigned long flags;
 	int ret;
 
-	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
+	/* unlock the channel */
+	mutex_unlock(&chan->mutex);
+
+	/* decrement the refcount, and we're done if there's still refs */
+	if (likely(!atomic_dec_and_test(&chan->refcount))) {
+		*pchan = NULL;
+		return;
+	}
 
+	/* noone wants the channel anymore */
+	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
 	nouveau_debugfs_channel_fini(chan);
+	*pchan = NULL;
 
-	/* Give outstanding push buffers a chance to complete */
+	/* give it chance to idle */
 	nouveau_fence_update(chan);
 	if (chan->fence.sequence != chan->fence.sequence_ack) {
 		struct nouveau_fence *fence = NULL;
@@ -267,13 +304,13 @@ nouveau_channel_free(struct nouveau_channel *chan)
 			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
 	}
 
-	/* Ensure all outstanding fences are signaled. They should be if the
+	/* ensure all outstanding fences are signaled. they should be if the
 	 * above attempts at idling were OK, but if we failed this'll tell TTM
 	 * we're done with the buffers.
 	 */
 	nouveau_fence_channel_fini(chan);
 
-	/* This will prevent pfifo from switching channels. */
+	/* boot it off the hardware */
 	pfifo->reassign(dev, false);
 
 	/* We want to give pgraph a chance to idle and get rid of all potential
@@ -302,7 +339,14 @@ nouveau_channel_free(struct nouveau_channel *chan)
 
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
-	/* Release the channel's resources */
+	/* aside from its resources, the channel should now be dead,
+	 * remove it from the channel list
+	 */
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	dev_priv->channels.ptr[chan->id] = NULL;
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+	/* destroy any resources the channel owned */
 	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
 	if (chan->pushbuf_bo) {
 		nouveau_bo_unmap(chan->pushbuf_bo);
@@ -314,7 +358,6 @@ nouveau_channel_free(struct nouveau_channel *chan)
 	if (chan->user)
 		iounmap(chan->user);
 
-	dev_priv->fifos[chan->id] = NULL;
 	kfree(chan);
 }
 
@@ -324,31 +367,20 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_channel *chan;
 	int i;
 
 	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
 	for (i = 0; i < engine->fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		chan = nouveau_channel_get(dev, file_priv, i);
+		if (IS_ERR(chan))
+			continue;
 
-		if (chan && chan->file_priv == file_priv)
-			nouveau_channel_free(chan);
+		atomic_dec(&chan->refcount);
+		nouveau_channel_put(&chan);
 	}
 }
 
-int
-nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
-		      int channel)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-
-	if (channel >= engine->fifo.channels)
-		return 0;
-	if (dev_priv->fifos[channel] == NULL)
-		return 0;
-
-	return (dev_priv->fifos[channel]->file_priv == file_priv);
-}
 
 /***********************************
  * ioctls wrapping the functions
@@ -396,24 +428,26 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
 	/* Named memory object area */
 	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
 				    &init->notifier_handle);
-	if (ret) {
-		nouveau_channel_free(chan);
-		return ret;
-	}
 
-	return 0;
+	if (ret == 0)
+		atomic_inc(&chan->refcount); /* userspace reference */
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 static int
 nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
-	struct drm_nouveau_channel_free *cfree = data;
+	struct drm_nouveau_channel_free *req = data;
 	struct nouveau_channel *chan;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
-	nouveau_channel_free(chan);
+	atomic_dec(&chan->refcount);
+	nouveau_channel_put(&chan);
 	return 0;
 }
 
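Note the error-path discipline the new API imposes: where nouveau_channel_free() could simply be called on failure, with get/put every successful lookup must be balanced by exactly one put on every exit path. A small sketch of that shape, building on the user-space model above (check_args() is a hypothetical stand-in for per-ioctl validation):

#include <errno.h>

/* hypothetical per-ioctl validation, not a real nouveau function */
extern int check_args(struct channel *chan);

static int example_ioctl(int chid)
{
	struct channel *chan = channel_get(chid);
	int ret;

	if (!chan)
		return -EINVAL;		/* no reference taken: nothing to put */

	ret = check_args(chan);		/* on error we still fall through to put */
	channel_put(&chan);		/* unlocks, drops the ref, NULLs chan */
	return ret;
}
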
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 90875494a65a..f139aa2cbe5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -195,9 +195,8 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
 	for (i = 0; i < pfifo->channels; i++) {
 		struct nouveau_fence *fence = NULL;
 
-		chan = dev_priv->fifos[i];
-		if (!chan || (dev_priv->card_type >= NV_50 &&
-			      chan == dev_priv->fifos[0]))
+		chan = dev_priv->channels.ptr[i];
+		if (!chan || !chan->pushbuf_bo)
 			continue;
 
 		ret = nouveau_fence_new(chan, &fence, true);
@@ -313,7 +312,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
 		int j;
 
 		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			chan = dev_priv->fifos[i];
+			chan = dev_priv->channels.ptr[i];
 			if (!chan || !chan->pushbuf_bo)
 				continue;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 04bc56cf4f1a..c3f102125083 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -166,6 +166,7 @@ struct nouveau_channel {
 	struct drm_device *dev;
 	int id;
 
+	atomic_t refcount;
 	struct mutex mutex;
 
 	/* owner of this fifo */
@@ -607,8 +608,10 @@ struct drm_nouveau_private {
 		struct nouveau_bo *bo;
 	} fence;
 
-	int fifo_alloc_count;
-	struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
+	struct {
+		spinlock_t lock;
+		struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
+	} channels;
 
 	struct nouveau_engine engine;
 	struct nouveau_channel *channel;
@@ -721,16 +724,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
 	return 0;
 }
 
-#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do {    \
-	struct drm_nouveau_private *nv = dev->dev_private;       \
-	if (!nouveau_channel_owner(dev, (cl), (id))) {            \
-		NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
-			 DRM_CURRENTPID, (id));                  \
-		return -EPERM;                                   \
-	}                                                        \
-	(ch) = nv->fifos[(id)];                                  \
-} while (0)
-
 /* nouveau_drv.c */
 extern int nouveau_agpmode;
 extern int nouveau_duallink;
@@ -805,13 +798,13 @@ extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
 extern struct drm_ioctl_desc nouveau_ioctls[];
 extern int nouveau_max_ioctl;
 extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
-extern int nouveau_channel_owner(struct drm_device *, struct drm_file *,
-				 int channel);
 extern int nouveau_channel_alloc(struct drm_device *dev,
 				 struct nouveau_channel **chan,
 				 struct drm_file *file_priv,
 				 uint32_t fb_ctxdma, uint32_t tt_ctxdma);
-extern void nouveau_channel_free(struct nouveau_channel *);
+extern struct nouveau_channel *
+nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
+extern void nouveau_channel_put(struct nouveau_channel **);
 
 /* nouveau_object.c */
 extern int nouveau_gpuobj_early_init(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ab1bbfbf266e..42694b122eef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -393,8 +393,18 @@ nouveau_fence_sync(struct nouveau_fence *fence,
 		return nouveau_fence_wait(fence, NULL, false, false);
 	}
 
+	/* try to take wchan's mutex, if we can't take it right away
+	 * we have to fallback to software sync to prevent locking
+	 * order issues
+	 */
+	if (!mutex_trylock(&wchan->mutex)) {
+		free_semaphore(&sema->ref);
+		return nouveau_fence_wait(fence, NULL, false, false);
+	}
+
 	/* Make wchan wait until it gets signalled */
 	ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
+	mutex_unlock(&wchan->mutex);
 	if (ret)
 		goto out;
 
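The trylock above avoids an ABBA deadlock: the caller can already hold the mutex of one channel while it needs the other channel's mutex, and another thread may be syncing in the opposite direction with the locks held in reverse order. A sketch of the fallback pattern under that assumption (illustrative only; in the patch the fallback is a software fence wait):

#include <pthread.h>

/* Two channels can be synced in either direction; each path holds its
 * own channel's mutex while needing the other's.  Trylock the second
 * mutex and fall back rather than block, so two opposite syncs can
 * never deadlock against each other. */
static int sync_other_channel(pthread_mutex_t *other_mutex)
{
	if (pthread_mutex_trylock(other_mutex) != 0)
		return -1;	/* busy: caller falls back to a software wait */

	/* ... emit the semaphore-acquire on the other channel ... */
	pthread_mutex_unlock(other_mutex);
	return 0;
}
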
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 1f2301d26c0a..454d5ceb28f1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -146,11 +146,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
 		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
 
-	if (req->channel_hint) {
-		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
-						     file_priv, chan);
-	}
-
 	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
 		flags |= TTM_PL_FLAG_VRAM;
 	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
@@ -161,10 +156,18 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
 		return -EINVAL;
 
+	if (req->channel_hint) {
+		chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
+		if (IS_ERR(chan))
+			return PTR_ERR(chan);
+	}
+
 	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
 			      req->info.tile_mode, req->info.tile_flags, false,
 			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
 			      &nvbo);
+	if (chan)
+		nouveau_channel_put(&chan);
 	if (ret)
 		return ret;
 
@@ -341,9 +344,7 @@ retry:
 			return -EINVAL;
 		}
 
-		mutex_unlock(&drm_global_mutex);
 		ret = ttm_bo_wait_cpu(&nvbo->bo, false);
-		mutex_lock(&drm_global_mutex);
 		if (ret) {
 			NV_ERROR(dev, "fail wait_cpu\n");
 			return ret;
@@ -585,7 +586,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	struct nouveau_fence *fence = NULL;
 	int i, j, ret = 0, do_reloc = 0;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
 	req->vram_available = dev_priv->fb_aper_free;
 	req->gart_available = dev_priv->gart_info.aper_free;
@@ -595,28 +598,34 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
 		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
 			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
 		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
 			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
 		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
 			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
-	if (IS_ERR(push))
+	if (IS_ERR(push)) {
+		nouveau_channel_put(&chan);
 		return PTR_ERR(push);
+	}
 
 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
 	if (IS_ERR(bo)) {
 		kfree(push);
+		nouveau_channel_put(&chan);
 		return PTR_ERR(bo);
 	}
 
@@ -750,6 +759,7 @@ out_next:
 		req->suffix1 = 0x00000000;
 	}
 
+	nouveau_channel_put(&chan);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index a4fa9e14d66d..c5e37bc17192 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -113,15 +113,17 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = NULL;
 	struct nouveau_gpuobj *obj;
+	unsigned long flags;
 	const int subc = (addr >> 13) & 0x7;
 	const int mthd = addr & 0x1ffc;
 	bool handled = false;
 	u32 engine;
 
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
 	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
-		chan = dev_priv->fifos[chid];
+		chan = dev_priv->channels.ptr[chid];
 	if (unlikely(!chan))
-		return false;
+		goto out;
 
 	switch (mthd) {
 	case 0x0000: /* bind object to subchannel */
@@ -146,6 +148,8 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
 		break;
 	}
 
+out:
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 	return handled;
 }
 
@@ -398,6 +402,8 @@ static int
 nouveau_graph_chid_from_grctx(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan;
+	unsigned long flags;
 	uint32_t inst;
 	int i;
 
@@ -407,27 +413,29 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev)
 	if (dev_priv->card_type < NV_50) {
 		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
 
+		spin_lock_irqsave(&dev_priv->channels.lock, flags);
 		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			struct nouveau_channel *chan = dev_priv->fifos[i];
-
+			chan = dev_priv->channels.ptr[i];
 			if (!chan || !chan->ramin_grctx)
 				continue;
 
 			if (inst == chan->ramin_grctx->pinst)
 				break;
 		}
+		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 	} else {
 		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
 
+		spin_lock_irqsave(&dev_priv->channels.lock, flags);
 		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			struct nouveau_channel *chan = dev_priv->fifos[i];
-
+			chan = dev_priv->channels.ptr[i];
 			if (!chan || !chan->ramin)
 				continue;
 
 			if (inst == chan->ramin->vinst)
 				break;
 		}
+		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 	}
 
 
@@ -449,7 +457,8 @@ nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
 	else
 		channel = nouveau_graph_chid_from_grctx(dev);
 
-	if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
+	if (channel >= engine->fifo.channels ||
+	    !dev_priv->channels.ptr[channel]) {
 		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
 		return -EINVAL;
 	}
@@ -532,14 +541,19 @@ nouveau_pgraph_intr_swmthd(struct drm_device *dev,
 			   struct nouveau_pgraph_trap *trap)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	if (trap->channel > 0 &&
+	    trap->channel < dev_priv->engine.fifo.channels &&
+	    dev_priv->channels.ptr[trap->channel]) {
+		ret = nouveau_call_method(dev_priv->channels.ptr[trap->channel],
+					  trap->class, trap->mthd, trap->data);
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
-	if (trap->channel < 0 ||
-	    trap->channel >= dev_priv->engine.fifo.channels ||
-	    !dev_priv->fifos[trap->channel])
-		return -ENODEV;
-
-	return nouveau_call_method(dev_priv->fifos[trap->channel],
-				   trap->class, trap->mthd, trap->data);
+	return ret;
 }
 
 static inline void
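In interrupt context the driver cannot sleep on a channel mutex, so these paths pin the channel differently: they hold channels.lock (taken with spin_lock_irqsave() in the patch) across the whole short operation instead of taking a reference. A sketch of that pattern, building on the user-space model above (handle_method() is a hypothetical stand-in):

/* hypothetical handler, not a real nouveau function */
extern int handle_method(struct channel *chan, int mthd, int data);

static int irq_call_method(int chid, int mthd, int data)
{
	struct channel *chan;
	int ret = -1;

	pthread_mutex_lock(&table_lock);	/* kernel: spin_lock_irqsave() */
	chan = (chid >= 0 && chid < MAX_CHANNELS) ? table[chid] : NULL;
	if (chan)
		ret = handle_method(chan, mthd, data);
	pthread_mutex_unlock(&table_lock);	/* kernel: spin_unlock_irqrestore() */
	return ret;
}
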
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 2cc59f8c658b..2c5a1f66f7f0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -185,11 +185,11 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
 	struct nouveau_channel *chan;
 	int ret;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, na->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
 	ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
-	if (ret)
-		return ret;
-
-	return 0;
+	nouveau_channel_put(&chan);
+	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index dd572adca02a..068441c4b563 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -876,8 +876,6 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
 	struct nouveau_channel *chan;
 	int ret;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
-
 	if (init->handle == ~0)
 		return -EINVAL;
 
@@ -893,8 +891,14 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
 		return -EPERM;
 	}
 
-	if (nouveau_ramht_find(chan, init->handle))
-		return -EEXIST;
+	chan = nouveau_channel_get(dev, file_priv, init->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
+
+	if (nouveau_ramht_find(chan, init->handle)) {
+		ret = -EEXIST;
+		goto out;
+	}
 
 	if (!grc->software)
 		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
@@ -903,7 +907,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
 	if (ret) {
 		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
 			 ret, init->channel, init->handle);
-		return ret;
+		goto out;
 	}
 
 	ret = nouveau_ramht_insert(chan, init->handle, gr);
@@ -911,10 +915,11 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
 	if (ret) {
 		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
 			 ret, init->channel, init->handle);
-		return ret;
 	}
 
-	return 0;
+out:
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
@@ -923,15 +928,20 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
 	struct drm_nouveau_gpuobj_free *objfree = data;
 	struct nouveau_gpuobj *gpuobj;
 	struct nouveau_channel *chan;
+	int ret = -ENOENT;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, objfree->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
 	gpuobj = nouveau_ramht_find(chan, objfree->handle);
-	if (!gpuobj)
-		return -ENOENT;
+	if (gpuobj) {
+		nouveau_ramht_remove(chan, objfree->handle);
+		ret = 0;
+	}
 
-	nouveau_ramht_remove(chan, objfree->handle);
-	return 0;
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 u32
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 049f755567e5..513c1063fb5e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -516,11 +516,11 @@ nouveau_card_init_channel(struct drm_device *dev)
 	if (ret)
 		goto out_err;
 
+	mutex_unlock(&dev_priv->channel->mutex);
 	return 0;
 
 out_err:
-	nouveau_channel_free(dev_priv->channel);
-	dev_priv->channel = NULL;
+	nouveau_channel_put(&dev_priv->channel);
 	return ret;
 }
 
@@ -567,6 +567,7 @@ nouveau_card_init(struct drm_device *dev)
 	if (ret)
 		goto out;
 	engine = &dev_priv->engine;
+	spin_lock_init(&dev_priv->channels.lock);
 	spin_lock_init(&dev_priv->context_switch_lock);
 
 	/* Make the CRTCs and I2C buses accessible */
@@ -713,8 +714,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
 
 	if (!engine->graph.accel_blocked) {
 		nouveau_fence_fini(dev);
-		nouveau_channel_free(dev_priv->channel);
-		dev_priv->channel = NULL;
+		nouveau_channel_put(&dev_priv->channel);
 	}
 
 	if (!nouveau_noaccel) {
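nouveau_channel_alloc() now returns with the channel mutex held and one reference owned by the caller, which is why nouveau_card_init_channel() gains an explicit mutex_unlock() on its success path. Sketched under the same assumptions as the model above (channel_alloc() is hypothetical and mirrors that contract):

/* hypothetical allocator mirroring nouveau_channel_alloc(): returns the
 * new channel locked, with one reference owned by the caller */
extern struct channel *channel_alloc(void);

static struct channel *setup_channel(void)
{
	struct channel *chan = channel_alloc();

	if (!chan)
		return NULL;

	/* finish hardware setup while no other user can take the channel */
	pthread_mutex_unlock(&chan->mutex);	/* publish it; keep our ref */
	return chan;
}
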
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 708293b7ddcd..25c439dcdfd9 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -208,7 +208,7 @@ nv04_fifo_unload_context(struct drm_device *dev)
 	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
 		return 0;
 
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (!chan) {
 		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
 		return -EINVAL;
@@ -289,7 +289,7 @@ nv04_fifo_init(struct drm_device *dev)
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index c8973421b635..98b9525c1ebd 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -357,7 +357,7 @@ nv04_graph_channel(struct drm_device *dev)
 	if (chid >= dev_priv->engine.fifo.channels)
 		return NULL;
 
-	return dev_priv->fifos[chid];
+	return dev_priv->channels.ptr[chid];
 }
 
 void
@@ -376,7 +376,7 @@ nv04_graph_context_switch(struct drm_device *dev)
 
 	/* Load context for next channel */
 	chid = dev_priv->engine.fifo.channel_id(dev);
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (chan)
 		nv04_graph_load_context(chan);
 
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index f1b03ad58fd5..39328fcce709 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -241,7 +241,7 @@ nv10_fifo_init(struct drm_device *dev)
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 8e68c9731159..cd931b57cf05 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -802,7 +802,7 @@ nv10_graph_context_switch(struct drm_device *dev)
 
 	/* Load context for next channel */
 	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (chan && chan->pgraph_ctx)
 		nv10_graph_load_context(chan);
 
@@ -833,7 +833,7 @@ nv10_graph_channel(struct drm_device *dev)
 	if (chid >= dev_priv->engine.fifo.channels)
 		return NULL;
 
-	return dev_priv->fifos[chid];
+	return dev_priv->channels.ptr[chid];
 }
 
 int nv10_graph_create_context(struct nouveau_channel *chan)
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index d337b8b28cdd..3c7be3dc8b88 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -301,7 +301,7 @@ nv40_fifo_init(struct drm_device *dev)
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 7ee1b91569b8..e0b41a26447f 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -42,7 +42,7 @@ nv40_graph_channel(struct drm_device *dev)
 	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
 
 		if (chan && chan->ramin_grctx &&
 		    chan->ramin_grctx->pinst == inst)
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index cd1988b15d2c..d745c952986a 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -42,6 +42,7 @@ void
 nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 	u32 trap[6], idx, chinst;
 	int i, ch;
 
@@ -60,8 +61,10 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
 		return;
 
 	chinst = (trap[2] << 16) | trap[1];
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
 	for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
-		struct nouveau_channel *chan = dev_priv->fifos[ch];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
 
 		if (!chan || !chan->ramin)
 			continue;
@@ -69,6 +72,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
 		if (chinst == chan->ramin->vinst >> 12)
 			break;
 	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
 	NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
 		"channel %d (0x%08x)\n",
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 1da65bd60c10..815960fe4f43 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -44,7 +44,8 @@ nv50_fifo_playlist_update(struct drm_device *dev)
 
 	/* We never schedule channel 0 or 127 */
 	for (i = 1, nr = 0; i < 127; i++) {
-		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) {
+		if (dev_priv->channels.ptr[i] &&
+		    dev_priv->channels.ptr[i]->ramfc) {
 			nv_wo32(cur, (nr * 4), i);
 			nr++;
 		}
@@ -60,7 +61,7 @@ static void
 nv50_fifo_channel_enable(struct drm_device *dev, int channel)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->fifos[channel];
+	struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
 	uint32_t inst;
 
 	NV_DEBUG(dev, "ch%d\n", channel);
@@ -118,7 +119,7 @@ nv50_fifo_init_context_table(struct drm_device *dev)
 	NV_DEBUG(dev, "\n");
 
 	for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
-		if (dev_priv->fifos[i])
+		if (dev_priv->channels.ptr[i])
 			nv50_fifo_channel_enable(dev, i);
 		else
 			nv50_fifo_channel_disable(dev, i);
@@ -392,7 +393,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
 	if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
 		return 0;
 
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (!chan) {
 		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8b669d0af610..24a3f8487579 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -190,7 +190,7 @@ nv50_graph_channel(struct drm_device *dev)
 	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
 
 		if (chan && chan->ramin && chan->ramin->vinst == inst)
 			return chan;
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index b773229b7647..0651e7629235 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -131,10 +131,10 @@ nv50_instmem_init(struct drm_device *dev)
 	}
 
 	/* we need a channel to plug into the hw to control the BARs */
-	ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]);
+	ret = nv50_channel_new(dev, 128*1024, &dev_priv->channels.ptr[0]);
 	if (ret)
 		return ret;
-	chan = dev_priv->fifos[127] = dev_priv->fifos[0];
+	chan = dev_priv->channels.ptr[127] = dev_priv->channels.ptr[0];
 
 	/* allocate page table for PRAMIN BAR */
 	ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8,
@@ -240,7 +240,7 @@ nv50_instmem_takedown(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
+	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
 	int i;
 
 	NV_DEBUG(dev, "\n");
@@ -264,8 +264,8 @@ nv50_instmem_takedown(struct drm_device *dev)
 			nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
 		dev_priv->vm_vram_pt_nr = 0;
 
-		nv50_channel_del(&dev_priv->fifos[0]);
-		dev_priv->fifos[127] = NULL;
+		nv50_channel_del(&dev_priv->channels.ptr[0]);
+		dev_priv->channels.ptr[127] = NULL;
 	}
 
 	dev_priv->engine.instmem.priv = NULL;
@@ -276,7 +276,7 @@ int
 nv50_instmem_suspend(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
+	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
 	struct nouveau_gpuobj *ramin = chan->ramin;
 	int i;
 
@@ -294,7 +294,7 @@ nv50_instmem_resume(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
+	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
 	struct nouveau_gpuobj *ramin = chan->ramin;
 	int i;
 